hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
033d682c669fb557f338c99f7669afd189b0d3ca | 8,274 | py | Python | web-testing/web_automate.py | Tak-Man/ML-rapid-text-labeling | c5fa5439bfcd3ba652be1dfdd101831fbd02d9d4 | [
"MIT"
] | null | null | null | web-testing/web_automate.py | Tak-Man/ML-rapid-text-labeling | c5fa5439bfcd3ba652be1dfdd101831fbd02d9d4 | [
"MIT"
] | null | null | null | web-testing/web_automate.py | Tak-Man/ML-rapid-text-labeling | c5fa5439bfcd3ba652be1dfdd101831fbd02d9d4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 08:58:37 2021
@author: michp-ai
"""
# This script is web automation for the Capstone project on ML rapid text labeling
# Before running this script in a different console start the web server by running main.py for the web app
# This is a simple demo script to illustrate how selenium interacts with the web app
#%%
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
from time import sleep
import datetime
#%%
# set a timer so total runtime can be reported at the end of the script
starttime = datetime.datetime.now()
#%%
# PARAMETERS
# Build the chromedriver path portably. The previous code concatenated
# "\chromedriver.exe", which relied on the invalid escape sequence "\c"
# (a DeprecationWarning in modern Python) and hard-coded the Windows
# path separator; os.path.join produces the same path on Windows and
# the correct one elsewhere.
mpath = os.path.join(os.getcwd(), "chromedriver.exe")
wait_time = 0  # 0.75 gives a watchable demo pace; 0 runs flat out
scroll_wait_seconds = 0  # 1.75 gives a watchable demo pace; 0 runs flat out
#%%
driver = webdriver.Chrome(mpath)
#%%
# load the webpage (the Flask app started via main.py must already be running)
driver.get("http://127.0.0.1:5000/")
driver.maximize_window()
#sleep(2) #for demo
#%%
# navigate landing page: pick the first dataset link, choose a config, load the data
driver.find_element_by_xpath('//*[@id="bodyLeftTable1"]/tbody/tr[1]/td[1]/a').click()
driver.find_element_by_id('config1').click()
driver.find_element_by_id('loadDataSetButton').click()
# identify radio buttons
def get_radio_buttons():
    """Return the four category radio-button elements, in order category1..category4."""
    element_ids = ('category1', 'category2', 'category3', 'category4')
    return [driver.find_element_by_id(element_id) for element_id in element_ids]
def select_label_one_text(xpath, radio_button_id, wait_time=0.75):
    """Open the text at *xpath*, tick the category radio button at index
    *radio_button_id*, then press the single-text label button.

    ``wait_time`` seconds are slept between UI interactions so the page
    can react.
    """
    # step 1: open the chosen text from the list of all texts
    driver.find_element_by_xpath(xpath).click()
    # step 2: tick the matching category radio button
    category_buttons = get_radio_buttons()
    sleep(wait_time)
    category_buttons[radio_button_id].click()
    # step 3: apply the label to this single example
    single_label_button = driver.find_element_by_id('labelButtonSingle')
    sleep(wait_time)
    single_label_button.click()
def click_difficult_texts(wait_time=0.75):
    """Press the button that (re)generates the 'difficult texts' list."""
    sleep(wait_time)
    driver.find_element_by_id('generateDifficultTextsButton').click()
def scroll_label_ten(radio_button_id, scroll_wait_seconds=1.75, wait_time=0.75):
    """Scroll through the suggested group of texts, tick the category radio
    button at index *radio_button_id*, then apply the group label.

    :param radio_button_id: index into the category radio buttons (0-3)
    :param scroll_wait_seconds: pause after each scroll so the page can render
    :param wait_time: pause between the radio click and the label click

    Fix: ``wait_time`` is now an explicit parameter with the same default as
    the sibling helpers. The original body read a module-level ``wait_time``
    global, which silently coupled the function to script state and raised
    NameError if the function was imported on its own.
    """
    # scroll down the suggested-results list so the rows are brought into view
    for scr in range(2, 10, 2):
        scr_xpath = '//*[@id="group1Table"]/tbody/tr[' + str(scr) + ']/td[1]/a'
        print(scr_xpath)
        link_scroll = driver.find_element_by_xpath(scr_xpath)
        driver.execute_script("return arguments[0].scrollIntoView(true);", link_scroll)
        sleep(scroll_wait_seconds)
        # re-tick the category radio button on each pass (idempotent click),
        # matching the original behaviour
        radio_buttons = get_radio_buttons()
        radio_buttons[radio_button_id].click()
        sleep(wait_time)
    # apply a group label after checking all 10 suggested texts are correct
    button_label_ten = driver.find_element_by_id('group1Button')
    sleep(wait_time)
    button_label_ten.click()
def search_phrase(phrase):
    """Type *phrase* into the all-texts search box and run the search.

    Bug fix: the original body re-assigned ``phrase = "richter"`` on entry,
    so every call ignored the caller's argument and always searched for
    "richter". The caller's phrase is now actually used.
    """
    element = driver.find_element_by_id("searchAllTexts")
    element.send_keys(phrase)
    driver.find_element_by_id("searchAllTextsButton").click()
def search_label(phrases, reps, label_type):
    """Run *reps* rounds over *phrases* (a mapping of search phrase ->
    category index); search each phrase and, when *label_type* is
    'single', label one matching row with its category."""
    for round_index in range(reps):
        for phrase, category_index in phrases.items():
            search_phrase(phrase)
            if label_type == 'single':
                # label row (round_index + 1) of the search results
                row_xpath = ('//*[@id="allTextsTable"]/tbody/tr['
                             + str(round_index + 1) + ']/td[1]')
                select_label_one_text(row_xpath, category_index, wait_time=wait_time)
#%%
click_difficult_texts(wait_time=wait_time) # this should generate an error message under the labels
#%%
# we select a text (Hurricane Dorian) from the all-texts table
driver.find_element_by_xpath('//*[@id="allTextsTable"]/tbody/tr[2]/td[1]/a').click()
#%%
# we select the Hurricane radio button (category index 3)
radio_buttons = get_radio_buttons()
sleep(wait_time)
radio_buttons[3].click()
#%%
# we label our first example
button_label_single = driver.find_element_by_id('labelButtonSingle')
sleep(wait_time)
button_label_single.click()
#%%
# we scroll down the results list so later rows are rendered/visible
for scr in range(7,27,5):
    scr_xpath = '//*[@id="allTextsTable"]/tbody/tr[' + str(scr) + ']/td[1]/a'
    link_scroll = driver.find_element_by_xpath(scr_xpath)
    driver.execute_script("return arguments[0].scrollIntoView(true);", link_scroll)
    sleep(scroll_wait_seconds)
#%%
# we select a text about floods
driver.find_element_by_xpath('//*[@id="allTextsTable"]/tbody/tr[26]/td[1]/a').click()
#%%
# we select the flood radio button (category index 2)
radio_buttons = get_radio_buttons()
radio_buttons[2].click()
sleep(wait_time)
#%%
# we label our next example
button_label_single = driver.find_element_by_id('labelButtonSingle')
button_label_single.click()
sleep(wait_time)
#%%
# select another example - this time earthquake
driver.find_element_by_xpath('//*[@id="allTextsTable"]/tbody/tr[1]/td[1]/a').click()
#%%
# we select the earthquake radio button (category index 0; the original
# comment here said "flood", which does not match the index used)
radio_buttons = get_radio_buttons()
radio_buttons[0].click()
sleep(wait_time)
#%%
# we label our next example
button_label_single = driver.find_element_by_id('labelButtonSingle')
button_label_single.click()
sleep(wait_time)
#%%
# use the wrapper function to combine 3 steps needed to select 1. a text, 2. category then 3. apply label
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[1]/td[1]/a', 1, wait_time=wait_time)
# Label some more examples
#%%
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[4]/td[1]/a', 2, wait_time=wait_time)
#%%
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[1]/td[1]/a', 3, wait_time=wait_time)
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[5]/td[1]/a', 1, wait_time=wait_time)
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[2]/td[1]/a', 2, wait_time=wait_time)
#%%
select_label_one_text('//*[@id="allTextsTable"]/tbody/tr[2]/td[1]/a', 3, wait_time=wait_time)
#%%
# select a text so the app populates the group-of-ten suggestion table
driver.find_element_by_xpath('//*[@id="allTextsTable"]/tbody/tr[2]/td[1]/a').click()
#%%
# we scroll down the suggested-group results list
for scr in range(2,10,2):
    scr_xpath = '//*[@id="group1Table"]/tbody/tr[' + str(scr) + ']/td[1]/a'
    link_scroll = driver.find_element_by_xpath(scr_xpath)
    driver.execute_script("return arguments[0].scrollIntoView(true);", link_scroll)
    sleep(scroll_wait_seconds)
#%%
# we select the flood radio button (category index 2)
radio_buttons = get_radio_buttons()
radio_buttons[2].click()
sleep(wait_time)
#%%
# we apply a group label after checking all 10 suggested are correct
button_label_ten = driver.find_element_by_id('group1Button')
sleep(wait_time)
button_label_ten.click()
#%%
# generate difficult texts (now that some labels exist this should succeed)
click_difficult_texts(wait_time=wait_time)
#%%
# select a text from the difficult-texts list
driver.find_element_by_xpath('//*[@id="difficultTextsTable"]/tbody/tr[8]/td[1]/a').click()
#%%
# Check 10 then group label 10 with category index 0
scroll_label_ten(0, scroll_wait_seconds=scroll_wait_seconds)
#%%
# timed section: repeat search-then-label for a fixed set of phrases;
# each phrase maps to a category radio-button index
sectionstarttime = datetime.datetime.now()
phrases = {
        'richter': 0,
        'smoke': 1,
        'flooding': 2,
        'mph': 3,
    }
reps = 2
label_type = 'single'
search_label(phrases, reps, label_type)
sectionendtime = datetime.datetime.now()
elapsedsectiontime = sectionendtime - sectionstarttime
print("section time", elapsedsectiontime)
#%%
# alternative phrase set, kept for manual experimentation
#phrases = {
#        'magnitude': 0,
#        'heat': 1,
#        'floods': 2,
#        'cyclone': 3,
#    }
#search_label(phrases, reps, label_type)
#%%
# timed section: read each difficult text's contents and label it by the
# first keyword found (keyword -> category index)
sectionstarttime = datetime.datetime.now()
phrases = {
        'earthquake': 0,
        'wildfire': 1,
        'flooding': 2,
        'hurricane': 3,
        'richter': 0,
        'smoke': 1,
        'floods': 2,
        'mph': 3,
        'cyclone': 3,
        'heat': 1,
        'quake': 0,
    }
for rrow in range(1,51):
    xpath_base = '//*[@id="difficultTextsTable"]/tbody/tr[' + str(rrow) + ']/td['
    tweet_text = driver.find_element_by_xpath(xpath_base + '2]').text
    for k, v in phrases.items():
        print(rrow, k)
        # case-insensitive substring match; first hit wins
        if k in str.lower(tweet_text):
            select_label_one_text(xpath_base + '1]/a', v, wait_time=wait_time)
            break
# label based on text contents
sectionendtime = datetime.datetime.now()
elapsedsectiontime = sectionendtime - sectionstarttime
#%%
# report total script runtime
endtime = datetime.datetime.now()
elapsedtime = endtime - starttime
print("Elapsed time", elapsedtime) | 29.133803 | 123 | 0.708847 |
5e7589ead1adc5d0500dc2238af267855fccc73e | 9,666 | py | Python | Bitcoin_bot.py | Tunjii10/RL_Bitcoin_Bot | 6b048d8ced1f7ff1c9b5c9c5535aedf67ab1c4b5 | [
"MIT"
] | 2 | 2021-06-28T17:53:56.000Z | 2021-11-28T10:14:02.000Z | Bitcoin_bot.py | Tunjii10/RL_Bitcoin_Bot | 6b048d8ced1f7ff1c9b5c9c5535aedf67ab1c4b5 | [
"MIT"
] | null | null | null | Bitcoin_bot.py | Tunjii10/RL_Bitcoin_Bot | 6b048d8ced1f7ff1c9b5c9c5535aedf67ab1c4b5 | [
"MIT"
] | null | null | null | #import libraries
import os
import numpy as np
import pandas as pd
import random
import gym
from gym import spaces
from collections import deque
from datetime import datetime
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3 import A2C, PPO
from stable_baselines3.common.env_checker import check_env
import utility
#custom bitcoin env class
class BitcoinEnv(gym.Env):
    """Custom OpenAI Gym environment for training an RL agent to trade Bitcoin.

    Each observation is a rolling window of normalized market features with
    the agent's own account history appended; each action is a MultiDiscrete
    pair (trade type: hold/buy/sell, trade size in 10% increments).
    """
    def __init__(self, dataset_norm, dataset, initial_balance , lookback_window_size , env_steps_size):
        """Store datasets and trading parameters and build the gym spaces.

        :param dataset_norm: normalized feature DataFrame fed to the agent
        :param dataset: raw DataFrame used for prices and visualization
        :param initial_balance: starting cash balance
        :param lookback_window_size: number of past steps per observation
        :param env_steps_size: episode length for training; 0 means "walk
            the whole dataset" (testing mode, see reset())
        """
        #initialize
        super(BitcoinEnv, self).__init__()
        self.df = dataset_norm #normalized dataset (agent input)
        self.df_original = dataset #non-normalized data for prices/visualization
        self.df_total_steps = len(self.df)-1
        self.initial_balance = initial_balance
        self.lookback_window_size = lookback_window_size
        self.env_steps_size = env_steps_size
        self.commission = 0.00001 # commission fee rate applied to each buy/sell
        # feature columns exclude the last DataFrame column (the Date column)
        self.columns = list(self.df.columns[:-1])
        # +5 account features per row: balance, net worth, bought, sold, held
        observation_length = len(self.columns)+ 5
        #define action and observation space
        # action[0]: 0=hold, 1=buy, 2=sell; action[1]: size, 0-10 tenths
        self.action_space = spaces.MultiDiscrete([3, 11])
        self.observation_space = spaces.Box(low =-1 , high =1,shape = (self.lookback_window_size, observation_length), dtype = np.float32)
        # Orders history contains btc transactions history for the last lookback_window_size steps
        self.orders_history = deque(maxlen=self.lookback_window_size)
        # Market history contains the OHCL values for the last lookback_window_size prices
        self.market_history = deque(maxlen=self.lookback_window_size)
    #reset function
    def reset(self):
        """Reset account state, choose the episode window and return the first state."""
        self.visualization = utility.TradingGraph(Render_range=self.df_total_steps) # initialize visualization i.e trading graph
        self.trades = [] # trades list for visualization
        self.balance = self.initial_balance
        self.net_worth = self.initial_balance
        self.last_price = 0
        self.btc_held = 0
        self.btc_sold = 0
        self.btc_bought = 0
        self.last_balance = self.initial_balance
        self.last_held = 0
        #start and end step for train and test
        if self.env_steps_size > 0: # used for training dataset: random window of env_steps_size steps
            self.start_step = random.randint(self.lookback_window_size, self.df_total_steps - self.env_steps_size)
            self.end_step = self.start_step + self.env_steps_size
        else: # used for testing dataset: walk the whole dataset once
            self.start_step = self.lookback_window_size
            self.end_step = self.df_total_steps
        self.current_step = self.start_step
        #get data for lookback window
        for i in reversed(range(self.lookback_window_size)):
            current_step = self.current_step - i
            #since orders history not normalized we divide by 10000 to keep values small
            self.orders_history.append([self.balance/10000, self.net_worth/10000, self.btc_bought/10000, self.btc_sold/10000, self.btc_held/10000])
            # NOTE(review): this reads self.current_step, not the local
            # current_step computed just above, so every lookback row holds
            # the same market row — confirm whether current_step was intended.
            self.market_history.append([self.df.loc[self.current_step, column] for column in self.columns
                                        ])
        #concatenate market and orders history which becomes state
        state = np.concatenate((self.market_history, self.orders_history), axis=1)
        return state
    #step function
    def step(self, action):
        """Apply one (type, amount) action; return (obs, reward, done, info)."""
        #if current step reaches env end step or net worth <= 0, set done true
        done = self.current_step == self.end_step or self.net_worth <= 0
        Date = self.df_original.loc[self.current_step, 'Date'] # for visualization
        High = self.df_original.loc[self.current_step, 'High'] # for visualization
        Low = self.df_original.loc[self.current_step, 'Low'] # for visualization
        self.btc_bought = 0
        self.btc_sold = 0
        #get action type and amount (action[1] in 0..10 -> fraction 0.0..1.0)
        action_type = action[0]
        amount = (action[1]*10)/100
        # Set the current price to a weighted price
        current_price = self.df_original.loc[self.current_step, "Weighted_Price"]
        reward = 0#set reward to 0
        #if action type hold or amount 0(hold)
        if action_type == 0 or amount ==0:
            self.balance = self.last_balance
            self.btc_held = self.last_held
            self.trades.append({'Date' : Date, 'High' : High, 'Low' : Low, 'total': 0, 'percentage':amount, 'type': "hold"})
            # reward: change in portfolio value since the previous step
            reward = (self.balance+(self.btc_held*current_price))-(self.last_balance+(self.last_held*self.last_price))#reward function
        #else calculate transaction btc bought,sold, balance, held etc
        elif (action_type == 1 and self.balance > 0) and amount>0:
            self.btc_bought = self.balance / current_price * amount
            self.balance -= self.btc_bought * current_price * (1 + self.commission)
            self.btc_held += self.btc_bought
            self.trades.append({'Date' : Date, 'High' : High, 'Low' : Low, 'total': self.btc_bought, 'percentage':amount, 'type': "buy"})
            # NOTE(review): last_held*current_price appears on both sides, so
            # this expression simplifies to -2*self.balance — confirm the
            # intended reward shaping for buys.
            reward = (self.last_balance-self.balance+(self.last_held*current_price))-(self.last_balance+self.balance+(self.last_held*current_price))
        elif (action_type == 2 and self.btc_held > 0) and amount>0:
            self.btc_sold = self.btc_held * amount
            self.balance += self.btc_sold * current_price * (1-self.commission)
            self.btc_held -= self.btc_sold
            self.trades.append({'Date' : Date, 'High' : High, 'Low' : Low, 'total': self.btc_sold, 'percentage':amount, 'type': "sell"})
            # NOTE(review): symmetrically, this simplifies to +2*self.balance —
            # confirm the intended reward shaping for sells.
            reward = (self.last_balance+self.balance+(self.last_held*current_price))-(self.last_balance-self.balance+(self.last_held*current_price))
        else:#else if we have less or equal to 0 btc or balance -> done
            done = self.btc_held<= 0 or self.balance<=0
        self.net_worth = self.balance + (self.btc_held * current_price)#calculate networth
        #append orders history for next step (divide by 10000 to keep values small)
        self.orders_history.append([self.balance/10000, self.net_worth/10000, self.btc_bought/10000, self.btc_sold/10000, self.btc_held/10000])
        obs = self._next_observation()#get next observation points
        self.past_step = self.current_step
        #increment step
        self.current_step += 1
        self.last_price = current_price
        self.last_balance = self.balance
        self.last_held = self.btc_held
        return obs, reward, done, {}
    # Get the data points for next step
    def _next_observation(self):
        """Append the current market row and return the stacked observation window."""
        self.market_history.append([self.df.loc[self.current_step, column] for column in self.columns
                                    ])
        obs = np.concatenate((self.market_history, self.orders_history), axis=1)
        return obs
    # render environment
    def render(self, mode = "live"):
        """Draw the last completed step on the live trading graph.

        NOTE(review): self.past_step is only assigned inside step(), so
        calling render() before the first step() raises AttributeError.
        """
        if mode == "live":
            Date = self.df_original.loc[self.past_step, 'Date']
            Open = self.df_original.loc[self.past_step, 'Open']
            Close = self.df_original.loc[self.past_step, 'Close']
            High = self.df_original.loc[self.past_step, 'High']
            Low = self.df_original.loc[self.past_step, 'Low']
            # Render the environment to the screen
            self.visualization.render(Date, Open, High, Low, Close, self.net_worth, self.trades)
#normalize the data
def Normalizing(df_original):
    """Return a normalized copy of *df_original*.

    Every column except the last (the Date column) is transformed to
    log-returns (log(x_t) - log(x_{t-1})); if that transform produces any
    NaN beyond the unavoidable first row (e.g. non-positive inputs), plain
    first differences are used instead. The transformed column is then
    min-max scaled to [0, 1]. The input frame is not modified.
    """
    frame = df_original.copy()
    for name in frame.columns.tolist()[:-1]:
        series = frame[name]
        # candidate transform: log-returns; row 0 is always NaN from shift(1)
        logged = np.log(series) - np.log(series.shift(1))
        if logged[1:].isnull().any():
            # fall back to simple differencing when logs are undefined
            transformed = series - series.shift(1)
        else:
            transformed = logged
        # min-max scale to [0, 1] (NaN-aware min/max, as in pandas)
        lo = transformed.min()
        hi = transformed.max()
        frame[name] = (transformed - lo) / (hi - lo)
    return frame
# import dataset (Bitstamp 1-minute OHLCV history)
dataset = pd.read_csv("./datasets/bitcoin-historical-data/bitstampUSD_1-min_data_2012-01-01_to_2021-03-31.csv")
# take a portion of data for training and testing
dataset = dataset[4000000:4800000]
dataset = dataset.reset_index(drop = True)
# convert timestamp to datetime and drop unwanted columns
dataset['Date'] = [datetime.fromtimestamp(x) for x in dataset['Timestamp']]
dataset = dataset.drop([ "Volume_(Currency)", "Timestamp"], axis=1)
# drop rows with missing values
dataset = dataset.dropna()
dataset_norm = Normalizing(dataset)
# remove first row due to NaN in normalized data (shift(1) leaves row 0 undefined)
dataset = dataset[1:].reset_index()
dataset_norm = dataset_norm[1:].reset_index()
# slice dataset for train and test and drop the helper index columns
slice_point = int(len(dataset_norm) * (99.981/100))
train_df_norm = dataset_norm[:slice_point].drop(["index"], axis =1)
test_df_norm = dataset_norm[slice_point:].reset_index().drop([ "level_0","index"], axis =1)
train_df = dataset[:slice_point].drop(["index"], axis =1)
test_df = dataset[slice_point:].reset_index().drop([ "level_0","index"], axis =1)
# It will check your custom environment and output additional warnings if needed
env = BitcoinEnv(train_df_norm, train_df, initial_balance = 1000, lookback_window_size = 31,env_steps_size = 500)
check_env(env)
# create dummy vec envs for the train and test dataframes
# (env_steps_size = 0 makes the test env walk the whole test set once)
train_env = DummyVecEnv([lambda: BitcoinEnv(train_df_norm, train_df,
                        initial_balance = 5000, lookback_window_size = 60,env_steps_size = 1500)])
test_env = DummyVecEnv([lambda: BitcoinEnv(test_df_norm, test_df,
                        initial_balance = 5000, lookback_window_size = 60,env_steps_size = 0)])
# create instance of the PPO model for learning
model = PPO("MlpPolicy",
            train_env,
            #verbose=1,
            #tensorboard_log="./tensorboard/"
            )
model.learn(total_timesteps=200000)#train model
# evaluate the trained model on the held-out test dataset, rendering each step
obs = test_env.reset()
len_test_df = len(test_df)
for i in range(len_test_df-60):
    action, _states = model.predict(obs)
    obs, rewards, done, info = test_env.step(action)
    test_env.render(mode = "live")
| 40.613445 | 142 | 0.698221 |
27cbe75738854cd43e4b3a28eb4c6650dcfd3a7f | 2,468 | py | Python | dizoo/atari/config/serial/qbert/qbert_acer_config.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | 1 | 2022-03-21T16:15:39.000Z | 2022-03-21T16:15:39.000Z | dizoo/atari/config/serial/qbert/qbert_acer_config.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | dizoo/atari/config/serial/qbert/qbert_acer_config.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | from easydict import EasyDict
# Main experiment config for training ACER on Atari Qbert with DI-engine.
qbert_acer_config = dict(
    exp_name='qbert_acer_seed0',
    env=dict(
        collector_env_num=16,
        evaluator_env_num=8,
        n_evaluator_episode=8,
        stop_value=int(1e6),
        env_id='QbertNoFrameskip-v4',
        frame_stack=4,
        manager=dict(shared_memory=False, )
    ),
    policy=dict(
        cuda=True,
        priority=False,
        model=dict(
            obs_shape=[4, 84, 84],
            action_shape=6,
            encoder_hidden_size_list=[128, 128, 512],
            critic_head_hidden_size=512,
            critic_head_layer_num=2,
            actor_head_hidden_size=512,
            actor_head_layer_num=2
        ),
        unroll_len=64,
        learn=dict(
            # (int) collect n_sample data, train model update_per_collect times
            # here we follow impala serial pipeline
            update_per_collect=10,
            # (int) the number of data for a train iteration
            batch_size=64,
            # grad_clip_type='clip_norm',
            learning_rate_actor=0.0001,
            learning_rate_critic=0.0003,
            # (float) loss weight of the entropy regularization, the weight of policy network is set to 1
            entropy_weight=0.01,
            # (float) discount factor for future reward, defaults int [0, 1]
            discount_factor=0.99,
            # (bool) whether to apply the trust-region correction
            trust_region=True,
            # (int/float) clip ratio of importance weights
            c_clip_ratio=10,
        ),
        collect=dict(
            # (int) collect n_sample data, train model n_iteration times
            n_sample=64,
            # (float) discount factor for future reward, defaults int [0, 1]
            discount_factor=0.99,
            collector=dict(collect_print_freq=1000, ),
        ),
        eval=dict(evaluator=dict(eval_freq=1000, )),
        other=dict(replay_buffer=dict(replay_buffer_size=3000, ), ),
    ),
)
main_config = EasyDict(qbert_acer_config)
# Companion config selecting the concrete env/env-manager/policy classes.
qbert_acer_create_config = dict(
    env=dict(
        type='atari',
        import_names=['dizoo.atari.envs.atari_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='acer'),
)
create_config = EasyDict(qbert_acer_create_config)
if __name__ == "__main__":
    # or you can enter ding -m serial -c qbert_acer_config.py -s 0
    from ding.entry import serial_pipeline
    serial_pipeline([main_config, create_config], seed=0)
| 34.277778 | 105 | 0.612237 |
f75238d6457c8357a5da67ab39ff93226c3f90cb | 1,266 | py | Python | checkov/cloudformation/checks/resource/aws/EKSSecretsEncryption.py | niradler/checkov | 2628c6f28a5604efe3877d6eacc3044d2b66b7b1 | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | checkov/cloudformation/checks/resource/aws/EKSSecretsEncryption.py | niradler/checkov | 2628c6f28a5604efe3877d6eacc3044d2b66b7b1 | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | checkov/cloudformation/checks/resource/aws/EKSSecretsEncryption.py | niradler/checkov | 2628c6f28a5604efe3877d6eacc3044d2b66b7b1 | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
class EKSSecretsEncryption(BaseResourceCheck):
    """Checks that an AWS::EKS::Cluster resource encrypts Kubernetes secrets."""

    def __init__(self):
        # Register this check's metadata with the base scanner machinery.
        super().__init__(
            name="Ensure EKS Cluster has Secrets Encryption Enabled",
            id="CKV_AWS_58",
            categories=[CheckCategories.KUBERNETES],
            supported_resources=['AWS::EKS::Cluster'],
        )

    def scan_resource_conf(self, conf):
        """
        Looks for eks secrets encryption
        https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-eks-cluster-encryptionconfig.html
        :param conf: AWS::EKS::Cluster configuration
        :return: <CheckResult>
        """
        # EncryptionConfig is a list of entries, each optionally holding a
        # "Resources" list naming what gets encrypted.
        entries = list(conf.get('Properties', {}).get('EncryptionConfig', []))
        resource_lists = [entry["Resources"] for entry in entries if "Resources" in entry]
        # Pass when any entry declares "secrets" among its encrypted resources.
        if isinstance(resource_lists, list) and any('secrets' in rl for rl in resource_lists):
            return CheckResult.PASSED
        return CheckResult.FAILED


check = EKSSecretsEncryption()
f42a1d137e9b308cdc85df7f43b0e2fee0f1e57d | 20,294 | py | Python | lib/python2.7/site-packages/pip/_internal/commands/install.py | DPNT-Sourcecode/CHK-uimw01 | 87144ae10115d7a8df565f5109666f00bc001ce4 | [
"Apache-2.0"
] | 694 | 2018-11-30T01:06:30.000Z | 2022-03-31T14:46:26.000Z | virtual/lib/python3.6/site-packages/pip/_internal/commands/install.py | annstella/blog | 1cdb7e7e7df028a84fae9b7d901116aae577589d | [
"MIT"
] | 374 | 2015-12-25T05:38:28.000Z | 2022-03-03T05:03:36.000Z | lib/python2.7/site-packages/pip/_internal/commands/install.py | anish03/weather-dash | d517fa9da9028d1fc5d8fd71d77cee829ddee87b | [
"MIT"
] | 131 | 2016-03-30T09:13:35.000Z | 2022-01-24T10:30:18.000Z | from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
from optparse import SUPPRESS_HELP
from pip._vendor import pkg_resources
from pip._internal import cmdoptions
from pip._internal.basecommand import RequirementCommand
from pip._internal.cache import WheelCache
from pip._internal.exceptions import (
CommandError, InstallationError, PreviousBuildDirError,
)
from pip._internal.locations import distutils_scheme, virtualenv_no_global
from pip._internal.operations.check import check_install_conflicts
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req import RequirementSet, install_given_reqs
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.resolve import Resolver
from pip._internal.status_codes import ERROR
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import (
ensure_dir, get_installed_version,
protect_pip_from_modification_on_windows,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.wheel import WheelBuilder
# "wheel" is an optional dependency: when absent it is set to None and the
# install command degrades gracefully (no wheel building support).
try:
    import wheel
except ImportError:
    wheel = None
# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--no-user',
dest='use_user_site',
action='store_false',
help=SUPPRESS_HELP)
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
'--prefix',
dest='prefix_path',
metavar='dir',
default=None,
help="Installation prefix where lib, bin and other top-level "
"folders are placed")
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. The handling of dependencies depends on the '
'upgrade-strategy used.'
)
cmd_opts.add_option(
'--upgrade-strategy',
dest='upgrade_strategy',
default='only-if-needed',
choices=['only-if-needed', 'eager'],
help='Determines how dependency upgrading should be handled '
'[default: %default]. '
'"eager" - dependencies are upgraded regardless of '
'whether the currently installed version satisfies the '
'requirements of the upgraded package(s). '
'"only-if-needed" - are upgraded only when they do not '
'satisfy the requirements of the upgraded package(s).'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='Reinstall all packages even if they are already '
'up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-warn-script-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.prefer_binary())
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
cmd_opts.add_option(cmdoptions.progress_bar())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
    """Resolve, optionally wheel-build, and install the requirements
    given on the command line.

    :param options: parsed option values for ``pip install``
    :param args: positional requirement specifiers / paths
    :return: the resolved ``RequirementSet`` on success, or ``ERROR``
        when installation fails with an ``EnvironmentError``
    """
    cmdoptions.check_install_build_global(options)
    # Only honour the configured upgrade strategy when --upgrade was given
    upgrade_strategy = "to-satisfy-only"
    if options.upgrade:
        upgrade_strategy = options.upgrade_strategy
    if options.build_dir:
        options.build_dir = os.path.abspath(options.build_dir)
    options.src_dir = os.path.abspath(options.src_dir)
    install_options = options.install_options or []
    if options.use_user_site:
        # --user conflicts with --prefix and with virtualenvs that hide
        # the user site-packages
        if options.prefix_path:
            raise CommandError(
                "Can not combine '--user' and '--prefix' as they imply "
                "different installation locations"
            )
        if virtualenv_no_global():
            raise InstallationError(
                "Can not perform a '--user' install. User site-packages "
                "are not visible in this virtualenv."
            )
        install_options.append('--user')
        install_options.append('--prefix=')
    target_temp_dir = TempDirectory(kind="target")
    if options.target_dir:
        # --target installs into a staging dir first, then moves files
        # over in _handle_target_dir; anything already installed is
        # irrelevant for that layout
        options.ignore_installed = True
        options.target_dir = os.path.abspath(options.target_dir)
        if (os.path.exists(options.target_dir) and not
                os.path.isdir(options.target_dir)):
            raise CommandError(
                "Target path exists but is not a directory, will not "
                "continue."
            )
        # Create a target directory for using with the target option
        target_temp_dir.create()
        install_options.append('--home=' + target_temp_dir.path)
    global_options = options.global_options or []
    with self._build_session(options) as session:
        finder = self._build_package_finder(options, session)
        # keep the build dir when the user supplied one or asked for
        # --no-clean
        build_delete = (not (options.no_clean or options.build_dir))
        wheel_cache = WheelCache(options.cache_dir, options.format_control)
        if options.cache_dir and not check_path_owner(options.cache_dir):
            logger.warning(
                "The directory '%s' or its parent directory is not owned "
                "by the current user and caching wheels has been "
                "disabled. check the permissions and owner of that "
                "directory. If executing pip with sudo, you may want "
                "sudo's -H flag.",
                options.cache_dir,
            )
            options.cache_dir = None
        with RequirementTracker() as req_tracker, TempDirectory(
            options.build_dir, delete=build_delete, kind="install"
        ) as directory:
            requirement_set = RequirementSet(
                require_hashes=options.require_hashes,
            )
            try:
                self.populate_requirement_set(
                    requirement_set, args, options, finder, session,
                    self.name, wheel_cache
                )
                preparer = RequirementPreparer(
                    build_dir=directory.path,
                    src_dir=options.src_dir,
                    download_dir=None,
                    wheel_download_dir=None,
                    progress_bar=options.progress_bar,
                    build_isolation=options.build_isolation,
                    req_tracker=req_tracker,
                )
                resolver = Resolver(
                    preparer=preparer,
                    finder=finder,
                    session=session,
                    wheel_cache=wheel_cache,
                    use_user_site=options.use_user_site,
                    upgrade_strategy=upgrade_strategy,
                    force_reinstall=options.force_reinstall,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_requires_python=options.ignore_requires_python,
                    ignore_installed=options.ignore_installed,
                    isolated=options.isolated_mode,
                )
                resolver.resolve(requirement_set)
                protect_pip_from_modification_on_windows(
                    modifying_pip=requirement_set.has_requirement("pip")
                )
                # If caching is disabled or wheel is not installed don't
                # try to build wheels.
                if wheel and options.cache_dir:
                    # build wheels before install.
                    wb = WheelBuilder(
                        finder, preparer, wheel_cache,
                        build_options=[], global_options=[],
                    )
                    # Ignore the result: a failed wheel will be
                    # installed from the sdist/vcs whatever.
                    wb.build(
                        requirement_set.requirements.values(),
                        session=session, autobuilding=True
                    )
                to_install = resolver.get_installation_order(
                    requirement_set
                )
                # Consistency Checking of the package set we're installing.
                should_warn_about_conflicts = (
                    not options.ignore_dependencies and
                    options.warn_about_conflicts
                )
                if should_warn_about_conflicts:
                    self._warn_about_conflicts(to_install)
                # Don't warn about script install locations if
                # --target has been specified
                warn_script_location = options.warn_script_location
                if options.target_dir:
                    warn_script_location = False
                installed = install_given_reqs(
                    to_install,
                    install_options,
                    global_options,
                    root=options.root_path,
                    home=target_temp_dir.path,
                    prefix=options.prefix_path,
                    pycompile=options.compile,
                    warn_script_location=warn_script_location,
                    use_user_site=options.use_user_site,
                )
                lib_locations = get_lib_location_guesses(
                    user=options.use_user_site,
                    home=target_temp_dir.path,
                    root=options.root_path,
                    prefix=options.prefix_path,
                    isolated=options.isolated_mode,
                )
                # Build the "Successfully installed ..." summary, adding a
                # version suffix whenever it can be read back from the
                # freshly-installed working set
                working_set = pkg_resources.WorkingSet(lib_locations)
                reqs = sorted(installed, key=operator.attrgetter('name'))
                items = []
                for req in reqs:
                    item = req.name
                    try:
                        installed_version = get_installed_version(
                            req.name, working_set=working_set
                        )
                        if installed_version:
                            item += '-' + installed_version
                    except Exception:
                        # best-effort: version lookup failures must not
                        # break a successful install
                        pass
                    items.append(item)
                installed = ' '.join(items)
                if installed:
                    logger.info('Successfully installed %s', installed)
            except EnvironmentError as error:
                show_traceback = (self.verbosity >= 1)
                message = create_env_error_message(
                    error, show_traceback, options.use_user_site,
                )
                logger.error(message, exc_info=show_traceback)
                return ERROR
            except PreviousBuildDirError:
                # keep the build dir so the user can inspect the failure
                options.no_clean = True
                raise
            finally:
                # Clean up
                if not options.no_clean:
                    requirement_set.cleanup_files()
                    wheel_cache.cleanup()
    if options.target_dir:
        # move staged files from the temporary --home dir to --target
        self._handle_target_dir(
            options.target_dir, target_temp_dir, options.upgrade
        )
    return requirement_set
def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
    """Move everything staged under *target_temp_dir* into the final
    ``--target`` directory.

    :param target_dir: user-specified ``--target`` directory
    :param target_temp_dir: ``TempDirectory`` the install was staged in
    :param upgrade: when True, replace already-existing entries in
        *target_dir* (symlinked entries are never replaced)
    """
    ensure_dir(target_dir)
    # Checking both purelib and platlib directories for installed
    # packages to be moved to target directory
    lib_dir_list = []
    with target_temp_dir:
        scheme = distutils_scheme('', home=target_temp_dir.path)
        purelib_dir = scheme['purelib']
        platlib_dir = scheme['platlib']
        data_dir = scheme['data']
        if os.path.exists(purelib_dir):
            lib_dir_list.append(purelib_dir)
        if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
            lib_dir_list.append(platlib_dir)
        if os.path.exists(data_dir):
            # data_dir is appended last; the [:-1] slice below relies
            # on that ordering
            lib_dir_list.append(data_dir)
        for lib_dir in lib_dir_list:
            for item in os.listdir(lib_dir):
                if lib_dir == data_dir:
                    ddir = os.path.join(data_dir, item)
                    # skip data entries that are ancestors of the lib
                    # dirs themselves (they are handled via those dirs)
                    if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
                        continue
                target_item_dir = os.path.join(target_dir, item)
                if os.path.exists(target_item_dir):
                    if not upgrade:
                        logger.warning(
                            'Target directory %s already exists. Specify '
                            '--upgrade to force replacement.',
                            target_item_dir
                        )
                        continue
                    if os.path.islink(target_item_dir):
                        logger.warning(
                            'Target directory %s already exists and is '
                            'a link. Pip will not automatically replace '
                            'links, please remove if replacement is '
                            'desired.',
                            target_item_dir
                        )
                        continue
                    if os.path.isdir(target_item_dir):
                        shutil.rmtree(target_item_dir)
                    else:
                        os.remove(target_item_dir)
                shutil.move(
                    os.path.join(lib_dir, item),
                    target_item_dir
                )
def _warn_about_conflicts(self, to_install):
    """Log a critical message for every dependency of the packages in
    *to_install* that would end up missing or at an incompatible
    version after installation.
    """
    # NOTE: There is some duplication here from pip check
    package_set, (missing, conflicting) = check_install_conflicts(to_install)
    for name, unmet_deps in missing.items():
        pkg_version = package_set[name][0]
        for dep in unmet_deps:
            logger.critical(
                "%s %s requires %s, which is not installed.",
                name, pkg_version, dep[1],
            )
    for name, clashes in conflicting.items():
        pkg_version = package_set[name][0]
        for dep_name, dep_version, req in clashes:
            logger.critical(
                "%s %s has requirement %s, but you'll have %s %s which is "
                "incompatible.",
                name, pkg_version, req, dep_name, dep_version,
            )
def get_lib_location_guesses(*args, **kwargs):
    """Return the 'purelib' and 'platlib' directories of the install
    scheme described by the given arguments (forwarded verbatim to
    ``distutils_scheme``).
    """
    install_scheme = distutils_scheme('', *args, **kwargs)
    return [install_scheme[key] for key in ('purelib', 'platlib')]
def create_env_error_message(error, show_traceback, using_user_site):
    """Format an error message for an EnvironmentError.

    It may occur anytime during the execution of the install command.

    :param error: the EnvironmentError that was raised
    :param show_traceback: when True a traceback is printed separately,
        so the error text itself is omitted from the message
    :param using_user_site: whether ``--user`` was in effect; switches
        the suggested remedy
    :return: a message ending in exactly one newline
    """
    # First line: the error indication (with the error text only when
    # no traceback will be shown)
    lead = "Could not install packages due to an EnvironmentError"
    if show_traceback:
        lead += "."
    else:
        lead += ": " + str(error)
    pieces = [lead + "\n"]
    # Suggest useful actions for permission problems:
    # (1) using user site-packages or (2) verifying the permissions
    if error.errno == errno.EACCES:
        if using_user_site:
            pieces.append("Check the permissions.\n")
        else:
            pieces.append(
                "Consider using the `--user` option"
                " or check the permissions.\n"
            )
    return "".join(pieces).strip() + "\n"
| 39.253385 | 79 | 0.54691 |
4152c222686b482f1356e479a78e5f795e8d4c3b | 74,100 | py | Python | pymeasure/instruments/ni/virtualbench.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | null | null | null | pymeasure/instruments/ni/virtualbench.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | null | null | null | pymeasure/instruments/ni/virtualbench.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | null | null | null | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
# pyvirtualbench library: Copyright (c) 2015 Charles Armstrap <charles@armstrap.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Requires 'pyvirtualbench' package:
# https://github.com/armstrap/armstrap-pyvirtualbench
import logging
import re
# ctypes only required for VirtualBench_Direct class
from ctypes import (c_bool, c_size_t, c_double, c_uint8, c_int32, c_uint32,
c_int64, c_uint64, c_wchar, c_wchar_p, Structure, c_int,
cdll, byref)
from datetime import datetime, timezone, timedelta
import numpy as np
import pandas as pd
from pymeasure.instruments import Instrument, RangeException
from pymeasure.instruments.validators import (strict_discrete_set,
truncated_discrete_set,
strict_range)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
try:
# Requires 'pyvirtualbench' package:
# https://github.com/armstrap/armstrap-pyvirtualbench
import pyvirtualbench as pyvb
except ModuleNotFoundError as err:
# catch here for logging
log.info('Failed loading the pyvirtualbench package. '
+ 'Check the NI VirtualBench documentation on how to '
+ 'install this external dependency. '
+ 'ImportError: {}'.format(err))
raise
class VirtualBench_Direct(pyvb.PyVirtualBench):
    """ Represents National Instruments Virtual Bench main frame.
    This class provides direct access to the armstrap/pyvirtualbench
    Python wrapper.
    """

    def __init__(self, device_name='', name='VirtualBench'):
        ''' Initialize the VirtualBench library. This must be called at least
        once for the application. The 'version' parameter must be set to
        the NIVB_LIBRARY_VERSION constant.

        :param str device_name: Full unique device name
        :param str name: Name for display in pymeasure
        :raises pyvb.PyVirtualBenchException: if library initialization
            fails
        '''
        self.device_name = device_name
        self.name = name
        # Mark the handle as absent first so __del__ stays safe even if
        # loading the native library below raises.
        self.library_handle = None
        self.nilcicapi = cdll.LoadLibrary("nilcicapi")
        self.library_handle = c_int(0)
        status = self.nilcicapi.niVB_Initialize(pyvb.NIVB_LIBRARY_VERSION,
                                                byref(self.library_handle))
        if (status != pyvb.Status.SUCCESS):
            raise pyvb.PyVirtualBenchException(status, self.nilcicapi,
                                               self.library_handle)
        log.info("Initializing %s." % self.name)

    def __del__(self):
        """ Ensures the connection is closed upon deletion.

        Guards against partially-constructed instances (e.g. when
        ``cdll.LoadLibrary`` raised in ``__init__``), which previously
        caused an AttributeError during garbage collection.
        """
        if getattr(self, 'library_handle', None) is not None:
            self.release()
class VirtualBench():
""" Represents National Instruments Virtual Bench main frame.
Subclasses implement the functionalities of the different modules:
- Mixed-Signal-Oscilloscope (MSO)
- Digital Input Output (DIO)
- Function Generator (FGEN)
- Power Supply (PS)
- Serial Peripheral Interface (SPI) -> not implemented
for pymeasure yet
- Inter Integrated Circuit (I2C) -> not implemented for pymeasure yet
For every module exist methods to save/load the configuration to file.
These methods are not wrapped so far, checkout the pyvirtualbench file.
All calibration methods and classes are not wrapped so far, since these
are not required on a very regular basis.
Check the pyvirtualbench file, if you need the functionality.
:param str device_name: Full unique device name
:param str name: Name for display in pymeasure
"""
def __init__(self, device_name='', name='VirtualBench'):
    """ Initialize the VirtualBench library and acquire the device.
    This must be called at least once for the application.

    :param str device_name: Full unique device name
    :param str name: Name for display in pymeasure
    """
    self.device_name = device_name
    self.name = name
    self.vb = pyvb.PyVirtualBench(device_name)
    log.info("Initializing %s." % self.name)
def __del__(self):
    """ Ensures the connection is closed upon deletion.

    Robust against partially-constructed instances: if ``__init__``
    raised before ``self.vb`` was assigned, there is nothing to
    release (the original raised AttributeError here).
    """
    vb = getattr(self, 'vb', None)
    if vb is not None and vb.library_handle is not None:
        vb.release()
def shutdown(self):
    ''' Finalize the VirtualBench library and flag this session as
    shut down.
    '''
    log.info("Shutting down %s" % self.name)
    self.vb.release()
    self.isShutdown = True
def get_library_version(self):
    ''' Return the version of the VirtualBench runtime library.

    :return: Version of the VirtualBench runtime library
    '''
    return self.vb.get_library_version()
def convert_timestamp_to_values(self, timestamp):
    """ Converts a timestamp to seconds and fractional seconds.

    :param timestamp: VirtualBench timestamp
    :type timestamp: pyvb.Timestamp
    :return: (seconds_since_1970, fractional seconds)
    :rtype: (int, float)
    :raises ValueError: if *timestamp* is not a pyvb.Timestamp
    """
    if isinstance(timestamp, pyvb.Timestamp):
        return self.vb.convert_timestamp_to_values(timestamp)
    raise ValueError("{0} is not a VirtualBench Timestamp object"
                     .format(timestamp))
def convert_values_to_timestamp(self, seconds_since_1970,
                                fractional_seconds):
    """ Converts seconds and fractional seconds to a VirtualBench
    timestamp.

    :param seconds_since_1970: Date/Time in seconds since 1970
    :type seconds_since_1970: int
    :param fractional_seconds: Fractional seconds
    :type fractional_seconds: float
    :return: VirtualBench timestamp
    :rtype: pyvb.Timestamp
    """
    return self.vb.convert_values_to_timestamp(seconds_since_1970,
                                               fractional_seconds)
def convert_values_to_datetime(self, timestamp):
    """ Converts a VirtualBench timestamp to a timezone-aware (UTC)
    datetime object.

    :param timestamp: VirtualBench timestamp
    :type timestamp: pyvb.Timestamp
    :return: Timestamp as DateTime object
    :rtype: DateTime
    """
    whole_seconds, frac_seconds = \
        self.convert_timestamp_to_values(timestamp)
    base_time = datetime.fromtimestamp(whole_seconds, timezone.utc)
    return base_time + timedelta(seconds=frac_seconds)
def collapse_channel_string(self, names_in):
    """ Collapses a channel string into a comma and colon-delimited
    equivalent. Last element is the number of channels.

    :param names_in: Channel string
    :type names_in: str
    :return: Channel string with colon notation where possible,
             number of channels
    :rtype: (str, int)
    :raises ValueError: if *names_in* is not a string
    """
    if isinstance(names_in, str):
        return self.vb.collapse_channel_string(names_in)
    raise ValueError("{0} is not a string".format(names_in))
def expand_channel_string(self, names_in):
    """ Expands a channel string into a comma-delimited (no colon)
    equivalent. Last element is the number of channels.
    ``'dig/0:2'`` -> ``('dig/0, dig/1, dig/2',3)``

    :param names_in: Channel string
    :type names_in: str
    :return: Channel string with all channels separated by comma,
        number of channels
    :rtype: (str, int)
    """
    return self.vb.expand_channel_string(names_in)
"""
Wrappers not implemented yet:
- Handling Network Device
- Setting Calibration Information
def add_network_device(self, ip_or_hostname, timeout_in_ms):
''' Adds a networked device to the system.
'''
def remove_device(self, device_name = ''):
''' Removes a device from the system. The device must not be connected
via USB to be removed.
'''
def login(self, device_name = '', username = 'admin', password = ''):
''' Attempts to log in to a networked device. Logging in to
a device grants access to the permissions set for the
specified user in NI Web-Based Monitoring and Configuration.
'''
def logout(self, device_name = ''):
''' Logs out of a networked device that you are logged in to.
Logging out of a device revokes access to the permissions set
for the specified user in NI Web-Based Monitoring and
Configuration.
'''
def set_calibration_information(self, calibration_date,
calibration_interval, device_name = '',
password = ''):
''' Sets calibration information for the specified device.
'''
def set_calibration_password(self, current_password, new_password,
device_name = ''):
''' Sets a new calibration password for the specified device. This
method requires the current password for the device, and returns an
error if the specified password is incorrect.
'''
"""
def get_calibration_information(self):
    """ Returns calibration information for the acquired device,
    including the last calibration date and calibration interval.

    :return: Calibration date, recommended calibration interval in
        months, calibration interval in months
    :rtype: (pyvb.Timestamp, int, int)
    """
    return self.vb.get_calibration_information(self.device_name)
def acquire_digital_input_output(self, lines, reset=False):
    """ Establishes communication with the DIO module and exposes it
    as ``self.dio``. This method should be called once per session.

    :param lines: Lines to acquire, reading is possible on all lines
    :type lines: str
    :param reset: Reset DIO module, defaults to False
    :type reset: bool, optional
    """
    checked_reset = strict_discrete_set(reset, [True, False])
    self.dio = self.DigitalInputOutput(
        self.vb, lines, checked_reset, vb_name=self.name)
def acquire_power_supply(self, reset=False):
    """ Establishes communication with the PS module and exposes it
    as ``self.ps``. This method should be called once per session.

    :param reset: Reset the PS module, defaults to False
    :type reset: bool, optional
    """
    checked_reset = strict_discrete_set(reset, [True, False])
    self.ps = self.PowerSupply(self.vb, checked_reset, vb_name=self.name)
def acquire_function_generator(self, reset=False):
    """ Establishes communication with the FGEN module and exposes it
    as ``self.fgen``. This method should be called once per session.

    :param reset: Reset the FGEN module, defaults to False
    :type reset: bool, optional
    """
    checked_reset = strict_discrete_set(reset, [True, False])
    self.fgen = self.FunctionGenerator(
        self.vb, checked_reset, vb_name=self.name)
def acquire_mixed_signal_oscilloscope(self, reset=False):
    """ Establishes communication with the MSO module and exposes it
    as ``self.mso``. This method should be called once per session.

    :param reset: Reset the MSO module, defaults to False
    :type reset: bool, optional
    """
    checked_reset = strict_discrete_set(reset, [True, False])
    self.mso = self.MixedSignalOscilloscope(
        self.vb, checked_reset, vb_name=self.name)
def acquire_digital_multimeter(self, reset=False):
    """ Establishes communication with the DMM module and exposes it
    as ``self.dmm``. This method should be called once per session.

    :param reset: Reset the DMM module, defaults to False
    :type reset: bool, optional
    """
    checked_reset = strict_discrete_set(reset, [True, False])
    self.dmm = self.DigitalMultimeter(
        self.vb, reset=checked_reset, vb_name=self.name)
class DigitalInputOutput():
    """ Represents Digital Input Output (DIO) Module of Virtual Bench
    device. Allows to read/write digital channels and/or set channels
    to export the start signal of FGEN module or trigger of MSO module.
    """

    def __init__(self, virtualbench, lines, reset, vb_name=''):
        """ Acquire DIO module

        :param virtualbench: VirtualBench Instance
        :type virtualbench: VirtualBench
        :param lines: Lines to acquire
        :type lines: str
        :param reset: Reset DIO module
        :type reset: bool
        :param str vb_name: Display name prefix for log messages
        """
        # Parameters & Handle of VirtualBench Instance
        self._device_name = virtualbench.device_name
        self._vb_handle = virtualbench
        self.name = vb_name + " DIO"
        # Validate lines argument;
        # store line names & numbers for future reference
        (self._line_names, self._line_numbers) = self.validate_lines(
            lines, return_single_lines=True, validate_init=False)
        # Create DIO Instance
        log.info("Initializing %s." % self.name)
        self.dio = self._vb_handle.acquire_digital_input_output(
            self._line_names, reset)
        self.isShutdown = False

    def __del__(self):
        """ Ensures the connection is closed upon deletion
        """
        if self.isShutdown is not True:
            self.dio.release()

    def shutdown(self):
        ''' Stops the session and deallocates any resources acquired during
        the session. If output is enabled on any channels, they remain
        in their current state and continue to output data.
        '''
        log.info("Shutting down %s" % self.name)
        self.dio.release()
        self.isShutdown = True

    def validate_lines(self, lines, return_single_lines=False,
                       validate_init=False):
        """ Validate lines string

        Allowed patterns (case sensitive):

        - ``'VBxxxx-xxxxxxx/dig/0:7'``
        - ``'VBxxxx-xxxxxxx/dig/0'``
        - ``'dig/0'``
        - ``'VBxxxx-xxxxxxx/trig'``
        - ``'trig'``

        Allowed Line Numbers: 0-7 or trig

        :param lines: Line string to test
        :type lines: str
        :param return_single_lines: Return list of line numbers as well,
            defaults to False
        :type return_single_lines: bool, optional
        :param validate_init: Check if lines are initialized (in
            :code:`self._line_numbers`), defaults to False
        :type validate_init: bool, optional
        :return: Line string, optional list of single line numbers
        :rtype: str, optional (str, list)
        :raises ValueError: for any invalid line specification
        """
        def error(lines=lines):
            # single raise site so every invalid input yields the same
            # exception type and message
            raise ValueError(
                "Line specification {0} is not valid!".format(lines))

        lines = self._vb_handle.expand_channel_string(lines)[0]
        lines = lines.split(', ')
        return_lines = []
        single_lines = []
        for line in lines:
            if line == 'trig':
                device = self._device_name
            # otherwise (device_name/)dig/line or device_name/trig
            else:
                # split off line number by last '/'; a missing '/' used
                # to raise AttributeError (re.match returns None), now
                # it raises the intended ValueError
                match = re.match(r'(.*)(?:/)(.+)', line)
                if match is None:
                    error()
                (device, line) = match.groups()
            if (line == 'trig') and (device == self._device_name):
                single_lines.append('trig')
                return_lines.append(self._device_name + '/' + line)
            else:
                # line has to be a digital line number 0-7
                try:
                    line = int(line)
                except ValueError:
                    error()
                if line not in range(0, 8):
                    error()
                single_lines.append(line)
                # validate device name: either 'dig' or 'device_name/dig'
                if device == 'dig':
                    pass
                else:
                    match = re.match(
                        r'(VB[0-9]{4}-[0-9a-zA-Z]{7})(?:/dig)', device)
                    if match is None:
                        error()
                    device = match.groups()[0]
                    # device_name has to match
                    if not device == self._device_name:
                        error()
                # constructing line references for output
                return_lines.append((self._device_name + '/dig/%d') % line)
            # check if lines are initialized
            if validate_init is True:
                if line not in self._line_numbers:
                    raise ValueError(
                        "Digital Line {} is not initialized".format(line))
        # create comma separated channel string
        return_lines = ', '.join(return_lines)
        # collapse string if possible; drop number of lines
        return_lines = self._vb_handle.collapse_channel_string(
            return_lines)[0]
        if return_single_lines is True:
            return return_lines, single_lines
        else:
            return return_lines

    def tristate_lines(self, lines):
        ''' Sets all specified lines to a high-impedance state. (Default)
        '''
        lines = self.validate_lines(lines, validate_init=True)
        self.dio.tristate_lines(lines)

    def export_signal(self, line, digitalSignalSource):
        """ Exports a signal to the specified line.

        :param line: Line string
        :type line: str
        :param digitalSignalSource: ``0``/``'FGEN START'`` or
            ``1``/``'MSO TRIGGER'``
        :type digitalSignalSource: str
        """
        line = self.validate_lines(line, validate_init=True)
        digitalSignalSource_values = {"FGEN START": 0, "MSO TRIGGER": 1}
        source_name = strict_discrete_set(
            digitalSignalSource.upper(), digitalSignalSource_values)
        self.dio.export_signal(
            line, digitalSignalSource_values[source_name])

    def query_line_configuration(self):
        ''' Indicates the current line configurations. Tristate Lines,
        Static Lines, and Export Lines contain comma-separated
        range_data and/or colon-delimited lists of all acquired lines
        '''
        return self.dio.query_line_configuration()

    def query_export_signal(self, line):
        """ Indicates the signal being exported on the specified line.

        :param line: Line string
        :type line: str
        :return: Exported signal (FGEN start or MSO trigger)
        :rtype: enum
        """
        line = self.validate_lines(line, validate_init=True)
        return self.dio.query_export_signal(line)

    def write(self, lines, data):
        """ Writes data to the specified lines.

        :param lines: Line string
        :type lines: str
        :param data: List of data, (``True`` = High, ``False`` = Low)
        :type data: list or tuple
        :raises ValueError: if *data* is not iterable or contains
            values other than True/False
        """
        lines = self.validate_lines(lines, validate_init=True)
        # distinguish "not iterable" from "contains invalid values";
        # the former blanket except used to mask validator errors
        try:
            iter(data)
        except TypeError:
            raise ValueError(
                "Data {} is not iterable (list or tuple).".format(data))
        for value in data:
            # strict_discrete_set raises ValueError for anything that
            # is not exactly True or False
            strict_discrete_set(value, [True, False])
        log.debug("{}: {} output {}.".format(self.name, lines, data))
        self.dio.write(lines, data)

    def read(self, lines):
        """ Reads the current state of the specified lines.

        :param lines: Line string, requires full name specification e.g.
            ``'VB8012-xxxxxxx/dig/0:7'`` since instrument_handle
            is not required (only library_handle)
        :type lines: str
        :return: List of line states (HIGH/LOW)
        :rtype: list
        """
        # init not necessary for readout
        lines = self.validate_lines(lines, validate_init=False)
        return self.dio.read(lines)

    def reset_instrument(self):
        ''' Resets the session configuration to default values, and
        resets the device and driver software to a known state.
        '''
        self.dio.reset_instrument()
class DigitalMultimeter():
    """ Represents Digital Multimeter (DMM) Module of Virtual Bench
    device. Allows to measure either DC/AC voltage or current,
    Resistance or Diodes.
    """

    def __init__(self, virtualbench, reset, vb_name=''):
        """ Acquire DMM module

        :param virtualbench: Instance of the VirtualBench class
        :type virtualbench: VirtualBench
        :param reset: Resets the instrument
        :type reset: bool
        :param str vb_name: Display name prefix for log messages
        """
        # Parameters & Handle of VirtualBench Instance
        self._device_name = virtualbench.device_name
        self._vb_handle = virtualbench
        self.name = vb_name + " DMM"
        log.info("Initializing %s." % self.name)
        self.dmm = self._vb_handle.acquire_digital_multimeter(
            self._device_name, reset)
        self.isShutdown = False

    def __del__(self):
        """ Ensures the connection is closed upon deletion
        """
        if self.isShutdown is not True:
            self.dmm.release()

    def shutdown(self):
        """ Stops the DMM session and deallocates any resources
        acquired during the session.
        """
        log.info("Shutting down %s" % self.name)
        self.dmm.release()
        self.isShutdown = True

    @staticmethod
    def validate_range(dmm_function, range):
        """ Checks if ``range`` is valid for the chosen ``dmm_function``
        and coerces it to the nearest allowed instrument range.

        :param int dmm_function: DMM Function
        :param range: Range value, e.g. maximum value to measure
            (parameter name shadows the builtin; kept for backward
            compatibility with keyword callers)
        :type range: int or float
        :return: Range value to pass to instrument
        :rtype: int
        """
        # allowed ranges per DMM function index; DIODE (5) takes none
        ref_ranges = {
            0: [0.1, 1, 10, 100, 300],
            1: [0.1, 1, 10, 100, 265],
            2: [0.01, 0.1, 1, 10],
            3: [0.005, 0.05, 0.5, 5],
            4: [100, 1000, 10000, 100000, 1000000,
                10000000, 100000000],
        }
        range = truncated_discrete_set(range, ref_ranges[dmm_function])
        return range

    def validate_dmm_function(self, dmm_function):
        """ Check if DMM function *dmm_function* exists

        :param dmm_function: DMM function index or name:

            - ``'DC_VOLTS'``, ``'AC_VOLTS'``
            - ``'DC_CURRENT'``, ``'AC_CURRENT'``
            - ``'RESISTANCE'``
            - ``'DIODE'``

        :type dmm_function: int or str
        :return: DMM function index to pass to the instrument
        :rtype: int
        :raises ValueError: if the function index/name is unknown
        """
        try:
            pyvb.DmmFunction(dmm_function)
        except Exception:
            try:
                # str.upper(), not .Upper(): the original misspelling
                # made every string name raise ValueError
                dmm_function = pyvb.DmmFunction[dmm_function.upper()]
            except Exception:
                raise ValueError(
                    "DMM Function may be 0-5, 'DC_VOLTS'," +
                    " 'AC_VOLTS', 'DC_CURRENT', 'AC_CURRENT'," +
                    " 'RESISTANCE' or 'DIODE'")
        return dmm_function

    def validate_auto_range_terminal(self, auto_range_terminal):
        """ Check value for choosing the auto range terminal for
        DC current measurement

        :param auto_range_terminal: Terminal to perform
            auto ranging (``'LOW'`` or ``'HIGH'``)
        :type auto_range_terminal: int or str
        :return: Auto range terminal to pass to the instrument
        :rtype: int
        :raises ValueError: if the terminal index/name is unknown
        """
        try:
            pyvb.DmmCurrentTerminal(auto_range_terminal)
        except Exception:
            try:
                # str.upper(), not .Upper() (same misspelling as above)
                auto_range_terminal = pyvb.DmmCurrentTerminal[
                    auto_range_terminal.upper()]
            except Exception:
                raise ValueError(
                    "Current Auto Range Terminal may be 0, 1," +
                    " 'LOW' or 'HIGH'")
        return auto_range_terminal

    def configure_measurement(self, dmm_function, auto_range=True,
                              manual_range=1.0):
        """ Configure Instrument to take a DMM measurement

        :param dmm_function: DMM function index or name:

            - ``'DC_VOLTS'``, ``'AC_VOLTS'``
            - ``'DC_CURRENT'``, ``'AC_CURRENT'``
            - ``'RESISTANCE'``
            - ``'DIODE'``

        :type dmm_function: int or str
        :param bool auto_range: Enable/Disable auto ranging
        :param float manual_range: Manually set measurement range
        """
        dmm_function = self.validate_dmm_function(dmm_function)
        auto_range = strict_discrete_set(auto_range, [True, False])
        if auto_range is False:
            # validate the user-supplied range; the original passed the
            # builtin ``range`` here instead of ``manual_range``
            manual_range = self.validate_range(dmm_function, manual_range)
        self.dmm.configure_measurement(
            dmm_function, auto_range=auto_range, manual_range=manual_range)

    def configure_dc_voltage(self, dmm_input_resistance):
        """ Configure DC voltage input resistance

        :param dmm_input_resistance: Input resistance (``'TEN_MEGA_OHM'``
            or ``'TEN_GIGA_OHM'``)
        :type dmm_input_resistance: int or str
        :raises ValueError: if the resistance index/name is unknown
        """
        try:
            pyvb.DmmInputResistance(dmm_input_resistance)
        except Exception:
            try:
                # str.upper(), not .Upper() (same misspelling as above)
                dmm_input_resistance = pyvb.DmmInputResistance[
                    dmm_input_resistance.upper()]
            except Exception:
                raise ValueError(
                    "Input Resistance may be 0, 1," +
                    " 'TEN_MEGA_OHM' or 'TEN_GIGA_OHM'")
        self.dmm.configure_dc_voltage(dmm_input_resistance)

    def configure_dc_current(self, auto_range_terminal):
        """ Configure auto range terminal for DC current measurement

        :param auto_range_terminal: Terminal to perform auto ranging
            (``'LOW'`` or ``'HIGH'``)
        """
        auto_range_terminal = self.validate_auto_range_terminal(
            auto_range_terminal)
        self.dmm.configure_dc_current(auto_range_terminal)

    def configure_ac_current(self, auto_range_terminal):
        """ Configure auto range terminal for AC current measurement

        :param auto_range_terminal: Terminal to perform auto ranging
            (``'LOW'`` or ``'HIGH'``)
        """
        auto_range_terminal = self.validate_auto_range_terminal(
            auto_range_terminal)
        self.dmm.configure_ac_current(auto_range_terminal)

    def query_measurement(self):
        """ Query DMM measurement settings from the instrument

        :return: Auto range, range data
        :rtype: (bool, float)
        """
        # NOTE(review): the 0 argument is forwarded as-is from the
        # original code — confirm against the pyvirtualbench API
        return self.dmm.query_measurement(0)

    def query_dc_voltage(self):
        """ Indicates input resistance setting for DC voltage measurement
        """
        # return the queried value (the original dropped it)
        return self.dmm.query_dc_voltage()

    def query_dc_current(self):
        """ Indicates auto range terminal for DC current measurement
        """
        return self.dmm.query_dc_current()

    def query_ac_current(self):
        """ Indicates auto range terminal for AC current measurement
        """
        return self.dmm.query_ac_current()

    def read(self):
        """ Read measurement value from the instrument

        :return: Measurement value
        :rtype: float
        """
        # the original lacked the return statement promised by the
        # docstring and always yielded None
        return self.dmm.read()

    def reset_instrument(self):
        """ Reset the DMM module to defaults
        """
        self.dmm.reset_instrument()
class FunctionGenerator():
""" Represents Function Generator (FGEN) Module of Virtual
Bench device.
"""
def __init__(self, virtualbench, reset, vb_name=''):
    """ Acquire FGEN module

    :param virtualbench: Instance of the VirtualBench class
    :type virtualbench: VirtualBench
    :param reset: Resets the instrument
    :type reset: bool
    :param str vb_name: Display name prefix for log messages
    """
    # Parameters & Handle of VirtualBench Instance
    self._device_name = virtualbench.device_name
    self._vb_handle = virtualbench
    self.name = vb_name + " FGEN"
    log.info("Initializing %s." % self.name)
    self.fgen = self._vb_handle.acquire_function_generator(
        self._device_name, reset)
    # mapping of supported waveforms to the instrument's function
    # indices, and their maximum output frequency in Hz
    self._waveform_functions = {"SINE": 0, "SQUARE": 1,
                                "TRIANGLE/RAMP": 2, "DC": 3}
    self._max_frequency = {"SINE": 20000000, "SQUARE": 5000000,
                           "TRIANGLE/RAMP": 1000000, "DC": 20000000}
    self.isShutdown = False
def __del__(self):
""" Ensures the connection is closed upon deletion
"""
if self.isShutdown is not True:
self.fgen.release()
def shutdown(self):
''' Stops the session and deallocates any resources acquired during
the session. If output is enabled on any channels, they remain
in their current state and continue to output data.
'''
log.info("Shutting down %s" % self.name)
self.fgen.release()
self.isShutdown = True
def configure_standard_waveform(self, waveform_function, amplitude,
dc_offset, frequency, duty_cycle):
""" Configures the instrument to output a standard waveform.
Check instrument manual for maximum ratings which depend on load.
:param waveform_function: Waveform function (``"SINE", "SQUARE",
"TRIANGLE/RAMP", "DC"``)
:type waveform_function: int or str
:param amplitude: Amplitude in volts
:type amplitude: float
:param dc_offset: DC offset in volts
:type dc_offset: float
:param frequency: Frequency in Hz
:type frequency: float
:param duty_cycle: Duty cycle in %
:type duty_cycle: int
"""
waveform_function = strict_discrete_set(
waveform_function.upper(), self._waveform_functions)
max_frequency = self._max_frequency[waveform_function.upper()]
waveform_function = self._waveform_functions[
waveform_function.upper()]
amplitude = strict_range(
amplitude, [x/100 for x in range(0, 2401)])
dc_offset = strict_range(
dc_offset, [x/100 for x in range(-1201, 1201)])
if (amplitude/2 + abs(dc_offset)) > 12:
raise ValueError(
"Amplitude and DC Offset may not exceed +/-12V")
duty_cycle = strict_range(duty_cycle, range(0, 101))
frequency = strict_range(
frequency, [x/1000 for x in range(0, max_frequency*1000 + 1)])
self.fgen.configure_standard_waveform(
waveform_function, amplitude, dc_offset, frequency, duty_cycle)
def configure_arbitrary_waveform(self, waveform, sample_period):
""" Configures the instrument to output a waveform. The waveform is
output either after the end of the current waveform if output
is enabled, or immediately after output is enabled.
:param waveform: Waveform as list of values
:type waveform: list
:param sample_period: Time between two waveform points
(maximum of 125MS/s, which equals 80ns)
:type sample_period: float
"""
strict_range(len(waveform), range(1, 1000001)) # 1MS
if not ((sample_period >= 8e-8) and (sample_period <= 1)):
raise ValueError(
"Sample Period allows a maximum of 125MS/s (80ns)")
self.fgen.configure_arbitrary_waveform(waveform, sample_period)
def configure_arbitrary_waveform_gain_and_offset(self, gain,
dc_offset):
""" Configures the instrument to output an arbitrary waveform with
a specified gain and offset value. The waveform is output either
after the end of the current waveform if output is enabled, or
immediately after output is enabled.
:param gain: Gain, multiplier of waveform values
:type gain: float
:param dc_offset: DC offset in volts
:type dc_offset: float
"""
dc_offset = strict_range(
dc_offset, [x/100 for x in range(-1201, 1201)])
self.fgen.configure_arbitrary_waveform_gain_and_offset(
gain, dc_offset)
@property
def filter(self):
''' Enables or disables the filter on the instrument.
:param bool enable_filter: Enable/Disable filter
'''
return self.fgen.query_filter
@filter.setter
def filter(self, enable_filter):
enable_filter = strict_discrete_set(enable_filter, [True, False])
self.fgen.enable_filter(enable_filter)
# def enable_filter(self, enable_filter):
# ''' Enables or disables the filter on the instrument.
# '''
# enable_filter = strict_discrete_set(enable_filter,[True,False])
# self.fgen.enable_filter(enable_filter)
# def query_filter(self):
# ''' Indicates whether the filter is enabled on the instrument.
# '''
# self.fgen.query_filter()
def query_waveform_mode(self):
""" Indicates whether the waveform output by the instrument is a
standard or arbitrary waveform.
:return: Waveform mode
:rtype: enum
"""
return self.fgen.query_waveform_mode()
def query_standard_waveform(self):
""" Returns the settings for a standard waveform generation.
:return: Waveform function, amplitude, dc_offset, frequency,
duty_cycle
:rtype: (enum, float, float, float, int)
"""
return self.fgen.query_standard_waveform()
def query_arbitrary_waveform(self):
""" Returns the samples per second for arbitrary waveform
generation.
:return: Samples per second
:rtype: int
"""
return self.fgen.query_arbitrary_waveform()
def query_arbitrary_waveform_gain_and_offset(self):
""" Returns the settings for arbitrary waveform generation that
includes gain and offset settings.
:return: Gain, DC offset
:rtype: (float, float)
"""
return self.fgen.query_arbitrary_waveform_gain_and_offset()
def query_generation_status(self):
""" Returns the status of waveform generation on the instrument.
:return: Status
:rtype: enum
"""
return self.fgen.query_generation_status()
def run(self):
''' Transitions the session from the Stopped state to the Running
state.
'''
log.info("%s START" % self.name)
self.fgen.run()
def self_calibrate(self):
'''Performs offset nulling calibration on the device. You must run
FGEN Initialize prior to running this method.
'''
self.fgen.self_calibrate()
def stop(self):
''' Transitions the acquisition from either the Triggered or
Running state to the Stopped state.
'''
log.info("%s STOP" % self.name)
self.fgen.stop()
def reset_instrument(self):
''' Resets the session configuration to default values, and resets
the device and driver software to a known state.
'''
self.fgen.reset_instrument()
class MixedSignalOscilloscope():
""" Represents Mixed Signal Oscilloscope (MSO) Module of Virtual Bench
device. Allows to measure oscilloscope data from analog and digital
channels.
"""
def __init__(self, virtualbench, reset, vb_name=''):
""" Acquire FGEN module
:param virtualbench: Instance of the VirtualBench class
:type virtualbench: VirtualBench
:param reset: Resets the instrument
:type reset: bool
"""
# Parameters & Handle of VirtualBench Instance
self._device_name = virtualbench.device_name
self._vb_handle = virtualbench
self.name = vb_name + " MSO"
log.info("Initializing %s." % self.name)
self.mso = self._vb_handle.acquire_mixed_signal_oscilloscope(
self._device_name, reset)
self.isShutdown = False
def __del__(self):
""" Ensures the connection is closed upon deletion
"""
if self.isShutdown is not True:
self.mso.release()
def shutdown(self):
''' Removes the session and deallocates any resources acquired
during the session. If output is enabled on any channels, they
remain in their current state.
'''
log.info("Shutting down %s" % self.name)
self.mso.release()
self.isShutdown = True
@staticmethod
def validate_trigger_instance(trigger_instance):
""" Check if ``trigger_instance`` is a valid choice
:param trigger_instance: Trigger instance (``'A'`` or ``'B'``)
:type trigger_instance: int or str
:return: Trigger instance
:rtype: int
"""
try:
pyvb.MsoTriggerInstance(trigger_instance)
except Exception:
try:
trigger_instance = pyvb.MsoTriggerInstance[
trigger_instance.Upper()]
except Exception:
raise ValueError(
"Trigger Instance may be 0, 1, 'A' or 'B'")
return trigger_instance
def auto_setup(self):
""" Automatically configure the instrument
"""
self.mso.auto_setup()
def validate_channel(self, channel):
""" Check if ``channel`` is a correct specification
:param str channel: Channel string
:return: Channel string
:rtype: str
"""
def error(channel=channel):
raise ValueError(
"Channel specification {0} is not valid!".format(channel))
channels = self._vb_handle.expand_channel_string(channel)[0]
channels = channels.split(', ')
return_value = []
for channel in channels:
# split off lines by last '/'
try:
(device, channel) = re.match(
r'(.*)(?:/)(.+)', channel).groups()
except Exception:
error()
# validate numbers in range 1-2
if not int(channel) in range(1, 3):
error()
# validate device name: either 'mso' or 'device_name/mso'
if device == 'mso':
pass
else:
try:
device = re.match(
r'(VB[0-9]{4}-[0-9a-zA-Z]{7})(?:/)(.+)',
device).groups()[0]
except Exception:
error()
# device_name has to match
if not device == self._device_name:
error()
# constructing line references for output
return_value.append('mso/' + channel)
return_value = ', '.join(return_value)
return_value = self._vb_handle.collapse_channel_string(
return_value)
return return_value
def configure_analog_channel(self, channel, enable_channel,
vertical_range, vertical_offset,
probe_attenuation, vertical_coupling):
""" Configure analog measurement channel
:param str channel: Channel string
:param bool enable_channel: Enable/Disable channel
:param float vertical_range: Vertical measurement range (0V - 20V)
:param float vertical_offset: Vertical offset to correct for
(inverted compared to VB native UI,
-20V - +20V)
:param probe_attenuation: Probe attenuation (``'ATTENUATION_10X'``
or ``'ATTENUATION_1X'``)
:type probe_attenuation: int or str
:param vertical_coupling: Vertical coupling (``'AC'`` or ``'DC'``)
:type vertical_coupling: int or str
"""
channel = self.validate_channel(channel)
enable_channel = strict_discrete_set(
enable_channel, [True, False])
if (vertical_range < 0) or (vertical_range > 20):
raise ValueError("Vertical Range takes value 0 to 20V")
if (vertical_offset < -20) or (vertical_offset > 20):
raise ValueError("Vertical Offset takes value -20 to +20V")
try:
pyvb.MsoProbeAttenuation(probe_attenuation)
except Exception:
try:
probe_attenuation = pyvb.MsoProbeAttenuation[
probe_attenuation.Upper()]
except Exception:
raise ValueError(
"Probe Attenuation may be 1, 10," +
" 'ATTENUATION_10X' or 'ATTENUATION_1X'")
try:
pyvb.MsoCoupling(vertical_coupling)
except Exception:
try:
vertical_coupling = pyvb.MsoCoupling[
vertical_coupling.Upper()]
except Exception:
raise ValueError(
"Probe Attenuation may be 0, 1, 'AC' or 'DC'")
self.mso.configure_analog_channel(
channel, enable_channel, vertical_range, vertical_offset,
probe_attenuation, vertical_coupling)
def configure_analog_channel_characteristics(self, channel,
input_impedance,
bandwidth_limit):
""" Configure electrical characteristics of the specified channel
:param str channel: Channel string
:param input_impedance: Input Impedance (``'ONE_MEGA_OHM'`` or
``'FIFTY_OHMS'``)
:type input_impedance: int or str
:param int bandwidth_limit: Bandwidth limit (100MHz or 20MHz)
"""
channel = self.validate_channel(channel)
try:
pyvb.MsoInputImpedance(input_impedance)
except Exception:
try:
input_impedance = pyvb.MsoInputImpedance[
input_impedance.Upper()]
except Exception:
raise ValueError(
"Probe Attenuation may be 0, 1," +
" 'ONE_MEGA_OHM' or 'FIFTY_OHMS'")
bandwidth_limit = strict_discrete_set(
bandwidth_limit, [100000000, 20000000]) # 100 Mhz or 20Mhz
self.mso.configure_analog_channel_characteristics(
channel, input_impedance, bandwidth_limit)
# def enable_digital_channels(self, channel, enable_channel):
# ''' Enables or disables the specified digital channels.
# '''
# def configure_digital_threshold(self, threshold):
# ''' Configures the threshold level for logic analyzer lines.
# '''
def configure_timing(self, sample_rate, acquisition_time,
pretrigger_time, sampling_mode):
""" Configure timing settings of the MSO
:param int sample_rate: Sample rate (15.26kS - 1MS)
:param float acquisition_time: Acquisition time (1ns - 68.711s)
:param float pretrigger_time: Pretrigger time (0s - 10s)
:param sampling_mode: Sampling mode (``'SAMPLE'`` or
``'PEAK_DETECT'``)
"""
sample_rate = strict_range(sample_rate, range(15260, 1000000001))
if not ((acquisition_time >= 1e-09) and
(acquisition_time <= 68.711)):
raise ValueError(
"Acquisition Time must be between 1ns and 68.7s")
# acquisition is also limited by buffer size,
# which depends on sample rate as well as acquisition time
if not ((pretrigger_time >= 0) and (pretrigger_time <= 10)):
raise ValueError("Pretrigger Time must be between 1ns and 10s")
try:
pyvb.MsoSamplingMode(sampling_mode)
except Exception:
try:
sampling_mode = pyvb.MsoSamplingMode[sampling_mode.Upper()]
except Exception:
raise ValueError(
"Sampling Mode may be 0, 1, 'SAMPLE' or 'PEAK_DETECT'")
self.mso.configure_timing(
sample_rate, acquisition_time, pretrigger_time, sampling_mode)
# def configure_advanced_digital_timing(self,
# digital_sample_rate_control,
# digital_sample_rate, buffer_control, buffer_pretrigger_percent):
# ''' Configures the rate and buffer settings of the logic
# analyzer.
# This method allows for more advanced configuration options
# than MSO Configure Timing.
# '''
# def configure_state_mode(self, enable, clock_channel, clock_edge):
# ''' Configures how to clock data on the logic analyzer channels
# that are enabled.
# '''
def configure_immediate_trigger(self):
""" Configures a trigger to immediately activate on the specified
channels after the pretrigger time has expired.
"""
self.mso.configure_immediate_trigger()
def configure_analog_edge_trigger(self, trigger_source, trigger_slope,
trigger_level, trigger_hysteresis,
trigger_instance):
""" Configures a trigger to activate on the specified source when
the analog edge reaches the specified levels.
:param str trigger_source: Channel string
:param trigger_slope: Trigger slope (``'RISING'``, ``'FALLING'``
or ``'EITHER'``)
:type trigger_slope: int or str
:param float trigger_level: Trigger level
:param float trigger_hysteresis: Trigger hysteresis
:param trigger_instance: Trigger instance
:type trigger_instance: int or str
"""
trigger_source = self.validate_channel(trigger_source)
try:
pyvb.EdgeWithEither(trigger_slope)
except Exception:
try:
trigger_slope = pyvb.EdgeWithEither[trigger_slope.Upper()]
except Exception:
raise ValueError(
"Trigger Slope may be 0, 1, 2, 'RISING'," +
" 'FALLING' or 'EITHER'")
trigger_instance = self.validate_trigger_instance(trigger_instance)
self.mso.configure_analog_edge_trigger(
trigger_source, trigger_slope, trigger_level,
trigger_hysteresis, trigger_instance)
# def configure_digital_edge_trigger(self, trigger_source,
# trigger_slope, trigger_instance):
# ''' Configures a trigger to activate on the specified source when
# the digital edge reaches the specified levels.
# '''
# def configure_digital_pattern_trigger(self, trigger_source,
# trigger_pattern, trigger_instance):
# ''' Configures a trigger to activate on the specified channels
# when
# a digital pattern is matched. A trigger is produced when
# every
# level (high/low) requirement specified in Trigger Pattern is
# met, and when at least one toggling (toggle/fall/rise)
# requirement is met. If no toggling requirements are set, then
# only the level requirements must be met to produce a trigger.
# '''
# def configure_digital_glitch_trigger(self, trigger_source,
# trigger_instance):
# ''' Configures a trigger to activate on the specified channels
# when
# a digital glitch occurs. A glitch occurs when a channel in
# Trigger Source toggles between two edges of the sample clock,
# but has the same state for both samples. This may happen when
# the sampling rate is less than 1 GHz.
# '''
def configure_analog_pulse_width_trigger(self, trigger_source,
trigger_polarity,
trigger_level,
comparison_mode, lower_limit,
upper_limit,
trigger_instance):
""" Configures a trigger to activate on the specified source when
the analog edge reaches the specified levels within a specified
window of time.
:param str trigger_source: Channel string
:param trigger_polarity: Trigger slope (``'POSITIVE'`` or
``'NEGATIVE'``)
:type trigger_polarity: int or str
:param float trigger_level: Trigger level
:param comparison_mode: Mode of compariosn (
``'GREATER_THAN_UPPER_LIMIT'``,
``'LESS_THAN_LOWER_LIMIT'``,
``'INSIDE_LIMITS'`` or
``'OUTSIDE_LIMITS'``)
:type comparison_mode: int or str
:param float lower_limit: Lower limit
:param float upper_limit: Upper limit
:param trigger_instance: Trigger instance
:type trigger_instance: int or str
"""
trigger_source = self.validate_channel(trigger_source)
try:
pyvb.MsoTriggerPolarity(trigger_polarity)
except Exception:
try:
trigger_polarity = pyvb.MsoTriggerPolarity[
trigger_polarity.Upper()]
except Exception:
raise ValueError(
"Comparison Mode may be 0, 1, 2, 3," +
" 'GREATER_THAN_UPPER_LIMIT'," +
" 'LESS_THAN_LOWER_LIMIT'," +
" 'INSIDE_LIMITS' or 'OUTSIDE_LIMITS'")
try:
pyvb.MsoComparisonMode(comparison_mode)
except Exception:
try:
comparison_mode = pyvb.MsoComparisonMode[
comparison_mode.Upper()]
except Exception:
raise ValueError(
"Trigger Polarity may be 0, 1," +
" 'POSITIVE' or 'NEGATIVE'")
trigger_instance = self.validate_trigger_instance(trigger_instance)
self.mso.configure_analog_pulse_width_trigger(
trigger_source, trigger_polarity, trigger_level,
comparison_mode, lower_limit, upper_limit, trigger_instance)
def configure_digital_pulse_width_trigger(self, trigger_source,
trigger_polarity,
comparison_mode,
lower_limit, upper_limit,
trigger_instance):
''' Configures a trigger to activate on the specified source when
the digital edge reaches the specified levels within a specified
window of time.
'''
def configure_trigger_delay(self, trigger_delay):
""" Configures the amount of time to wait after a trigger condition
is met before triggering.
:param float trigger_delay: Trigger delay (0s - 17.1799s)
"""
self.mso.configure_trigger_delay(trigger_delay)
def query_analog_channel(self, channel):
""" Indicates the vertical configuration of the specified channel.
:return: Channel enabled, vertical range, vertical offset,
probe attenuation, vertical coupling
:rtype: (bool, float, float, enum, enum)
"""
channel = self.validate_channel(channel)
return self.mso.query_analog_channel(channel)
def query_enabled_analog_channels(self):
""" Returns String of enabled analog channels.
:return: Enabled analog channels
:rtype: str
"""
return self.mso.query_enabled_analog_channels()
def query_analog_channel_characteristics(self, channel):
""" Indicates the properties that control the electrical
characteristics of the specified channel.
This method returns an error if too much power is
applied to the channel.
:return: Input impedance, bandwidth limit
:rtype: (enum, float)
"""
return self.mso.query_analog_channel_characteristics(channel)
# def query_digital_channel(self):
# ''' Indicates whether the specified digital channel is enabled.
# '''
# def query_enabled_digital_channels(self):
# ''' No documentation
# '''
# def query_digital_threshold(self):
# ''' Indicates the threshold configuration of the logic analyzer
# channels.
# '''
def query_timing(self):
""" Indicates the timing configuration of the MSO.
Call directly before measurement to read the actual timing
configuration and write it to the corresponding class variables.
Necessary to interpret the measurement data, since it contains no
time information.
:return: Sample rate, acquisition time, pretrigger time,
sampling mode
:rtype: (float, float, float, enum)
"""
(self.sample_rate, self.acquisition_time,
self.pretrigger_time,
self.sampling_mode) = self.mso.query_timing()
return (self.sample_rate, self.acquisition_time,
self.pretrigger_time, self.sampling_mode)
# def query_advanced_digital_timing(self):
# ''' Indicates the buffer configuration of the logic analyzer.
# '''
# def query_state_mode(self, clockChannelSize):
# ''' Indicates the clock configuration of the logic analyzer.
# '''
def query_trigger_type(self, trigger_instance):
""" Indicates the trigger type of the specified instance.
:param trigger_instance: Trigger instance (``'A'`` or ``'B'``)
:return: Trigger type
:rtype: str
"""
return self.mso.query_trigger_type()
def query_analog_edge_trigger(self, trigger_instance):
""" Indicates the analog edge trigger configuration of the
specified instance.
:return: Trigger source, trigger slope, trigger level, trigger
hysteresis
:rtype: (str, enum, float, float)
"""
trigger_instance = self.validate_trigger_instance(trigger_instance)
return self.mso.query_analog_edge_trigger(trigger_instance)
# def query_digital_edge_trigger(self, trigger_instance):
# ''' Indicates the digital trigger configuration of the specified
# instance.
# '''
# def query_digital_pattern_trigger(self, trigger_instance):
# ''' Indicates the digital pattern trigger configuration of the
# specified instance. A trigger is produced when every level
# (high/low) requirement specified in Trigger Pattern is met,
# and
# when at least one toggling (toggle/fall/rise) requirement is
# met. If no toggling requirements are set, then only the level
# requirements must be met to produce a trigger.
# '''
# def query_digital_glitch_trigger(self, trigger_instance):
# ''' Indicates the digital glitch trigger configuration of the
# specified instance. A glitch occurs when a channel in Trigger
# Source toggles between two edges of the sample clock. This
# may
# happen when the sampling rate is less than 1 GHz.
# '''
def query_trigger_delay(self):
""" Indicates the trigger delay setting of the MSO.
:return: Trigger delay
:rtype: float
"""
return self.mso.query_trigger_delay()
def query_analog_pulse_width_trigger(self, trigger_instance):
""" Indicates the analog pulse width trigger configuration of the
specified instance.
:return: Trigger source, trigger polarity, trigger level,
comparison mode, lower limit, upper limit
:rtype: (str, enum, float, enum, float, float)
"""
trigger_instance = self.validate_trigger_instance(trigger_instance)
return self.mso.query_analog_pulse_width_trigger(trigger_instance)
# def query_digital_pulse_width_trigger(self, trigger_instance):
# ''' Indicates the digital pulse width trigger configuration of
# the specified instance.
# '''
def query_acquisition_status(self):
""" Returns the status of a completed or ongoing acquisition.
"""
return self.mso.query_acquisition_status()
def run(self, autoTrigger=True):
""" Transitions the acquisition from the Stopped state to the
Running state. If the current state is Triggered, the
acquisition is first transitioned to the Stopped state before
transitioning to the Running state. This method returns an
error if too much power is applied to any enabled channel.
:param bool autoTrigger: Enable/Disable auto triggering
"""
self.mso.run(autoTrigger)
def force_trigger(self):
""" Causes a software-timed trigger to occur after the pretrigger
time has expired.
"""
self.mso.force_trigger()
def stop(self):
""" Transitions the acquisition from either the Triggered or
Running state to the Stopped state.
"""
self.mso.stop()
# def read_analog(self, data_size):
# ''' Transfers data from the instrument as long as the acquisition
# state is Acquisition Complete. If the state is either Running or
# Triggered, this method will wait until the state transitions to
# Acquisition Complete. If the state is Stopped, this method
# returns an error.
# '''
# #return (data.value, data_stride.value, initial_timestamp,
# trigger_timestamp,
# MsoTriggerReason(trigger_reason.value))
# def read_digital_u64(self, data_size, sample_timestamps_size):
# ''' Transfers data from the instrument as long as the acquisition
# state is Acquisition Complete. If the state is either
# Running or
# Triggered, this method will wait until the state transitions
# to
# Acquisition Complete. If the state is Stopped, this method
# returns an error.
# '''
def read_analog_digital_u64(self):
""" Transfers data from the instrument as long as the acquisition
state is Acquisition Complete. If the state is either Running or
Triggered, this method will wait until the state transitions to
Acquisition Complete. If the state is Stopped, this method
returns an error.
:return: Analog data out, analog data stride, analog t0,
digital data out, digital timestamps out, digital t0,
trigger timestamp, trigger reason
:rtype: (list, int, pyvb.Timestamp, list, list, pyvb.Timestamp,
pyvb.Timestamp, enum)
"""
return self.mso.read_analog_digital_u64()
def read_analog_digital_dataframe(self):
""" Transfers data from the instrument and returns a pandas
dataframe of the analog measurement data, including time
coordinates
:return: Dataframe with time and measurement data
:rtype: pd.DataFrame
"""
(analog_data_out, analog_data_stride
# , analog_t0, digital_data_out, digital_timestamps_out,
# digital_t0, trigger_timestamp, trigger_reason
) = self.read_analog_digital_u64()[0:1]
number_of_samples = int(self.sample_rate *
self.acquisition_time) + 1
if not number_of_samples == (len(analog_data_out) /
analog_data_stride):
# try updating timing parameters
self.query_timing()
number_of_samples = int(self.sample_rate *
self.acquisition_time) + 1
if not number_of_samples == (len(analog_data_out) /
analog_data_stride):
raise ValueError(
"Length of Analog Data does not match" +
" Timing Parameters")
pretrigger_samples = int(self.sample_rate * self.pretrigger_time)
times = (
list(range(-pretrigger_samples, 0))
+ list(range(0, number_of_samples - pretrigger_samples + 1)))
times = [list(map(lambda x: x*1/self.sample_rate, times))]
np_array = np.array(analog_data_out)
np_array = np.split(np_array, analog_data_stride)
np_array = np.append(np.array(times), np_array, axis=0)
np_array = np.transpose(np_array)
return pd.DataFrame(data=np_array)
def reset_instrument(self):
""" Resets the session configuration to default values, and resets
the device and driver software to a known state.
"""
self.mso.reset()
# def export_configuration(self, configuration_filename):
# ''' Exports a configuration file for use with the MSO.
# '''
# def import_configuration(self, configuration_filename):
# ''' Imports a configuration file for use with the MSO. You can
# import PNG files exported from the VirtualBench Application
# or
# files created from MSO Export Configuration.
# '''
class PowerSupply():
""" Represents Power Supply (PS) Module of Virtual Bench device
"""
def __init__(self, virtualbench, reset, vb_name=''):
""" Acquire PS module
:param virtualbench: Instance of the VirtualBench class
:type virtualbench: VirtualBench
:param reset: Resets the instrument
:type reset: bool
"""
# Parameters & Handle of VirtualBench Instance
self._device_name = virtualbench.device_name
self._vb_handle = virtualbench
self.name = vb_name + " PS"
# Create DIO Instance
reset = strict_discrete_set(reset, [True, False])
log.info("Initializing %s." % self.name)
self.ps = self._vb_handle.acquire_power_supply(
self._device_name, reset)
self.isShutdown = False
def __del__(self):
""" Ensures the connection is closed upon deletion
"""
if self.isShutdown is not True:
self.ps.release()
def shutdown(self):
''' Stops the session and deallocates any resources acquired during
the session. If output is enabled on any channels, they remain
in their current state and continue to output data.
'''
log.info("Releasing %s" % self.name)
self.ps.release()
self.isShutdown = True
def validate_channel(self, channel, current=False, voltage=False):
""" Check if channel string is valid and if output current/voltage
are within the output ranges of the channel
:param channel: Channel string (``"ps/+6V","ps/+25V","ps/-25V"``)
:type channel: str
:param current: Current output, defaults to False
:type current: bool, optional
:param voltage: Voltage output, defaults to False
:type voltage: bool, optional
:return: channel or channel, current & voltage
:rtype: str or (str, float, float)
"""
if current is False and voltage is False:
return strict_discrete_set(
channel, ["ps/+6V", "ps/+25V", "ps/-25V"])
else:
channel = strict_discrete_set(
channel, ["ps/+6V", "ps/+25V", "ps/-25V"])
if channel == "ps/+6V":
current_range = range(0, 1001)
voltage_range = range(0, 6001)
else:
current_range = range(0, 501)
voltage_range = range(0, 25001)
if channel == "ps/-25V":
voltage_range = map(lambda x: -x, voltage_range)
current_range = map(lambda x: x/1000, current_range)
voltage_range = map(lambda x: x/1000, voltage_range)
current = strict_range(current, current_range)
voltage = strict_range(voltage, voltage_range)
return (channel, current, voltage)
def configure_voltage_output(self, channel, voltage_level,
current_limit):
''' Configures a voltage output on the specified channel. This
method should be called once for every channel you want to
configure to output voltage.
'''
(channel, current_limit, voltage_level) = self.validate_channel(
channel, current_limit, voltage_level)
self.ps.configure_voltage_output(
channel, voltage_level, current_limit)
def configure_current_output(self, channel, current_level,
voltage_limit):
''' Configures a current output on the specified channel. This
method should be called once for every channel you want to
configure to output current.
'''
(channel, current_level, voltage_limit) = self.validate_channel(
channel, current_level, voltage_limit)
self.ps.configure_current_output(
channel, current_level, voltage_limit)
def query_voltage_output(self, channel):
''' Indicates the voltage output settings on the specified channel.
'''
channel = self.validate_channel(channel)
return self.ps.query_voltage_output(channel)
def query_current_output(self, channel):
''' Indicates the current output settings on the specified channel.
'''
channel = self.validate_channel(channel)
return self.ps.query_current_output(channel)
@property
def outputs_enabled(self):
''' Enables or disables all outputs on all channels of the
instrument.
:param bool enable_outputs: Enable/Disable outputs
'''
return self.ps.query_outputs_enabled()
@outputs_enabled.setter
def outputs_enabled(self, enable_outputs):
enable_outputs = strict_discrete_set(
enable_outputs, [True, False])
log.info("%s Output %s." % (self.name, enable_outputs))
self.ps.enable_all_outputs(enable_outputs)
# def enable_all_outputs(self, enable_outputs):
# ''' Enables or disables all outputs on all channels of the
# instrument.
# '''
# enable_outputs = strict_discrete_set(
# enable_outputs, [True,False])
# self.ps.enable_all_outputs(enable_outputs)
# def query_outputs_enabled(self):
# ''' Indicates whether the outputs are enabled for the instrument.
# '''
# self.ps.query_outputs_enabled()
@property
def tracking(self):
''' Enables or disables tracking between the positive and negative
25V channels. If enabled, any configuration change on the
positive 25V channel is mirrored to the negative 25V channel,
and any writes to the negative 25V channel are ignored.
:param bool enable_tracking: Enable/Disable tracking
'''
return self.ps.query_tracking()
@tracking.setter
def tracking(self, enable_tracking):
enable_tracking = strict_discrete_set(
enable_tracking, [True, False])
self.ps.enable_tracking(enable_tracking)
# def query_tracking(self):
# ''' Indicates whether voltage tracking is enabled on
# the instrument.
# '''
# self.ps.query_tracking()
# def enable_tracking(self, enable_tracking):
# ''' Enables or disables tracking between the positive and
# negative
# 25V channels. If enabled, any configuration change on the
# positive 25V channel is mirrored to the negative 25V channel,
# and any writes to the negative 25V channel are ignored.
# '''
# enable_tracking = strict_discrete_set(
# enable_tracking,[True,False])
# self.ps.enable_tracking(enable_tracking)
def read_output(self, channel):
''' Reads the voltage and current levels and outout mode of the
specified channel.
'''
channel = self.validate_channel(channel)
return self.ps.read_output()
def reset_instrument(self):
''' Resets the session configuration to default values, and resets
the device and driver software to a known state.
'''
self.ps.reset_instrument()
| 42.906775 | 84 | 0.568988 |
b82a9c9533bd1da0e1118016917ab95fe392feaf | 14,467 | py | Python | spax/ops/ops_test.py | jackd/spax | bc55a2660d468838aa1c080d6152d0be73eab118 | [
"Apache-2.0"
] | 1 | 2021-08-11T23:25:15.000Z | 2021-08-11T23:25:15.000Z | spax/ops/ops_test.py | jackd/spax | bc55a2660d468838aa1c080d6152d0be73eab118 | [
"Apache-2.0"
] | null | null | null | spax/ops/ops_test.py | jackd/spax | bc55a2660d468838aa1c080d6152d0be73eab118 | [
"Apache-2.0"
] | null | null | null | import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import absltest, parameterized # pylint: disable=no-name-in-module
from jax import test_util as jtu
from jax.config import config
from jax.experimental.sparse.ops import COO, CSR
from spax import ops
from spax.test_utils import random_uniform
# pylint: disable=undefined-variable
config.parse_flags_with_absl()
ALL_TYPES = (CSR, COO, jnp.ndarray)
class SparseOpsTest(jtu.JaxTestCase):
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
)
)
def test_to_dense(self, sparse_type, shape, dtype):
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=jnp.ndarray)
sp = sparse_type.fromdense(mat)
redense = ops.to_dense(sp)
self.assertAllClose(redense, mat)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
)
)
def test_add_sparse(self, sparse_type, shape, dtype):
k0, k1 = jax.random.split(jax.random.PRNGKey(0))
mat0 = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
mat1 = random_uniform(k1, shape, dtype=dtype, fmt=sparse_type)
actual = ops.to_dense(ops.add(mat0, mat1))
expected = ops.to_dense(mat0) + ops.to_dense(mat1)
self.assertAllClose(actual, expected)
    @parameterized.named_parameters(
        jtu.cases_from_list(
            {
                "testcase_name": "_{}_{}_r{}".format(
                    jtu.format_shape_dtype_string(shape, dtype),
                    sparse_type.__name__,
                    other_rank,
                ),
                "shape": shape,
                "dtype": dtype,
                "sparse_type": sparse_type,
                "other_rank": other_rank,
            }
            for sparse_type in (COO, CSR, jnp.ndarray)
            for shape in ((7, 11), (1, 13), (13, 1))
            for dtype in (np.float32, np.float64)
            for other_rank in (0, 1, 2, 3)
        )
    )
    def test_add_array(self, sparse_type, shape, other_rank, dtype):
        """`ops.add` of a sparse matrix and a dense array matches dense addition."""
        k0, k1 = jax.random.split(jax.random.PRNGKey(0))
        mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
        # Derive the dense operand's shape from the matrix shape.
        # NOTE(review): for other_rank > len(shape) the intent looks like
        # prepending extra dims, but `len(shape) - other_rank` is negative,
        # the range is empty and the shape is unchanged; likewise
        # `shape[-0:]` for other_rank == 0 keeps the full shape instead of
        # producing a scalar. Confirm which broadcast cases this actually
        # exercises.
        if other_rank > len(shape):
            shape = tuple(range(2, 2 + len(shape) - other_rank)) + shape
        else:
            shape = shape[-other_rank:]
        v = jax.random.uniform(k1, shape=shape, dtype=dtype)
        actual = ops.to_dense(ops.add(mat, v))
        expected = ops.to_dense(mat) + v
        self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}_r{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
other_rank,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
"other_rank": other_rank,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
for other_rank in (0, 1, 2, 3)
)
)
def test_mul_array(self, sparse_type, shape, other_rank, dtype):
k0, k1 = jax.random.split(jax.random.PRNGKey(0))
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
if other_rank > len(shape):
shape = tuple(range(2, 2 + len(shape) - other_rank)) + shape
else:
shape = shape[-other_rank:]
v = jax.random.uniform(k1, shape=shape, dtype=dtype)
actual = ops.to_dense(ops.add(mat, v))
expected = ops.to_dense(mat) + v
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny, nh), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"nh": nh,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
for nh in (11,)
)
)
def test_masked_matmul(self, nx, ny, nh, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nx, nh), dtype=dtype)
y = jax.random.uniform(keys[2], (nh, ny), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_matmul(mat, x, y)))
xt = x @ y
expected = jnp.where(ops.to_dense(mat) != 0.0, xt, jnp.zeros_like(xt))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny, nh), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"nh": nh,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
for nh in (11,)
)
)
def test_masked_inner(self, nx, ny, nh, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nh, nx), dtype=dtype)
y = jax.random.uniform(keys[2], (nh, ny), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_inner(mat, x, y)))
xt = x.T @ y
expected = jnp.where(ops.to_dense(mat) != 0.0, xt, jnp.zeros_like(xt))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
)
)
def test_masked_outer(self, nx, ny, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nx,), dtype=dtype)
y = jax.random.uniform(keys[2], (ny,), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_outer(mat, x, y)))
xt = jnp.outer(x, y)
expected = jnp.where(ops.to_dense(mat) != 0.0, xt, jnp.zeros_like(xt))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((size, size), dtype),
sparse_type.__name__,
),
"size": size,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for size in (5, 7)
)
)
def test_symmetrize(self, size, dtype, sparse_type):
mat = random_uniform(
jax.random.PRNGKey(0), (size, size), dtype=dtype, fmt=sparse_type
)
actual = ops.symmetrize(mat)
expected = ops.to_dense(mat)
expected = (expected + expected.T) / 2
self.assertAllClose(ops.to_dense(actual), expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_boolean_mask(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
k0, k1 = jax.random.split(jax.random.PRNGKey(0), 2)
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
mask = jax.random.uniform(k1, (shape[axis],)) > 0.5
expected = ops.to_dense(mat)
if axis == 0:
expected = expected[mask]
else:
expected = expected[:, mask]
actual = ops.to_dense(ops.boolean_mask(mat, mask, axis=axis))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_gather(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
k0, k1 = jax.random.split(jax.random.PRNGKey(0), 2)
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
mask = jax.random.uniform(k1, (shape[axis],)) > 0.5
(indices,) = jnp.where(mask)
del mask
expected = ops.to_dense(mat)
if axis == 0:
expected = expected[indices]
else:
expected = expected[:, indices]
actual = ops.to_dense(ops.gather(mat, indices, axis=axis))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_sum(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=sparse_type)
expected = ops.sum(mat, axis=axis)
actual = ops.to_dense(mat).sum(axis=axis)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_max(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=sparse_type)
expected = ops.max(mat, axis=axis)
actual = ops.to_dense(mat).max(axis=axis)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}_{ord}",
"sparse_type": sparse_type,
"axis": axis,
"ord": ord,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
for ord in (1, 2, jnp.inf)
)
)
def test_norm(
self,
sparse_type,
ord,
axis, # pylint: disable=redefined-builtin
):
shape = (7, 11)
dtype = jnp.float32
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=sparse_type)
mat = ops.map_data(mat, lambda d: d - 0.5) # make sure we have some negatives
expected = ops.norm(mat, ord=ord, axis=axis)
actual = jnp.linalg.norm(ops.to_dense(mat), ord=ord, axis=axis)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny, nh), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"nh": nh,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
for nh in (11,)
)
)
def test_masked_outer_rank2(self, nh, nx, ny, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nx, nh), dtype=dtype)
y = jax.random.uniform(keys[2], (ny, nh), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_outer(mat, x, y)))
xt = x @ y.T
expected = jnp.where(ops.to_dense(mat) == 0, jnp.zeros_like(xt), xt)
self.assertAllClose(actual, expected)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| 36.625316 | 88 | 0.532177 |
05d43af5057b468b56f5f6b0cc3a796249cad0d7 | 30,073 | py | Python | aesara/tensor/shape.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | aesara/tensor/shape.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | aesara/tensor/shape.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | import warnings
from numbers import Number
from typing import Dict, List, Tuple, Union
import numpy as np
import aesara
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply, Constant, Variable
from aesara.link.c.op import COp
from aesara.link.c.params_type import ParamsType
from aesara.misc.safe_asarray import _asarray
from aesara.scalar import int32
from aesara.tensor import _get_vector_length
from aesara.tensor import basic as at
from aesara.tensor import get_vector_length
from aesara.tensor.exceptions import NotScalarConstantError
from aesara.tensor.type import TensorType, int_dtypes, tensor
from aesara.tensor.var import TensorConstant, TensorVariable
def register_shape_c_code(type, code, version=()):
    """
    Tell Shape Op how to generate C code for an Aesara Type.

    Parameters
    ----------
    type : Aesara type
        It must be the Aesara class itself and not an instance of the class.
        NOTE(review): this parameter shadows the `type` builtin; the sibling
        helpers (`register_shape_i_c_code`, `register_specify_shape_c_code`)
        use `typ` instead.
    code : C code
        Returns a vector representing the shape for the Aesara type 'type'.
        Use %(iname)s and %(oname)s for the input and output C variable names
        respectively.
    version
        A number indicating the version of the code, for cache.
    """
    Shape.c_code_and_version[type] = (code, version)
class Shape(COp):
"""
L{Op} to return the shape of a matrix.
Notes
-----
Non-differentiable.
"""
_f16_ok = True
# Mapping from Type to C code (and version) to use.
# In the C code, the name of the input variable is %(iname)s,
# the output variable is %(oname)s.
c_code_and_version: Dict = {}
check_input = False
__props__ = ()
def make_node(self, x):
if not isinstance(x, Variable):
x = at.as_tensor_variable(x)
if isinstance(x.type, TensorType):
out_var = TensorType("int64", (x.type.ndim,))()
else:
out_var = aesara.tensor.type.lvector()
return Apply(self, [x], [out_var])
def perform(self, node, inp, out_):
(x,) = inp
(out,) = out_
out[0] = _asarray(np.shape(x), dtype="int64")
def infer_shape(self, fgraph, node, in_shapes):
return [[len(in_shapes[0])]]
def connection_pattern(self, node):
# the grad returns the gradient with respect to the
# elements of a tensor variable
# the elements of the tensor variable do not participate
# in the computation of the shape, so they are not really
# part of the graph
return [[False]]
def grad(self, inp, grads):
# the grad returns the gradient with respect to the
# elements of a tensor variable
# the elements of the tensor variable do not participate
# in the computation of the shape, so they are not really
# part of the graph
return [aesara.gradient.DisconnectedType()()]
def R_op(self, inputs, eval_points):
return [None]
def c_code(self, node, name, inames, onames, sub):
(iname,) = inames
(oname,) = onames
fail = sub["fail"]
itype = node.inputs[0].type.__class__
if itype in self.c_code_and_version:
code, version = self.c_code_and_version[itype]
return code % locals()
# Else, no C code
raise NotImplementedError()
def c_code_cache_version(self):
version = []
# If any of the c code is unversioned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v) in sorted(
self.c_code_and_version.items(), key=lambda pair: str(pair[0])
):
if not v:
warnings.warn(
f"Type {t} has C code for Shape, but it has no "
"version. You should add a 'version' keyword "
"arg when calling register_shape_c_code.",
stacklevel=2,
)
return ()
version.append((str(t), v))
if version:
version.append(1)
return tuple(version)
_shape = Shape()
def shape(x: Union[np.ndarray, Number, Variable]) -> Variable:
    """Return the (symbolic) shape of `x`.

    When `x` is a `TensorType` variable whose static shape is fully known,
    a constant shape vector is returned directly instead of building a
    `Shape` node.
    """
    if not isinstance(x, Variable):
        x = at.as_tensor_variable(x)

    x_type = x.type
    static_shape_known = isinstance(x_type, TensorType) and all(
        s is not None for s in x_type.shape
    )
    if static_shape_known:
        return at.as_tensor_variable(x_type.shape, ndim=1, dtype=np.int64)
    return _shape(x)
@_get_vector_length.register(Shape)
def _get_vector_length_Shape(op, var):
    # A shape vector has one entry per dimension of the tensor whose shape
    # it describes, so its length equals that tensor's rank.
    return var.owner.inputs[0].type.ndim
def shape_tuple(x: TensorVariable) -> Tuple[Variable, ...]:
"""Get a tuple of symbolic shape values.
This will return a `ScalarConstant` with the value ``1`` wherever
broadcastable is ``True``.
"""
one_at = aesara.scalar.ScalarConstant(aesara.scalar.int64, 1)
return tuple(
one_at if getattr(sh, "value", sh) == 1 or bcast else sh
for sh, bcast in zip(
shape(x), getattr(x, "broadcastable", (False,) * x.type.ndim)
)
)
class Shape_i(COp):
"""
L{Op} to return the shape of a matrix.
Notes
-----
Non-differentiable.
"""
_f16_ok = True
# Mapping from Type to C code (and version) to use.
# In the C code, the name of the input variable is %(iname)s,
# the output variable is %(oname)s.
c_code_and_version: Dict = {}
check_input = False
__props__ = ("i",)
def __init__(self, i):
# As i will be used in the hash and that ndarray are not hashable,
# we need to convert it to an int as it is hashable.
if isinstance(i, np.ndarray):
assert i.dtype in aesara.tensor.type.integer_dtypes
assert i == int(i)
i = int(i)
self.i = i
# NB:
# 1) params_type is defined as a property to avoid
# loop in Python import caused by importing aesara.scalar below
# when params_type is defined directly in class code.
# 2) We wrap scalar into ParamsType (instead of directly using scalar as op param)
# to avoid Aesara converting scalar param to constant that would be later
# hardcoded as literal in C code, making us loose all the advantages of
# using params.
@property
def params_type(self):
return ParamsType(i=aesara.scalar.basic.int64)
def __str__(self):
return "%s{%i}" % (self.__class__.__name__, self.i)
def make_node(self, x):
if not isinstance(x, Variable) or not hasattr(x.type, "ndim"):
raise TypeError(
f"{x} must be `Variable` with a `Type` having an ndim attribute"
)
if x.type.ndim <= self.i:
raise TypeError(f"{x} has too few dimensions for Shape_i")
return Apply(self, [x], [aesara.tensor.type.lscalar()])
def perform(self, node, inp, out_, params):
(x,) = inp
(out,) = out_
if out[0] is None:
out[0] = _asarray(np.shape(x)[self.i], dtype="int64")
else:
out[0][...] = np.shape(x)[self.i]
def c_code_cache_version(self):
version = []
# If any of the c code is unversioned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, ci, v) in sorted(
self.c_code_and_version.items(), key=lambda pair: str(pair[0])
):
if not v:
warnings.warn(
f"Type {t} has C code for Shape_i, but it has "
"no version. You should add a 'version' keyword "
"arg when calling register_shape_i_c_code.",
stacklevel=2,
)
return ()
version.append((str(t), v))
if version:
version.append(2)
return tuple(version)
def c_code(self, node, name, inames, onames, sub):
(iname,) = inames
(oname,) = onames
fail = sub["fail"]
# i is then 'params->i', not just 'params'.
i = sub["params"] + "->i"
itype = node.inputs[0].type.__class__
if itype in self.c_code_and_version:
code, check_input, version = self.c_code_and_version[itype]
return (check_input + code) % locals()
# Else, no C code
raise NotImplementedError()
def infer_shape(self, fgraph, node, input_shapes):
return [()]
def connection_pattern(self, node):
# the grad returns the gradient with respect to the
# elements of a tensor variable
# the elements of the tensor variable do not participate
# in the computation of the shape, so they are not really
# part of the graph
return [[False]]
def grad(self, inp, grads):
return [
aesara.gradient.grad_not_implemented(
op=self,
x_pos=0,
x=inp[0],
comment=("No gradient for the shape of a matrix " "is implemented."),
)
]
def shape_i(var, i, fgraph=None):
"""
Equivalent of var.shape[i], but apply if possible the shape feature
optimization.
This is useful in optimization that need to get the shape. This
remove the need of the following shape_feature optimization that
convert it. So this speed up optimization and remove Equilibrium
max iteration problems.
Parameters
----------
var : Variable
The variable we want to take the shape of.
i : int
The shape dimensions we want
fgraph : FunctionGraph (optional)
"""
if fgraph and hasattr(fgraph, "shape_feature"):
shape_feature = fgraph.shape_feature
shape_of = shape_feature.shape_of
def recur(node):
if not node.outputs[0] in shape_of:
for inp in node.inputs:
if inp.owner:
recur(inp.owner)
# If the output var isn't marked as being in the graph,
# we need to add it in the ShapeFeature.
shape_feature.on_import(fgraph, node, "graph.ops.shape_i")
if var not in shape_of:
recur(var.owner)
return shape_of[var][i]
# If we are not able to use the shape feature, we should not put
# Shape_i in the graph. Otherwise, the shape feature optimization
# won't get applied.
return shape(var)[i]
def shape_i_op(i):
key = i
if key not in shape_i_op.cache:
shape_i_op.cache[key] = Shape_i(i)
return shape_i_op.cache[key]
shape_i_op.cache = {}
def register_shape_i_c_code(typ, code, check_input, version=()):
"""
Tell Shape_i how to generate C code for an Aesara Type.
Parameters
----------
typ : Aesara type
It must be the Aesara class itself and not an instance of the class.
code : C code
Gets the shape of dimensions %(i)s for the Aesara type 'typ'.
Use %(iname)s and %(oname)s for the input and output C variable names
respectively.
version
A number indicating the version of the code, for cache.
"""
Shape_i.c_code_and_version[typ] = (code, check_input, version)
def register_specify_shape_c_code(typ, code, version=(), c_support_code_apply=None):
"""
Tell SpecifyShape how to generate C code for an Aesara Type.
Parameters
----------
typ : Aesara type
It must be the Aesara class itself and not an instance of the class.
code : C code
Checks the shape and returns a view for the Aesara type 'typ'.
Use %(iname)s and %(oname)s for the input and output C variable names
respectively. %(shape)s is the vector of shape of %(iname)s.
Check that its length is good.
version
A number indicating the version of the code, for cache.
c_support_code_apply
Extra code.
"""
SpecifyShape.c_code_and_version[typ] = (code, version, c_support_code_apply)
class SpecifyShape(COp):
"""
L{Op} that puts into the graph the user-provided shape.
In the case where this `Op` stays in the final graph, we assert the shape.
For this the output of this op must be used in the graph. This is not
the case most of the time if we only take the shape of the output.
Maybe there are other optimizations that will mess with this.
Notes
-----
Maybe in the future we will never do the assert!
We currently don't support specifying partial shape information.
TODO : test this op with sparse. Do C code for them too.
"""
view_map = {0: [0]}
# Mapping from Type to C code (and version) to use.
# In the C code, the name of the input variable is %(iname)s,
# the output variable is %(oname)s.
c_code_and_version: Dict = {}
__props__ = ()
_f16_ok = True
def make_node(self, x, shape):
if not isinstance(x, Variable):
x = at.as_tensor_variable(x)
shape = at.as_tensor_variable(shape, ndim=1)
if isinstance(shape, Constant):
shape = tuple(shape.data)
else:
shape = tuple(at.as_tensor_variable(s, ndim=0) for s in shape)
if any(s.dtype not in aesara.tensor.type.integer_dtypes for s in shape):
raise TypeError("Shape values must be integer types")
if len(shape) != x.type.ndim:
raise ValueError(
f"Input `x` is {x.type.ndim}-dimensional and will never match a shape of length {len(shape)}."
)
if isinstance(x.type, TensorType) and all(isinstance(s, Number) for s in shape):
out_var = x.type.clone(shape=shape)()
else:
out_var = x.type()
in_shape = at.as_tensor_variable(shape, ndim=1)
return Apply(self, [x, in_shape], [out_var])
def perform(self, node, inp, out_):
x, shape = inp
(out,) = out_
ndim = len(shape)
if x.ndim != ndim:
raise AssertionError(
f"SpecifyShape: Got {x.ndim} dimensions (shape {x.shape}), expected {ndim} dimensions with shape {tuple(shape)}."
)
if x.shape != tuple(shape):
raise AssertionError(
f"SpecifyShape: Got shape {x.shape}, expected {tuple(shape)}."
)
out[0] = x
def infer_shape(self, fgraph, node, shapes):
xshape, sshape = shapes
new_shape = []
for dim in range(node.inputs[0].type.ndim):
try:
s = at.get_scalar_constant_value(node.inputs[1][dim])
s = at.as_tensor_variable(s)
new_shape.append(s)
except NotScalarConstantError:
new_shape.append(node.inputs[1][dim])
assert len(new_shape) == len(xshape)
return [new_shape]
def connection_pattern(self, node):
return [[True], [False]]
def grad(self, inp, grads):
x, s = inp
(gz,) = grads
# Should I set an SpecifyShape on gz? I think so
# But I don't do it now as we need to make an optimization
# to remove that op from the graph to don't block other optimization
# Should I do an optimizer that will remove the SpecifyShape?
# I think Yes
return [gz, aesara.gradient.DisconnectedType()()]
return [specify_shape(gz, s), aesara.gradient.DisconnectedType()()]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
# It means that the this op sits on top of a non-differentiable
# path
return [None]
return self.make_node(eval_points[0], *inputs[1:]).outputs
def c_support_code_apply(self, node, name):
itype = node.inputs[0].type.__class__
if itype in self.c_code_and_version:
_, _, support_code = self.c_code_and_version[itype]
if support_code:
return support_code
return super().c_support_code_apply(node, name)
def c_code(self, node, name, inames, onames, sub):
iname, shape = inames
(oname,) = onames
fail = sub["fail"]
itype = node.inputs[0].type.__class__
if itype in self.c_code_and_version:
code, version, _ = self.c_code_and_version[itype]
return code % locals()
raise NotImplementedError()
def c_code_cache_version(self):
version = []
# If any of the c code is unversioned, we have to return ()
# Else, we will return a list of (type name, version) pairs.
for t, (c, v, _) in sorted(
self.c_code_and_version.items(), key=lambda pair: str(pair[0])
):
if not v:
warnings.warn(
"Type %s has C code for SpecifyShape, but it "
"has no version. You should add a 'version' "
"keyword arg when calling "
"register_specify_shape_c_code." % t,
stacklevel=2,
)
return ()
version.append((str(t), v))
return tuple(version)
_specify_shape = SpecifyShape()
def specify_shape(
    x: Union[np.ndarray, Number, Variable],
    shape: Union[
        int, List[Union[int, Variable]], Tuple[Union[int, Variable]], Variable
    ],
):
    """Specify a fixed shape for a `Variable`."""
    if not isinstance(x, Variable):
        x = at.as_tensor_variable(x)
    # A scalar shape argument is promoted to a length-1 shape vector.
    if np.ndim(shape) == 0:
        shape = at.as_tensor_variable([shape])
    # The number of dimensions must be statically determinable;
    # `get_vector_length` raises `ValueError` when it is not.
    try:
        _ = get_vector_length(shape)
    except ValueError:
        raise ValueError("Shape must have fixed dimensions")
    # Constant shapes are unpacked to a plain tuple so `SpecifyShape` can
    # build a statically-shaped output type.
    if isinstance(shape, Constant):
        shape = tuple(shape.data)
    return _specify_shape(x, shape)
@_get_vector_length.register(SpecifyShape)
def _get_vector_length_SpecifyShape(op, var):
try:
return at.get_scalar_constant_value(var.owner.inputs[1])
except NotScalarConstantError:
raise ValueError(f"Length of {var} cannot be determined")
class Reshape(COp):
"""Perform a reshape operation of the input x to the new shape shp.
The number of dimensions to which to reshape to (ndim) must be
known at graph build time.
"""
view_map = {0: [0]} # output 0 is potentially aliased to inputs [0]
_f16_ok = True
check_input = False
__props__ = ("ndim",)
params_type = ParamsType(ndim=int32)
# name does not participate because it doesn't affect computations
def __init__(self, ndim, name=None):
self.ndim = int(ndim)
if ndim < 0:
raise ValueError("The output dimensions after reshape must be 0 or greater")
assert name is None, "name attribute for Reshape has been deprecated"
def __str__(self):
return f"{self.__class__.__name__}{{{self.ndim}}}"
def make_node(self, x, shp):
x = at.as_tensor_variable(x)
shp_orig = shp
shp = at.as_tensor_variable(shp, ndim=1)
if not (
shp.dtype in int_dtypes
or (isinstance(shp, TensorConstant) and shp.data.size == 0)
):
# It raises an error if shp is not of integer type,
# except when shp is constant and empty
# (in this case, shp.dtype does not matter anymore).
raise TypeError(f"Shape must be integers; got {shp.dtype}")
assert shp.ndim == 1
if isinstance(shp, TensorConstant):
bcast = [s == 1 for s in shp.data]
return Apply(self, [x, shp], [tensor(x.type.dtype, bcast)])
else:
bcasts = [False] * self.ndim
shp_list = shp_orig
if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0:
shp_list = [shp_orig]
for index in range(self.ndim):
y = shp_list[index]
y = at.as_tensor_variable(y)
# Try to see if we can infer that y has a constant value of 1.
# If so, that dimension should be broadcastable.
try:
bcasts[index] = (
hasattr(y, "get_scalar_constant_value")
and y.get_scalar_constant_value() == 1
)
except NotScalarConstantError:
pass
return Apply(self, [x, shp], [tensor(x.type.dtype, bcasts)])
def perform(self, node, inp, out_, params):
x, shp = inp
(out,) = out_
if len(shp) != self.ndim:
raise ValueError(
(
"Shape argument to Reshape has incorrect"
f" length: {len(shp)}, should be {self.ndim}"
)
)
out[0] = np.reshape(x, shp)
def connection_pattern(self, node):
return [[True], [False]]
def grad(self, inp, grads):
x, shp = inp
(g_out,) = grads
return [reshape(g_out, shape(x), ndim=x.ndim), DisconnectedType()()]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self(eval_points[0], *inputs[1:], return_list=True)
def infer_shape(self, fgraph, node, ishapes):
from aesara.tensor.math import eq, maximum, mul
# inputs[1] can contain at most one value of '-1', meaning the actual
# shape of the output will be automatically computed by reshape, so
# that the total number of elements stays the same.
# TODO: Maybe put that formula here?
# It's not trivial, because we would have to check if the product of
# all the non-minus-one shapes is a divisor of the product of the
# original shapes.
# The following expression leads to cycles in feature_shape,
# because it tries to replace the Shape_i node by the switch
# statement, which depends on Shape_i.
# return [tuple([switch(eq(node.inputs[1][i], -1),
# Shape_i(i)(node.outputs[0]),
# node.inputs[1][i])
# for i in range(self.ndim)]
# )]
# Here, we only simplify if the shape (node.inputs[1]) is a constant,
# ideally it would suffice to check that it is always non-negative.
# If current variable is a scalar and its dimensionality should
# change to self.ndim, then use size 1 for all new dimensions.
if len(ishapes[0]) == 0:
return [(1,) * self.ndim]
requ = node.inputs[1]
input_size = mul(*ishapes[0])
if isinstance(requ, TensorConstant):
requ = list(requ.data)
requ_part = [ele for ele in requ if ele != -1]
crit = len(requ) - len(requ_part)
if crit == 1 and len(requ_part) > 0:
# If there are both 0 and -1 in requ_size, it is impossible
# to determine a right output, but we can at least prevent
# a division by 0. We do not want to keep a negative
# size here as it could lead to further weird errors
# after other optimizations.
requ_size = mul(*requ_part)
missing = input_size // (1 if requ_size == 0 else requ_size)
for i, ele in enumerate(requ):
if ele == -1:
requ[i] = missing
elif crit == 1: # we reshape to -1
requ = [input_size] if ishapes[0] else [1]
elif crit > 1:
raise ValueError(
"shape argument to Reshape.perform"
" must have at most one entry equal to -1"
)
return [requ]
else:
requ = [requ[i] for i in range(self.ndim)]
# since new_dims can have negative value (-1), the
# multiplication of all values should be negated
# to give a positive value.
# To avoid optimization complexity, we avoid checking
# for the case when there are two or more '-1' values.
if self.ndim:
requ_size = -mul(*requ)
# If there are both 0 and -1 in requ_size, it is impossible
# to determine a right output, but we can at least prevent
# a division by 0. We do not want to keep a negative
# size here as it could lead to further weird errors
# after other optimizations.
rest_size = input_size // maximum(requ_size, 1)
return [
tuple(
[
at.switch(eq(requ[i], -1), rest_size, requ[i])
for i in range(self.ndim)
]
)
]
def c_code_cache_version(self):
return (9,)
def c_code(self, node, name, inputs, outputs, sub):
x, shp = inputs
(z,) = outputs
fail = sub["fail"]
params = sub["params"]
return f"""
assert (PyArray_NDIM({shp}) == 1);
PyArray_Dims newshape;
if (!PyArray_IntpConverter((PyObject *){shp}, &newshape)) {{
{fail};
}}
if ({params}->ndim != newshape.len) {{
PyErr_SetString(PyExc_ValueError, "Shape argument to Reshape has incorrect length");
PyDimMem_FREE(newshape.ptr);
{fail};
}}
Py_XDECREF({z});
{z} = (PyArrayObject *) PyArray_Newshape({x}, &newshape, NPY_CORDER);
PyDimMem_FREE(newshape.ptr);
if (!{z}) {{
//The error message should have been set by PyArray_Newshape
{fail};
}}
"""
def reshape(x, newshape, ndim=None):
    """Reshape `x` to `newshape`.

    When `ndim` is not given, it is inferred from the (statically known)
    length of `newshape`; a `ValueError` is raised if that length cannot
    be determined.
    """
    if ndim is None:
        newshape = at.as_tensor_variable(newshape)
        if newshape.ndim != 1:
            raise TypeError(
                "New shape in reshape must be a vector or a list/tuple of"
                f" scalar. Got {newshape} after conversion to a vector."
            )
        try:
            ndim = get_vector_length(newshape)
        except ValueError:
            raise ValueError(
                f"The length of the provided shape ({newshape}) cannot "
                "be automatically determined, so Aesara is not able "
                "to know what the number of dimensions of the reshaped "
                "variable will be. You can provide the 'ndim' keyword "
                "argument to 'reshape' to avoid this problem."
            )
    return Reshape(ndim)(x, newshape)
def shape_padleft(t, n_ones=1):
    """Prepend `n_ones` broadcastable (size-1) dimensions to the shape of `t`.

    See Also
    --------
    shape_padaxis
    shape_padright
    Dimshuffle
    """
    _t = at.as_tensor_variable(t)
    return _t.dimshuffle(["x"] * n_ones + list(range(_t.type.ndim)))
def shape_padright(t, n_ones=1):
    """Reshape `t` by right-padding the shape with `n_ones` 1s.
    See Also
    --------
    shape_padaxis
    shape_padleft
    Dimshuffle
    """
    tensor = at.as_tensor_variable(t)
    # Append `n_ones` broadcastable ("x") axes after the existing axes.
    existing = list(range(tensor.type.ndim))
    return tensor.dimshuffle(existing + ["x"] * n_ones)
def shape_padaxis(t, axis):
    """Reshape `t` by inserting 1 at the dimension `axis`.
    Examples
    --------
    >>> tensor = aesara.tensor.type.tensor3()
    >>> aesara.tensor.shape_padaxis(tensor, axis=0)
    DimShuffle{x,0,1,2}.0
    >>> aesara.tensor.shape_padaxis(tensor, axis=1)
    DimShuffle{0,x,1,2}.0
    >>> aesara.tensor.shape_padaxis(tensor, axis=3)
    DimShuffle{0,1,2,x}.0
    >>> aesara.tensor.shape_padaxis(tensor, axis=-1)
    DimShuffle{0,1,2,x}.0
    See Also
    --------
    shape_padleft
    shape_padright
    Dimshuffle
    """
    tensor = at.as_tensor_variable(t)
    out_ndim = tensor.ndim + 1
    # Validate against the *output* rank: ``axis == tensor.ndim`` (append at
    # the end) and negative positions down to ``-out_ndim`` are all legal.
    if not -out_ndim <= axis < out_ndim:
        raise IndexError(
            "axis {0} is out of bounds [-{1}, {1})".format(axis, out_ndim)
        )
    if axis < 0:
        axis += out_ndim
    pattern = list(range(tensor.type.ndim))
    pattern.insert(axis, "x")
    return tensor.dimshuffle(pattern)
register_shape_c_code(
TensorType,
"""
npy_intp shape[] = {PyArray_NDIM(%(iname)s)};
if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))
{
Py_XDECREF(%(oname)s);
%(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);
}
for(int i=0;i<shape[0];i++)
{
((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = PyArray_DIMS(%(iname)s)[i];
}
""",
version=1,
)
register_shape_i_c_code(
TensorType,
"""
if(!%(oname)s)
%(oname)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(oname)s))[0]=PyArray_DIMS(%(iname)s)[%(i)s];
""",
"""
if (%(i)s>=PyArray_NDIM(%(iname)s)){
PyErr_SetString(PyExc_TypeError,
"Number of dimensions lower than expected");
%(fail)s
}
""",
version=3,
)
register_specify_shape_c_code(
TensorType,
"""
if (PyArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: Got %%d dimensions, expected %%d dimensions.",
PyArray_NDIM(%(iname)s),
PyArray_DIMS(%(shape)s)[0]
);
%(fail)s;
}
for(int i = 0; i < PyArray_NDIM(%(iname)s); i++){
dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,
i))[0];
if (PyArray_DIMS(%(iname)s)[i] != shp) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: dim %%d of input has shape %%d,"
" expected %%d.",
i, PyArray_DIMS(%(iname)s)[i],
shp);
%(fail)s;
}
}
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1,
)
| 32.830786 | 129 | 0.576697 |
b2b89e2ea2ebf42189e5b47c2746d75d9cfce451 | 3,797 | py | Python | src/clikit/ui/components/exception_trace.py | abn/clikit | c9f96ee7a39a0d59d6cf7b5888589a030f36f051 | [
"MIT"
] | null | null | null | src/clikit/ui/components/exception_trace.py | abn/clikit | c9f96ee7a39a0d59d6cf7b5888589a030f36f051 | [
"MIT"
] | null | null | null | src/clikit/ui/components/exception_trace.py | abn/clikit | c9f96ee7a39a0d59d6cf7b5888589a030f36f051 | [
"MIT"
] | null | null | null | import ast
import inspect
import keyword
import math
import sys
import traceback
from clikit.api.io import IO
class ExceptionTrace(object):
    """
    Renders the trace of an exception.

    Formats the traceback with console markup tags (``<error>``, ``<info>``,
    …) and applies simple syntax highlighting to each source line via the
    ``ast`` module.
    """
    # Markup tag opening each highlight category; closed with "</>".
    THEME = {
        "comment": "<fg=black;options=bold>",
        "keyword": "<fg=yellow>",
        "builtin": "<fg=blue>",
        "literal": "<fg=magenta>",
    }
    # Precomputed lookup tables for highlighting: builtin names and the AST
    # node classes whose lowercased class name is a Python keyword
    # (If, For, Return, ...).  `__builtins__` is a dict when this module is
    # imported and a module when run as a script, hence the type check.
    AST_ELEMENTS = {
        "builtins": __builtins__.keys()
        if type(__builtins__) is dict
        else dir(__builtins__),
        "keywords": [
            getattr(ast, cls)
            for cls in dir(ast)
            if keyword.iskeyword(cls.lower())
            and inspect.isclass(getattr(ast, cls))
            and issubclass(getattr(ast, cls), ast.AST)
        ],
    }
    def __init__(self, exception):  # type: (Exception) -> None
        self._exception = exception
        # Captured eagerly as a fallback for Python 2, where exceptions have
        # no __traceback__ attribute.
        self._exc_info = sys.exc_info()
    def render(self, io):  # type: (IO) -> None
        # Write the exception header; the full traceback is only shown in
        # verbose mode.
        if hasattr(self._exception, "__traceback__"):
            tb = self._exception.__traceback__
        else:
            tb = self._exc_info[2]
        title = "\n[<error>{}</error>]\n<error>{}</error>".format(
            self._exception.__class__.__name__, str(self._exception)
        )
        io.write_line(title)
        if io.is_verbose():
            io.write_line("")
            self._render_traceback(io, tb)
    def _render_traceback(self, io, tb):  # type: (IO, ...) -> None
        # Walk the traceback chain oldest-to-newest and emit a formatted
        # frame summary for each entry.
        frames = []
        while tb:
            frames.append(self._format_traceback_frame(io, tb))
            tb = tb.tb_next
        io.write_line("<comment>Traceback (most recent call last):</comment>")
        io.write_line("".join(traceback.format_list(frames)))
    def _format_traceback_frame(self, io, tb):  # type: (IO, ...) -> Tuple[Any]
        # Build the (filename, lineno, function, text) tuple that
        # traceback.format_list() expects, with the source text highlighted.
        frame_info = inspect.getframeinfo(tb)
        filename = frame_info.filename
        lineno = frame_info.lineno
        function = frame_info.function
        # NOTE(review): code_context can be None when the source is
        # unavailable — this would raise here; confirm upstream guarantees.
        line = frame_info.code_context[0]
        stripped_line = line.lstrip(" ")
        try:
            # Parse the dedented line so AST column offsets line up with the
            # string; re-apply the original indentation afterwards.
            tree = ast.parse(stripped_line, mode="exec")
            formatted = self._format_tree(tree, stripped_line, io)
            formatted = (len(line) - len(stripped_line)) * " " + formatted
        except SyntaxError:
            # Partial statements (e.g. a multi-line call) cannot be parsed;
            # fall back to the raw text.
            formatted = line
        return (
            io.format("<info>{}</info>".format(filename)),
            lineno,
            function,
            formatted,
        )
    def _format_tree(self, tree, source, io):
        # Wrap keywords, builtins and literals found in `tree` with theme
        # markup, splicing the decorated tokens back into `source` by their
        # column offsets.
        offset = 0
        chunks = []
        nodes = [n for n in ast.walk(tree)]
        displayed_nodes = []
        for node in nodes:
            nodecls = node.__class__
            nodename = nodecls.__name__
            # Nodes without a column offset (e.g. operator tokens) cannot be
            # located in the source text.
            if "col_offset" not in dir(node):
                continue
            if nodecls in self.AST_ELEMENTS["keywords"]:
                displayed_nodes.append((node, nodename.lower(), "keyword"))
            elif nodecls == ast.Name and node.id in self.AST_ELEMENTS["builtins"]:
                displayed_nodes.append((node, node.id, "builtin"))
            elif nodecls == ast.Str:
                displayed_nodes.append((node, "'{}'".format(node.s), "literal"))
            elif nodecls == ast.Num:
                displayed_nodes.append((node, str(node.n), "literal"))
        # Emit tokens left-to-right so untouched text between them is copied
        # through verbatim.
        displayed_nodes.sort(key=lambda elem: elem[0].col_offset)
        for dn in displayed_nodes:
            node = dn[0]
            s = dn[1]
            theme = dn[2]
            begin_col = node.col_offset
            src_chunk = source[offset:begin_col]
            chunks.append(src_chunk)
            chunks.append(io.format("{}{}</>".format(self.THEME[theme], s)))
            offset = begin_col + len(s)
        chunks.append(source[offset:])
        return "".join(chunks)
| 29.664063 | 82 | 0.553595 |
64a595d8e6954ed7fa0018496fd27f1fb17173b6 | 12,440 | py | Python | src/Products/PluggableAuthService/tests/test_UserFolder.py | perrinjerome/Products.PluggableAuthService | d77e403ca53b8b915d47c5a72549e1a3cde84934 | [
"ZPL-2.1"
] | null | null | null | src/Products/PluggableAuthService/tests/test_UserFolder.py | perrinjerome/Products.PluggableAuthService | d77e403ca53b8b915d47c5a72549e1a3cde84934 | [
"ZPL-2.1"
] | null | null | null | src/Products/PluggableAuthService/tests/test_UserFolder.py | perrinjerome/Products.PluggableAuthService | d77e403ca53b8b915d47c5a72549e1a3cde84934 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this
# distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from six.moves import range
from AccessControl import Unauthorized
from AccessControl.Permissions import add_folders as AddFolders
from AccessControl.Permissions import view as View
from zope import event
from zope.component import adapter
from zope.component import provideHandler
from ZPublisher.utils import basic_auth_encode
from ..events import CredentialsUpdated
from ..events import PASEventNotify
from ..events import userCredentialsUpdatedHandler
from ..interfaces.events import IPrincipalCreatedEvent
from ..PluggableAuthService import PluggableAuthService
from ..tests import pastc
@adapter(IPrincipalCreatedEvent)
def userCreatedHandler(event):
    """Record each principal-created event on the PAS instance under test."""
    pas = event.principal.aq_parent
    # Lazily create the scratch list that the tests later inspect.
    if not hasattr(pas, "events"):
        pas.events = []
    pas.events.append(event)
class UserFolderTests(pastc.PASTestCase):
    """Exercise the PAS user folder through the classic user-folder API."""
    def afterSetUp(self):
        # Set up roles and a user
        self.uf = self.folder.acl_users
        self.folder._addRole('role1')
        self.folder.manage_role('role1', [View])
        self.uf.roles.addRole('role1')
        self.folder._addRole('role2')
        self.folder.manage_role('role2', [View])
        self.uf.roles.addRole('role2')
        self.uf._doAddUser('user1', 'secret', ['role1'], [])
        # Set up a published object accessible to user
        self.folder.addDTMLMethod('doc', file='the document')
        self.doc = self.folder.doc
        self.doc.manage_permission(View, ['role1'], acquire=0)
        # Rig the REQUEST so it looks like we traversed to doc
        self.app.REQUEST['PUBLISHED'] = self.doc
        self.app.REQUEST['PARENTS'] = [self.app, self.folder]
        self.app.REQUEST.steps = list(self.doc.getPhysicalPath())
        self.basic = basic_auth_encode('user1', 'secret')
        # Make sure we are not logged in
        self.logout()
    def testGetUser(self):
        self.assertNotEqual(self.uf.getUser('user1'), None)
    def testGetBadUser(self):
        self.assertEqual(self.uf.getUser('user2'), None)
    def testGetUserById(self):
        self.assertNotEqual(self.uf.getUserById('user1'), None)
    def testGetBadUserById(self):
        self.assertEqual(self.uf.getUserById('user2'), None)
    @unittest.expectedFailure
    def testGetUsers(self):
        # Fails because of NotImplementedError
        users = self.uf.getUsers()
        self.assertTrue(users)
        self.assertEqual(users[0].getUserName(), 'user1')
    @unittest.expectedFailure
    def testGetUserNames(self):
        # Fails because of NotImplementedError
        names = self.uf.getUserNames()
        self.assertTrue(names)
        self.assertEqual(names[0], 'user1')
    @unittest.expectedFailure
    def testIdentify(self):
        # Fails because of NotImplementedError
        name, password = self.uf.identify(self.basic)
        self.assertEqual(name, 'user1')
        self.assertEqual(password, 'secret')
    def testGetRoles(self):
        user = self.uf.getUser('user1')
        self.assertTrue('role1' in user.getRoles())
        self.assertFalse('role2' in user.getRoles())
    def testGetRolesInContext(self):
        # Local roles granted on the folder must show up in context.
        user = self.uf.getUser('user1')
        self.folder.manage_addLocalRoles('user1', ['role2'])
        roles = user.getRolesInContext(self.folder)
        self.assertTrue('role1' in roles)
        self.assertTrue('role2' in roles)
    def testHasRole(self):
        user = self.uf.getUser('user1')
        self.assertTrue(user.has_role('role1', self.folder))
    def testHasLocalRole(self):
        user = self.uf.getUser('user1')
        self.assertFalse(user.has_role('role2', self.folder))
        self.folder.manage_addLocalRoles('user1', ['role2'])
        self.assertTrue(user.has_role('role2', self.folder))
    def testHasPermission(self):
        user = self.uf.getUser('user1')
        self.assertTrue(user.has_permission(View, self.folder))
        self.assertFalse(user.has_permission(AddFolders, self.folder))
        self.folder.manage_role('role1', [AddFolders])
        self.assertTrue(user.has_permission(AddFolders, self.folder))
    def testHasLocalRolePermission(self):
        # Permissions granted to a role only help once the user holds that
        # role locally.
        user = self.uf.getUser('user1')
        self.folder.manage_role('role2', [AddFolders])
        self.assertFalse(user.has_permission(AddFolders, self.folder))
        self.folder.manage_addLocalRoles('user1', ['role2'])
        self.assertTrue(user.has_permission(AddFolders, self.folder))
    @unittest.expectedFailure
    def testAuthenticate(self):
        # Fails because of NotImplementedError
        user = self.uf.getUser('user1')
        self.assertTrue(user.authenticate('secret', self.app.REQUEST))
    def testValidate(self):
        # ???: PAS validate ignores auth argument
        self.app.REQUEST._auth = self.basic
        user = self.uf.validate(self.app.REQUEST, self.basic, ['role1'])
        self.assertNotEqual(user, None)
        self.assertEqual(user.getUserName(), 'user1')
    def testNotValidateWithoutAuth(self):
        # ???: PAS validate ignores auth argument
        user = self.uf.validate(self.app.REQUEST, '', ['role1'])
        self.assertEqual(user, None)
    def testValidateWithoutRoles(self):
        # Note - calling uf.validate without specifying roles will cause
        # the security machinery to determine the needed roles by looking
        # at the object itself (or its container). I'm putting this note
        # in to clarify because the original test expected failure but it
        # really should have expected success, since the user and the
        # object being checked both have the role 'role1', even though no
        # roles are passed explicitly to the userfolder validate method.
        # ???: PAS validate ignores auth argument
        self.app.REQUEST._auth = self.basic
        user = self.uf.validate(self.app.REQUEST, self.basic)
        self.assertEqual(user.getUserName(), 'user1')
    def testNotValidateWithEmptyRoles(self):
        # ???: PAS validate ignores auth argument
        self.app.REQUEST._auth = self.basic
        user = self.uf.validate(self.app.REQUEST, self.basic, [])
        self.assertEqual(user, None)
    def testNotValidateWithWrongRoles(self):
        # ???: PAS validate ignores auth argument
        self.app.REQUEST._auth = self.basic
        user = self.uf.validate(self.app.REQUEST, self.basic, ['role2'])
        self.assertEqual(user, None)
    def testAllowAccessToUser(self):
        self.login('user1')
        try:
            self.folder.restrictedTraverse('doc')
        except Unauthorized:
            self.fail('Unauthorized')
    def testDenyAccessToAnonymous(self):
        self.assertRaises(Unauthorized, self.folder.restrictedTraverse, 'doc')
    def testMaxListUsers(self):
        # create a folder-ish thing which contains a roleManager,
        # then put an acl_users object into the folder-ish thing
        class Folderish(PluggableAuthService):
            def __init__(self, size, count):
                self.maxlistusers = size
                self.users = []
                self.acl_users = self
                self.__allow_groups__ = self
                for i in range(count):
                    self.users.append('Nobody')
            def getUsers(self):
                return self.users
            def user_names(self):
                return self.getUsers()
        tinyFolderOver = Folderish(15, 20)
        tinyFolderUnder = Folderish(15, 10)
        assert tinyFolderOver.maxlistusers == 15
        assert tinyFolderUnder.maxlistusers == 15
        assert len(tinyFolderOver.user_names()) == 20
        assert len(tinyFolderUnder.user_names()) == 10
        # More users than maxlistusers must raise OverflowError ...
        with self.assertRaises(OverflowError):
            tinyFolderOver.get_valid_userids()
        # ... while a folder under the limit must not.
        try:
            tinyFolderUnder.get_valid_userids()
        except OverflowError:
            self.fail('Raised overflow error erroneously')
    def test__doAddUser_with_not_yet_encrypted_passwords(self):
        # See collector #1869 && #1926
        from AuthEncoding.AuthEncoding import is_encrypted
        USER_ID = 'not_yet_encrypted'
        PASSWORD = 'password'
        self.assertFalse(is_encrypted(PASSWORD))
        self.uf._doAddUser(USER_ID, PASSWORD, [], [])
        uid_and_info = self.uf.users.authenticateCredentials(
            {'login': USER_ID, 'password': PASSWORD})
        self.assertEqual(uid_and_info, (USER_ID, USER_ID))
    def test__doAddUser_with_preencrypted_passwords(self):
        # See collector #1869 && #1926
        from AuthEncoding.AuthEncoding import pw_encrypt
        USER_ID = 'already_encrypted'
        PASSWORD = 'password'
        ENCRYPTED = pw_encrypt(PASSWORD)
        self.uf._doAddUser(USER_ID, ENCRYPTED, [], [])
        uid_and_info = self.uf.users.authenticateCredentials(
            {'login': USER_ID, 'password': PASSWORD})
        self.assertEqual(uid_and_info, (USER_ID, USER_ID))
    def test_manage_zmi_logout(self):
        # Logging out must challenge again with a 401 + basic-auth header.
        request = self.app.REQUEST
        response = request.RESPONSE
        self.folder.manage_zmi_logout(request, response)
        self.assertEqual(response.status, 401)
        self.assertEqual(response.headers.get('WWW-Authenticate'),
                         'basic realm="%s"' % response.realm)
class UserTests(pastc.PASTestCase):
    """Tests of the user object returned by the PAS user folder."""
    def afterSetUp(self):
        self.uf = self.folder.acl_users
        self.uf._doAddUser('chris', '123', ['Manager'], [])
        self.user = self.uf.getUser('chris')
    def testGetUserName(self):
        f = self.user
        self.assertEqual(f.getUserName(), 'chris')
    def testGetUserId(self):
        f = self.user
        self.assertEqual(f.getId(), 'chris')
    def testBaseUserGetIdEqualGetName(self):
        # this is true for the default user type, but will not
        # always be true for extended user types going forward (post-2.6)
        f = self.user
        self.assertEqual(f.getId(), f.getUserName())
    @unittest.expectedFailure
    def testGetPassword(self):
        # fails because of NotImplementedError
        f = self.user
        self.assertEqual(f._getPassword(), '123')
    def testGetRoles(self):
        # 'Authenticated' is added implicitly alongside the granted roles.
        f = self.user
        self.assertEqual(set(f.getRoles()), {'Authenticated', 'Manager'})
    def testGetDomains(self):
        f = self.user
        self.assertEqual(f.getDomains(), ())
class UserEvents(pastc.PASTestCase):
    """Tests of the zope.event notifications emitted by PAS."""
    def afterSetUp(self):
        # Set up roles and a user
        self.uf = self.folder.acl_users
        self.folder._addRole('role1')
        self.folder.manage_role('role1', [View])
        self.uf.roles.addRole('role1')
        self.folder._addRole('role2')
        self.uf._doAddUser('user1', 'secret', ['role1'], [])
    def testUserCreationEvent(self):
        # userCreatedHandler appends IPrincipalCreatedEvent instances to
        # uf.events (see the module-level handler above).
        provideHandler(userCreatedHandler)
        self.uf.events = []
        self.uf._doAddUser('event1', 'secret', ['role1'], [])
        self.assertEqual(len(self.uf.events), 1)
        event = self.uf.events[0]
        self.assertTrue(IPrincipalCreatedEvent.providedBy(event))
        self.assertEqual(event.principal.getUserName(), 'event1')
        self.assertEqual(event.principal.getId(), 'event1')
    def testCredentialsEvent(self):
        import functools
        provideHandler(PASEventNotify)
        provideHandler(userCredentialsUpdatedHandler)
        # Spy on updateCredentials: record each call's arguments before
        # delegating to the original implementation.
        def wrap(self, *args):
            self._data.append(args)
            return self._original(*args)
        self.uf._data = []
        self.uf._original = self.uf.updateCredentials
        self.uf.updateCredentials = functools.partial(wrap, self.uf)
        self.assertEqual(len(self.uf._data), 0)
        # Firing the event must route through the handler chain into
        # updateCredentials(request, response, login, password).
        event.notify(CredentialsUpdated(self.uf.getUserById('user1'),
                                        'testpassword'))
        self.assertEqual(self.uf._data[0][2], 'user1')
        self.assertEqual(self.uf._data[0][3], 'testpassword')
| 36.374269 | 78 | 0.648633 |
2c6b07a791d67c725514c5e007fafc782845d51e | 1,540 | py | Python | transformers/LastStateTransformer.py | verenich/time-prediction-benchmark | 6075e4dc25077f26b679106bc569f99975cad825 | [
"Apache-2.0"
] | 11 | 2020-05-26T12:15:05.000Z | 2022-03-04T14:48:46.000Z | transformers/LastStateTransformer.py | samadeusfp/prescriptiveProcessMonitoring | 7b39c9b3cb20208d409e733e91cb91fb69dbf238 | [
"MIT"
] | 2 | 2019-05-28T08:59:33.000Z | 2020-05-23T09:43:05.000Z | transformers/LastStateTransformer.py | samadeusfp/prescriptiveProcessMonitoring | 7b39c9b3cb20208d409e733e91cb91fb69dbf238 | [
"MIT"
] | 7 | 2018-09-27T02:30:54.000Z | 2021-11-19T14:22:31.000Z | from sklearn.base import TransformerMixin
import pandas as pd
from time import time
class LastStateTransformer(TransformerMixin):
    """Encode each case by the attributes of its last event.

    Numeric columns are passed through unchanged; categorical columns are
    one-hot encoded.  The column layout produced by the first ``transform``
    call is frozen so later calls yield identically shaped frames.
    """
    def __init__(self, case_id_col, cat_cols, num_cols, fillna=True):
        self.case_id_col = case_id_col
        self.cat_cols = cat_cols
        self.num_cols = num_cols
        self.fillna = fillna
        self.columns = None
        self.fit_time = 0
        self.transform_time = 0
    def fit(self, X, y=None):
        # Stateless: there is nothing to learn ahead of transform().
        return self
    def transform(self, X, y=None):
        start = time()
        last_events = X.groupby(self.case_id_col).last()
        # Numeric attributes verbatim; categoricals expanded to dummies.
        encoded = last_events[self.num_cols]
        if len(self.cat_cols) > 0:
            dummies = pd.get_dummies(last_events[self.cat_cols])
            encoded = pd.concat([encoded, dummies], axis=1)
        if self.fillna:
            encoded = encoded.fillna(0)
        if self.columns is None:
            # First call: remember the layout for subsequent calls.
            self.columns = encoded.columns
        else:
            # Later calls: add any columns absent from this chunk, then
            # reorder everything to the frozen layout.
            for col in self.columns:
                if col not in encoded.columns:
                    encoded[col] = 0
            encoded = encoded[self.columns]
        self.transform_time = time() - start
        return encoded
73a7a9b6e995c6904b551d774ff0dea693e41f92 | 8,127 | py | Python | experimentmanager/models.py | sciexpem/sciexpem | 6de9a8039356588a5e817f0fa6bafd948220fc8f | [
"MIT"
] | null | null | null | experimentmanager/models.py | sciexpem/sciexpem | 6de9a8039356588a5e817f0fa6bafd948220fc8f | [
"MIT"
] | 3 | 2019-05-10T14:57:30.000Z | 2021-06-10T21:14:21.000Z | experimentmanager/models.py | sciexpem/sciexpem | 6de9a8039356588a5e817f0fa6bafd948220fc8f | [
"MIT"
] | 1 | 2020-09-11T09:16:15.000Z | 2020-09-11T09:16:15.000Z | from django.db import models
from django.contrib.postgres.fields import JSONField
from django.contrib.postgres.fields import ArrayField
from django.urls import reverse
from enum import Enum
MAX_DIGITS = 42
DECIMAL_PLACES = 10
class EType(Enum):
    """Experiment classification derived from reactor type plus the
    presence/absence of specific common properties and data columns.

    The branch order in ``retrieve_type`` matters: for shock tubes the
    volume/time signature (RCM-like) is checked before the plain ignition
    delay signature.
    """
    batch_idt = "batch_idt"
    stirred_parT = "stirred_parT"
    flow_isothermal_parT = "flow_isothermal_parT"
    flame_parPhi = "flame_parPhi"
    rcm_idt = "rcm_idt"
    @staticmethod
    def _check_existence(common_names, columns_names, mandatory_common, mandatory_columns):
        # True iff every mandatory name is present in its respective set.
        return all([i in common_names for i in mandatory_common]) and all(
            [i in columns_names for i in mandatory_columns])
    @staticmethod
    def _check_not_existence(common_names, columns_names, forbidden_common, forbidden_columns):
        # True iff none of the forbidden names is present.
        return all([i not in common_names for i in forbidden_common]) and all(
            [i not in columns_names for i in forbidden_columns])
    @staticmethod
    def retrieve_type(e):
        # Classify experiment `e`; returns None when no signature matches.
        common_properties = e.common_properties.all()
        common_properties_names = set([c.name for c in common_properties])
        data_columns = e.data_columns.all()
        data_columns_names = set([d.name for d in data_columns])
        if e.reactor == "flow reactor":
            mandatory_common = ['residence time', 'pressure']
            mandatory_columns = ['temperature', 'composition']
            # A 'dT' column would indicate a non-isothermal experiment.
            forbidden_columns = ['dT']
            o1 = EType._check_existence(common_properties_names, data_columns_names, mandatory_common, mandatory_columns)
            o2 = EType._check_not_existence(common_properties_names, data_columns_names, [], forbidden_columns)
            if o1 and o2:
                return EType.flow_isothermal_parT
        if e.reactor == "stirred reactor":
            mandatory_common = ['pressure', 'volume', 'residence time']
            mandatory_columns = ['temperature', 'composition']
            o = EType._check_existence(common_properties_names, data_columns_names, mandatory_common, mandatory_columns)
            if o:
                return EType.stirred_parT
        if e.reactor == "shock tube":
            # RCM-like signature (volume/time histories): deliberately
            # unclassified for now — rcm_idt support is disabled.
            mandatory_common = ['pressure']
            mandatory_columns = ['ignition delay', 'temperature', 'volume', 'time']
            o = EType._check_existence(common_properties_names, data_columns_names, mandatory_common, mandatory_columns)
            if o:
                # return EType.rcm_idt
                return None
            mandatory_common = ['pressure']
            mandatory_columns = ['ignition delay', 'temperature']
            o = EType._check_existence(common_properties_names, data_columns_names, mandatory_common, mandatory_columns)
            if o:
                return EType.batch_idt
        if e.reactor == "flame":
            mandatory_common = ['temperature', 'pressure']
            mandatory_columns = ['laminar burning velocity', 'phi']
            o = EType._check_existence(common_properties_names, data_columns_names, mandatory_common, mandatory_columns)
            if o:
                return EType.flame_parPhi
        return None
class FilePaper(models.Model):
    """Publication (paper) that an experiment data file originates from."""
    title = models.CharField(max_length=500)
    # DOI of the paper; optional, but unique when present.
    reference_doi = models.CharField(max_length=100, unique=True, blank=True, null=True)
    def get_absolute_url(self):
        # Canonical detail URL for this paper.
        return reverse('filepaper', kwargs={'pk': self.pk})
class Experiment(models.Model):
    """A single experiment record, keyed by its data-file DOI."""
    reactor = models.CharField(max_length=100)
    experiment_type = models.CharField(max_length=100)
    fileDOI = models.CharField(max_length=100, unique=True)
    temp = models.BooleanField()
    file_paper = models.ForeignKey(FilePaper, on_delete=models.CASCADE, default=None, null=True)
    ignition_type = models.CharField(max_length=100, blank=True, null=True)
    def get_params_experiment(self):
        """Collect P, T and phi for this experiment.

        Common properties take precedence; when T or phi is only available
        as a data column, its minimum value is used.
        """
        common_properties = self.common_properties
        data_columns = self.data_columns
        params = dict()
        pressure_common = common_properties.filter(name="pressure").first()
        temperature_common = common_properties.filter(name="temperature").first()
        phi_common = common_properties.filter(name="phi").first()
        temperature_column = data_columns.filter(name="temperature").first()
        phi_column = data_columns.filter(name="phi").first()
        if pressure_common:
            params["P"] = pressure_common.value
        if temperature_common:
            params["T"] = temperature_common.value
        elif temperature_column:
            params["T"] = min(temperature_column.data)
        if phi_common:
            params["phi"] = phi_common.value
        elif phi_column:
            params["phi"] = min(phi_column.data)
        return params
    def run_type(self):
        # Delegate classification to EType; may return None.
        return EType.retrieve_type(self)
    @property
    def run_type_str(self):
        # String form of the classification, or None when unclassified.
        e_type = self.run_type()
        if e_type is not None:
            return e_type.value
        else:
            return None
class CommonProperty(models.Model):
    """A scalar property (e.g. pressure) shared by a whole experiment."""
    name = models.CharField(max_length=100)
    units = models.CharField(max_length=50)
    value = models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES)
    sourcetype = models.CharField(max_length=50, null=True, blank=True)
    experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, related_name="common_properties")
    def __str__(self):
        return "%s %s %s" % (self.name, self.value, self.units)
class InitialSpecie(models.Model):
    """Initial composition entry (one chemical species) of an experiment."""
    name = models.CharField(max_length=20)
    units = models.CharField(max_length=50)
    amount = models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES)
    cas = models.CharField(max_length=20, null=True, blank=True)
    experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, related_name="initial_species")
    role = models.CharField(max_length=20, null=True, blank=True)  # "fuel' and 'oxidizer'
    def __str__(self):
        return "%s %s %s" % (self.name, self.amount, self.units)
class DataColumn(models.Model):
    """A measured column of experimental data (array of decimals)."""
    name = models.CharField(max_length=100)
    label = models.CharField(max_length=100, null=True, blank=True)
    units = models.CharField(max_length=50)
    species = ArrayField(models.CharField(max_length=20), null=True, blank=True)
    data = ArrayField(models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES))
    experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, related_name="data_columns")
    dg_id = models.CharField(max_length=10, null=False)
    def range(self):
        # First and last stored values (not min/max).
        return self.data[0], self.data[-1]
    def __str__(self):
        return "%s %s %s" % (self.name, self.species, self.units)
class ChemModel(models.Model):
    """A chemical kinetic model available for simulations."""
    name = models.CharField(max_length=50)
    version = models.CharField(max_length=200)
    path = models.CharField(max_length=100)
class Execution(models.Model):
    """One simulation run of a chemical model against an experiment."""
    chemModel = models.ForeignKey(ChemModel, on_delete=models.CASCADE, related_name="executions")
    experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE, related_name="executions")
    execution_start = models.DateTimeField()
    execution_end = models.DateTimeField()
# todo: subclass as datacolumn
class ExecutionColumn(models.Model):
    """A column of simulated data produced by an Execution."""
    name = models.CharField(max_length=100)
    label = models.CharField(max_length=100, null=True, blank=True)
    units = models.CharField(max_length=50)
    species = ArrayField(models.CharField(max_length=20), null=True, blank=True)
    data = ArrayField(models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES))
    execution = models.ForeignKey(Execution, on_delete=models.CASCADE, related_name="execution_columns")
    def range(self):
        # First and last stored values (not min/max).
        return self.data[0], self.data[-1]
class CurveMatchingResult(models.Model):
    """Curve-matching score comparing a simulated column to the experiment."""
    execution_column = models.OneToOneField(ExecutionColumn, on_delete=models.CASCADE, related_name="curve_matching_result")
    index = models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES, null=True, blank=True)
    error = models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES, null=True, blank=True)
fb3de9286cc3f0cb1cb0881d21edb5d603b6cc97 | 219 | py | Python | wuhan/20180403/h6.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | wuhan/20180403/h6.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | wuhan/20180403/h6.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | ":87, “caoxu”:90, “caohuan”:98, “wuhan”:82, “zhijia”:89}
1)将以上字典按成绩排名
'''
# Rank the score dictionary from highest to lowest mark using sorted().
d = {"chaoqian": 87, "caoxu": 90, "caohuan": 98, "wuhan": 82, "zhijia": 89}
print(sorted(d.items(), key=lambda entry: entry[1], reverse=True))
| 24.333333 | 66 | 0.611872 |
111888adba749e4ed3f8d2f9379abd6947b275f2 | 1,233 | py | Python | profiles_api/serializers.py | dshaunak/profiles-rest-api-proj | e4619669f2ab5e8cb9fc4847c6b4e4873355b4e5 | [
"MIT"
] | 1 | 2021-04-29T03:54:17.000Z | 2021-04-29T03:54:17.000Z | profiles_api/serializers.py | dshaunak/profiles-rest-api-proj | e4619669f2ab5e8cb9fc4847c6b4e4873355b4e5 | [
"MIT"
] | null | null | null | profiles_api/serializers.py | dshaunak/profiles-rest-api-proj | e4619669f2ab5e8cb9fc4847c6b4e4873355b4e5 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serialize a ``name`` field for testing our API view."""
    name = serializers.CharField(max_length=15)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serialize UserProfile objects, hashing the password on write."""
    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # Use extra_kwargs to make password write only, so users can't retrieve passwords
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style':{'input_type':'password'}
            }
        }
    def create(self, validated_data):
        """Create and return a new user (create_user hashes the password)."""
        user = models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password']
        )
        return user
    def update(self, instance, validated_data):
        """Handle updating a user account.

        The password is popped and set via set_password() so it is hashed
        instead of being stored verbatim by the default update.
        """
        if 'password' in validated_data:
            password = validated_data.pop('password')
            instance.set_password(password)
        return super().update(instance, validated_data)
| 30.073171 | 88 | 0.627737 |
c8e3eeacb5504a62a56c53025a769c118e155a56 | 45,421 | py | Python | tests/security_tests.py | akashanita/superset-keycloak | 1d446ecf723d1cb0c43057d501cf1e7b83fa27ff | [
"Apache-2.0"
] | 3 | 2021-02-19T01:43:50.000Z | 2021-08-14T04:56:41.000Z | tests/security_tests.py | akashanita/superset-keycloak | 1d446ecf723d1cb0c43057d501cf1e7b83fa27ff | [
"Apache-2.0"
] | 39 | 2019-07-28T09:49:37.000Z | 2022-03-31T09:37:13.000Z | tests/security_tests.py | akashanita/superset-keycloak | 1d446ecf723d1cb0c43057d501cf1e7b83fa27ff | [
"Apache-2.0"
] | 1 | 2021-03-02T13:05:26.000Z | 2021-03-02T13:05:26.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import datetime
import inspect
import re
import unittest
from unittest.mock import Mock, patch
import pandas as pd
import prison
import pytest
import random
from flask import current_app, g
from sqlalchemy import Float, Date, String
from superset import app, appbuilder, db, security_manager, viz, ConnectorRegistry
from superset.connectors.druid.models import DruidCluster, DruidDatasource
from superset.connectors.sqla.models import RowLevelSecurityFilter, SqlaTable
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetSecurityException
from superset.models.core import Database
from superset.models.slice import Slice
from superset.sql_parse import Table
from superset.utils.core import get_example_database
from .base_tests import SupersetTestCase
from .dashboard_utils import (
create_table_for_dashboard,
create_slice,
create_dashboard,
)
from .fixtures.unicode_dashboard import load_unicode_dashboard_with_slice
def get_perm_tuples(role_name):
    """Return the set of (permission name, view-menu name) pairs of a role."""
    role = security_manager.find_role(role_name)
    return {(pv.permission.name, pv.view_menu.name) for pv in role.permissions}
SCHEMA_ACCESS_ROLE = "schema_access_role"
def create_schema_perm(view_menu_name: str) -> None:
    """Create a ``schema_access`` permission on the given view menu and
    grant it to the test schema-access role."""
    security_manager.add_permission_view_menu("schema_access", view_menu_name)
    pv = security_manager.find_permission_view_menu("schema_access", view_menu_name)
    role = security_manager.find_role(SCHEMA_ACCESS_ROLE)
    security_manager.add_permission_role(role, pv)
def delete_schema_perm(view_menu_name: str) -> None:
    """Revoke and delete the ``schema_access`` permission for the given
    view menu.

    Fix: the original ignored ``view_menu_name`` and always operated on the
    hard-coded ``"[examples].[2]"`` view menu, so cleaning up any other
    schema permission (e.g. ``"[examples].[temp_schema]"`` created in
    setUp and passed here from tearDown) silently did nothing.
    """
    pv = security_manager.find_permission_view_menu("schema_access", view_menu_name)
    security_manager.del_permission_role(
        security_manager.find_role(SCHEMA_ACCESS_ROLE), pv
    )
    security_manager.del_permission_view_menu("schema_access", view_menu_name)
class TestRolePermission(SupersetTestCase):
    """Testing export role permissions.

    Fixes applied relative to the original:
    * ``test_set_perm_druid_datasource`` used ``assertIsNotNone(x, expected)``
      where ``expected`` was silently treated as the failure *message*, so the
      schema perm value was never actually compared — replaced with
      ``assertEqual``.
    * Deprecated ``assertNotEquals`` alias replaced with ``assertNotEqual``.
    * Local ``slice`` renamed to ``slc`` to stop shadowing the builtin.
    * Duplicate ``can_explore``/``can_explore_json`` assertions removed.
    """

    def setUp(self):
        # Move wb_health_population (and its slices) into a temp schema and
        # grant the schema-access role (attached to the gamma user) access.
        session = db.session
        security_manager.add_role(SCHEMA_ACCESS_ROLE)
        session.commit()

        ds = (
            db.session.query(SqlaTable)
            .filter_by(table_name="wb_health_population")
            .first()
        )
        ds.schema = "temp_schema"
        ds.schema_perm = ds.get_schema_perm()

        ds_slices = (
            session.query(Slice)
            .filter_by(datasource_type="table")
            .filter_by(datasource_id=ds.id)
            .all()
        )
        for s in ds_slices:
            s.schema_perm = ds.schema_perm
        create_schema_perm("[examples].[temp_schema]")
        gamma_user = security_manager.find_user(username="gamma")
        gamma_user.roles.append(security_manager.find_role(SCHEMA_ACCESS_ROLE))
        session.commit()

    def tearDown(self):
        # Undo everything setUp did so other test classes see pristine state.
        session = db.session
        ds = (
            session.query(SqlaTable)
            .filter_by(table_name="wb_health_population")
            .first()
        )
        schema_perm = ds.schema_perm
        ds.schema = None
        ds.schema_perm = None
        ds_slices = (
            session.query(Slice)
            .filter_by(datasource_type="table")
            .filter_by(datasource_id=ds.id)
            .all()
        )
        for s in ds_slices:
            s.schema_perm = None
        delete_schema_perm(schema_perm)
        session.delete(security_manager.find_role(SCHEMA_ACCESS_ROLE))
        session.commit()

    def test_set_perm_sqla_table(self):
        # Verifies that SqlaTable perms/schema perms track renames, schema
        # changes, database changes and schema removal.
        session = db.session
        table = SqlaTable(
            schema="tmp_schema",
            table_name="tmp_perm_table",
            database=get_example_database(),
        )
        session.add(table)
        session.commit()

        stored_table = (
            session.query(SqlaTable).filter_by(table_name="tmp_perm_table").one()
        )
        self.assertEqual(
            stored_table.perm, f"[examples].[tmp_perm_table](id:{stored_table.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_table.perm
            )
        )
        self.assertEqual(stored_table.schema_perm, "[examples].[tmp_schema]")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "schema_access", stored_table.schema_perm
            )
        )

        # table name change
        stored_table.table_name = "tmp_perm_table_v2"
        session.commit()
        stored_table = (
            session.query(SqlaTable).filter_by(table_name="tmp_perm_table_v2").one()
        )
        self.assertEqual(
            stored_table.perm, f"[examples].[tmp_perm_table_v2](id:{stored_table.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_table.perm
            )
        )
        # no changes in schema
        self.assertEqual(stored_table.schema_perm, "[examples].[tmp_schema]")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "schema_access", stored_table.schema_perm
            )
        )

        # schema name change
        stored_table.schema = "tmp_schema_v2"
        session.commit()
        stored_table = (
            session.query(SqlaTable).filter_by(table_name="tmp_perm_table_v2").one()
        )
        self.assertEqual(
            stored_table.perm, f"[examples].[tmp_perm_table_v2](id:{stored_table.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_table.perm
            )
        )
        # no changes in schema
        self.assertEqual(stored_table.schema_perm, "[examples].[tmp_schema_v2]")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "schema_access", stored_table.schema_perm
            )
        )

        # database change
        new_db = Database(sqlalchemy_uri="some_uri", database_name="tmp_db")
        session.add(new_db)
        stored_table.database = (
            session.query(Database).filter_by(database_name="tmp_db").one()
        )
        session.commit()
        stored_table = (
            session.query(SqlaTable).filter_by(table_name="tmp_perm_table_v2").one()
        )
        self.assertEqual(
            stored_table.perm, f"[tmp_db].[tmp_perm_table_v2](id:{stored_table.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_table.perm
            )
        )
        # no changes in schema
        self.assertEqual(stored_table.schema_perm, "[tmp_db].[tmp_schema_v2]")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "schema_access", stored_table.schema_perm
            )
        )

        # no schema
        stored_table.schema = None
        session.commit()
        stored_table = (
            session.query(SqlaTable).filter_by(table_name="tmp_perm_table_v2").one()
        )
        self.assertEqual(
            stored_table.perm, f"[tmp_db].[tmp_perm_table_v2](id:{stored_table.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_table.perm
            )
        )
        self.assertIsNone(stored_table.schema_perm)

        session.delete(new_db)
        session.delete(stored_table)
        session.commit()

    def test_set_perm_druid_datasource(self):
        session = db.session
        druid_cluster = (
            session.query(DruidCluster).filter_by(cluster_name="druid_test").one()
        )
        datasource = DruidDatasource(
            datasource_name="tmp_datasource",
            cluster=druid_cluster,
            cluster_id=druid_cluster.id,
        )
        session.add(datasource)
        session.commit()

        # store without a schema
        stored_datasource = (
            session.query(DruidDatasource)
            .filter_by(datasource_name="tmp_datasource")
            .one()
        )
        self.assertEqual(
            stored_datasource.perm,
            f"[druid_test].[tmp_datasource](id:{stored_datasource.id})",
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_datasource.perm
            )
        )
        self.assertIsNone(stored_datasource.schema_perm)

        # store with a schema
        stored_datasource.datasource_name = "tmp_schema.tmp_datasource"
        session.commit()
        self.assertEqual(
            stored_datasource.perm,
            f"[druid_test].[tmp_schema.tmp_datasource](id:{stored_datasource.id})",
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "datasource_access", stored_datasource.perm
            )
        )
        # was assertIsNotNone(x, expected) — the expected value was being
        # passed as the failure message and never compared; use assertEqual.
        self.assertEqual(stored_datasource.schema_perm, "[druid_test].[tmp_schema]")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "schema_access", stored_datasource.schema_perm
            )
        )

        session.delete(stored_datasource)
        session.commit()

    def test_set_perm_druid_cluster(self):
        session = db.session
        cluster = DruidCluster(cluster_name="tmp_druid_cluster")
        session.add(cluster)

        stored_cluster = (
            session.query(DruidCluster)
            .filter_by(cluster_name="tmp_druid_cluster")
            .one()
        )
        self.assertEqual(
            stored_cluster.perm, f"[tmp_druid_cluster].(id:{stored_cluster.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "database_access", stored_cluster.perm
            )
        )

        stored_cluster.cluster_name = "tmp_druid_cluster2"
        session.commit()
        self.assertEqual(
            stored_cluster.perm, f"[tmp_druid_cluster2].(id:{stored_cluster.id})"
        )
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "database_access", stored_cluster.perm
            )
        )

        session.delete(stored_cluster)
        session.commit()

    def test_set_perm_database(self):
        session = db.session
        database = Database(
            database_name="tmp_database", sqlalchemy_uri="sqlite://test"
        )
        session.add(database)

        stored_db = (
            session.query(Database).filter_by(database_name="tmp_database").one()
        )
        self.assertEqual(stored_db.perm, f"[tmp_database].(id:{stored_db.id})")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "database_access", stored_db.perm
            )
        )

        stored_db.database_name = "tmp_database2"
        session.commit()
        stored_db = (
            session.query(Database).filter_by(database_name="tmp_database2").one()
        )
        self.assertEqual(stored_db.perm, f"[tmp_database2].(id:{stored_db.id})")
        self.assertIsNotNone(
            security_manager.find_permission_view_menu(
                "database_access", stored_db.perm
            )
        )

        session.delete(stored_db)
        session.commit()

    def test_hybrid_perm_druid_cluster(self):
        # Hybrid attribute: a cluster can be queried by its perm string.
        cluster = DruidCluster(cluster_name="tmp_druid_cluster3")
        db.session.add(cluster)

        id_ = (
            db.session.query(DruidCluster.id)
            .filter_by(cluster_name="tmp_druid_cluster3")
            .scalar()
        )

        record = (
            db.session.query(DruidCluster)
            .filter_by(perm=f"[tmp_druid_cluster3].(id:{id_})")
            .one()
        )

        self.assertEqual(record.get_perm(), record.perm)
        self.assertEqual(record.id, id_)
        self.assertEqual(record.cluster_name, "tmp_druid_cluster3")
        db.session.delete(cluster)
        db.session.commit()

    def test_hybrid_perm_database(self):
        # Hybrid attribute: a database can be queried by its perm string.
        database = Database(
            database_name="tmp_database3", sqlalchemy_uri="sqlite://test"
        )

        db.session.add(database)

        id_ = (
            db.session.query(Database.id)
            .filter_by(database_name="tmp_database3")
            .scalar()
        )

        record = (
            db.session.query(Database)
            .filter_by(perm=f"[tmp_database3].(id:{id_})")
            .one()
        )

        self.assertEqual(record.get_perm(), record.perm)
        self.assertEqual(record.id, id_)
        self.assertEqual(record.database_name, "tmp_database3")
        db.session.delete(database)
        db.session.commit()

    def test_set_perm_slice(self):
        session = db.session
        database = Database(
            database_name="tmp_database", sqlalchemy_uri="sqlite://test"
        )
        table = SqlaTable(table_name="tmp_perm_table", database=database)
        session.add(database)
        session.add(table)
        session.commit()

        # no schema permission
        slc = Slice(  # renamed from ``slice`` — avoid shadowing the builtin
            datasource_id=table.id,
            datasource_type="table",
            datasource_name="tmp_perm_table",
            slice_name="slice_name",
        )
        session.add(slc)
        session.commit()

        slc = session.query(Slice).filter_by(slice_name="slice_name").one()
        self.assertEqual(slc.perm, table.perm)
        self.assertEqual(slc.perm, f"[tmp_database].[tmp_perm_table](id:{table.id})")
        self.assertEqual(slc.schema_perm, table.schema_perm)
        self.assertIsNone(slc.schema_perm)

        table.schema = "tmp_perm_schema"
        table.table_name = "tmp_perm_table_v2"
        session.commit()
        # TODO(bogdan): modify slice permissions on the table update.
        self.assertNotEqual(slc.perm, table.perm)
        self.assertEqual(slc.perm, f"[tmp_database].[tmp_perm_table](id:{table.id})")
        self.assertEqual(
            table.perm, f"[tmp_database].[tmp_perm_table_v2](id:{table.id})"
        )
        # TODO(bogdan): modify slice schema permissions on the table update.
        self.assertNotEqual(slc.schema_perm, table.schema_perm)
        self.assertIsNone(slc.schema_perm)

        # updating slice refreshes the permissions
        slc.slice_name = "slice_name_v2"
        session.commit()
        self.assertEqual(slc.perm, table.perm)
        self.assertEqual(
            slc.perm, f"[tmp_database].[tmp_perm_table_v2](id:{table.id})"
        )
        self.assertEqual(slc.schema_perm, table.schema_perm)
        self.assertEqual(slc.schema_perm, "[tmp_database].[tmp_perm_schema]")

        session.delete(slc)
        session.delete(table)
        session.delete(database)

        session.commit()

        # TODO test slice permission

    @patch("superset.security.manager.g")
    def test_schemas_accessible_by_user_admin(self, mock_g):
        # Admin sees every schema unfiltered.
        mock_g.user = security_manager.find_user("admin")
        with self.client.application.test_request_context():
            database = get_example_database()
            schemas = security_manager.get_schemas_accessible_by_user(
                database, ["1", "2", "3"]
            )
            self.assertEqual(schemas, ["1", "2", "3"])  # no changes

    @patch("superset.security.manager.g")
    def test_schemas_accessible_by_user_schema_access(self, mock_g):
        # User has schema access to the schema 1
        create_schema_perm("[examples].[1]")
        mock_g.user = security_manager.find_user("gamma")
        with self.client.application.test_request_context():
            database = get_example_database()
            schemas = security_manager.get_schemas_accessible_by_user(
                database, ["1", "2", "3"]
            )
            # temp_schema is not passed in the params
            self.assertEqual(schemas, ["1"])
        delete_schema_perm("[examples].[1]")

    @patch("superset.security.manager.g")
    def test_schemas_accessible_by_user_datasource_access(self, mock_g):
        # User has schema access to the datasource temp_schema.wb_health_population in examples DB.
        mock_g.user = security_manager.find_user("gamma")
        with self.client.application.test_request_context():
            database = get_example_database()
            schemas = security_manager.get_schemas_accessible_by_user(
                database, ["temp_schema", "2", "3"]
            )
            self.assertEqual(schemas, ["temp_schema"])

    @patch("superset.security.manager.g")
    def test_schemas_accessible_by_user_datasource_and_schema_access(self, mock_g):
        # User has schema access to the datasource temp_schema.wb_health_population in examples DB.
        create_schema_perm("[examples].[2]")
        mock_g.user = security_manager.find_user("gamma")
        with self.client.application.test_request_context():
            database = get_example_database()
            schemas = security_manager.get_schemas_accessible_by_user(
                database, ["temp_schema", "2", "3"]
            )
            self.assertEqual(schemas, ["temp_schema", "2"])
            vm = security_manager.find_permission_view_menu(
                "schema_access", "[examples].[2]"
            )
            self.assertIsNotNone(vm)
        delete_schema_perm("[examples].[2]")

    def test_gamma_user_schema_access_to_dashboards(self):
        self.login(username="gamma")
        data = str(self.client.get("api/v1/dashboard/").data)
        self.assertIn("/superset/dashboard/world_health/", data)
        self.assertNotIn("/superset/dashboard/births/", data)

    def test_gamma_user_schema_access_to_tables(self):
        self.login(username="gamma")
        data = str(self.client.get("tablemodelview/list/").data)
        self.assertIn("wb_health_population", data)
        self.assertNotIn("birth_names", data)

    def test_gamma_user_schema_access_to_charts(self):
        self.login(username="gamma")
        data = str(self.client.get("api/v1/chart/").data)
        self.assertIn(
            "Life Expectancy VS Rural %", data
        )  # wb_health_population slice, has access
        self.assertIn(
            "Parallel Coordinates", data
        )  # wb_health_population slice, has access
        self.assertNotIn("Girl Name Cloud", data)  # birth_names slice, no access

    def test_public_sync_role_data_perms(self):
        """
        Security: Tests if the sync role method preserves data access permissions
        if they already exist on a public role.
        Also check that non data access permissions are removed
        """
        table = db.session.query(SqlaTable).filter_by(table_name="birth_names").one()
        self.grant_public_access_to_table(table)
        public_role = security_manager.get_public_role()
        unwanted_pvm = security_manager.find_permission_view_menu(
            "menu_access", "Security"
        )
        public_role.permissions.append(unwanted_pvm)
        db.session.commit()

        security_manager.sync_role_definitions()
        public_role = security_manager.get_public_role()
        public_role_resource_names = [
            permission.view_menu.name for permission in public_role.permissions
        ]

        assert table.get_perm() in public_role_resource_names
        assert "Security" not in public_role_resource_names

        # Cleanup
        self.revoke_public_access_to_table(table)

    def test_public_sync_role_builtin_perms(self):
        """
        Security: Tests public role creation based on a builtin role
        """
        current_app.config["PUBLIC_ROLE_LIKE"] = "TestRole"

        security_manager.sync_role_definitions()
        public_role = security_manager.get_public_role()
        public_role_resource_names = [
            [permission.view_menu.name, permission.permission.name]
            for permission in public_role.permissions
        ]
        for pvm in current_app.config["FAB_ROLES"]["TestRole"]:
            assert pvm in public_role_resource_names

        # Cleanup
        current_app.config["PUBLIC_ROLE_LIKE"] = "Gamma"
        security_manager.sync_role_definitions()

    def test_sqllab_gamma_user_schema_access_to_sqllab(self):
        session = db.session
        example_db = session.query(Database).filter_by(database_name="examples").one()
        example_db.expose_in_sqllab = True
        session.commit()

        arguments = {
            "keys": ["none"],
            "filters": [{"col": "expose_in_sqllab", "opr": "eq", "value": True}],
            "order_columns": "database_name",
            "order_direction": "asc",
            "page": 0,
            "page_size": -1,
        }
        NEW_FLASK_GET_SQL_DBS_REQUEST = f"/api/v1/database/?q={prison.dumps(arguments)}"
        self.login(username="gamma")
        databases_json = self.client.get(NEW_FLASK_GET_SQL_DBS_REQUEST).json
        self.assertEqual(databases_json["count"], 1)
        self.logout()

    # --- assertion helpers over (permission, view_menu) tuple sets ---

    def assert_can_read(self, view_menu, permissions_set):
        self.assertIn(("can_list", view_menu), permissions_set)

    def assert_can_write(self, view_menu, permissions_set):
        self.assertIn(("can_add", view_menu), permissions_set)
        self.assertIn(("can_delete", view_menu), permissions_set)
        self.assertIn(("can_edit", view_menu), permissions_set)

    def assert_cannot_write(self, view_menu, permissions_set):
        self.assertNotIn(("can_add", view_menu), permissions_set)
        self.assertNotIn(("can_delete", view_menu), permissions_set)
        self.assertNotIn(("can_edit", view_menu), permissions_set)
        self.assertNotIn(("can_save", view_menu), permissions_set)

    def assert_can_all(self, view_menu, permissions_set):
        self.assert_can_read(view_menu, permissions_set)
        self.assert_can_write(view_menu, permissions_set)

    def assert_can_menu(self, view_menu, permissions_set):
        self.assertIn(("menu_access", view_menu), permissions_set)

    def assert_can_gamma(self, perm_set):
        self.assert_can_read("TableModelView", perm_set)

        # make sure that user can create slices and dashboards
        self.assert_can_all("SliceModelView", perm_set)
        self.assert_can_all("DashboardModelView", perm_set)

        self.assertIn(("can_add_slices", "Superset"), perm_set)
        self.assertIn(("can_copy_dash", "Superset"), perm_set)
        self.assertIn(("can_created_dashboards", "Superset"), perm_set)
        self.assertIn(("can_created_slices", "Superset"), perm_set)
        self.assertIn(("can_csv", "Superset"), perm_set)
        self.assertIn(("can_dashboard", "Superset"), perm_set)
        self.assertIn(("can_explore", "Superset"), perm_set)
        self.assertIn(("can_explore_json", "Superset"), perm_set)
        self.assertIn(("can_fave_dashboards", "Superset"), perm_set)
        self.assertIn(("can_fave_slices", "Superset"), perm_set)
        self.assertIn(("can_save_dash", "Superset"), perm_set)
        self.assertIn(("can_slice", "Superset"), perm_set)
        self.assertIn(("can_userinfo", "UserDBModelView"), perm_set)
        self.assert_can_menu("Databases", perm_set)
        self.assert_can_menu("Datasets", perm_set)
        self.assert_can_menu("Data", perm_set)
        self.assert_can_menu("Charts", perm_set)
        self.assert_can_menu("Dashboards", perm_set)

    def assert_can_alpha(self, perm_set):
        self.assert_can_all("AnnotationLayerModelView", perm_set)
        self.assert_can_all("CssTemplateModelView", perm_set)
        self.assert_can_all("TableModelView", perm_set)
        self.assert_can_read("QueryView", perm_set)
        self.assertIn(("can_import_dashboards", "Superset"), perm_set)
        self.assertIn(("can_this_form_post", "CsvToDatabaseView"), perm_set)
        self.assertIn(("can_this_form_get", "CsvToDatabaseView"), perm_set)
        self.assert_can_menu("Manage", perm_set)
        self.assert_can_menu("Annotation Layers", perm_set)
        self.assert_can_menu("CSS Templates", perm_set)
        self.assert_can_menu("Upload a CSV", perm_set)
        self.assertIn(("all_datasource_access", "all_datasource_access"), perm_set)

    def assert_cannot_alpha(self, perm_set):
        if app.config["ENABLE_ACCESS_REQUEST"]:
            # NOTE(review): asserting both cannot-write and can-all on the same
            # view looks contradictory; this branch only runs when
            # ENABLE_ACCESS_REQUEST is enabled — verify before relying on it.
            self.assert_cannot_write("AccessRequestsModelView", perm_set)
            self.assert_can_all("AccessRequestsModelView", perm_set)
        self.assert_cannot_write("Queries", perm_set)
        self.assert_cannot_write("RoleModelView", perm_set)
        self.assert_cannot_write("UserDBModelView", perm_set)

    def assert_can_admin(self, perm_set):
        self.assert_can_all("DatabaseView", perm_set)
        self.assert_can_all("RoleModelView", perm_set)
        self.assert_can_all("UserDBModelView", perm_set)

        self.assertIn(("all_database_access", "all_database_access"), perm_set)
        self.assertIn(("can_override_role_permissions", "Superset"), perm_set)
        self.assertIn(("can_sync_druid_source", "Superset"), perm_set)
        self.assertIn(("can_approve", "Superset"), perm_set)

        self.assert_can_menu("Security", perm_set)
        self.assert_can_menu("List Users", perm_set)
        self.assert_can_menu("List Roles", perm_set)

    def test_is_admin_only(self):
        self.assertFalse(
            security_manager._is_admin_only(
                security_manager.find_permission_view_menu("can_list", "TableModelView")
            )
        )
        self.assertFalse(
            security_manager._is_admin_only(
                security_manager.find_permission_view_menu(
                    "all_datasource_access", "all_datasource_access"
                )
            )
        )

        log_permissions = ["can_list", "can_show"]
        for log_permission in log_permissions:
            self.assertTrue(
                security_manager._is_admin_only(
                    security_manager.find_permission_view_menu(
                        log_permission, "LogModelView"
                    )
                )
            )

        if app.config["ENABLE_ACCESS_REQUEST"]:
            self.assertTrue(
                security_manager._is_admin_only(
                    security_manager.find_permission_view_menu(
                        "can_list", "AccessRequestsModelView"
                    )
                )
            )
        self.assertTrue(
            security_manager._is_admin_only(
                security_manager.find_permission_view_menu(
                    "can_edit", "UserDBModelView"
                )
            )
        )
        self.assertTrue(
            security_manager._is_admin_only(
                security_manager.find_permission_view_menu("can_approve", "Superset")
            )
        )

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
    )
    def test_is_alpha_only(self):
        self.assertFalse(
            security_manager._is_alpha_only(
                security_manager.find_permission_view_menu("can_list", "TableModelView")
            )
        )

        self.assertTrue(
            security_manager._is_alpha_only(
                security_manager.find_permission_view_menu(
                    "muldelete", "TableModelView"
                )
            )
        )
        self.assertTrue(
            security_manager._is_alpha_only(
                security_manager.find_permission_view_menu(
                    "all_datasource_access", "all_datasource_access"
                )
            )
        )
        self.assertTrue(
            security_manager._is_alpha_only(
                security_manager.find_permission_view_menu(
                    "all_database_access", "all_database_access"
                )
            )
        )

    def test_is_gamma_pvm(self):
        self.assertTrue(
            security_manager._is_gamma_pvm(
                security_manager.find_permission_view_menu("can_list", "TableModelView")
            )
        )

    def test_gamma_permissions_basic(self):
        self.assert_can_gamma(get_perm_tuples("Gamma"))
        self.assert_cannot_alpha(get_perm_tuples("Gamma"))

    def test_public_permissions_basic(self):
        self.assert_can_gamma(get_perm_tuples("Public"))

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
    )
    def test_alpha_permissions(self):
        alpha_perm_tuples = get_perm_tuples("Alpha")
        self.assert_can_gamma(alpha_perm_tuples)
        self.assert_can_alpha(alpha_perm_tuples)
        self.assert_cannot_alpha(alpha_perm_tuples)

    @unittest.skipUnless(
        SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
    )
    def test_admin_permissions(self):
        self.assert_can_gamma(get_perm_tuples("Admin"))
        self.assert_can_alpha(get_perm_tuples("Admin"))
        self.assert_can_admin(get_perm_tuples("Admin"))

    def test_sql_lab_permissions(self):
        sql_lab_set = get_perm_tuples("sql_lab")
        self.assertIn(("can_sql_json", "Superset"), sql_lab_set)
        self.assertIn(("can_csv", "Superset"), sql_lab_set)
        self.assertIn(("can_search_queries", "Superset"), sql_lab_set)

        self.assert_cannot_alpha(sql_lab_set)

    def test_granter_permissions(self):
        granter_set = get_perm_tuples("granter")
        self.assertIn(("can_override_role_permissions", "Superset"), granter_set)
        self.assertIn(("can_approve", "Superset"), granter_set)

        self.assert_cannot_alpha(granter_set)

    def test_gamma_permissions(self):
        def assert_can_read(view_menu):
            self.assertIn(("can_list", view_menu), gamma_perm_set)

        def assert_can_write(view_menu):
            self.assertIn(("can_add", view_menu), gamma_perm_set)
            self.assertIn(("can_delete", view_menu), gamma_perm_set)
            self.assertIn(("can_edit", view_menu), gamma_perm_set)

        def assert_cannot_write(view_menu):
            self.assertNotIn(("can_add", view_menu), gamma_perm_set)
            self.assertNotIn(("can_delete", view_menu), gamma_perm_set)
            self.assertNotIn(("can_edit", view_menu), gamma_perm_set)
            self.assertNotIn(("can_save", view_menu), gamma_perm_set)

        def assert_can_all(view_menu):
            assert_can_read(view_menu)
            assert_can_write(view_menu)

        gamma_perm_set = set()
        for perm in security_manager.find_role("Gamma").permissions:
            gamma_perm_set.add((perm.permission.name, perm.view_menu.name))

        # check read only perms
        assert_can_read("TableModelView")

        # make sure that user can create slices and dashboards
        assert_can_all("SliceModelView")
        assert_can_all("DashboardModelView")

        assert_cannot_write("UserDBModelView")
        assert_cannot_write("RoleModelView")

        self.assertIn(("can_add_slices", "Superset"), gamma_perm_set)
        self.assertIn(("can_copy_dash", "Superset"), gamma_perm_set)
        self.assertIn(("can_created_dashboards", "Superset"), gamma_perm_set)
        self.assertIn(("can_created_slices", "Superset"), gamma_perm_set)
        self.assertIn(("can_csv", "Superset"), gamma_perm_set)
        self.assertIn(("can_dashboard", "Superset"), gamma_perm_set)
        self.assertIn(("can_explore", "Superset"), gamma_perm_set)
        self.assertIn(("can_explore_json", "Superset"), gamma_perm_set)
        self.assertIn(("can_fave_dashboards", "Superset"), gamma_perm_set)
        self.assertIn(("can_fave_slices", "Superset"), gamma_perm_set)
        self.assertIn(("can_save_dash", "Superset"), gamma_perm_set)
        self.assertIn(("can_slice", "Superset"), gamma_perm_set)
        self.assertIn(("can_userinfo", "UserDBModelView"), gamma_perm_set)

    def test_views_are_secured(self):
        """Preventing the addition of unsecured views without has_access decorator"""
        # These FAB views are secured in their body as opposed to by decorators
        method_allowlist = ("action", "action_post")
        # List of redirect & other benign views
        views_allowlist = [
            ["MyIndexView", "index"],
            ["UtilView", "back"],
            ["LocaleView", "index"],
            ["AuthDBView", "login"],
            ["AuthDBView", "logout"],
            ["R", "index"],
            ["Superset", "log"],
            ["Superset", "theme"],
            ["Superset", "welcome"],
            ["SecurityApi", "login"],
            ["SecurityApi", "refresh"],
            ["SupersetIndexView", "index"],
        ]
        unsecured_views = []
        for view_class in appbuilder.baseviews:
            class_name = view_class.__class__.__name__
            for name, value in inspect.getmembers(
                view_class, predicate=inspect.ismethod
            ):
                if (
                    name not in method_allowlist
                    and [class_name, name] not in views_allowlist
                    and hasattr(value, "_urls")
                    and not hasattr(value, "_permission_name")
                ):
                    unsecured_views.append((class_name, name))
        if unsecured_views:
            view_str = "\n".join([str(v) for v in unsecured_views])
            raise Exception(f"Some views are not secured:\n{view_str}")
class TestSecurityManager(SupersetTestCase):
    """
    Testing the Security Manager.
    """

    @staticmethod
    def _access_error(error_type):
        # Build the exception used to simulate a denied-access response.
        return SupersetSecurityException(
            SupersetError("dummy", error_type, ErrorLevel.ERROR)
        )

    @patch("superset.security.SupersetSecurityManager.raise_for_access")
    def test_can_access_datasource(self, raise_mock):
        """can_access_datasource is True unless raise_for_access raises."""
        ds = self.get_datasource_mock()

        raise_mock.return_value = None
        self.assertTrue(security_manager.can_access_datasource(datasource=ds))

        raise_mock.side_effect = self._access_error(
            SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR
        )
        self.assertFalse(security_manager.can_access_datasource(datasource=ds))

    @patch("superset.security.SupersetSecurityManager.raise_for_access")
    def test_can_access_table(self, raise_mock):
        """can_access_table is True unless raise_for_access raises."""
        example_db = get_example_database()
        tbl = Table("bar", "foo")

        raise_mock.return_value = None
        self.assertTrue(security_manager.can_access_table(example_db, tbl))

        raise_mock.side_effect = self._access_error(
            SupersetErrorType.TABLE_SECURITY_ACCESS_ERROR
        )
        self.assertFalse(security_manager.can_access_table(example_db, tbl))

    @patch("superset.security.SupersetSecurityManager.can_access")
    @patch("superset.security.SupersetSecurityManager.can_access_schema")
    def test_raise_for_access_datasource(self, schema_mock, access_mock):
        """Datasource check passes with schema access, raises with neither."""
        ds = self.get_datasource_mock()

        schema_mock.return_value = True
        security_manager.raise_for_access(datasource=ds)

        access_mock.return_value = False
        schema_mock.return_value = False
        with self.assertRaises(SupersetSecurityException):
            security_manager.raise_for_access(datasource=ds)

    @patch("superset.security.SupersetSecurityManager.can_access")
    def test_raise_for_access_query(self, access_mock):
        """Query check raises when database access is denied."""
        fake_query = Mock(
            database=get_example_database(), schema="bar", sql="SELECT * FROM foo"
        )

        access_mock.return_value = True
        security_manager.raise_for_access(query=fake_query)

        access_mock.return_value = False
        with self.assertRaises(SupersetSecurityException):
            security_manager.raise_for_access(query=fake_query)

    @patch("superset.security.SupersetSecurityManager.can_access")
    @patch("superset.security.SupersetSecurityManager.can_access_schema")
    def test_raise_for_access_query_context(self, schema_mock, access_mock):
        """Query-context check mirrors the datasource check."""
        ctx = Mock(datasource=self.get_datasource_mock())

        schema_mock.return_value = True
        security_manager.raise_for_access(query_context=ctx)

        access_mock.return_value = False
        schema_mock.return_value = False
        with self.assertRaises(SupersetSecurityException):
            security_manager.raise_for_access(query_context=ctx)

    @patch("superset.security.SupersetSecurityManager.can_access")
    def test_raise_for_access_table(self, access_mock):
        """Table check raises when access is denied."""
        example_db = get_example_database()
        tbl = Table("bar", "foo")

        access_mock.return_value = True
        security_manager.raise_for_access(database=example_db, table=tbl)

        access_mock.return_value = False
        with self.assertRaises(SupersetSecurityException):
            security_manager.raise_for_access(database=example_db, table=tbl)

    @patch("superset.security.SupersetSecurityManager.can_access")
    @patch("superset.security.SupersetSecurityManager.can_access_schema")
    def test_raise_for_access_viz(self, schema_mock, access_mock):
        """Viz check mirrors the datasource check."""
        table_viz = viz.TableViz(self.get_datasource_mock(), form_data={})

        schema_mock.return_value = True
        security_manager.raise_for_access(viz=table_viz)

        access_mock.return_value = False
        schema_mock.return_value = False
        with self.assertRaises(SupersetSecurityException):
            security_manager.raise_for_access(viz=table_viz)
class TestRowLevelSecurity(SupersetTestCase):
"""
Testing Row Level Security
"""
rls_entry = None
query_obj = dict(
groupby=[],
metrics=[],
filter=[],
is_timeseries=False,
columns=["value"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
NAME_AB_ROLE = "NameAB"
NAME_Q_ROLE = "NameQ"
NAMES_A_REGEX = re.compile(r"name like 'A%'")
NAMES_B_REGEX = re.compile(r"name like 'B%'")
NAMES_Q_REGEX = re.compile(r"name like 'Q%'")
BASE_FILTER_REGEX = re.compile(r"gender = 'boy'")
def setUp(self):
session = db.session
# Create roles
security_manager.add_role(self.NAME_AB_ROLE)
security_manager.add_role(self.NAME_Q_ROLE)
gamma_user = security_manager.find_user(username="gamma")
gamma_user.roles.append(security_manager.find_role(self.NAME_AB_ROLE))
gamma_user.roles.append(security_manager.find_role(self.NAME_Q_ROLE))
self.create_user_with_roles("NoRlsRoleUser", ["Gamma"])
session.commit()
# Create regular RowLevelSecurityFilter (energy_usage, unicode_test)
self.rls_entry1 = RowLevelSecurityFilter()
self.rls_entry1.tables.extend(
session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(["energy_usage", "unicode_test"]))
.all()
)
self.rls_entry1.filter_type = "Regular"
self.rls_entry1.clause = "value > {{ cache_key_wrapper(1) }}"
self.rls_entry1.group_key = None
self.rls_entry1.roles.append(security_manager.find_role("Gamma"))
self.rls_entry1.roles.append(security_manager.find_role("Alpha"))
db.session.add(self.rls_entry1)
# Create regular RowLevelSecurityFilter (birth_names name starts with A or B)
self.rls_entry2 = RowLevelSecurityFilter()
self.rls_entry2.tables.extend(
session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(["birth_names"]))
.all()
)
self.rls_entry2.filter_type = "Regular"
self.rls_entry2.clause = "name like 'A%' or name like 'B%'"
self.rls_entry2.group_key = "name"
self.rls_entry2.roles.append(security_manager.find_role("NameAB"))
db.session.add(self.rls_entry2)
# Create Regular RowLevelSecurityFilter (birth_names name starts with Q)
self.rls_entry3 = RowLevelSecurityFilter()
self.rls_entry3.tables.extend(
session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(["birth_names"]))
.all()
)
self.rls_entry3.filter_type = "Regular"
self.rls_entry3.clause = "name like 'Q%'"
self.rls_entry3.group_key = "name"
self.rls_entry3.roles.append(security_manager.find_role("NameQ"))
db.session.add(self.rls_entry3)
# Create Base RowLevelSecurityFilter (birth_names boys)
self.rls_entry4 = RowLevelSecurityFilter()
self.rls_entry4.tables.extend(
session.query(SqlaTable)
.filter(SqlaTable.table_name.in_(["birth_names"]))
.all()
)
self.rls_entry4.filter_type = "Base"
self.rls_entry4.clause = "gender = 'boy'"
self.rls_entry4.group_key = "gender"
self.rls_entry4.roles.append(security_manager.find_role("Admin"))
db.session.add(self.rls_entry4)
db.session.commit()
def tearDown(self):
session = db.session
session.delete(self.rls_entry1)
session.delete(self.rls_entry2)
session.delete(self.rls_entry3)
session.delete(self.rls_entry4)
session.delete(security_manager.find_role("NameAB"))
session.delete(security_manager.find_role("NameQ"))
session.delete(self.get_user("NoRlsRoleUser"))
session.commit()
def test_rls_filter_alters_energy_query(self):
g.user = self.get_user(username="alpha")
tbl = self.get_table_by_name("energy_usage")
sql = tbl.get_query_str(self.query_obj)
assert tbl.get_extra_cache_keys(self.query_obj) == [1]
assert "value > 1" in sql
    def test_rls_filter_doesnt_alter_energy_query(self):
        """Admin users are exempt from RLS: no clause and no extra cache keys."""
        g.user = self.get_user(
            username="admin"
        )  # self.login() doesn't actually set the user
        tbl = self.get_table_by_name("energy_usage")
        sql = tbl.get_query_str(self.query_obj)
        assert tbl.get_extra_cache_keys(self.query_obj) == []
        assert "value > 1" not in sql
    @pytest.mark.usefixtures("load_unicode_dashboard_with_slice")
    def test_multiple_table_filter_alters_another_tables_query(self):
        """An RLS entry attached to several tables must also rewrite queries
        against the second table (unicode_test)."""
        g.user = self.get_user(
            username="alpha"
        )  # self.login() doesn't actually set the user
        tbl = self.get_table_by_name("unicode_test")
        sql = tbl.get_query_str(self.query_obj)
        assert tbl.get_extra_cache_keys(self.query_obj) == [1]
        assert "value > 1" in sql
    def test_rls_filter_alters_gamma_birth_names_query(self):
        """Regular filters sharing a group key are OR-ed together and then
        AND-ed with the Base filter."""
        g.user = self.get_user(username="gamma")
        tbl = self.get_table_by_name("birth_names")
        sql = tbl.get_query_str(self.query_obj)
        # establish that the filters are grouped together correctly with
        # ANDs, ORs and parens in the correct place
        assert (
            "WHERE ((name like 'A%'\n        or name like 'B%')\n       OR (name like 'Q%'))\n  AND (gender = 'boy');"
            in sql
        )
    def test_rls_filter_alters_no_role_user_birth_names_query(self):
        """A user without the name-based roles only gets the Base filter."""
        g.user = self.get_user(username="NoRlsRoleUser")
        tbl = self.get_table_by_name("birth_names")
        sql = tbl.get_query_str(self.query_obj)
        # gamma's filters should not be present query
        assert not self.NAMES_A_REGEX.search(sql)
        assert not self.NAMES_B_REGEX.search(sql)
        assert not self.NAMES_Q_REGEX.search(sql)
        # base query should be present
        assert self.BASE_FILTER_REGEX.search(sql)
    def test_rls_filter_doesnt_alter_admin_birth_names_query(self):
        """Admin bypasses all RLS filters, including the Base one."""
        g.user = self.get_user(username="admin")
        tbl = self.get_table_by_name("birth_names")
        sql = tbl.get_query_str(self.query_obj)
        # no filters are applied for admin user
        assert not self.NAMES_A_REGEX.search(sql)
        assert not self.NAMES_B_REGEX.search(sql)
        assert not self.NAMES_Q_REGEX.search(sql)
        assert not self.BASE_FILTER_REGEX.search(sql)
| 38.492373 | 118 | 0.650008 |
854e5667c642e31ff9995470b51fcb7d5e2ef24e | 7,345 | py | Python | push_notifications/gcm.py | esauro/django-push-notifications | 9abb342c5c9ead91e8c7f7cd82417936ebe234da | [
"MIT"
] | 3 | 2019-11-09T13:11:00.000Z | 2020-01-07T03:02:58.000Z | push_notifications/gcm.py | esauro/django-push-notifications | 9abb342c5c9ead91e8c7f7cd82417936ebe234da | [
"MIT"
] | 1 | 2020-01-17T20:42:19.000Z | 2020-01-17T20:42:19.000Z | push_notifications/gcm.py | esauro/django-push-notifications | 9abb342c5c9ead91e8c7f7cd82417936ebe234da | [
"MIT"
] | 3 | 2018-05-30T12:48:35.000Z | 2022-03-03T16:30:07.000Z | """
Firebase Cloud Messaging
Previously known as GCM / C2DM
Documentation is available on the Firebase Developer website:
https://firebase.google.com/docs/cloud-messaging/
"""
import json
from django.core.exceptions import ImproperlyConfigured
from .compat import Request, urlopen
from .conf import get_manager
from .exceptions import NotificationError
from .models import GCMDevice
# Valid keys for FCM messages. Reference:
# https://firebase.google.com/docs/cloud-messaging/http-server-ref
FCM_TARGETS_KEYS = [
"to", "condition", "notification_key"
]
FCM_OPTIONS_KEYS = [
"collapse_key", "priority", "content_available", "delay_while_idle", "time_to_live",
"restricted_package_name", "dry_run"
]
FCM_NOTIFICATIONS_PAYLOAD_KEYS = [
"title", "body", "icon", "sound", "badge", "color", "tag", "click_action",
"body_loc_key", "body_loc_args", "title_loc_key", "title_loc_args", "android_channel_id"
]
class GCMError(NotificationError):
    """Raised when the FCM/GCM response reports a non-recoverable per-message
    error (anything other than NotRegistered/InvalidRegistration)."""
    pass
def _chunks(l, n):
"""
Yield successive chunks from list \a l with a minimum size \a n
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def _gcm_send(data, content_type, application_id):
    """
    POST an already-serialized payload to the legacy GCM endpoint.

    ``data`` is the request body (bytes), ``content_type`` its MIME type.
    API key, post URL and timeout are resolved per ``application_id`` via
    the settings manager. Returns the response body decoded as UTF-8.
    """
    key = get_manager().get_gcm_api_key(application_id)
    headers = {
        "Content-Type": content_type,
        # Legacy server-key authentication scheme.
        "Authorization": "key=%s" % (key),
        "Content-Length": str(len(data)),
    }
    request = Request(get_manager().get_post_url("GCM", application_id), data, headers)
    return urlopen(
        request, timeout=get_manager().get_error_timeout("GCM", application_id)
    ).read().decode("utf-8")
def _fcm_send(data, content_type, application_id):
    """
    POST an already-serialized payload to the FCM endpoint.

    Mirrors ``_gcm_send`` but resolves the FCM-specific key, URL and
    timeout through the settings manager. Returns the UTF-8 decoded body.
    """
    key = get_manager().get_fcm_api_key(application_id)
    headers = {
        "Content-Type": content_type,
        # Legacy server-key authentication scheme.
        "Authorization": "key=%s" % (key),
        "Content-Length": str(len(data)),
    }
    request = Request(get_manager().get_post_url("FCM", application_id), data, headers)
    return urlopen(
        request, timeout=get_manager().get_error_timeout("FCM", application_id)
    ).read().decode("utf-8")
def _cm_handle_response(registration_ids, response_data, cloud_type, application_id=None):
    """
    Post-process a GCM/FCM JSON response for a batch of registration ids.

    Deactivates devices the server reports as invalid, rewrites ids for
    which a canonical id was returned, and raises ``GCMError`` for any
    other per-message error. Returns the raw response dict.
    """
    response = response_data
    if response.get("failure") or response.get("canonical_ids"):
        ids_to_remove, old_new_ids = [], []
        throw_error = False
        # The "results" list is index-aligned with the registration_ids sent.
        for index, result in enumerate(response["results"]):
            error = result.get("error")
            if error:
                # https://firebase.google.com/docs/cloud-messaging/http-server-ref#error-codes
                # If error is NotRegistered or InvalidRegistration, then we will deactivate devices
                # because this registration ID is no more valid and can't be used to send messages,
                # otherwise raise error
                if error in ("NotRegistered", "InvalidRegistration"):
                    ids_to_remove.append(registration_ids[index])
                else:
                    throw_error = True
                    result["original_registration_id"] = registration_ids[index]
            # If registration_id is set, replace the original ID with the new value (canonical ID)
            # in your server database. Note that the original ID is not part of the result, you need
            # to obtain it from the list of registration_ids in the request (using the same index).
            new_id = result.get("registration_id")
            if new_id:
                old_new_ids.append((registration_ids[index], new_id))
        if ids_to_remove:
            removed = GCMDevice.objects.filter(
                registration_id__in=ids_to_remove, cloud_message_type=cloud_type
            )
            removed.update(active=False)
        for old_id, new_id in old_new_ids:
            _cm_handle_canonical_id(new_id, old_id, cloud_type)
        # Raised last so devices are cleaned up even when an error occurred.
        if throw_error:
            raise GCMError(response)
    return response
def _cm_send_request(
    registration_ids, data, cloud_type="GCM", application_id=None,
    use_fcm_notifications=True, **kwargs
):
    """
    Sends a FCM or GCM notification to one or more registration_ids as json data.
    The registration_ids needs to be a list.

    ``data`` holds the message payload; for FCM, notification-related keys
    (and a ``message`` key, mapped to ``body``) are moved from ``data`` and
    ``kwargs`` into the "notification" payload unless
    ``use_fcm_notifications`` is False. Raises ``ImproperlyConfigured``
    for an unknown ``cloud_type``.
    """
    payload = {"registration_ids": registration_ids} if registration_ids else {}
    data = data.copy()  # copied because notification keys are popped below
    # If using FCM, optionnally autodiscovers notification related keys
    # https://firebase.google.com/docs/cloud-messaging/concept-options#notifications_and_data_messages
    if cloud_type == "FCM" and use_fcm_notifications:
        notification_payload = {}
        if "message" in data:
            notification_payload["body"] = data.pop("message", None)
        for key in FCM_NOTIFICATIONS_PAYLOAD_KEYS:
            value_from_extra = data.pop(key, None)
            if value_from_extra:
                notification_payload[key] = value_from_extra
            # kwargs take precedence over values found in data
            value_from_kwargs = kwargs.pop(key, None)
            if value_from_kwargs:
                notification_payload[key] = value_from_kwargs
        if notification_payload:
            payload["notification"] = notification_payload
    if data:
        payload["data"] = data
    # Attach any additional non falsy keyword args (targets, options)
    # See ref : https://firebase.google.com/docs/cloud-messaging/http-server-ref#table1
    payload.update({
        k: v for k, v in kwargs.items() if v and (k in FCM_TARGETS_KEYS or k in FCM_OPTIONS_KEYS)
    })
    # Sort the keys for deterministic output (useful for tests)
    json_payload = json.dumps(payload, separators=(",", ":"), sort_keys=True).encode("utf-8")
    # Sends requests and handles the response
    if cloud_type == "GCM":
        response = json.loads(_gcm_send(
            json_payload, "application/json", application_id=application_id
        ))
    elif cloud_type == "FCM":
        response = json.loads(_fcm_send(
            json_payload, "application/json", application_id=application_id
        ))
    else:
        raise ImproperlyConfigured("cloud_type must be FCM or GCM not %s" % str(cloud_type))
    return _cm_handle_response(registration_ids, response, cloud_type, application_id)
def _cm_handle_canonical_id(canonical_id, current_id, cloud_type):
    """
    Handle situation when FCM server response contains canonical ID

    If an active device already exists under the canonical id, the stale
    device is deactivated; otherwise the stale device is renamed in place
    to the canonical id.
    """
    devices = GCMDevice.objects.filter(cloud_message_type=cloud_type)
    if devices.filter(registration_id=canonical_id, active=True).exists():
        devices.filter(registration_id=current_id).update(active=False)
    else:
        devices.filter(registration_id=current_id).update(registration_id=canonical_id)
def send_message(registration_ids, data, cloud_type, application_id=None, **kwargs):
    """
    Sends a FCM (or GCM) notification to one or more registration_ids. The registration_ids
    can be a list or a single string. This will send the notification as json data.

    A reference of extra keyword arguments sent to the server is available here:
    https://firebase.google.com/docs/cloud-messaging/http-server-ref#table1

    Returns a single response dict for one chunk, a list of dicts for
    several, or None when there is no recipient at all.
    """
    if cloud_type in ("FCM", "GCM"):
        max_recipients = get_manager().get_max_recipients(cloud_type, application_id)
    else:
        raise ImproperlyConfigured("cloud_type must be FCM or GCM not %s" % str(cloud_type))
    # Checks for valid recipient
    if registration_ids is None and "/topics/" not in kwargs.get("to", ""):
        return
    # Bundles the registration_ids in an list if only one is sent
    if not isinstance(registration_ids, list):
        registration_ids = [registration_ids] if registration_ids else None
    # FCM only allows up to 1000 reg ids per bulk message
    # https://firebase.google.com/docs/cloud-messaging/server#http-request
    if registration_ids:
        ret = []
        for chunk in _chunks(registration_ids, max_recipients):
            ret.append(_cm_send_request(
                chunk, data, cloud_type=cloud_type, application_id=application_id, **kwargs
            ))
        return ret[0] if len(ret) == 1 else ret
    else:
        # Topic send: no registration ids, target comes from kwargs["to"].
        return _cm_send_request(None, data, cloud_type=cloud_type, **kwargs)


# Backwards-compatible alias: bulk send uses the same code path.
send_bulk_message = send_message
| 34.483568 | 99 | 0.75194 |
d50e6361862b8c61ec0b4f22ef0ef8f72b1b44fe | 1,228 | py | Python | tests/parsers/sqlite_plugins/kodi.py | roshanmaskey/plaso | 637856f578eb4bc81f62b97d7f483f69314e7f47 | [
"Apache-2.0"
] | 1,253 | 2015-01-02T13:58:02.000Z | 2022-03-31T08:43:39.000Z | tests/parsers/sqlite_plugins/kodi.py | roshanmaskey/plaso | 637856f578eb4bc81f62b97d7f483f69314e7f47 | [
"Apache-2.0"
] | 3,388 | 2015-01-02T11:17:58.000Z | 2022-03-30T10:21:45.000Z | tests/parsers/sqlite_plugins/kodi.py | roshanmaskey/plaso | 637856f578eb4bc81f62b97d7f483f69314e7f47 | [
"Apache-2.0"
] | 376 | 2015-01-20T07:04:54.000Z | 2022-03-04T23:53:00.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Kodi videos plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import kodi
from tests.parsers.sqlite_plugins import test_lib
class KodiVideosTest(test_lib.SQLitePluginTestCase):
  """Tests for the Kodi videos database plugin."""

  def testProcess(self):
    """Test the Process function on a Kodi Videos database."""
    plugin = kodi.KodiMyVideosPlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['MyVideos107.db'], plugin)

    # The MyVideos107.db fixture is expected to produce exactly 4 events
    # with no extraction or recovery warnings.
    self.assertEqual(storage_writer.number_of_events, 4)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetSortedEvents())

    # Spot-check the second event (sorted order) against known values.
    expected_event_values = {
        'data_type': 'kodi:videos:viewing',
        'date_time': '2017-07-16 04:54:54',
        'filename': 'plugin://plugin.video.youtube/play/?video_id=7WX0-O_ENlk',
        'play_count': 1,
        'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED}

    self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
| 30.7 | 79 | 0.73127 |
3bb761c55ee39e3e74f8f9858d257644386c4be9 | 1,520 | py | Python | teslakit/util/operations.py | teslakit/teslak | 3f3dda08c5c5998cb2a7debbf22f2be675a4ff8b | [
"MIT"
] | 12 | 2019-11-14T22:19:12.000Z | 2022-03-04T01:25:33.000Z | teslakit/util/operations.py | anderdyl/teslaCoSMoS | 1495bfa2364ddbacb802d145b456a35213abfb7c | [
"MIT"
] | 5 | 2020-03-24T18:21:41.000Z | 2021-08-23T20:39:43.000Z | teslakit/util/operations.py | anderdyl/teslaCoSMoS | 1495bfa2364ddbacb802d145b456a35213abfb7c | [
"MIT"
] | 2 | 2021-03-06T07:54:41.000Z | 2021-06-30T14:33:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import numpy as np
from math import sqrt
def GetDivisors(x):
    'Return the proper divisors of x: every integer i with 1 <= i < x and x % i == 0'
    return [d for d in range(1, x) if x % d == 0]
def GetUniqueRows(np_array):
    '''
    Count occurrences of each distinct row of np_array.

    Returns a numpy array with one row per distinct input row (in order of
    first appearance), with the occurrence count appended as a last column.
    '''
    counts = collections.OrderedDict()
    for row in np_array:
        key = tuple(row)
        counts[key] = counts.get(key, 0) + 1

    rows_with_counts = [list(key) + [count] for key, count in counts.items()]
    return np.asarray(rows_with_counts)
def GetBestRowsCols(n):
    'try to square number n, used at gridspec plots'
    root = sqrt(n)
    if root.is_integer():
        # Perfect square: use a square grid.
        side = int(root)
        return side, side

    # Otherwise take the middle proper divisor as the column count
    # (divisor computation inlined from GetDivisors).
    divisors = [d for d in range(1, n) if n % d == 0]
    n_c = divisors[int(len(divisors) / 2)]
    return int(n / n_c), n_c
def GetRepeatedValues(series):
    '''
    Find adjacent repeated values inside series.

    Returns a list of (start, stop) index tuples, one per run of equal
    adjacent values, with `stop` exclusive (Python-slice convention).

    Bug fix: the previous version unconditionally extended any run that was
    being closed at the second-to-last position to include the final
    element, so e.g. [1, 1, 2] wrongly returned [(0, 3)] instead of
    [(0, 2)]. A run is now only closed with the series end when it is
    still open when the scan finishes.
    '''
    l_subseq_index = []
    n = len(series)
    ix = 0
    while ix < n - 1:
        if series[ix] == series[ix + 1]:
            # Run starts at ix; advance to the last index of the run.
            start = ix
            while ix < n - 1 and series[ix] == series[ix + 1]:
                ix += 1
            l_subseq_index.append((start, ix + 1))
        ix += 1
    return l_subseq_index
| 20.540541 | 72 | 0.542105 |
2db5ec85b013138c52c34c6c1d714a9b7c51ce20 | 35,521 | py | Python | brian2/tests/test_functions.py | Debilski/brian2 | 560377aba16d8ddaba55fd50432b1142f6233f66 | [
"BSD-2-Clause"
] | null | null | null | brian2/tests/test_functions.py | Debilski/brian2 | 560377aba16d8ddaba55fd50432b1142f6233f66 | [
"BSD-2-Clause"
] | null | null | null | brian2/tests/test_functions.py | Debilski/brian2 | 560377aba16d8ddaba55fd50432b1142f6233f66 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
import os
import pytest
from numpy.testing import assert_equal
from brian2 import *
from brian2.core.functions import timestep
from brian2.parsing.sympytools import str_to_sympy, sympy_to_str
from brian2.utils.logger import catch_logs
from brian2.tests.utils import assert_allclose
from brian2.codegen.generators import CodeGenerator
from brian2.codegen.codeobject import CodeObject
@pytest.mark.codegen_independent
def test_constants_sympy():
    '''
    Make sure that symbolic constants are understood correctly by sympy
    '''
    # Symbolic simplification should resolve inf, pi and e exactly.
    assert sympy_to_str(str_to_sympy('1.0/inf')) == '0'
    assert sympy_to_str(str_to_sympy('sin(pi)')) == '0'
    assert sympy_to_str(str_to_sympy('log(e)')) == '1'
@pytest.mark.standalone_compatible
def test_constants_values():
    '''
    Make sure that symbolic constants use the correct values in code
    '''
    G = NeuronGroup(3, 'v : 1')
    # Assign each neuron one symbolic constant via a string expression so
    # the value goes through code generation.
    G.v[0] = 'pi'
    G.v[1] = 'e'
    G.v[2] = 'inf'
    run(0*ms)
    assert_allclose(G.v[:], [np.pi, np.e, np.inf])
def test_math_functions():
    '''
    Test that math functions give the same result, regardless of whether used
    directly or in generated Python or C++ code.
    '''
    default_dt = defaultclock.dt
    test_array = np.array([-1, -0.5, 0, 0.5, 1])

    # Wrapper mimicking the int() cast available in model code.
    def int_(x):
        return array(x, dtype=int)
    int_.__name__ = 'int'

    with catch_logs() as _:  # Let's suppress warnings about illegal values
        # Functions with a single argument
        for func in [cos, tan, sinh, cosh, tanh,
                     arcsin, arccos, arctan,
                     log, log10,
                     exp, np.sqrt,
                     np.ceil, np.floor, np.sign, int_]:

            # Calculate the result directly
            numpy_result = func(test_array)

            # Calculate the result in a somewhat complicated way by using a
            # subexpression in a NeuronGroup
            if func.__name__ == 'absolute':
                # we want to use the name abs instead of absolute
                func_name = 'abs'
            else:
                func_name = func.__name__
            G = NeuronGroup(len(test_array),
                            '''func = {func}(variable) : 1
                               variable : 1'''.format(func=func_name))
            G.variable = test_array
            mon = StateMonitor(G, 'func', record=True)
            net = Network(G, mon)
            # A single time step is enough to evaluate the subexpression.
            net.run(default_dt)

            assert_allclose(numpy_result, mon.func_.flatten(),
                            err_msg='Function %s did not return the correct values' % func.__name__)

        # Functions/operators
        scalar = 3
        for func, operator in [(np.power, '**'), (np.mod, '%')]:

            # Calculate the result directly
            numpy_result = func(test_array, scalar)

            # Calculate the result in a somewhat complicated way by using a
            # subexpression in a NeuronGroup
            G = NeuronGroup(len(test_array),
                            '''func = variable {op} scalar : 1
                               variable : 1'''.format(op=operator))
            G.variable = test_array
            mon = StateMonitor(G, 'func', record=True)
            net = Network(G, mon)
            net.run(default_dt)

            assert_allclose(numpy_result, mon.func_.flatten(),
                            err_msg='Function %s did not return the correct values' % func.__name__)
@pytest.mark.standalone_compatible
def test_clip():
    """Check the clip() function for integer and float expressions/bounds."""
    G = NeuronGroup(4, '''
                       clipexpr1 = clip(integer_var1, 0, 1) : integer
                       clipexpr2 = clip(integer_var2, -0.5, 1.5) : integer
                       clipexpr3 = clip(float_var1, 0, 1) : 1
                       clipexpr4 = clip(float_var2, -0.5, 1.5) : 1
                       integer_var1 : integer
                       integer_var2 : integer
                       float_var1 : 1
                       float_var2 : 1
                       ''')
    G.integer_var1 = [0, 1, -1, 2]
    G.integer_var2 = [0, 1, -1, 2]
    G.float_var1 = [0., 1., -1., 2.]
    G.float_var2 = [0., 1., -1., 2.]
    s_mon = StateMonitor(G, ['clipexpr1', 'clipexpr2',
                             'clipexpr3', 'clipexpr4'], record=True)
    run(defaultclock.dt)
    assert_equal(s_mon.clipexpr1.flatten(), [0, 1, 0, 1])
    assert_equal(s_mon.clipexpr2.flatten(), [0, 1, 0, 1])
    assert_allclose(s_mon.clipexpr3.flatten(), [0, 1, 0, 1])
    assert_allclose(s_mon.clipexpr4.flatten(), [0, 1, -0.5, 1.5])
@pytest.mark.standalone_compatible
def test_bool_to_int():
    # Test that boolean expressions and variables are correctly converted into
    # integers
    G = NeuronGroup(2, '''
                       intexpr1 = int(bool_var) : integer
                       intexpr2 = int(float_var > 1.0) : integer
                       bool_var : boolean
                       float_var : 1
                       ''')
    G.bool_var = [True, False]
    G.float_var = [2.0, 0.5]
    s_mon = StateMonitor(G, ['intexpr1', 'intexpr2'], record=True)
    run(defaultclock.dt)
    # True -> 1, False -> 0 for both the variable and the comparison.
    assert_equal(s_mon.intexpr1.flatten(), [1, 0])
    assert_equal(s_mon.intexpr2.flatten(), [1, 0])
@pytest.mark.codegen_independent
def test_timestep_function():
    """Check timestep() scalar/array handling outside of a run."""
    dt = defaultclock.dt_
    # Check that multiples of dt end up in the correct time step
    t = np.arange(100000)*dt
    assert_equal(timestep(t, dt), np.arange(100000))

    # Scalar values should stay scalar
    ts = timestep(0.0005, 0.0001)
    assert np.isscalar(ts) and ts == 5

    # Length-1 arrays should stay arrays
    ts = timestep(np.array([0.0005]), 0.0001)
    assert ts.shape == (1,) and ts == 5
@pytest.mark.standalone_compatible
def test_timestep_function_during_run():
    """Check that timestep() can be used in model code during a run."""
    group = NeuronGroup(2, '''ref_t : second
                              ts = timestep(ref_t, dt) + timestep(t, dt) : integer''')
    # One very negative reference time and one at 5 time steps.
    group.ref_t = [-1e4*second, 5*defaultclock.dt]
    mon = StateMonitor(group, 'ts', record=True)
    run(5*defaultclock.dt)
    assert all(mon.ts[0] <= -1e4)
    # timestep(ref_t)=5 plus the running timestep(t) = 0..4.
    assert_equal(mon.ts[1], [5, 6, 7, 8, 9])
@pytest.mark.standalone_compatible
def test_user_defined_function():
    """A user function with explicit C++/Cython implementations should give
    the same result as NumPy in all code generation targets."""
    @implementation('cpp', """
                inline double usersin(double x)
                {
                    return sin(x);
                }
                """)
    @implementation('cython', '''
            cdef double usersin(double x):
                return sin(x)
            ''')
    @check_units(x=1, result=1)
    def usersin(x):
        return np.sin(x)

    default_dt = defaultclock.dt
    test_array = np.array([0, 1, 2, 3])
    G = NeuronGroup(len(test_array),
                    '''func = usersin(variable) : 1
                       variable : 1''')
    G.variable = test_array
    mon = StateMonitor(G, 'func', record=True)
    run(default_dt)

    assert_allclose(np.sin(test_array), mon.func_.flatten())
def test_user_defined_function_units():
    '''
    Test the preparation of functions for use in code with check_units.
    '''
    if prefs.codegen.target != 'numpy':
        pytest.skip('numpy-only test')

    def nothing_specified(x, y, z):
        return x*(y+z)

    # Three variants of the same function with different amounts of unit
    # information declared via check_units.
    no_result_unit = check_units(x=1, y=second, z=second)(nothing_specified)
    one_arg_missing = check_units(x=1, z=second, result=second)(nothing_specified)
    all_specified = check_units(x=1, y=second, z=second, result=second)(nothing_specified)

    G = NeuronGroup(1, '''a : 1
                          b : second
                          c : second''',
                    namespace={'nothing_specified': nothing_specified,
                               'no_result_unit': no_result_unit,
                               'one_arg_missing': one_arg_missing,
                               'all_specified': all_specified})
    net = Network(G)
    net.run(0*ms)  # make sure we have a clock and therefore a t
    # Fully annotated function: usable in code.
    G.c = 'all_specified(a, b, t)'
    # Missing argument or result units: rejected with ValueError.
    with pytest.raises(ValueError):
        setattr(G, 'c', 'one_arg_missing(a, b, t)')
    with pytest.raises(ValueError):
        setattr(G, 'c', 'no_result_unit(a, b, t)')
    # A plain function without check_units cannot be used at all.
    with pytest.raises(KeyError):
        setattr(G, 'c', 'nothing_specified(a, b, t)')
    # Unit mismatches in the assignment target or argument order.
    with pytest.raises(DimensionMismatchError):
        setattr(G, 'a', 'all_specified(a, b, t)')
    with pytest.raises(DimensionMismatchError):
        setattr(G, 'a', 'all_specified(b, a, t)')
def test_simple_user_defined_function():
    # Make sure that it's possible to use a Python function directly, without
    # additional wrapping
    @check_units(x=1, result=1)
    def usersin(x):
        return np.sin(x)
    usersin.stateless = True

    default_dt = defaultclock.dt
    test_array = np.array([0, 1, 2, 3])
    G = NeuronGroup(len(test_array),
                    '''func = usersin(variable) : 1
                       variable : 1''',
                    codeobj_class=NumpyCodeObject)
    G.variable = test_array
    mon = StateMonitor(G, 'func', record=True, codeobj_class=NumpyCodeObject)
    net = Network(G, mon)
    net.run(default_dt)

    assert_allclose(np.sin(test_array), mon.func_.flatten())

    # Check that it raises an error for C++
    # NOTE(review): scipy.weave only exists on legacy Python 2 setups; on
    # modern installations the ImportError branch is taken and nothing runs.
    try:
        import scipy.weave
        G = NeuronGroup(len(test_array),
                        '''func = usersin(variable) : 1
                           variable : 1''',
                        codeobj_class=WeaveCodeObject)
        mon = StateMonitor(G, 'func', record=True,
                           codeobj_class=WeaveCodeObject)
        net = Network(G, mon)
        # Without a C++ implementation, weave code generation must fail.
        with pytest.raises(NotImplementedError):
            net.run(0.1*ms)
    except ImportError:
        pass
def test_manual_user_defined_function():
    """Wrap a plain Python function in Function manually, with explicit
    argument/result units, and check unit errors plus the discard_units path."""
    if prefs.codegen.target != 'numpy':
        pytest.skip('numpy-only test')

    default_dt = defaultclock.dt

    # User defined function without any decorators
    def foo(x, y):
        return x + y + 3*volt
    orig_foo = foo
    # Since the function is not annotated with check units, we need to specify
    # both the units of the arguments and the return unit
    with pytest.raises(ValueError):
        Function(foo, return_unit=volt)
    with pytest.raises(ValueError):
        Function(foo, arg_units=[volt, volt])
    foo = Function(foo, arg_units=[volt, volt], return_unit=volt)

    assert foo(1*volt, 2*volt) == 6*volt

    # Incorrect argument units
    group = NeuronGroup(1, '''
                           dv/dt = foo(x, y)/ms : volt
                           x : 1
                           y : 1''')
    net = Network(group)
    with pytest.raises(DimensionMismatchError):
        net.run(0*ms, namespace={ 'foo': foo})

    # Incorrect output unit
    group = NeuronGroup(1, '''
                           dv/dt = foo(x, y)/ms : 1
                           x : volt
                           y : volt''')
    net = Network(group)
    with pytest.raises(DimensionMismatchError):
        net.run(0*ms, namespace={'foo': foo})

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(default_dt)

    assert mon[0].func == [6] * volt

    # discard units
    foo.implementations.add_numpy_implementation(orig_foo,
                                                 discard_units=True)
    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(default_dt)

    assert mon[0].func == [6] * volt
def test_manual_user_defined_function_weave():
    """Attach a C++ support-code implementation to a manually wrapped
    Function and run it under the weave target."""
    if prefs.codegen.target != 'weave':
        pytest.skip('weave-only test')

    # User defined function without any decorators
    def foo(x, y):
        return x + y + 3*volt
    foo = Function(foo, arg_units=[volt, volt], return_unit=volt)

    code = {'support_code': '''
    inline double foo(const double x, const double y)
    {
        return x + y + 3;
    }
    '''}

    foo.implementations.add_implementation('cpp', code)

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_manual_user_defined_function_cpp_standalone_compiler_args():
    """All supported compiler arguments should be accepted for a C++
    implementation in standalone mode (only the macro is actually used)."""
    set_device('cpp_standalone', directory=None)

    @implementation('cpp', '''
    static inline double foo(const double x, const double y)
    {
        return x + y + _THREE;
    }''',  # just check whether we can specify the supported compiler args,
           # only the define macro is actually used
           headers=[], sources=[], libraries=[], include_dirs=[],
           library_dirs=[], runtime_library_dirs=[],
           define_macros=[('_THREE', '3')])
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_manual_user_defined_function_cpp_standalone_wrong_compiler_args1():
    """An unknown compiler keyword argument must raise ValueError at run."""
    set_device('cpp_standalone', directory=None)

    @implementation('cpp', '''
    static inline double foo(const double x, const double y)
    {
        return x + y + _THREE;
    }''', some_arg=[])  # non-existing argument
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    with pytest.raises(ValueError):
        net.run(defaultclock.dt, namespace={'foo': foo})
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_manual_user_defined_function_cpp_standalone_wrong_compiler_args2():
    """A known compiler keyword with a wrong value type must raise TypeError."""
    set_device('cpp_standalone', directory=None)

    @implementation('cpp', '''
    static inline double foo(const double x, const double y)
    {
        return x + y + _THREE;
    }''', headers='<stdio.h>')  # existing argument, wrong value type
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    with pytest.raises(TypeError):
        net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_weave_compiler_args():
    """All supported compiler arguments should be accepted under weave."""
    if prefs.codegen.target != 'weave':
        pytest.skip('weave-only test')

    @implementation('cpp', '''
    static inline double foo(const double x, const double y)
    {
        return x + y + _THREE;
    }''',  # just check whether we can specify the supported compiler args,
           # only the define macro is actually used
           headers=[], sources=[], libraries=[], include_dirs=[],
           library_dirs=[], runtime_library_dirs=[],
           define_macros=[('_THREE', '3')])
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
def test_manual_user_defined_function_weave_wrong_compiler_args1():
    """An unknown compiler keyword must raise ValueError under weave."""
    if prefs.codegen.target != 'weave':
        pytest.skip('weave-only test')

    @implementation('cpp', '''
    static inline double foo(const double x, const double y)
    {
        return x + y + _THREE;
    }''', some_arg=[])  # non-existing argument
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    with pytest.raises(ValueError):
        net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_weave_wrong_compiler_args2():
    """A wrong value type for a known compiler keyword must raise TypeError."""
    if prefs.codegen.target != 'weave':
        pytest.skip('weave-only test')

    @implementation('cpp', '''
    static inline double foo(const double x, const double y)
    {
        return x + y + _THREE;
    }''', headers='<stdio.h>')  # existing argument, wrong value type
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    with pytest.raises(TypeError):
        net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_cython_compiler_args():
    """All supported compiler arguments should be accepted under Cython."""
    if prefs.codegen.target != 'cython':
        pytest.skip('Cython-only test')

    @implementation('cython', '''
    cdef double foo(double x, const double y):
        return x + y + 3
    ''',  # just check whether we can specify the supported compiler args,
          libraries=[], include_dirs=[], library_dirs=[], runtime_library_dirs=[])
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
def test_manual_user_defined_function_cython_wrong_compiler_args1():
    """An unknown compiler keyword must raise ValueError under Cython."""
    if prefs.codegen.target != 'cython':
        pytest.skip('Cython-only test')

    @implementation('cython', '''
    cdef double foo(double x, const double y):
        return x + y + 3
    ''', some_arg=[])  # non-existing argument
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    with pytest.raises(ValueError):
        net.run(defaultclock.dt, namespace={'foo': foo})
def test_manual_user_defined_function_cython_wrong_compiler_args2():
    """A wrong value type for a known compiler keyword must raise TypeError."""
    if prefs.codegen.target != 'cython':
        pytest.skip('Cython-only test')

    @implementation('cython', '''
    cdef double foo(double x, const double y):
        return x + y + 3
    ''', libraries='cstdio')  # existing argument, wrong value type
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    with pytest.raises(TypeError):
        net.run(defaultclock.dt, namespace={'foo': foo})
def test_external_function_cython():
    """A Cython implementation can live in an external .pyx source file."""
    if prefs.codegen.target != 'cython':
        pytest.skip('Cython-only test')

    this_dir = os.path.abspath(os.path.dirname(__file__))

    @implementation('cython', 'from func_def_cython cimport foo',
                    sources=[os.path.join(this_dir, 'func_def_cython.pyx')])
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
def test_external_function_weave():
    """A C++ implementation can live in external header/source files (weave)."""
    if prefs.codegen.target != 'weave':
        pytest.skip('weave-only test')

    this_dir = os.path.abspath(os.path.dirname(__file__))

    @implementation('cpp', '//all code in func_def_cpp.cpp',
                    headers=['"func_def_cpp.h"'],
                    include_dirs=[this_dir],
                    sources=[os.path.join(this_dir, 'func_def_cpp.cpp')])
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_external_function_cpp_standalone():
    """External C++ header/source implementations work in standalone mode."""
    set_device('cpp_standalone', directory=None)

    this_dir = os.path.abspath(os.path.dirname(__file__))

    @implementation('cpp', '//all code in func_def_cpp.cpp',
                    headers=['"func_def_cpp.h"'],
                    include_dirs=[this_dir],
                    sources=[os.path.join(this_dir, 'func_def_cpp.cpp')])
    @check_units(x=volt, y=volt, result=volt)
    def foo(x, y):
        return x + y + 3*volt

    G = NeuronGroup(1, '''
                       func = foo(x, y) : volt
                       x : volt
                       y : volt''')
    G.x = 1*volt
    G.y = 2*volt
    mon = StateMonitor(G, 'func', record=True)
    net = Network(G, mon)
    net.run(defaultclock.dt)
    assert mon[0].func == [6] * volt
@pytest.mark.codegen_independent
def test_user_defined_function_discarding_units():
    # A function with units that should discard units also inside the function
    @implementation('numpy', discard_units=True)
    @check_units(v=volt, result=volt)
    def foo(v):
        return v + 3*volt  # this normally raises an error for unitless v

    assert foo(5*volt) == 8*volt

    # Test the function that is used during a run
    # (the numpy implementation operates on plain floats: 5 -> 8).
    assert foo.implementations[NumpyCodeObject].get_code(None)(5) == 8
@pytest.mark.codegen_independent
def test_user_defined_function_discarding_units_2():
    # Add a numpy implementation explicitly (as in TimedArray)
    unit = volt
    @check_units(v=volt, result=unit)
    def foo(v):
        return v + 3*unit  # this normally raises an error for unitless v
    foo = Function(pyfunc=foo)
    # Separate unitless implementation registered for code generation.
    def unitless_foo(v):
        return v + 3
    foo.implementations.add_implementation('numpy', code=unitless_foo)

    assert foo(5*volt) == 8*volt

    # Test the function that is used during a run
    assert foo.implementations[NumpyCodeObject].get_code(None)(5) == 8
@pytest.mark.codegen_independent
def test_function_implementation_container():
import brian2.codegen.targets as targets
class ACodeGenerator(CodeGenerator):
class_name = 'A Language'
class BCodeGenerator(CodeGenerator):
class_name = 'B Language'
class ACodeObject(CodeObject):
generator_class = ACodeGenerator
class_name = 'A'
class A2CodeObject(CodeObject):
generator_class = ACodeGenerator
class_name = 'A2'
class BCodeObject(CodeObject):
generator_class = BCodeGenerator
class_name = 'B'
# Register the code generation targets
_previous_codegen_targets = set(targets.codegen_targets)
targets.codegen_targets = {ACodeObject, BCodeObject}
@check_units(x=volt, result=volt)
def foo(x):
return x
f = Function(foo)
container = f.implementations
# inserting into the container with a CodeGenerator class
container.add_implementation(BCodeGenerator, code='implementation B language')
assert container[BCodeGenerator].get_code(None) == 'implementation B language'
# inserting into the container with a CodeObject class
container.add_implementation(ACodeObject, code='implementation A CodeObject')
assert container[ACodeObject].get_code(None) == 'implementation A CodeObject'
# inserting into the container with a name of a CodeGenerator
container.add_implementation('A Language', 'implementation A Language')
assert container['A Language'].get_code(None) == 'implementation A Language'
assert container[ACodeGenerator].get_code(None) == 'implementation A Language'
assert container[A2CodeObject].get_code(None) == 'implementation A Language'
# inserting into the container with a name of a CodeObject
container.add_implementation('B', 'implementation B CodeObject')
assert container['B'].get_code(None) == 'implementation B CodeObject'
assert container[BCodeObject].get_code(None) == 'implementation B CodeObject'
with pytest.raises(KeyError):
container['unknown']
# some basic dictionary properties
assert len(container) == 4
assert set((key for key in container)) == {'A Language', 'B', ACodeObject,
BCodeGenerator}
# Restore the previous codegeneration targets
targets.codegen_targets = _previous_codegen_targets
def test_function_dependencies_weave():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
float foo(float x)
{
return 42*0.001;
}''')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cpp', '''
float bar(float x)
{
return 2*foo(x);
}''', dependencies={'foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_weave_rename():
if prefs.codegen.target != 'weave':
pytest.skip('weave-only test')
@implementation('cpp', '''
float _foo(float x)
{
return 42*0.001;
}''', name='_foo')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cpp', '''
float bar(float x)
{
return 2*my_foo(x);
}''', dependencies={'my_foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_cython():
if prefs.codegen.target != 'cython':
pytest.skip('cython-only test')
@implementation('cython', '''
cdef float foo(float x):
return 42*0.001
''')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cython', '''
cdef float bar(float x):
return 2*foo(x)
''', dependencies={'foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_cython_rename():
if prefs.codegen.target != 'cython':
pytest.skip('cython-only test')
@implementation('cython', '''
cdef float _foo(float x):
return 42*0.001
''', name='_foo')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for numpy and an
# implementation for C++ that makes use of the previous function.
@implementation('cython', '''
cdef float bar(float x):
return 2*my_foo(x)
''', dependencies={'my_foo': foo})
@check_units(x=volt, result=volt)
def bar(x):
return 84*mV
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
def test_function_dependencies_numpy():
if prefs.codegen.target != 'numpy':
pytest.skip('numpy-only test')
@implementation('cpp', '''
float foo(float x)
{
return 42*0.001;
}''')
@check_units(x=volt, result=volt)
def foo(x):
return 42*mV
# Second function with an independent implementation for C++ and an
# implementation for numpy that makes use of the previous function.
# Note that we don't need to use the explicit dependencies mechanism for
# numpy, since the Python function stores a reference to the referenced
# function directly
@implementation('cpp', '''
float bar(float x)
{
return 84*0.001;
}''')
@check_units(x=volt, result=volt)
def bar(x):
return 2*foo(x)
G = NeuronGroup(5, 'v : volt')
G.run_regularly('v = bar(v)')
net = Network(G)
net.run(defaultclock.dt)
assert_allclose(G.v_[:], 84*0.001)
@pytest.mark.standalone_compatible
def test_repeated_function_dependencies():
# each of the binomial functions adds randn as a depency, see #988
test_neuron = NeuronGroup(1, 'x : 1',
namespace={'bino_1': BinomialFunction(10, 0.5),
'bino_2': BinomialFunction(10, 0.6)})
test_neuron.x = 'bino_1()+bino_2()'
run(0 * ms)
@pytest.mark.standalone_compatible
def test_binomial():
binomial_f_approximated = BinomialFunction(100, 0.1, approximate=True)
binomial_f = BinomialFunction(100, 0.1, approximate=False)
# Just check that it does not raise an error and that it produces some
# values
G = NeuronGroup(1, '''x : 1
y : 1''')
G.run_regularly('''x = binomial_f_approximated()
y = binomial_f()''')
mon = StateMonitor(G, ['x', 'y'], record=0)
run(1*ms)
assert np.var(mon[0].x) > 0
assert np.var(mon[0].y) > 0
@pytest.mark.standalone_compatible
def test_poisson():
# Just check that it does not raise an error and that it produces some
# values
G = NeuronGroup(5, '''l : 1
x : integer
y : integer
z : integer
''')
G.l = [0, 1, 5, 15, 25]
G.run_regularly('''x = poisson(l)
y = poisson(5)
z = poisson(0)''')
mon = StateMonitor(G, ['x', 'y', 'z'], record=True)
run(100*defaultclock.dt)
assert_equal(mon.x[0], 0)
assert all(np.var(mon.x[1:], axis=1) > 0)
assert all(np.var(mon.y, axis=1) > 0)
assert_equal(mon.z, 0)
def test_declare_types():
if prefs.codegen.target != 'numpy':
pytest.skip('numpy-only test')
@declare_types(a='integer', b='float', result='highest')
def f(a, b):
return a*b
assert f._arg_types==['integer', 'float']
assert f._return_type == 'highest'
@declare_types(b='float')
def f(a, b, c):
return a*b*c
assert f._arg_types==['any', 'float', 'any']
assert f._return_type == 'float'
def bad_argtype():
@declare_types(b='floating')
def f(a, b, c):
return a*b*c
with pytest.raises(ValueError):
bad_argtype()
def bad_argname():
@declare_types(d='floating')
def f(a, b, c):
return a*b*c
with pytest.raises(ValueError):
bad_argname()
@check_units(a=volt, b=1)
@declare_types(a='float', b='integer')
def f(a, b):
return a*b
@declare_types(a='float', b='integer')
@check_units(a=volt, b=1)
def f(a, b):
return a*b
def bad_units():
@declare_types(a='integer', b='float')
@check_units(a=volt, b=1, result=volt)
def f(a, b):
return a*b
eqs = '''
dv/dt = f(v, 1)/second : 1
'''
G = NeuronGroup(1, eqs)
Network(G).run(1*ms)
with pytest.raises(TypeError):
bad_units()
def bad_type():
@implementation('numpy', discard_units=True)
@declare_types(a='float', result='float')
@check_units(a=1, result=1)
def f(a):
return a
eqs = '''
a : integer
dv/dt = f(a)*v/second : 1
'''
G = NeuronGroup(1, eqs)
Network(G).run(1*ms)
with pytest.raises(TypeError):
bad_type()
def test_multiple_stateless_function_calls():
    # Check that expressions such as rand() + rand() (which might be incorrectly
    # simplified to 2*rand()) raise an error
    # Case 1: in model equations
    G = NeuronGroup(1, 'dv/dt = (rand() - rand())/second : 1')
    net = Network(G)
    with pytest.raises(NotImplementedError):
        net.run(0*ms)
    # Case 2: in reset statements
    G2 = NeuronGroup(1, 'v:1', threshold='v>1', reset='v=rand() - rand()')
    net2 = Network(G2)
    with pytest.raises(NotImplementedError):
        net2.run(0*ms)
    # Case 3: in run_regularly statements
    G3 = NeuronGroup(1, 'v:1')
    G3.run_regularly('v = rand() - rand()')
    net3 = Network(G3)
    with pytest.raises(NotImplementedError):
        net3.run(0*ms)
if __name__ == '__main__':
from brian2 import prefs
# prefs.codegen.target = 'numpy'
import time
from _pytest.outcomes import Skipped
for f in [
test_constants_sympy,
test_constants_values,
test_math_functions,
test_clip,
test_bool_to_int,
test_timestep_function,
test_timestep_function_during_run,
test_user_defined_function,
test_user_defined_function_units,
test_simple_user_defined_function,
test_manual_user_defined_function,
test_manual_user_defined_function_weave,
test_external_function_cython,
test_external_function_weave,
test_user_defined_function_discarding_units,
test_user_defined_function_discarding_units_2,
test_function_implementation_container,
test_function_dependencies_numpy,
test_function_dependencies_weave,
test_function_dependencies_weave_rename,
test_function_dependencies_cython,
test_function_dependencies_cython_rename,
test_repeated_function_dependencies,
test_binomial,
test_poisson,
test_declare_types,
test_multiple_stateless_function_calls,
]:
try:
start = time.time()
f()
print('Test', f.__name__, 'took', time.time()-start)
except Skipped as e:
print('Skipping test', f.__name__, e)
| 32.350638 | 100 | 0.585485 |
083f667265a00673e8a5f99da84d6d745b3f2d6d | 13,398 | py | Python | maize/daemon/keychain_proxy.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 14 | 2021-07-21T19:45:05.000Z | 2022-02-09T04:29:51.000Z | maize/daemon/keychain_proxy.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 9 | 2021-07-24T09:30:46.000Z | 2021-12-05T19:51:29.000Z | maize/daemon/keychain_proxy.py | denern/maize-blockchain | b8639899f44b03232dda90c706d061e5e1158ca3 | [
"Apache-2.0"
] | 5 | 2021-10-04T17:33:47.000Z | 2022-03-15T08:37:51.000Z | import logging
import ssl
from blspy import AugSchemeMPL, PrivateKey
from maize.cmds.init_funcs import check_keys
from maize.daemon.client import DaemonProxy
from maize.daemon.keychain_server import (
KEYCHAIN_ERR_KEYERROR,
KEYCHAIN_ERR_LOCKED,
KEYCHAIN_ERR_MALFORMED_REQUEST,
KEYCHAIN_ERR_NO_KEYS,
)
from maize.server.server import ssl_context_for_client
from maize.util.config import load_config
from maize.util.keychain import (
Keychain,
KeyringIsLocked,
bytes_to_mnemonic,
mnemonic_to_seed,
supports_keyring_passphrase,
)
from maize.util.ws_message import WsRpcMessage
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
class KeyringIsEmpty(Exception):
    """Raised when an operation needs at least one key but the keyring has none."""
    pass
class MalformedKeychainRequest(Exception):
    """Raised when the daemon reports a malformed keychain RPC request."""
    pass
class MalformedKeychainResponse(Exception):
    """Raised when a keychain RPC response is missing expected fields."""
    pass
class KeychainProxyConnectionFailure(Exception):
    """Raised when the keychain daemon cannot be reached (not raised in this module)."""
    pass
class KeychainProxy(DaemonProxy):
    """
    KeychainProxy can act on behalf of a local or remote keychain. In the case of
    wrapping a local keychain, the proxy object simply forwards-along the calls to
    the underlying local keychain. In the remote case, calls are made to the daemon
    over the RPC interface, allowing the daemon to act as the keychain authority.
    """
    def __init__(
        self,
        log: logging.Logger,
        uri: Optional[str] = None,
        ssl_context: Optional[ssl.SSLContext] = None,
        local_keychain: Optional[Keychain] = None,
        user: Optional[str] = None,
        service: Optional[str] = None,
    ):
        self.log = log
        if local_keychain:
            self.keychain = local_keychain
        elif not supports_keyring_passphrase():
            self.keychain = Keychain()  # Proxy locally, don't use RPC
        else:
            self.keychain = None  # type: ignore
        self.keychain_user = user
        self.keychain_service = service
        # DaemonProxy handles the websocket connection; uri is empty when local.
        super().__init__(uri or "", ssl_context)
    def use_local_keychain(self) -> bool:
        """
        Indicates whether the proxy forwards calls to a local keychain
        """
        return self.keychain is not None
    def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage:
        """
        Overrides DaemonProxy.format_request() to add keychain-specific RPC params
        (kc_user/kc_service identify which keyring the daemon should operate on).
        """
        if data is None:
            data = {}
        if self.keychain_user or self.keychain_service:
            data["kc_user"] = self.keychain_user
            data["kc_service"] = self.keychain_service
        return super().format_request(command, data)
    async def get_response_for_request(self, request_name: str, data: Dict[str, Any]) -> Tuple[WsRpcMessage, bool]:
        """Send an RPC request and return (response, success-flag from the response data)."""
        request = self.format_request(request_name, data)
        response = await self._get(request)
        success = response["data"].get("success", False)
        return response, success
    def handle_error(self, response: WsRpcMessage) -> None:
        """
        Common error handling for RPC responses.

        Maps known daemon error codes to typed exceptions; anything else is
        logged and re-raised as a generic Exception.
        """
        error = response["data"].get("error", None)
        if error:
            error_details = response["data"].get("error_details", {})
            if error == KEYCHAIN_ERR_LOCKED:
                raise KeyringIsLocked()
            elif error == KEYCHAIN_ERR_NO_KEYS:
                raise KeyringIsEmpty()
            elif error == KEYCHAIN_ERR_MALFORMED_REQUEST:
                message = error_details.get("message", "")
                raise MalformedKeychainRequest(message)
            else:
                err = f"{response['data'].get('command')} failed with error: {error}"
                self.log.error(f"{err}")
                raise Exception(f"{err}")
    async def add_private_key(self, mnemonic: str, passphrase: str) -> PrivateKey:
        """
        Forwards to Keychain.add_private_key()

        Raises KeyError (carrying the offending word) if the daemon reports an
        invalid mnemonic word.
        """
        key: PrivateKey
        if self.use_local_keychain():
            key = self.keychain.add_private_key(mnemonic, passphrase)
        else:
            response, success = await self.get_response_for_request(
                "add_private_key", {"mnemonic": mnemonic, "passphrase": passphrase}
            )
            if success:
                # The daemon doesn't return the key; re-derive it locally
                # from the mnemonic/passphrase.
                seed = mnemonic_to_seed(mnemonic, passphrase)
                key = AugSchemeMPL.key_gen(seed)
            else:
                error = response["data"].get("error", None)
                if error == KEYCHAIN_ERR_KEYERROR:
                    error_details = response["data"].get("error_details", {})
                    word = error_details.get("word", "")
                    raise KeyError(word)
                else:
                    self.handle_error(response)
        return key
    async def check_keys(self, root_path):
        """
        Forwards to init_funcs.check_keys()
        """
        if self.use_local_keychain():
            check_keys(root_path, self.keychain)
        else:
            response, success = await self.get_response_for_request("check_keys", {"root_path": str(root_path)})
            if not success:
                self.handle_error(response)
    async def delete_all_keys(self):
        """
        Forwards to Keychain.delete_all_keys()
        """
        if self.use_local_keychain():
            self.keychain.delete_all_keys()
        else:
            response, success = await self.get_response_for_request("delete_all_keys", {})
            if not success:
                self.handle_error(response)
    async def delete_key_by_fingerprint(self, fingerprint: int):
        """
        Forwards to Keychain.delete_key_by_fingerprint()
        """
        if self.use_local_keychain():
            self.keychain.delete_key_by_fingerprint(fingerprint)
        else:
            response, success = await self.get_response_for_request(
                "delete_key_by_fingerprint", {"fingerprint": fingerprint}
            )
            if not success:
                self.handle_error(response)
    async def get_all_private_keys(self) -> List[Tuple[PrivateKey, bytes]]:
        """
        Forwards to Keychain.get_all_private_keys()

        In the remote case, each key is re-derived locally from the returned
        entropy and checked against the returned public key; entries that are
        incomplete or fail the check are skipped (with an error log).
        """
        keys: List[Tuple[PrivateKey, bytes]] = []
        if self.use_local_keychain():
            keys = self.keychain.get_all_private_keys()
        else:
            response, success = await self.get_response_for_request("get_all_private_keys", {})
            if success:
                private_keys = response["data"].get("private_keys", None)
                if private_keys is None:
                    err = f"Missing private_keys in {response.get('command')} response"
                    self.log.error(f"{err}")
                    raise MalformedKeychainResponse(f"{err}")
                else:
                    for key_dict in private_keys:
                        pk = key_dict.get("pk", None)
                        ent_str = key_dict.get("entropy", None)
                        if pk is None or ent_str is None:
                            err = f"Missing pk and/or ent in {response.get('command')} response"
                            self.log.error(f"{err}")
                            continue  # We'll skip the incomplete key entry
                        ent = bytes.fromhex(ent_str)
                        mnemonic = bytes_to_mnemonic(ent)
                        seed = mnemonic_to_seed(mnemonic, passphrase="")
                        key = AugSchemeMPL.key_gen(seed)
                        # Sanity check: derived public key must match the daemon's.
                        if bytes(key.get_g1()).hex() == pk:
                            keys.append((key, ent))
                        else:
                            err = "G1Elements don't match"
                            self.log.error(f"{err}")
            else:
                self.handle_error(response)
        return keys
    async def get_first_private_key(self) -> Optional[PrivateKey]:
        """
        Forwards to Keychain.get_first_private_key()

        Returns None if no key exists or the derived key fails verification.
        """
        key: Optional[PrivateKey] = None
        if self.use_local_keychain():
            sk_ent = self.keychain.get_first_private_key()
            if sk_ent:
                key = sk_ent[0]
        else:
            response, success = await self.get_response_for_request("get_first_private_key", {})
            if success:
                private_key = response["data"].get("private_key", None)
                if private_key is None:
                    err = f"Missing private_key in {response.get('command')} response"
                    self.log.error(f"{err}")
                    raise MalformedKeychainResponse(f"{err}")
                else:
                    pk = private_key.get("pk", None)
                    ent_str = private_key.get("entropy", None)
                    if pk is None or ent_str is None:
                        err = f"Missing pk and/or ent in {response.get('command')} response"
                        self.log.error(f"{err}")
                        raise MalformedKeychainResponse(f"{err}")
                    ent = bytes.fromhex(ent_str)
                    mnemonic = bytes_to_mnemonic(ent)
                    seed = mnemonic_to_seed(mnemonic, passphrase="")
                    sk = AugSchemeMPL.key_gen(seed)
                    # Sanity check: derived public key must match the daemon's.
                    if bytes(sk.get_g1()).hex() == pk:
                        key = sk
                    else:
                        err = "G1Elements don't match"
                        self.log.error(f"{err}")
            else:
                self.handle_error(response)
        return key
    async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]:
        """
        Locates and returns a private key matching the provided fingerprint.

        If fingerprint is None, the first available key is returned. Raises
        KeyringIsEmpty (local case) when no keys exist at all.
        """
        key: Optional[PrivateKey] = None
        if self.use_local_keychain():
            private_keys = self.keychain.get_all_private_keys()
            if len(private_keys) == 0:
                raise KeyringIsEmpty()
            else:
                if fingerprint is not None:
                    for sk, _ in private_keys:
                        if sk.get_g1().get_fingerprint() == fingerprint:
                            key = sk
                            break
                else:
                    key = private_keys[0][0]
        else:
            response, success = await self.get_response_for_request(
                "get_key_for_fingerprint", {"fingerprint": fingerprint}
            )
            if success:
                pk = response["data"].get("pk", None)
                ent = response["data"].get("entropy", None)
                if pk is None or ent is None:
                    err = f"Missing pk and/or ent in {response.get('command')} response"
                    self.log.error(f"{err}")
                    raise MalformedKeychainResponse(f"{err}")
                else:
                    mnemonic = bytes_to_mnemonic(bytes.fromhex(ent))
                    seed = mnemonic_to_seed(mnemonic, passphrase="")
                    private_key = AugSchemeMPL.key_gen(seed)
                    # Sanity check: derived public key must match the daemon's.
                    if bytes(private_key.get_g1()).hex() == pk:
                        key = private_key
                    else:
                        err = "G1Elements don't match"
                        self.log.error(f"{err}")
            else:
                self.handle_error(response)
        return key
def wrap_local_keychain(keychain: Keychain, log: logging.Logger) -> KeychainProxy:
    """Expose an existing local Keychain through the KeychainProxy interface.

    The returned proxy forwards every call directly to ``keychain`` rather
    than going over RPC, so it can be used wherever a remote proxy would be.
    """
    proxy = KeychainProxy(local_keychain=keychain, log=log)
    return proxy
async def connect_to_keychain(
    self_hostname: str,
    daemon_port: int,
    ssl_context: Optional[ssl.SSLContext],
    log: logging.Logger,
    user: Optional[str] = None,
    service: Optional[str] = None,
) -> KeychainProxy:
    """
    Connect to the local daemon.

    ``user``/``service`` scope the keychain operations performed via the
    returned proxy (forwarded as kc_user/kc_service RPC params).
    """
    client = KeychainProxy(
        uri=f"wss://{self_hostname}:{daemon_port}", ssl_context=ssl_context, log=log, user=user, service=service
    )
    # Connect to the service if the proxy isn't using a local keychain
    if not client.use_local_keychain():
        await client.start()
    return client
async def connect_to_keychain_and_validate(
    root_path: Path,
    log: logging.Logger,
    *,
    user: Optional[str] = None,
    service: Optional[str] = None,
) -> Optional[KeychainProxy]:
    """
    Connect to the local daemon and do a ping to ensure that something is really
    there and running.

    Returns the connected KeychainProxy, or None if the daemon is unreachable,
    misconfigured, or does not answer the ping.
    """
    try:
        net_config = load_config(root_path, "config.yaml")
        crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
        key_path = root_path / net_config["daemon_ssl"]["private_key"]
        ca_crt_path = root_path / net_config["private_ssl_ca"]["crt"]
        ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
        ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path, log=log)
        connection = await connect_to_keychain(
            net_config["self_hostname"], net_config["daemon_port"], ssl_context, log, user, service
        )
        # If proxying to a local keychain, don't attempt to ping
        if connection.use_local_keychain():
            return connection
        r = await connection.ping()
        if "value" in r["data"] and r["data"]["value"] == "pong":
            return connection
    except Exception as e:
        # Fix: report via the provided logger instead of print() so the failure
        # shows up in the service logs rather than being lost on stdout.
        log.warning("Keychain(daemon) not started yet: %s", e)
        return None
    return None
| 37.634831 | 115 | 0.574489 |
50e1a0397133e1de381235c9aed505e58846bc9c | 115 | py | Python | s3tail/__init__.py | bradrf/s3tail | 8b978880a0f6e7ce584f43caeaf2eef9d69aa865 | [
"MIT"
] | 2 | 2018-02-27T21:39:26.000Z | 2019-10-16T00:02:44.000Z | s3tail/__init__.py | bradrf/s3tail | 8b978880a0f6e7ce584f43caeaf2eef9d69aa865 | [
"MIT"
] | 17 | 2016-10-01T17:23:28.000Z | 2017-02-26T08:01:36.000Z | s3tail/__init__.py | bradrf/s3tail | 8b978880a0f6e7ce584f43caeaf2eef9d69aa865 | [
"MIT"
] | null | null | null | __author__ = 'Brad Robel-Forrest'
__email__ = 'brad@bitpony.com'
__version__ = '0.2.1'
from .s3tail import S3Tail
| 19.166667 | 33 | 0.73913 |
3fedd7fbb60ae4cba31053eea0273d0709acc06c | 589 | py | Python | scripts/report_data_usage.py | int-brain-lab/iblalyx | 645f3eedab5d90f9694da811044c3e7920a3818a | [
"MIT"
] | null | null | null | scripts/report_data_usage.py | int-brain-lab/iblalyx | 645f3eedab5d90f9694da811044c3e7920a3818a | [
"MIT"
] | 1 | 2022-03-18T13:29:38.000Z | 2022-03-18T13:42:50.000Z | scripts/report_data_usage.py | int-brain-lab/iblalyx | 645f3eedab5d90f9694da811044c3e7920a3818a | [
"MIT"
] | null | null | null | from pathlib import Path
from django.db.models import Sum
from data.models import DatasetType
import pandas as pd
dtypes = DatasetType.objects.all().annotate(size=Sum('dataset__file_size'))
df_types = pd.DataFrame.from_records(dtypes.values())
df_types['size'] = df_types['size'] / 1024 ** 3
df_types.to_csv(Path.home().joinpath('dataset_types.csv'))
dtypes.aggregate(siz=Sum('size'))['siz'] / 1024 ** 3
dtypes.filter(name__istartswith='ephysData').aggregate(siz=Sum('size'))['siz'] / 1024 ** 3
dtypes.filter(name__icontains='camera').aggregate(siz=Sum('size'))['siz'] / 1024 ** 3
| 29.45 | 90 | 0.731749 |
6e7e60ad1b8cdf33c0c36bee4827ecf69b747310 | 841 | py | Python | uncurl_app/utils.py | yjzhang/uncurl_app | e432f85f017839df6f082a127e4ec8dd08074ce0 | [
"MIT"
] | 7 | 2020-04-17T07:34:38.000Z | 2021-12-25T23:04:13.000Z | uncurl_app/utils.py | yjzhang/uncurl_app | e432f85f017839df6f082a127e4ec8dd08074ce0 | [
"MIT"
] | 1 | 2021-07-30T23:05:31.000Z | 2021-07-30T23:05:31.000Z | uncurl_app/utils.py | yjzhang/uncurl_app | e432f85f017839df6f082a127e4ec8dd08074ce0 | [
"MIT"
] | null | null | null | import json
import numpy as np
# to encode numpy stuff...
class SimpleEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to native Python types."""

    # (numpy base class, converter producing a JSON-serializable value)
    _CONVERTERS = (
        (np.integer, int),
        (np.floating, float),
        (np.ndarray, lambda arr: arr.tolist()),
    )

    def default(self, o):
        """Try numpy-aware conversions before deferring to the stock encoder."""
        for numpy_type, convert in self._CONVERTERS:
            if isinstance(o, numpy_type):
                return convert(o)
        return super().default(o)
def get_matrix_header(filename):
    """
    Return the (entries, rows, cols) header values of a matrix market file.

    '%'-prefixed comment lines are skipped; the first non-comment line is
    parsed as three integers and returned immediately. Returns (0, 0, 0)
    if the file has no non-comment line.

    Bug fix: the previous version kept scanning after the header, so the
    returned numbers came from the *last* line of the file (a data entry),
    not from the header line.
    """
    with open(filename) as f:
        for line in f:
            if line.startswith('%'):
                continue
            fields = line.split()
            # Same field order as before: first value is the entry count,
            # then rows, then cols (as used by this module's callers).
            return int(fields[0]), int(fields[1]), int(fields[2])
    return 0, 0, 0
965a0bc620009e7a43b74500cc3d55b90200bc3e | 1,239 | py | Python | src/rpg_trajectory_evaluation/trajectory_utils.py | XueLianjie/rpg_trajectory_evaluation | 7f49501d0fa09b5fa3790635ce68653ad8420d93 | [
"MIT"
] | 636 | 2018-10-03T10:37:54.000Z | 2022-03-29T12:36:11.000Z | src/rpg_trajectory_evaluation/trajectory_utils.py | XueLianjie/rpg_trajectory_evaluation | 7f49501d0fa09b5fa3790635ce68653ad8420d93 | [
"MIT"
] | 32 | 2018-10-12T07:43:39.000Z | 2022-03-18T09:46:56.000Z | src/rpg_trajectory_evaluation/trajectory_utils.py | XueLianjie/rpg_trajectory_evaluation | 7f49501d0fa09b5fa3790635ce68653ad8420d93 | [
"MIT"
] | 275 | 2018-10-06T12:48:46.000Z | 2022-03-14T07:52:48.000Z | #!/usr/bin/env python2
"""
@author: Christian Forster
"""
import os
import numpy as np
import transformations as tf
def get_rigid_body_trafo(quat, trans):
    """Assemble a 4x4 homogeneous transform from a quaternion and a translation."""
    trafo = tf.quaternion_matrix(quat)
    trafo[0:3, 3] = trans
    return trafo
def get_distance_from_start(gt_translation):
    """Cumulative travelled distance along a trajectory of xyz positions.

    Returns an array of the same length as the input; entry i is the path
    length accumulated from the first position up to position i (entry 0
    is always 0).
    """
    steps = np.diff(gt_translation[:, 0:3], axis=0)
    step_lengths = np.linalg.norm(steps, axis=1)
    return np.concatenate(([0], np.cumsum(step_lengths)))
def compute_comparison_indices_length(distances, dist, max_dist_diff):
    """For each start index, find the later index whose travelled distance best
    matches (start distance + dist), within a tolerance of max_dist_diff.

    Only candidates at or after the start index are considered; start indices
    with no candidate closer than max_dist_diff contribute nothing.
    """
    n = len(distances)
    comparisons = []
    for start, start_dist in enumerate(distances):
        target = start_dist + dist
        best_idx = -1
        best_err = max_dist_diff
        for candidate in range(start, n):
            err = abs(distances[candidate] - target)
            if err < best_err:
                best_idx = candidate
                best_err = err
        if best_idx >= 0:
            comparisons.append(best_idx)
    return comparisons
def compute_angle(transform):
    """Rotation angle (in degrees) encoded in a 4x4 homogeneous matrix."""
    # Trace formula: trace(R) = 1 + 2*cos(theta) ("An Invitation to 3-D
    # Vision", p. 27). Clamp to [-1, 1] against numerical round-off.
    rotation = transform[0:3, 0:3]
    cos_theta = (np.trace(rotation) - 1) / 2
    cos_theta = max(-1, min(1, cos_theta))
    return np.arccos(cos_theta) * 180.0 / np.pi
| 26.361702 | 75 | 0.63519 |
6ac8ac462c7a04df3af43e9754deb8a941429af3 | 536 | py | Python | src/bitcoin_acks/github_data/graphql_queries/__init__.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 43 | 2018-04-29T03:30:18.000Z | 2021-02-11T05:24:49.000Z | src/bitcoin_acks/github_data/graphql_queries/__init__.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 46 | 2018-05-02T01:27:34.000Z | 2022-03-26T13:29:55.000Z | src/bitcoin_acks/github_data/graphql_queries/__init__.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 11 | 2018-05-15T23:47:47.000Z | 2021-01-27T14:57:54.000Z | import os
def get_query(query_file: str):
    """Read and return the text of a .graphql file located next to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(module_dir, query_file), 'r') as handle:
        return handle.read()
# Module-level query strings, loaded once at import time from the .graphql
# files shipped alongside this package.
comments_graphql_query = get_query('comments.graphql')
pull_request_graphql_query = get_query('pull_request.graphql')
pull_requests_graphql_query = get_query('pull_requests.graphql')
reviews_graphql_query = get_query('reviews.graphql')
user_graphql_query = get_query('user.graphql')
| 31.529412 | 64 | 0.761194 |
8aef56566ddbdff32fc36c642589742b45b38699 | 1,699 | py | Python | fresh365/apps/goods/admin.py | rehth/MyDjango | 59f6d1fc667ecaf902c8d8c3571a633cce80490e | [
"MIT"
] | null | null | null | fresh365/apps/goods/admin.py | rehth/MyDjango | 59f6d1fc667ecaf902c8d8c3571a633cce80490e | [
"MIT"
] | null | null | null | fresh365/apps/goods/admin.py | rehth/MyDjango | 59f6d1fc667ecaf902c8d8c3571a633cce80490e | [
"MIT"
] | null | null | null | from django.contrib import admin
from apps.goods.models import GoodsType, IndexPromotionBanner, GoodsSKU,\
GoodsSPU, IndexTypeGoodsBanner, IndexGoodsBanner
from django.core.cache import cache
# Register your models here.
class BaseModelAdmin(admin.ModelAdmin):
    """Regenerates the static index.html page whenever admin data changes."""
    def save_model(self, request, obj, form, change):
        """Called when a row is added or modified through the admin."""
        super().save_model(request, obj, form, change)
        from celery_tasks.tasks import generate_static_index_html
        # Asynchronously regenerate the static index.html page.
        generate_static_index_html.delay()
        # Invalidate the cached data for the index page.
        cache.delete('index_info')
    def delete_model(self, request, obj):
        """Called when a row is deleted through the admin."""
        super().delete_model(request, obj)
        # Asynchronously regenerate the static index.html page.
        from celery_tasks.tasks import generate_static_index_html
        generate_static_index_html.delay()
        # Invalidate the cached data for the index page.
        cache.delete('index_info')
# The admin classes below only customize the changelist columns; the static
# index-page regeneration behavior is inherited from BaseModelAdmin.
@admin.register(GoodsType)
class GoodsTypeAdmin(BaseModelAdmin):
    list_display = ['id', 'name', 'logo']
@admin.register(IndexPromotionBanner)
class IndexPromotionBannerAdmin(BaseModelAdmin):
    list_display = ['id', 'name', 'url']
@admin.register(GoodsSKU)
class GoodsSKUAdmin(BaseModelAdmin):
    list_display = ['id', 'name', 'goods']
@admin.register(GoodsSPU)
class GoodsSPUAdmin(BaseModelAdmin):
    list_display = ['id', 'name', 'detail']
@admin.register(IndexTypeGoodsBanner)
class IndexTypeGoodsBannerAdmin(BaseModelAdmin):
    list_display = ['id', 'sku', 'goods', 'display_type']
@admin.register(IndexGoodsBanner)
class IndexGoodsBannerAdmin(BaseModelAdmin):
    list_display = ['id', 'sku', 'image', 'index']
7cba1b063f117f322643b16d16e9e0897c40d9b2 | 2,819 | py | Python | silx/gui/utils/test/test_image.py | vallsv/silx | 834bfe9272af99096faa360e1ad96291bf46a2ac | [
"CC0-1.0",
"MIT"
] | 1 | 2017-08-03T15:51:42.000Z | 2017-08-03T15:51:42.000Z | silx/gui/utils/test/test_image.py | vallsv/silx | 834bfe9272af99096faa360e1ad96291bf46a2ac | [
"CC0-1.0",
"MIT"
] | 7 | 2016-10-19T09:27:26.000Z | 2020-01-24T13:26:56.000Z | silx/gui/utils/test/test_image.py | payno/silx | 13301e61627f98fa837008250ac74a0627a7a560 | [
"CC0-1.0",
"MIT"
] | null | null | null | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Test of utils module."""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "16/01/2017"
import numpy
import unittest
from silx.gui import qt
from silx.gui.test.utils import TestCaseQt
from silx.gui.utils import _image
class TestQImageConversion(TestCaseQt):
    """Tests conversion of QImage to/from numpy array."""
    def testConvertArrayToQImage(self):
        """Test conversion of numpy array to QImage"""
        # (height, width, channels) uint8 RGB image of all-ones pixels
        image = numpy.ones((3, 3, 3), dtype=numpy.uint8)
        qimage = _image.convertArrayToQImage(image)
        # Rows map to height, columns to width.
        self.assertEqual(qimage.height(), image.shape[0])
        self.assertEqual(qimage.width(), image.shape[1])
        self.assertEqual(qimage.format(), qt.QImage.Format_RGB888)
        color = qt.QColor(1, 1, 1).rgb()
        self.assertEqual(qimage.pixel(1, 1), color)
    def testConvertQImageToArray(self):
        """Test conversion of QImage to numpy array"""
        qimage = qt.QImage(3, 3, qt.QImage.Format_RGB888)
        qimage.fill(0x010101)  # every channel of every pixel set to 1
        image = _image.convertQImageToArray(qimage)
        self.assertEqual(qimage.height(), image.shape[0])
        self.assertEqual(qimage.width(), image.shape[1])
        self.assertEqual(image.shape[2], 3)
        self.assertTrue(numpy.all(numpy.equal(image, 1)))
def suite():
    """Build the unittest suite for this module."""
    loader = unittest.defaultTestLoader
    tests = unittest.TestSuite()
    tests.addTest(loader.loadTestsFromTestCase(TestQImageConversion))
    return tests
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 37.586667 | 79 | 0.682512 |
621983a5d3ac686f3eda84aacdbea12b81198fcf | 811 | py | Python | alfastaff_products/forms.py | spanickroon/Alfa-Staff | 4db9332634f278bac2fb7b543bcd86a2651214e2 | [
"MIT"
] | 1 | 2020-12-22T12:14:44.000Z | 2020-12-22T12:14:44.000Z | alfastaff_products/forms.py | ZayJob/Alfa-Staff | 4db9332634f278bac2fb7b543bcd86a2651214e2 | [
"MIT"
] | 6 | 2020-12-20T01:32:44.000Z | 2021-07-08T10:11:14.000Z | alfastaff_products/forms.py | spanickroon/Alfa-Staff | 4db9332634f278bac2fb7b543bcd86a2651214e2 | [
"MIT"
] | 1 | 2020-12-06T18:28:36.000Z | 2020-12-06T18:28:36.000Z | """This module contain classes for work with forms."""
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from alfastaff_account.models import Profile
class PasswordChangeForm(UserCreationForm):
    """Form for changing a user's password.

    Inherits Django's ``UserCreationForm`` so the built-in password
    validation (matching entries, strength checks) is reused; only the
    two password fields are exposed.
    """
    class Meta:
        """Bind the form to Django's built-in ``User`` model."""
        model = User
        # NOTE(review): 'password1'/'password2' are form-level fields
        # declared by UserCreationForm, not columns on the User model —
        # confirm that listing them in Meta.fields works on the Django
        # version this project targets.
        fields = ('password1', 'password2')
class ProfileChangeForm(forms.ModelForm):
    """Model form for editing a user's ``Profile``.

    Adds an explicit ``email`` field on top of the model-backed fields
    listed in ``Meta.fields``.
    """
    # Declared at form level; overrides/augments whatever the Profile
    # model exposes for e-mail.
    email = forms.EmailField()
    class Meta:
        """Bind the form to the ``Profile`` model and select its fields."""
        model = Profile
        fields = (
            'avatar', 'email', 'first_name', 'second_name', 'middle_name',
            'number_phone', 'position', 'department',)
| 26.16129 | 74 | 0.667078 |
2f2465f92b2b0357d3dd635326dca630272d63c8 | 21,072 | py | Python | tests/test_triples_factory.py | cthoyt/pykeen | e2164149492291ba5e4b130ab8d2f9babfc55a50 | [
"MIT"
] | null | null | null | tests/test_triples_factory.py | cthoyt/pykeen | e2164149492291ba5e4b130ab8d2f9babfc55a50 | [
"MIT"
] | null | null | null | tests/test_triples_factory.py | cthoyt/pykeen | e2164149492291ba5e4b130ab8d2f9babfc55a50 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit tests for triples factories."""
import itertools as itt
import unittest
from unittest.mock import patch
import numpy as np
import pytest
import torch
from pykeen.datasets import Nations
from pykeen.datasets.nations import NATIONS_TRAIN_PATH
from pykeen.triples import LCWAInstances, TriplesFactory, TriplesNumericLiteralsFactory
from pykeen.triples.generation import generate_triples
from pykeen.triples.splitting import (
SPLIT_METHODS, _get_cover_deterministic, _tf_cleanup_all, _tf_cleanup_deterministic,
_tf_cleanup_randomized,
get_absolute_split_sizes, normalize_ratios,
)
from pykeen.triples.triples_factory import INVERSE_SUFFIX, TRIPLES_DF_COLUMNS, _map_triples_elements_to_ids
from pykeen.triples.utils import get_entities, get_relations
# Shared fixtures for the triples-factory tests below.
# NOTE: `np.str` was a deprecated alias of the builtin `str` (deprecated in
# numpy 1.20, removed in numpy 1.24); the builtin is used directly instead.

# Toy labelled (head, relation, tail) triples.
triples = np.array(
    [
        ['peter', 'likes', 'chocolate_cake'],
        ['chocolate_cake', 'isA', 'dish'],
        ['susan', 'likes', 'pizza'],
        ['peter', 'likes', 'susan'],
    ],
    dtype=str,
)
# Expected (head_id, relation_id) pairs of the LCWA instances built from
# `triples`.
instance_mapped_triples = np.array(
    [
        [0, 0],
        [2, 1],
        [4, 1],
    ],
)
# Expected tail-id label sets per (head, relation) pair; ragged, hence an
# object array of per-row integer arrays.
instance_labels = np.array(
    [
        np.array([1]),
        np.array([0, 4]),
        np.array([3]),
    ],
    dtype=object,
)
# Toy numeric-literal triples (entity, literal relation, value).
numeric_triples = np.array(
    [
        ['peter', '/lit/hasAge', '30'],
        ['peter', '/lit/hasHeight', '185'],
        ['peter', '/lit/hasChildren', '2'],
        ['susan', '/lit/hasAge', '28'],
        ['susan', '/lit/hasHeight', '170'],
    ],
    dtype=str,
)
class TestTriplesFactory(unittest.TestCase):
    """Tests for :class:`TriplesFactory` construction and conversions."""

    def setUp(self) -> None:
        """Instantiate test instance."""
        self.factory = Nations().training

    def test_correct_inverse_creation(self):
        """Test if the triples and the corresponding inverses are created."""
        t = [
            ['e1', 'a.', 'e5'],
            ['e1', 'a', 'e2'],
        ]
        # `np.str` was a deprecated alias of the builtin `str` and was
        # removed in numpy >= 1.24; use the builtin directly.
        t = np.array(t, dtype=str)
        factory = TriplesFactory.from_labeled_triples(triples=t, create_inverse_triples=True)
        instances = factory.create_slcwa_instances()
        # two triples plus one inverse for each
        assert len(instances) == 4

    def test_automatic_incomplete_inverse_detection(self):
        """Test detecting that the triples contain inverses, warns about them, and filters them out."""
        # comment(mberr): from my pov this behaviour is faulty: the triples factory is expected to say it contains
        # inverse relations, although the triples contained in it are not the same we would have when removing the
        # first triple, and passing create_inverse_triples=True.
        t = [
            ['e3', f'a.{INVERSE_SUFFIX}', 'e10'],
            ['e1', 'a', 'e2'],
            ['e1', 'a.', 'e5'],
        ]
        # builtin `str` instead of the removed `np.str` alias
        t = np.array(t, dtype=str)
        for create_inverse_triples in (False, True):
            with patch("pykeen.triples.triples_factory.logger.warning") as warning:
                factory = TriplesFactory.from_labeled_triples(triples=t, create_inverse_triples=create_inverse_triples)
                # check for warning
                warning.assert_called()
                # check for filtered triples
                assert factory.num_triples == 2
                # check for correct inverse triples flag
                assert factory.create_inverse_triples == create_inverse_triples

    def test_id_to_label(self):
        """Test ID-to-label conversion."""
        for label_to_id, id_to_label in [
            (self.factory.entity_to_id, self.factory.entity_id_to_label),
            (self.factory.relation_to_id, self.factory.relation_id_to_label),
        ]:
            # the two mappings must be exact inverses of each other
            for k in label_to_id.keys():
                assert id_to_label[label_to_id[k]] == k
            for k in id_to_label.keys():
                assert label_to_id[id_to_label[k]] == k

    def test_tensor_to_df(self):
        """Test tensor_to_df()."""
        # check correct translation
        labeled_triples = set(tuple(row) for row in self.factory.triples.tolist())
        tensor = self.factory.mapped_triples
        scores = torch.rand(tensor.shape[0])
        df = self.factory.tensor_to_df(tensor=tensor, scores=scores)
        re_labeled_triples = set(
            tuple(row)
            for row in df[['head_label', 'relation_label', 'tail_label']].values.tolist()
        )
        assert labeled_triples == re_labeled_triples
        # check column order
        assert tuple(df.columns) == TRIPLES_DF_COLUMNS + ('scores',)

    def test_new_with_restriction(self):
        """Test new_with_restriction()."""
        example_relation_restriction = {
            'economicaid',
            'dependent',
        }
        example_entity_restriction = {
            'brazil',
            'burma',
            'china',
        }
        for inverse_triples in (True, False):
            original_triples_factory = Nations(
                create_inverse_triples=inverse_triples,
            ).training
            for entity_restriction in (None, example_entity_restriction):
                for relation_restriction in (None, example_relation_restriction):
                    # apply restriction
                    restricted_triples_factory = original_triples_factory.new_with_restriction(
                        entities=entity_restriction,
                        relations=relation_restriction,
                    )
                    # check that the triples factory is returned as is, if and only if no restriction is to apply
                    no_restriction_to_apply = (entity_restriction is None and relation_restriction is None)
                    equal_factory_object = (id(restricted_triples_factory) == id(original_triples_factory))
                    assert no_restriction_to_apply == equal_factory_object
                    # check that inverse_triples is correctly carried over
                    assert (
                        original_triples_factory.create_inverse_triples
                        == restricted_triples_factory.create_inverse_triples
                    )
                    # verify that the label-to-ID mapping has not been changed
                    assert original_triples_factory.entity_to_id == restricted_triples_factory.entity_to_id
                    assert original_triples_factory.relation_to_id == restricted_triples_factory.relation_to_id
                    # verify that triples have been filtered
                    if entity_restriction is not None:
                        present_entities = set(restricted_triples_factory.triples[:, 0]).union(
                            restricted_triples_factory.triples[:, 2])
                        assert set(entity_restriction).issuperset(present_entities)
                    if relation_restriction is not None:
                        present_relations = set(restricted_triples_factory.triples[:, 1])
                        exp_relations = set(relation_restriction)
                        assert exp_relations.issuperset(present_relations)

    def test_create_lcwa_instances(self):
        """Test create_lcwa_instances."""
        factory = Nations().training
        instances = factory.create_lcwa_instances()
        assert isinstance(instances, LCWAInstances)
        # check compressed triples
        # reconstruct triples from compressed form
        reconstructed_triples = set()
        for hr, row_id in zip(instances.pairs, range(instances.compressed.shape[0])):
            h, r = hr.tolist()
            _, tails = instances.compressed[row_id].nonzero()
            reconstructed_triples.update(
                (h, r, t)
                for t in tails.tolist()
            )
        original_triples = {
            tuple(hrt)
            for hrt in factory.mapped_triples.tolist()
        }
        assert original_triples == reconstructed_triples
        # check data loader
        for batch in torch.utils.data.DataLoader(instances, batch_size=2):
            assert len(batch) == 2
            assert all(torch.is_tensor(x) for x in batch)
            x, y = batch
            batch_size = x.shape[0]
            assert x.shape == (batch_size, 2)
            assert x.dtype == torch.long
            assert y.shape == (batch_size, factory.num_entities)
            assert y.dtype == torch.get_default_dtype()
class TestSplit(unittest.TestCase):
    """Tests for splitting a triples factory and the split clean-up helpers."""

    triples_factory: TriplesFactory

    def setUp(self) -> None:
        """Set up the tests."""
        self.triples_factory = Nations().training
        self.assertEqual(1592, self.triples_factory.num_triples)

    def _test_invariants(self, training_triples_factory: TriplesFactory, *other_factories: TriplesFactory) -> None:
        """Test invariants for result of triples factory splitting."""
        # verify that all entities and relations are present in the training factory
        self.assertEqual(training_triples_factory.num_entities, self.triples_factory.num_entities)
        self.assertEqual(training_triples_factory.num_relations, self.triples_factory.num_relations)
        all_factories = (training_triples_factory, *other_factories)
        # verify that no triple got lost
        self.assertEqual(sum(t.num_triples for t in all_factories), self.triples_factory.num_triples)
        # verify that the label-to-id mappings match (identical objects, not just equal)
        self.assertSetEqual({
            id(factory.entity_to_id)
            for factory in all_factories
        }, {
            id(self.triples_factory.entity_to_id),
        })
        self.assertSetEqual({
            id(factory.relation_to_id)
            for factory in all_factories
        }, {
            id(self.triples_factory.relation_to_id),
        })

    def test_split(self):
        """Test splitting a factory."""
        cases = [
            (2, 0.8),
            (2, [0.8]),
            (3, [0.80, 0.10]),
            (3, [0.80, 0.10, 0.10]),
        ]
        for method, (n, ratios), in itt.product(SPLIT_METHODS, cases):
            with self.subTest(method=method, ratios=ratios):
                factories = self.triples_factory.split(ratios, method=method)
                self.assertEqual(n, len(factories))
                self._test_invariants(*factories)

    def test_cleanup_deterministic(self):
        """Test that triples in a test set can get moved properly to the training set."""
        training = torch.as_tensor(data=[
            [1, 1000, 2],
            [1, 1000, 3],
            [1, 1001, 3],
        ], dtype=torch.long)
        testing = torch.as_tensor(data=[
            [2, 1001, 3],
            [1, 1002, 4],
        ], dtype=torch.long)
        expected_training = torch.as_tensor(data=[
            [1, 1000, 2],
            [1, 1000, 3],
            [1, 1001, 3],
            [1, 1002, 4],
        ], dtype=torch.long)
        expected_testing = torch.as_tensor(data=[
            [2, 1001, 3],
        ], dtype=torch.long)
        new_training, new_testing = _tf_cleanup_deterministic(training, testing)
        assert (expected_training == new_training).all()
        assert (expected_testing == new_testing).all()
        # fixed: previously unpacked into `new_testing, new_testing`, which
        # discarded the training result and made the first assertion below
        # re-check the stale value from the call above.
        new_training, new_testing = _tf_cleanup_all([training, testing])
        assert (expected_training == new_training).all()
        assert (expected_testing == new_testing).all()

    def test_cleanup_randomized(self):
        """Test that triples in a test set can get moved properly to the training set."""
        training = torch.as_tensor(data=[
            [1, 1000, 2],
            [1, 1000, 3],
        ], dtype=torch.long)
        testing = torch.as_tensor(data=[
            [2, 1000, 3],
            [1, 1000, 4],
            [2, 1000, 4],
            [1, 1001, 3],
        ], dtype=torch.long)
        # randomized clean-up may move either of two triples; both outcomes
        # are valid, so accept either expected split
        expected_training_1 = {
            (1, 1000, 2),
            (1, 1000, 3),
            (1, 1000, 4),
            (1, 1001, 3),
        }
        expected_testing_1 = {
            (2, 1000, 3),
            (2, 1000, 4),
        }
        expected_training_2 = {
            (1, 1000, 2),
            (1, 1000, 3),
            (2, 1000, 4),
            (1, 1001, 3),
        }
        expected_testing_2 = {
            (2, 1000, 3),
            (1, 1000, 4),
        }
        new_training, new_testing = [
            set(tuple(row) for row in arr.tolist())
            for arr in _tf_cleanup_randomized(training, testing)
        ]
        if expected_training_1 == new_training:
            self.assertEqual(expected_testing_1, new_testing)
        elif expected_training_2 == new_training:
            self.assertEqual(expected_testing_2, new_testing)
        else:
            self.fail('training was not correct')

    def test_get_cover_deterministic(self):
        """Test _get_cover_deterministic."""
        generated_triples = generate_triples()
        cover = _get_cover_deterministic(triples=generated_triples)
        # check type
        assert torch.is_tensor(cover)
        assert cover.dtype == torch.bool
        # check format
        assert cover.shape == (generated_triples.shape[0],)
        # check coverage
        self.assertEqual(
            get_entities(generated_triples),
            get_entities(generated_triples[cover]),
            msg='entity coverage is not full',
        )
        self.assertEqual(
            get_relations(generated_triples),
            get_relations(generated_triples[cover]),
            msg='relation coverage is not full',
        )
class TestLiterals(unittest.TestCase):
    """Tests for processing numeric literals and factory metadata."""

    def test_create_lcwa_instances(self):
        """Test creating LCWA instances."""
        factory = TriplesNumericLiteralsFactory(triples=triples, numeric_triples=numeric_triples)
        instances = factory.create_lcwa_instances()
        # peter's literal values come straight from the fixture rows
        id_peter = factory.entity_to_id['peter']
        id_age = instances.literals_to_id['/lit/hasAge']
        id_height = instances.literals_to_id['/lit/hasHeight']
        id_num_children = instances.literals_to_id['/lit/hasChildren']
        self.assertEqual(instances.numeric_literals[id_peter, id_age], 30)
        self.assertEqual(instances.numeric_literals[id_peter, id_height], 185)
        self.assertEqual(instances.numeric_literals[id_peter, id_num_children], 2)
        # susan has no hasChildren literal, so it defaults to 0
        id_susan = factory.entity_to_id['susan']
        id_age = instances.literals_to_id['/lit/hasAge']
        id_height = instances.literals_to_id['/lit/hasHeight']
        id_num_children = instances.literals_to_id['/lit/hasChildren']
        self.assertEqual(instances.numeric_literals[id_susan, id_age], 28)
        self.assertEqual(instances.numeric_literals[id_susan, id_height], 170)
        self.assertEqual(instances.numeric_literals[id_susan, id_num_children], 0)
        # an entity without any literals gets all-zero literal features
        id_chocolate_cake = factory.entity_to_id['chocolate_cake']
        id_age = instances.literals_to_id['/lit/hasAge']
        id_height = instances.literals_to_id['/lit/hasHeight']
        id_num_children = instances.literals_to_id['/lit/hasChildren']
        self.assertEqual(instances.numeric_literals[id_chocolate_cake, id_age], 0)
        self.assertEqual(instances.numeric_literals[id_chocolate_cake, id_height], 0)
        self.assertEqual(instances.numeric_literals[id_chocolate_cake, id_num_children], 0)
        # Check if multilabels are working correctly
        self.assertTrue((instance_mapped_triples == instances.pairs).all())
        for i, exp in enumerate(instance_labels):
            self.assertTrue((exp == instances.compressed[i].nonzero()[-1]).all())

    def test_triples(self):
        """Test properties of the triples factory."""
        triples_factory = TriplesFactory.from_labeled_triples(triples=triples)
        self.assertEqual(set(range(triples_factory.num_entities)), set(triples_factory.entity_to_id.values()))
        self.assertEqual(set(range(triples_factory.num_relations)), set(triples_factory.relation_to_id.values()))
        assert (_map_triples_elements_to_ids(
            triples=triples,
            entity_to_id=triples_factory.entity_to_id,
            relation_to_id=triples_factory.relation_to_id,
        ) == triples_factory.mapped_triples).all()

    def test_inverse_triples(self):
        """Test that the right number of entities and triples exist after inverting them."""
        triples_factory = TriplesFactory.from_labeled_triples(triples=triples, create_inverse_triples=True)
        self.assertEqual(4, triples_factory.num_relations)
        self.assertEqual(
            set(range(triples_factory.num_entities)),
            set(triples_factory.entity_to_id.values()),
            msg='wrong number entities',
        )
        self.assertEqual(
            set(range(triples_factory.real_num_relations)),
            set(triples_factory.relation_to_id.values()),
            msg='wrong number relations',
        )
        relations = set(triples[:, 1])
        entities = set(triples[:, 0]).union(triples[:, 2])
        self.assertEqual(len(entities), triples_factory.num_entities, msg='wrong number entities')
        self.assertEqual(2, len(relations), msg='Wrong number of relations in set')
        self.assertEqual(
            2 * len(relations),
            triples_factory.num_relations,
            msg='Wrong number of relations in factory',
        )

    def test_metadata(self):
        """Test metadata passing for triples factories."""
        t = Nations().training
        self.assertEqual(NATIONS_TRAIN_PATH, t.metadata['path'])
        self.assertEqual(
            (
                f'TriplesFactory(num_entities=14, num_relations=55, num_triples=1592,'
                f' inverse_triples=False, path="{NATIONS_TRAIN_PATH}")'
            ),
            repr(t),
        )
        entities = ['poland', 'ussr']
        x = t.new_with_restriction(entities=entities)
        entities_ids = t.entities_to_ids(entities=entities)
        self.assertEqual(NATIONS_TRAIN_PATH, x.metadata['path'])
        self.assertEqual(
            (
                f'TriplesFactory(num_entities=14, num_relations=55, num_triples=37,'
                f' inverse_triples=False, entity_restriction={repr(entities_ids)}, path="{NATIONS_TRAIN_PATH}")'
            ),
            repr(x),
        )
        relations = ['negativebehavior']
        v = t.new_with_restriction(relations=relations)
        relations_ids = t.relations_to_ids(relations=relations)
        # fixed: previously re-checked ``x.metadata`` (copy-paste) instead of
        # the relation-restricted factory ``v`` built just above.
        self.assertEqual(NATIONS_TRAIN_PATH, v.metadata['path'])
        self.assertEqual(
            (
                f'TriplesFactory(num_entities=14, num_relations=55, num_triples=29,'
                f' inverse_triples=False, path="{NATIONS_TRAIN_PATH}", relation_restriction={repr(relations_ids)})'
            ),
            repr(v),
        )
        w = t.clone_and_exchange_triples(t.triples[0:5], keep_metadata=False)
        self.assertIsInstance(w, TriplesFactory)
        self.assertNotIn('path', w.metadata)
        self.assertEqual(
            'TriplesFactory(num_entities=14, num_relations=55, num_triples=5, inverse_triples=False)',
            repr(w),
        )
        y, z = t.split()
        self.assertEqual(NATIONS_TRAIN_PATH, y.metadata['path'])
        self.assertEqual(NATIONS_TRAIN_PATH, z.metadata['path'])
def test_get_absolute_split_sizes():
    """Test get_absolute_split_sizes."""
    for num_splits, n_total in ((2, 100), (3, 200), (4, 10412)):
        # draw random ratios and normalize them to sum to one
        raw = np.random.uniform(size=(num_splits,))
        ratios = raw / raw.sum()
        sizes = get_absolute_split_sizes(n_total=n_total, ratios=ratios)
        # one absolute size per ratio
        assert len(sizes) == len(ratios)
        # every size lies within [0, n_total]
        assert all(0 <= size <= n_total for size in sizes)
        # the sizes partition the total exactly
        assert sum(sizes) == n_total
        # relative sizes agree with the requested ratios up to the
        # resolution of 1 / n_total
        relative = np.asarray(sizes) / n_total
        decimal = np.floor(np.log10(n_total))
        np.testing.assert_almost_equal(relative, ratios, decimal=decimal)
def test_normalize_ratios():
    """Test normalize_ratios."""
    cases = [
        (0.5, (0.5, 0.5)),
        ((0.3, 0.2, 0.4), (0.3, 0.2, 0.4, 0.1)),
        ((0.3, 0.3, 0.4), (0.3, 0.3, 0.4)),
    ]
    for ratios, expected in cases:
        normalized = normalize_ratios(ratios=ratios)
        # result type: a tuple of floats with at least two entries
        assert isinstance(normalized, tuple)
        assert all(isinstance(ratio, float) for ratio in normalized)
        assert len(normalized) >= 2
        # every ratio is a valid proportion and they sum to one
        assert all(0 <= ratio <= 1 for ratio in normalized)
        as_array = np.asarray(normalized)
        np.testing.assert_almost_equal(as_array.sum(), np.ones(1))
        # compare against the expected normalized tuple
        np.testing.assert_almost_equal(as_array, np.asarray(expected))
def test_normalize_invalid_ratio():
    """Test invalid ratios."""
    # each of these exceeds a total of 1.0 and must be rejected
    for bad_ratios in (1.1, [1.1], [0.8, 0.3], [0.8, 0.1, 0.2]):
        with pytest.raises(ValueError):
            normalize_ratios(ratios=bad_ratios)
| 38.878229 | 119 | 0.614085 |
9af19b33b7030563c0ca3b6cf1721b8660f38d0a | 4,644 | py | Python | mne/io/tree.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | null | null | null | mne/io/tree.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 23 | 2017-09-12T11:08:26.000Z | 2019-10-04T11:11:29.000Z | mne/io/tree.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T13:48:00.000Z | 2019-07-10T16:02:11.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import numpy as np
from .constants import FIFF
from .tag import Tag
from .tag import read_tag
from .write import write_id, start_block, end_block, _write
from ..utils import logger, verbose
def dir_tree_find(tree, kind):
    """Find nodes of the given kind from a directory tree structure.

    Parameters
    ----------
    tree : dict | list of dict
        Directory tree (or a list of trees).
    kind : int
        Kind to find.

    Returns
    -------
    nodes : list
        List of matching nodes, in depth-first pre-order.
    """
    # A list of trees: search each one in turn.
    if isinstance(tree, list):
        matches = []
        for subtree in tree:
            matches.extend(dir_tree_find(subtree, kind))
        return matches
    # A single tree: include it if it matches, then recurse into children.
    matches = [tree] if tree['block'] == kind else []
    for child in tree['children']:
        matches.extend(dir_tree_find(child, kind))
    return matches
@verbose
def make_dir_tree(fid, directory, start=0, indent=0, verbose=None):
    """Create the directory tree structure.

    Recursively walks the flat tag ``directory`` starting at index
    ``start`` and builds a nested dict with keys ``block``, ``id``,
    ``parent_id``, ``nent``, ``nchild``, ``directory`` and ``children``.
    Returns ``(tree, last)`` where ``last`` is the directory index at
    which this (sub)tree ended.
    """
    # FIF tag kinds used for structuring (local constants mirroring FIFF)
    FIFF_BLOCK_START = 104
    FIFF_BLOCK_END = 105
    FIFF_FILE_ID = 100
    FIFF_BLOCK_ID = 103
    FIFF_PARENT_BLOCK_ID = 110
    # The block number is stored in the data of the block-start tag;
    # block 0 denotes the file's top level.
    if directory[start].kind == FIFF_BLOCK_START:
        tag = read_tag(fid, directory[start].pos)
        block = tag.data
    else:
        block = 0
    logger.debug(' ' * indent + 'start { %d' % block)
    this = start
    tree = dict()
    tree['block'] = block
    tree['id'] = None
    tree['parent_id'] = None
    tree['nent'] = 0
    tree['nchild'] = 0
    tree['directory'] = directory[this]
    tree['children'] = []
    while this < len(directory):
        if directory[this].kind == FIFF_BLOCK_START:
            # A nested block starts here (unless it is our own start tag):
            # recurse and resume after the child's end.
            if this != start:
                child, this = make_dir_tree(fid, directory, this, indent + 1)
                tree['nchild'] += 1
                tree['children'].append(child)
        elif directory[this].kind == FIFF_BLOCK_END:
            # Re-read our own start tag to compare block numbers; a matching
            # end tag terminates this subtree.
            tag = read_tag(fid, directory[start].pos)
            if tag.data == block:
                break
        else:
            # A plain entry of this block; on the first one, replace the
            # placeholder start tag with a fresh list of entries.
            tree['nent'] += 1
            if tree['nent'] == 1:
                tree['directory'] = list()
            tree['directory'].append(directory[this])
            # Add the id information if available
            if block == 0:
                if directory[this].kind == FIFF_FILE_ID:
                    tag = read_tag(fid, directory[this].pos)
                    tree['id'] = tag.data
            else:
                if directory[this].kind == FIFF_BLOCK_ID:
                    tag = read_tag(fid, directory[this].pos)
                    tree['id'] = tag.data
                elif directory[this].kind == FIFF_PARENT_BLOCK_ID:
                    tag = read_tag(fid, directory[this].pos)
                    tree['parent_id'] = tag.data
        this += 1
    # Eliminate the empty directory
    if tree['nent'] == 0:
        tree['directory'] = None
    logger.debug(' ' * (indent + 1) + 'block = %d nent = %d nchild = %d'
                 % (tree['block'], tree['nent'], tree['nchild']))
    logger.debug(' ' * indent + 'end } %d' % block)
    last = this
    return tree, last
###############################################################################
# Writing
def copy_tree(fidin, in_id, nodes, fidout):
    """Copy directory subtrees from fidin to fidout.

    ``nodes`` may be a single tree dict (as built by ``make_dir_tree``) or
    a list of them; each node's tags are re-read from ``fidin`` at their
    recorded positions and written verbatim to ``fidout``.
    """
    if len(nodes) <= 0:
        return
    if not isinstance(nodes, list):
        nodes = [nodes]
    for node in nodes:
        start_block(fidout, node['block'])
        if node['id'] is not None:
            # NOTE(review): FIFF_BLOCK_ID is written with ``in_id`` even when
            # ``in_id`` is None (only the PARENT_FILE_ID write is guarded) —
            # confirm this is intentional for id-less input files.
            if in_id is not None:
                write_id(fidout, FIFF.FIFF_PARENT_FILE_ID, in_id)
            write_id(fidout, FIFF.FIFF_BLOCK_ID, in_id)
            write_id(fidout, FIFF.FIFF_PARENT_BLOCK_ID, node['id'])
        if node['directory'] is not None:
            for d in node['directory']:
                # Do not copy these tags; fresh ids were written above
                if d.kind == FIFF.FIFF_BLOCK_ID or \
                        d.kind == FIFF.FIFF_PARENT_BLOCK_ID or \
                        d.kind == FIFF.FIFF_PARENT_FILE_ID:
                    continue
                # Read and write tags, pass data through transparently:
                # the 16-byte header is parsed (big-endian kind/type/size/next)
                # and the payload is copied as raw bytes without decoding.
                fidin.seek(d.pos, 0)
                tag = Tag(*np.fromfile(fidin, ('>i4,>I4,>i4,>i4'), 1)[0])
                tag.data = np.fromfile(fidin, '>B', tag.size)
                _write(fidout, tag.data, tag.kind, 1, tag.type, '>B')
        # Recurse into child blocks so nesting is preserved in the output.
        for child in node['children']:
            copy_tree(fidin, in_id, child, fidout)
        end_block(fidout, node['block'])
6f6275eab5561f4adc84ccc37aa66d77a77ac594 | 4,271 | py | Python | src/garage/tf/models/mlp_dueling_model.py | Maltimore/garage | a3f44b37eeddca37d157766a9a72e8772f104bcd | [
"MIT"
] | 1 | 2020-02-19T00:01:29.000Z | 2020-02-19T00:01:29.000Z | src/garage/tf/models/mlp_dueling_model.py | Maltimore/garage | a3f44b37eeddca37d157766a9a72e8772f104bcd | [
"MIT"
] | null | null | null | src/garage/tf/models/mlp_dueling_model.py | Maltimore/garage | a3f44b37eeddca37d157766a9a72e8772f104bcd | [
"MIT"
] | 1 | 2020-02-13T12:05:35.000Z | 2020-02-13T12:05:35.000Z | """MLP Dueling Model."""
import tensorflow as tf
from garage.tf.models.base import Model
from garage.tf.models.mlp import mlp
class MLPDuelingModel(Model):
    """Multi-layer perceptron with a dueling Q-network head.

    Two MLP heads share the same state input: one produces per-action
    advantages, the other a scalar state value. The Q-values are
    Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a').

    Args:
        output_dim (int): Dimension of the network output.
        hidden_sizes (list[int]): Output dimension of dense layer(s).
            For example, (32, 32) means this MLP consists of two
            hidden layers, each with 32 hidden units.
        name (str): Model name, also the variable scope.
        hidden_nonlinearity (callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (callable): Initializer function for the weight
            of output dense layer(s). The function should return a
            tf.Tensor.
        output_b_init (callable): Initializer function for the bias
            of output dense layer(s). The function should return a
            tf.Tensor.
        layer_normalization (bool): Bool for using layer normalization or not.
    """

    def __init__(self,
                 output_dim,
                 name=None,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 hidden_w_init=tf.contrib.layers.xavier_initializer,
                 hidden_b_init=tf.zeros_initializer,
                 output_nonlinearity=None,
                 output_w_init=tf.contrib.layers.xavier_initializer,
                 output_b_init=tf.zeros_initializer,
                 layer_normalization=False):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization

    def _build(self, state_input, name=None):
        """Build the dueling Q-value graph on top of ``state_input``."""
        # Both heads share every hyper-parameter except the output
        # dimension and the variable scope name.
        head_kwargs = dict(
            input_var=state_input,
            hidden_sizes=self._hidden_sizes,
            hidden_nonlinearity=self._hidden_nonlinearity,
            hidden_w_init=self._hidden_w_init,
            hidden_b_init=self._hidden_b_init,
            output_nonlinearity=self._output_nonlinearity,
            output_w_init=self._output_w_init,
            output_b_init=self._output_b_init,
            layer_normalization=self._layer_normalization,
        )
        advantage_head = mlp(output_dim=self._output_dim,
                             name='action_value',
                             **head_kwargs)
        value_head = mlp(output_dim=1,
                         name='state_value',
                         **head_kwargs)
        # Subtract the per-state mean advantage so the decomposition into
        # state value and advantages is identifiable.
        mean_advantage = tf.reduce_mean(advantage_head, 1)
        centered_advantage = advantage_head - tf.expand_dims(mean_advantage, 1)
        return value_head + centered_advantage
| 46.934066 | 78 | 0.621634 |
c38fade245fcb1a65b03c375d27a64d43b64e52f | 47,660 | py | Python | built-in/TensorFlow/Official/nlp/ALBERT-lcqmc-ZH_ID1461_for_TensorFlow/modeling_google_fast.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/nlp/ALBERT-lcqmc-ZH_ID1461_for_TensorFlow/modeling_google_fast.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/nlp/ALBERT-lcqmc-ZH_ID1461_for_TensorFlow/modeling_google_fast.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""The main ALBERT model and related functions.
For a description of the algorithm, see https://arxiv.org/abs/1909.11942.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from npu_bridge.npu_init import *
import collections
import copy
import json
import math
import re
import numpy as np
import six
from six.moves import range
import tensorflow as tf
class AlbertConfig(object):
    """Hyper-parameter container for `AlbertModel`.

    The default settings match the configuration of model `albert_xxlarge`.
    """

    def __init__(self,
                 vocab_size,
                 embedding_size=128,
                 hidden_size=4096,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=64,
                 intermediate_size=16384,
                 inner_group_num=1,
                 down_scale_factor=1,
                 hidden_act="gelu",
                 hidden_dropout_prob=0,
                 attention_probs_dropout_prob=0,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs AlbertConfig.

        Args:
          vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.
          embedding_size: size of voc embeddings.
          hidden_size: Size of the encoder layers and the pooler layer.
          num_hidden_layers: Number of hidden layers in the Transformer encoder.
          num_hidden_groups: Number of group for the hidden layers, parameters
            in the same group are shared.
          num_attention_heads: Number of attention heads for each attention
            layer in the Transformer encoder.
          intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
          inner_group_num: int, number of inner repetition of attention and ffn.
          down_scale_factor: float, the scale to apply
          hidden_act: The non-linear activation function (function or string)
            in the encoder and pooler.
          hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
          attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
          max_position_embeddings: The maximum sequence length that this model
            might ever be used with (e.g., 512 or 1024 or 2048).
          type_vocab_size: The vocabulary size of the `token_type_ids` passed
            into `AlbertModel`.
          initializer_range: The stdev of the truncated_normal_initializer for
            initializing all weight matrices.
        """
        # Assignment order is kept stable on purpose: it fixes the insertion
        # order of ``self.__dict__`` and thus of ``to_dict()``.
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.down_scale_factor = down_scale_factor
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `AlbertConfig` from a Python dictionary of parameters."""
        config = AlbertConfig(vocab_size=None)
        for key, value in json_object.items():
            setattr(config, key, value)
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `AlbertConfig` from a json file of parameters."""
        with tf.gfile.GFile(json_file, "r") as reader:
            contents = reader.read()
        return cls.from_dict(json.loads(contents))

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class AlbertModel(object):
  """ALBERT model ("A Lite BERT"), a BERT variant with a factorized embedding
  (embedding_size may differ from hidden_size) and grouped transformer layers.

  Example usage:

  ```python
  # Already been converted from strings into ids
  input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
  input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
  token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
  config = modeling.AlbertConfig(vocab_size=32000, hidden_size=512,
    num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
  model = modeling.AlbertModel(config=config, is_training=True,
    input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
  label_embeddings = tf.get_variable(...)
  pooled_output = model.get_pooled_output()
  logits = tf.matmul(pooled_output, label_embeddings)
  ...
  ```
  """
  def __init__(self,
               config,
               is_training,
               input_ids,
               input_mask=None,
               token_type_ids=None,
               use_one_hot_embeddings=False,
               scope=None):
    """Constructor for AlbertModel.

    Args:
      config: `AlbertConfig` instance.
      is_training: bool. true for training model, false for eval model. Controls
        whether dropout will be applied.
      input_ids: int32 Tensor of shape [batch_size, seq_length].
      input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
      token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
        embeddings or tf.embedding_lookup() for the word embeddings.
      scope: (optional) variable scope. Defaults to "bert".

    Raises:
      ValueError: The config is invalid or one of the input tensor shapes
        is invalid.
    """
    # Work on a copy so the caller's config object is not mutated when
    # dropout is disabled for eval below.
    config = copy.deepcopy(config)
    if not is_training:
      config.hidden_dropout_prob = 0.0
      config.attention_probs_dropout_prob = 0.0
    input_shape = get_shape_list(input_ids, expected_rank=2)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    # Default mask: attend to every position.
    if input_mask is None:
      input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
    # Default token types: a single segment (all zeros).
    if token_type_ids is None:
      token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
    with tf.variable_scope(scope, default_name="bert"):
      with tf.variable_scope("embeddings"):
        # Perform embedding lookup on the word ids.
        (self.word_embedding_output,
         self.output_embedding_table) = embedding_lookup(
             input_ids=input_ids,
             vocab_size=config.vocab_size,
             embedding_size=config.embedding_size,
             initializer_range=config.initializer_range,
             word_embedding_name="word_embeddings",
             use_one_hot_embeddings=use_one_hot_embeddings)
        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        self.embedding_output = embedding_postprocessor(
            input_tensor=self.word_embedding_output,
            use_token_type=True,
            token_type_ids=token_type_ids,
            token_type_vocab_size=config.type_vocab_size,
            token_type_embedding_name="token_type_embeddings",
            use_position_embeddings=True,
            position_embedding_name="position_embeddings",
            initializer_range=config.initializer_range,
            max_position_embeddings=config.max_position_embeddings,
            dropout_prob=config.hidden_dropout_prob)
      with tf.variable_scope("encoder"):
        # Run the stacked transformer.
        # `sequence_output` shape = [batch_size, seq_length, hidden_size].
        # Note: the [batch_size, seq_length] input_mask is passed straight
        # through; attention_layer reshapes it itself.
        self.all_encoder_layers = transformer_model(
            input_tensor=self.embedding_output,
            attention_mask=input_mask,
            hidden_size=config.hidden_size,
            num_hidden_layers=config.num_hidden_layers,
            num_hidden_groups=config.num_hidden_groups,
            num_attention_heads=config.num_attention_heads,
            intermediate_size=config.intermediate_size,
            inner_group_num=config.inner_group_num,
            intermediate_act_fn=get_activation(config.hidden_act),
            hidden_dropout_prob=config.hidden_dropout_prob,
            attention_probs_dropout_prob=config.attention_probs_dropout_prob,
            initializer_range=config.initializer_range,
            do_return_all_layers=True)
      self.sequence_output = self.all_encoder_layers[-1]
      # The "pooler" converts the encoded sequence tensor of shape
      # [batch_size, seq_length, hidden_size] to a tensor of shape
      # [batch_size, hidden_size]. This is necessary for segment-level
      # (or segment-pair-level) classification tasks where we need a fixed
      # dimensional representation of the segment.
      with tf.variable_scope("pooler"):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token. We assume that this has been pre-trained
        first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
        self.pooled_output = tf.layers.dense(
            first_token_tensor,
            config.hidden_size,
            activation=tf.tanh,
            kernel_initializer=create_initializer(config.initializer_range))
  def get_pooled_output(self):
    """Gets the pooled (first-token) output, shape [batch_size, hidden_size]."""
    return self.pooled_output
  def get_sequence_output(self):
    """Gets final hidden layer of encoder.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the final hidden of the transformer encoder.
    """
    return self.sequence_output
  def get_all_encoder_layers(self):
    """Gets outputs of every transformer layer, each [batch, seq, hidden]."""
    return self.all_encoder_layers
  def get_word_embedding_output(self):
    """Get output of the word(piece) embedding lookup.

    This is BEFORE positional embeddings and token type embeddings have been
    added.

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the output of the word(piece) embedding layer.
    """
    return self.word_embedding_output
  def get_embedding_output(self):
    """Gets output of the embedding lookup (i.e., input to the transformer).

    Returns:
      float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
      to the output of the embedding layer, after summing the word
      embeddings with the positional embeddings and the token type embeddings,
      then performing layer normalization. This is the input to the transformer.
    """
    return self.embedding_output
  def get_embedding_table(self):
    """Gets the word embedding table, shape [vocab_size, embedding_size]."""
    return self.output_embedding_table
def gelu(x):
  """Gaussian Error Linear Unit activation.

  Delegates to the Ascend NPU fused implementation (`npu_unary_ops.gelu`)
  rather than the pure-TF approximation used by upstream BERT/ALBERT.

  Args:
    x: float Tensor to apply the activation to.

  Returns:
    `x` with the GELU activation applied.
  """
  return npu_unary_ops.gelu(x)
def get_activation(activation_string):
  """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

  Args:
    activation_string: String name of the activation function.

  Returns:
    A Python function corresponding to the activation function. If
    `activation_string` is None, empty, or "linear", this will return None.
    If `activation_string` is not a string, it will return `activation_string`.

  Raises:
    ValueError: The `activation_string` does not correspond to a known
      activation.
  """
  # Non-strings are assumed to already be activation callables and are
  # passed through untouched.
  if not isinstance(activation_string, six.string_types):
    return activation_string
  if not activation_string:
    return None
  name = activation_string.lower()
  if name == "linear":
    return None
  if name == "relu":
    return tf.nn.relu
  if name == "gelu":
    return gelu
  if name == "tanh":
    return tf.tanh
  if name == "swish":
    return lambda x: x * tf.sigmoid(x)
  raise ValueError("Unsupported activation: %s" % name)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0):
  """Compute the union of the current variables and checkpoint variables.

  Maps each trainable variable to the checkpoint variable it should be
  initialized from. When `num_of_group > 0`, variables are additionally
  bucketed per parameter group, remapping group-specific scope names
  (`/group_N/`, `/ffn_N/`, `/attention_N/`) onto the canonical shared names
  stored in the checkpoint (`/group_0/`, `/ffn_1/`, `/attention_1/`).

  Args:
    tvars: list of trainable tf.Variables in the current graph.
    init_checkpoint: path to the checkpoint to initialize from.
    num_of_group: int. Number of parameter groups; 0 means a single flat map.

  Returns:
    A tuple `(assignment_map, initialized_variable_names)` where
    `assignment_map` is an OrderedDict (or, when `num_of_group > 0`, a list
    of per-group OrderedDicts) of checkpoint-name -> graph-name, and
    `initialized_variable_names` marks every matched name (with and without
    the ":0" suffix).
  """
  assignment_map = {}
  initialized_variable_names = {}
  name_to_variable = collections.OrderedDict()
  for var in tvars:
    name = var.name
    # Strip the ":0" (output index) suffix from variable names.
    m = re.match("^(.*):\\d+$", name)
    if m is not None:
      name = m.group(1)
    name_to_variable[name] = var
  init_vars = tf.train.list_variables(init_checkpoint)
  init_vars_name = [name for (name, _) in init_vars]
  if num_of_group > 0:
    # One assignment map per group so each group can be restored separately.
    assignment_map = []
    for gid in range(num_of_group):
      assignment_map.append(collections.OrderedDict())
  else:
    assignment_map = collections.OrderedDict()
  for name in name_to_variable:
    if name in init_vars_name:
      tvar_name = name
    elif (re.sub(r"/group_\d+/", "/group_0/",
                 six.ensure_str(name)) in init_vars_name and
          num_of_group > 1):
      tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name))
    elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
          in init_vars_name and num_of_group > 1):
      tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name))
    elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name))
          in init_vars_name and num_of_group > 1):
      tvar_name = re.sub(r"/attention_\d+/", "/attention_1/",
                         six.ensure_str(name))
    else:
      # No checkpoint counterpart: variable keeps its fresh initialization.
      tf.logging.info("name %s does not get matched", name)
      continue
    tf.logging.info("name %s match to %s", name, tvar_name)
    if num_of_group > 0:
      group_matched = False
      for gid in range(1, num_of_group):
        if (("/group_" + str(gid) + "/" in name) or
            ("/ffn_" + str(gid) + "/" in name) or
            ("/attention_" + str(gid) + "/" in name)):
          group_matched = True
          tf.logging.info("%s belongs to %dth", name, gid)
          assignment_map[gid][tvar_name] = name
      # Anything not claimed by groups 1..N-1 falls back to group 0.
      if not group_matched:
        assignment_map[0][tvar_name] = name
    else:
      assignment_map[tvar_name] = name
    initialized_variable_names[name] = 1
    initialized_variable_names[six.ensure_str(name) + ":0"] = 1
  return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
  """Perform dropout.

  Args:
    input_tensor: float Tensor.
    dropout_prob: Python float. The probability of dropping out a value (NOT of
      *keeping* a dimension as in `tf.nn.dropout`).

  Returns:
    A version of `input_tensor` with dropout applied.
  """
  if dropout_prob is None or dropout_prob == 0.0:
    return input_tensor
  # NOTE(review): `dropout_prob` is only used for the early-exit check above;
  # it is NOT forwarded to `npu_ops.dropout`, so the NPU op runs with its own
  # default keep probability. Confirm this matches the intended drop rate.
  output = npu_ops.dropout(input_tensor)
  return output
def layer_norm(input_tensor, name=None):
  """Run layer normalization on the last dimension of the tensor."""
  # begin_norm_axis=-1 / begin_params_axis=-1: normalize (and learn gain/bias)
  # over the last dimension only, matching BERT/ALBERT convention.
  return tf.contrib.layers.layer_norm(
      inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
  """Runs layer normalization followed by dropout."""
  # Normalize first, then drop units from the normalized activations.
  return dropout(layer_norm(input_tensor, name), dropout_prob)
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range.

  Args:
    initializer_range: float. Standard deviation of the truncated normal
      distribution used for weight initialization.

  Returns:
    A TF initializer shared by all weight variables in this model.
  """
  return tf.truncated_normal_initializer(stddev=initializer_range)
def get_timing_signal_1d_given_position(channels,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
  """Get sinusoids of diff frequencies, with timing position given.

  Adapted from add_timing_signal_1d_given_position in
  //third_party/py/tensor2tensor/layers/common_attention.py

  Args:
    channels: scalar, size of timing embeddings to create. The number of
        different timescales is equal to channels / 2.
    position: a Tensor with shape [batch, seq_len]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor of timing signals [batch, seq_len, channels]
  """
  num_timescales = channels // 2
  # Timescales form a geometric progression from min_timescale to
  # max_timescale across the channel dimension.
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  # Outer product: [batch, seq_len, 1] * [1, 1, num_timescales].
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  # Pad one zero channel when `channels` is odd (sin/cos halves cover only
  # 2 * (channels // 2) channels).
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  return signal
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
  """Looks up words embeddings for id tensor.

  Args:
    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
      ids.
    vocab_size: int. Size of the embedding vocabulary.
    embedding_size: int. Width of the word embeddings.
    initializer_range: float. Embedding initialization range.
    word_embedding_name: string. Name of the embedding table.
    use_one_hot_embeddings: bool. If True, use one-hot method for word
      embeddings. If False, use `tf.nn.embedding_lookup()`.

  Returns:
    A tuple `(output, embedding_table)` where `output` is a float Tensor of
    shape [batch_size, seq_length, embedding_size] and `embedding_table` is
    the [vocab_size, embedding_size] variable used for the lookup.
  """
  # This function assumes that the input is of shape [batch_size, seq_length,
  # num_inputs].
  #
  # If the input is a 2D tensor of shape [batch_size, seq_length], we
  # reshape to [batch_size, seq_length, 1].
  if input_ids.shape.ndims == 2:
    input_ids = tf.expand_dims(input_ids, axis=[-1])
  embedding_table = tf.get_variable(
      name=word_embedding_name,
      shape=[vocab_size, embedding_size],
      initializer=create_initializer(initializer_range))
  if use_one_hot_embeddings:
    # One-hot matmul path; selectable for accelerators where gather is slow.
    flat_input_ids = tf.reshape(input_ids, [-1])
    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
    output = tf.matmul(one_hot_input_ids, embedding_table)
  else:
    output = tf.nn.embedding_lookup(embedding_table, input_ids)
  input_shape = get_shape_list(input_ids)
  # Fold the trailing num_inputs dimension into the embedding dimension:
  # [batch, seq, num_inputs, emb] -> [batch, seq, num_inputs * emb].
  output = tf.reshape(output,
                      input_shape[0:-1] + [input_shape[-1] * embedding_size])
  return (output, embedding_table)
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
  """Performs various post-processing on a word embedding tensor.

  Adds (optionally) token-type and learned position embeddings, then applies
  layer norm and dropout.

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length,
      embedding_size].
    use_token_type: bool. Whether to add embeddings for `token_type_ids`.
    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
      Must be specified if `use_token_type` is True.
    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
    token_type_embedding_name: string. The name of the embedding table variable
      for token type ids.
    use_position_embeddings: bool. Whether to add position embeddings for the
      position of each token in the sequence.
    position_embedding_name: string. The name of the embedding table variable
      for positional embeddings.
    initializer_range: float. Range of the weight initialization.
    max_position_embeddings: int. Maximum sequence length that might ever be
      used with this model. This can be longer than the sequence length of
      input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.

  Returns:
    float tensor with same shape as `input_tensor`.

  Raises:
    ValueError: One of the tensor shapes or input values is invalid.
  """
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  width = input_shape[2]
  output = input_tensor
  if use_token_type:
    if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if"
                       "`use_token_type` is True.")
    token_type_table = tf.get_variable(
        name=token_type_embedding_name,
        shape=[token_type_vocab_size, width],
        initializer=create_initializer(initializer_range))
    # This vocab will be small so we always do one-hot here, since it is always
    # faster for a small vocabulary.
    flat_token_type_ids = tf.reshape(token_type_ids, [-1])
    one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
    token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
    token_type_embeddings = tf.reshape(token_type_embeddings,
                                       [batch_size, seq_length, width])
    output += token_type_embeddings
  if use_position_embeddings:
    # Fail fast (at graph run time) if the sequence exceeds the position table.
    assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
    with tf.control_dependencies([assert_op]):
      full_position_embeddings = tf.get_variable(
          name=position_embedding_name,
          shape=[max_position_embeddings, width],
          initializer=create_initializer(initializer_range))
      # Since the position embedding table is a learned variable, we create it
      # using a (long) sequence length `max_position_embeddings`. The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())
      # Only the last two dimensions are relevant (`seq_length` and `width`), so
      # we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings
  output = layer_norm_and_dropout(output, dropout_prob)
  return output
def dense_layer_3d(input_tensor,
                   num_attention_heads,
                   head_size,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with 3D kernel.

  Projects [batch, seq, hidden] to per-head outputs
  [batch, seq, num_heads, head_size] with a single einsum.

  Args:
    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
    num_attention_heads: Number of attention heads.
    head_size: The size per attention head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  input_shape = get_shape_list(input_tensor)
  hidden_size = input_shape[2]
  with tf.variable_scope(name):
    # The kernel is stored flat ([hidden, heads*head_size]) so it matches the
    # checkpoint layout, then viewed as [hidden, heads, head_size] for einsum.
    w = tf.get_variable(
        name="kernel",
        shape=[hidden_size, num_attention_heads * head_size],
        initializer=initializer)
    w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
    b = tf.get_variable(
        name="bias",
        shape=[num_attention_heads * head_size],
        initializer=tf.zeros_initializer)
    b = tf.reshape(b, [num_attention_heads, head_size])
    # B=batch, F=from_seq, H=hidden, N=heads, D=head_size.
    ret = tf.einsum("BFH,HND->BFND", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Projects per-head outputs [batch, seq, num_heads, head_size] back to
  [batch, seq, hidden_size]. The number of heads is inferred from the
  input shape rather than passed as an argument.

  Args:
    input_tensor: float Tensor of shape [batch,from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of hidden layer.
    head_size: The size of head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  input_shape = get_shape_list(input_tensor)
  num_attention_heads= input_shape[2]
  with tf.variable_scope(name):
    # Flat kernel matches the checkpoint layout; reshaped for einsum.
    w = tf.get_variable(
        name="kernel",
        shape=[num_attention_heads * head_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    # B=batch, F=seq, N=heads, D=head_size, H=hidden.
    ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret
def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   num_attention_heads=1,
                   name=None,
                   num_groups=1):
  """A dense layer with 2D kernel.

  With `num_groups > 1` the weight is block-diagonal: the hidden dimension is
  split into `num_groups` slices, each projected independently by its own
  [hidden/G, output/G] kernel (an NPU-oriented grouped dense layer).

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    num_groups: number of groups in dense layer
    num_attention_heads: number of attention head in attention layer.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  del num_attention_heads  # unused
  input_shape = get_shape_list(input_tensor)
  hidden_size = input_shape[2]
  if num_groups == 1:
    with tf.variable_scope(name):
      w = tf.get_variable(
          name="kernel",
          shape=[hidden_size, output_size],
          initializer=initializer)
      b = tf.get_variable(
          name="bias", shape=[output_size], initializer=tf.zeros_initializer)
      ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
      ret += b
  else:
    # Grouped path: both dimensions must split evenly across groups.
    assert hidden_size % num_groups == 0
    assert output_size % num_groups == 0
    with tf.variable_scope(name):
      w = tf.get_variable(
          name="kernel",
          shape=[hidden_size//num_groups, output_size//num_groups, num_groups],
          initializer=initializer)
      b = tf.get_variable(
          name="bias", shape=[output_size], initializer=tf.zeros_initializer)
      # View the hidden dim as [hidden/G, G]; G is the fastest-varying axis.
      input_tensor = tf.reshape(input_tensor, input_shape[:2] + [hidden_size//num_groups, num_groups])
      # B=batch, F=seq, H=hidden/G, O=output/G, G=groups.
      ret = tf.einsum("BFHG,HOG->BFGO", input_tensor, w)
      ret = tf.reshape(ret, input_shape[:2] + [output_size])
      ret += b
  if activation is not None:
    return activation(ret)
  else:
    return ret
def dense_layer_2d_old(input_tensor,
                       output_size,
                       initializer,
                       activation,
                       num_attention_heads=1,
                       name=None,
                       num_groups=1):
  """A dense layer with 2D kernel, with an added grouped fully-connected mode.

  Legacy transpose-based implementation of the grouped dense layer; the
  einsum-based `dense_layer_2d` above appears to be the one in active use —
  verify before removing.

  Args:
    input_tensor: Float tensor with rank 3. [ batch_size,sequence_length, hidden_size]
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    num_groups: number of groups in dense layer
    num_attention_heads: number of attention head in attention layer.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  del num_attention_heads  # unused
  input_shape = get_shape_list(input_tensor)
  # print("#dense_layer_2d.1.input_shape of input_tensor:",input_shape) # e.g. [2, 512, 768] = [ batch_size,sequence_length, hidden_size]
  hidden_size = input_shape[2]
  if num_groups == 1:
    with tf.variable_scope(name):
      w = tf.get_variable(
          name="kernel",
          shape=[hidden_size, output_size],
          initializer=initializer)
      b = tf.get_variable(
          name="bias", shape=[output_size], initializer=tf.zeros_initializer)
      ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
      ret += b
  else:  # e.g. input_shape = [2, 512, 768] = [ batch_size,sequence_length, hidden_size]
    assert hidden_size % num_groups == 0
    assert output_size % num_groups == 0
    # print("#dense_layer_2d.output_size:",output_size,";hidden_size:",hidden_size) # output_size = 3072; hidden_size = 768
    with tf.variable_scope(name):
      # NOTE: kernel layout here is [G, H/G, O/G]; `dense_layer_2d` stores
      # [H/G, O/G, G], so checkpoints are not interchangeable between the two.
      w = tf.get_variable(
          name="kernel",
          shape=[num_groups, hidden_size//num_groups, output_size//num_groups],
          initializer=initializer)
      # print("#dense_layer_2d.2'w:",w.shape) # (16, 48, 192)
      b = tf.get_variable(
          name="bias", shape=[num_groups, output_size//num_groups], initializer=tf.zeros_initializer)
      # input_tensor = [ batch_size,sequence_length, hidden_size].
      # input_shape[:2] + [hidden_size//num_groups, num_groups] = [batch_size, sequence_length, hidden_size/num_groups, num_groups]
      input_tensor = tf.reshape(input_tensor, input_shape[:2] + [hidden_size//num_groups, num_groups])
      # print("#dense_layer_2d.2.input_shape of input_tensor:", input_tensor.shape)
      input_tensor = tf.transpose(input_tensor, [3, 0, 1, 2])  # [num_groups, batch_size, sequence_length, hidden_size/num_groups]
      # print("#dense_layer_2d.3.input_shape of input_tensor:", input_tensor.shape) # input_tensor=(16, 2, 512, 192)
      # input_tensor=[num_groups, batch_size, sequence_length, hidden_size/num_groups], w=[num_groups, hidden_size/num_groups, output_size/num_groups]
      ret = tf.einsum("GBFH,GHO->GBFO", input_tensor, w)
      # print("#dense_layer_2d.4. shape of ret:", ret.shape) # (16, 2, 512, 48) = [num_groups, batch_size, sequence_length ,output_size]
      b = tf.expand_dims(b, 1)
      b = tf.expand_dims(b, 1)
      # print("#dense_layer_2d.4.2.b:",b.shape) # (16, 1, 1, 48)
      ret += b
      ret = tf.transpose(ret, [1, 2, 0, 3])  # (2, 512, 16, 48)
      # print("#dense_layer_2d.5. shape of ret:", ret.shape)
      ret = tf.reshape(ret, input_shape[:2] + [output_size])  # [2, 512, 768]
  if activation is not None:
    return activation(ret)
  else:
    return ret
def dot_product_attention(q, k, v, bias, dropout_rate=0.0):
  """Dot-product attention.

  Args:
    q: Tensor with shape [..., length_q, depth_k].
    k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
      match with q.
    v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
      match with q.
    bias: bias Tensor (see attention_bias()); here a 0/1 mask of shape
      [B, 1, length_kv, 1] (see `attention_layer`), expanded below into an
      additive bias. `q` must be rank 4 or 5 when `bias` is given.
    dropout_rate: a float.

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  logits = tf.matmul(q, k, transpose_b=True)  # [..., length_q, length_kv]
  # Standard 1/sqrt(depth_k) scaling.
  logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
  if bias is not None:
    # `attention_mask` = [B, T]
    from_shape = get_shape_list(q)
    if len(from_shape) == 4:
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32)
    elif len(from_shape) == 5:
      # from_shape = [B, N, Block_num, block_size, depth]#
      broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3],
                                1], tf.float32)
    # ones [B,1,F,1] x mask^T [B,1,1,T] -> [B,1,F,T] broadcastable bias.
    # NOTE(review): a rank other than 4/5 leaves `broadcast_ones` unbound
    # (NameError); callers in this file always pass rank-4 q.
    bias = tf.matmul(broadcast_ones,
                     tf.cast(bias, tf.float32), transpose_b=True)
    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
    # masked positions, this operation will create a tensor which is 0.0 for
    # positions we want to attend and -10000.0 for masked positions.
    adder = (1.0 - bias) * -10000.0
    # Since we are adding it to the raw scores before the softmax, this is
    # effectively the same as removing these entirely.
    logits += adder
  else:
    adder = 0.0
  attention_probs = tf.nn.softmax(logits, name="attention_probs")
  attention_probs = dropout(attention_probs, dropout_rate)
  return tf.matmul(attention_probs, v)
def attention_layer(from_tensor,
                    to_tensor,
                    attention_mask=None,
                    num_attention_heads=1,
                    query_act=None,
                    key_act=None,
                    value_act=None,
                    attention_probs_dropout_prob=0.0,
                    initializer_range=0.02,
                    batch_size=None,
                    from_seq_length=None,
                    to_seq_length=None):
  """Performs multi-headed attention from `from_tensor` to `to_tensor`.

  Args:
    from_tensor: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
    attention_mask: (optional) int32 Tensor. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions
      in the mask that are 0, and will be unchanged for positions that are 1.
      In this NPU port the mask is reshaped to [batch_size, 1, to_seq_length,
      1], i.e. a per-key [batch, seq] mask is expected (see the reshape below
      and `dot_product_attention`).
    num_attention_heads: int. Number of attention heads.
    query_act: (optional) Activation function for the query transform.
    key_act: (optional) Activation function for the key transform.
    value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
    initializer_range: float. Range of the weight initializer.
    batch_size: (Optional) int. If the input is 2D, this might be the batch size
      of the 3D version of the `from_tensor` and `to_tensor`.
    from_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `from_tensor`.
    to_seq_length: (Optional) If the input is 2D, this might be the seq length
      of the 3D version of the `to_tensor`.

  Returns:
    float Tensor of shape [batch_size, from_seq_length, num_attention_heads,
      size_per_head].

  Raises:
    ValueError: Any of the arguments or tensor shapes are invalid.
  """
  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
  to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
  # Head size is derived from the input width, so from_width must be a
  # multiple of num_attention_heads.
  size_per_head = int(from_shape[2]/num_attention_heads)
  if len(from_shape) != len(to_shape):
    raise ValueError(
        "The rank of `from_tensor` must match the rank of `to_tensor`.")
  if len(from_shape) == 3:
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_seq_length = to_shape[1]
  elif len(from_shape) == 2:
    if (batch_size is None or from_seq_length is None or to_seq_length is None):
      raise ValueError(
          "When passing in rank 2 tensors to attention_layer, the values "
          "for `batch_size`, `from_seq_length`, and `to_seq_length` "
          "must all be specified.")
  # Scalar dimensions referenced here:
  #   B = batch size (number of sequences)
  #   F = `from_tensor` sequence length
  #   T = `to_tensor` sequence length
  #   N = `num_attention_heads`
  #   H = `size_per_head`
  # `query_layer` = [B, F, N, H]
  q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), query_act, "query")
  # `key_layer` = [B, T, N, H]
  k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), key_act, "key")
  # `value_layer` = [B, T, N, H]
  v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head,
                     create_initializer(initializer_range), value_act, "value")
  # Move the head dimension before seq: [B, N, seq, H] for batched matmul.
  q = tf.transpose(q, [0, 2, 1, 3])
  k = tf.transpose(k, [0, 2, 1, 3])
  v = tf.transpose(v, [0, 2, 1, 3])
  if attention_mask is not None:
    # [B, T] -> [B, 1, T, 1]; dot_product_attention broadcasts this against
    # ones to build the [B, 1, F, T] additive bias.
    attention_mask = tf.reshape(
        attention_mask, [batch_size, 1, to_seq_length, 1])
  # 'new_embeddings = [B, N, F, H]'
  new_embeddings = dot_product_attention(q, k, v, attention_mask,
                                         attention_probs_dropout_prob)
  return tf.transpose(new_embeddings, [0, 2, 1, 3])
def attention_ffn_block(layer_input,
                        hidden_size=768,
                        attention_mask=None,
                        num_attention_heads=1,
                        attention_head_size=64,
                        attention_probs_dropout_prob=0.0,
                        intermediate_size=3072,
                        intermediate_act_fn=None,
                        initializer_range=0.02,
                        hidden_dropout_prob=0.0):
  """A network with attention-ffn as sub-block.

  Standard post-LN transformer sub-block: self-attention + projection +
  residual + layer norm, then FFN + residual + layer norm.

  Args:
    layer_input: float Tensor of shape [batch_size, from_seq_length,
      from_width].
    hidden_size: (optional) int, size of hidden layer.
    attention_mask: (optional) int32 Tensor of shape [batch_size,
      from_seq_length, to_seq_length]. The values should be 1 or 0. The
      attention scores will effectively be set to -infinity for any positions in
      the mask that are 0, and will be unchanged for positions that are 1.
    num_attention_heads: int. Number of attention heads.
    attention_head_size: int. Size of attention head.
    attention_probs_dropout_prob: float. dropout probability for attention_layer
    intermediate_size: int. Size of intermediate hidden layer.
    intermediate_act_fn: (optional) Activation function for the intermediate
      layer.
    initializer_range: float. Range of the weight initializer.
    hidden_dropout_prob: (optional) float. Dropout probability of the hidden
      layer.

  Returns:
    layer output
  """
  with tf.variable_scope("attention_1"):
    with tf.variable_scope("self"):
      attention_output = attention_layer(
          from_tensor=layer_input,
          to_tensor=layer_input,
          attention_mask=attention_mask,
          num_attention_heads=num_attention_heads,
          attention_probs_dropout_prob=attention_probs_dropout_prob,
          initializer_range=initializer_range)
    # Run a linear projection of `hidden_size` then add a residual
    # with `layer_input`.
    with tf.variable_scope("output"):
      attention_output = dense_layer_3d_proj(
          attention_output,
          hidden_size,
          attention_head_size,
          create_initializer(initializer_range),
          None,
          name="dense")
      attention_output = dropout(attention_output, hidden_dropout_prob)
  attention_output = layer_norm(attention_output + layer_input)
  with tf.variable_scope("ffn_1"):
    with tf.variable_scope("intermediate"):
      # num_groups=16 hard-codes an NPU grouped (block-diagonal) dense layer;
      # both hidden_size and intermediate_size must be divisible by 16.
      intermediate_output = dense_layer_2d(
          attention_output,
          intermediate_size,
          create_initializer(initializer_range),
          intermediate_act_fn,
          num_attention_heads=num_attention_heads,
          name="dense",
          num_groups=16)
      with tf.variable_scope("output"):
        ffn_output = dense_layer_2d(
            intermediate_output,
            hidden_size,
            create_initializer(initializer_range),
            None,
            num_attention_heads=num_attention_heads,
            name="dense",
            num_groups=16)
      ffn_output = dropout(ffn_output, hidden_dropout_prob)
  ffn_output = layer_norm(ffn_output + attention_output)
  return ffn_output
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_hidden_groups=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      inner_group_num=1,
                      intermediate_act_fn="gelu",
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_hidden_groups: int. Number of group for the hidden layers, parameters
      in the same group are shared.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    inner_group_num: int, number of inner repetition of attention and ffn.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer.

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))
  attention_head_size = hidden_size // num_attention_heads
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  input_width = input_shape[2]
  all_layer_outputs = []
  if input_width != hidden_size:
    # ALBERT embedding factorization: project the (smaller) embedding width
    # up to the transformer hidden size.
    prev_output = dense_layer_2d(
        input_tensor, hidden_size, create_initializer(initializer_range),
        None, name="embedding_hidden_mapping_in")
  else:
    prev_output = input_tensor
  with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE):
    for layer_idx in range(num_hidden_layers):
      # Map each layer to its parameter group; with AUTO_REUSE, layers that
      # land in the same "group_%d" scope share variables (ALBERT cross-layer
      # parameter sharing). name_scope keeps the per-layer op names distinct.
      group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups)
      with tf.variable_scope("group_%d" % group_idx):
        with tf.name_scope("layer_%d" % layer_idx):
          layer_output = prev_output
          for inner_group_idx in range(inner_group_num):
            with tf.variable_scope("inner_group_%d" % inner_group_idx):
              layer_output = attention_ffn_block(
                  layer_output, hidden_size, attention_mask,
                  num_attention_heads, attention_head_size,
                  attention_probs_dropout_prob, intermediate_size,
                  intermediate_act_fn, initializer_range, hidden_dropout_prob)
              prev_output = layer_output
              all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    return all_layer_outputs
  else:
    return all_layer_outputs[-1]
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)
  # Static shape; dynamic dimensions appear as None entries.
  shape = tensor.shape.as_list()
  non_static_indexes = []
  for (index, dim) in enumerate(shape):
    if dim is None:
      non_static_indexes.append(index)
  if not non_static_indexes:
    return shape
  # Fall back to the runtime shape only for the dynamic dimensions.
  dyn_shape = tf.shape(tensor)
  for index in non_static_indexes:
    shape[index] = dyn_shape[index]
  return shape
def reshape_to_matrix(input_tensor):
  """Collapse all leading dimensions of a rank >= 2 tensor into one (matrix form)."""
  rank = input_tensor.shape.ndims
  if rank < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if rank == 2:
    return input_tensor
  last_dim = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Undo reshape_to_matrix: restore the original leading dimensions."""
  if len(orig_shape_list) == 2:
    return output_tensor
  # Keep the (possibly changed) trailing width; restore the leading dims.
  width = get_shape_list(output_tensor)[-1]
  leading_dims = orig_shape_list[0:-1]
  return tf.reshape(output_tensor, leading_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  if name is None:
    name = tensor.name
  # Accept a single integer or an iterable of acceptable ranks.
  expected_rank_dict = {}
  if isinstance(expected_rank, six.integer_types):
    expected_rank_dict[expected_rank] = True
  else:
    for x in expected_rank:
      expected_rank_dict[x] = True
  actual_rank = tensor.shape.ndims
  if actual_rank not in expected_rank_dict:
    # Include the variable scope to make the error easier to locate.
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| 40.458404 | 150 | 0.678619 |
250ba1daa17db9cfd798048235d77561bb259598 | 244 | py | Python | federated_aggregations/paillier/placement.py | tf-encrypted/federated-aggregations | b4ab7a15c2719d4119db7d9d609f8c06d9df8958 | [
"Apache-2.0"
] | 16 | 2020-08-07T05:40:09.000Z | 2022-01-08T20:32:07.000Z | federated_aggregations/paillier/placement.py | tf-encrypted/federated-aggregations | b4ab7a15c2719d4119db7d9d609f8c06d9df8958 | [
"Apache-2.0"
] | 1 | 2020-10-14T00:18:39.000Z | 2020-10-19T14:13:03.000Z | federated_aggregations/paillier/placement.py | tf-encrypted/federated-aggregations | b4ab7a15c2719d4119db7d9d609f8c06d9df8958 | [
"Apache-2.0"
] | 2 | 2020-09-08T10:16:28.000Z | 2021-01-14T12:33:01.000Z | from tensorflow_federated.python.core.impl.types import placement_literals
# Placement literal used to tag aggregation logic that is not pinned to a
# concrete physical placement (an "unplacement", per the description below).
AGGREGATOR = placement_literals.PlacementLiteral(
    'AGGREGATOR',
    'aggregator',
    default_all_equal=True,
    description='An "unplacement" for aggregations.')
| 30.5 | 74 | 0.782787 |
e25096a02eb0192940f359b5f239ae28ffc1f7a1 | 206 | py | Python | backend/src/baserow/api/applications/errors.py | calvinchengx/baserow | 0340d5abf0a3b48154d41fd05cd2e1e05814cd66 | [
"MIT"
] | 1 | 2021-04-13T16:27:58.000Z | 2021-04-13T16:27:58.000Z | backend/src/baserow/api/applications/errors.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | 7 | 2021-03-19T12:07:13.000Z | 2022-02-10T14:47:21.000Z | backend/src/baserow/api/applications/errors.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | null | null | null | from rest_framework.status import HTTP_404_NOT_FOUND
# API error triple: (machine-readable error code, HTTP status, human message).
ERROR_APPLICATION_DOES_NOT_EXIST = (
    'ERROR_APPLICATION_DOES_NOT_EXIST',
    HTTP_404_NOT_FOUND,
    'The requested application does not exist.'
)
| 22.888889 | 52 | 0.800971 |
1094e92b2feb0e0c1e430aca0ec9f41f7aebc5f3 | 23,485 | py | Python | src/lib/packet/script/code_generator.py | DerangedMonkeyNinja/openperf | cde4dc6bf3687f0663c11e9e856e26a0dc2b1d16 | [
"Apache-2.0"
] | 20 | 2019-12-04T01:28:52.000Z | 2022-03-17T14:09:34.000Z | src/lib/packet/script/code_generator.py | DerangedMonkeyNinja/openperf | cde4dc6bf3687f0663c11e9e856e26a0dc2b1d16 | [
"Apache-2.0"
] | 115 | 2020-02-04T21:29:54.000Z | 2022-02-17T13:33:51.000Z | src/lib/packet/script/code_generator.py | DerangedMonkeyNinja/openperf | cde4dc6bf3687f0663c11e9e856e26a0dc2b1d16 | [
"Apache-2.0"
] | 16 | 2019-12-03T16:41:18.000Z | 2021-11-06T04:44:11.000Z | import argparse
import re
import os
import pathlib
import sys
import textwrap
import yaml
HEADER_GUARD="_LIB_PACKET_PROTOCOL_{}_HPP_"
HEADER_NAMESPACE="libpacket::protocol"
INCLUDE_BASE_DIR="packet/type"
def write_file_header(output, name, kind):
    """Emit the do-not-edit banner comment at the top of a generated file."""
    banner = [
        '/**',
        ' * {} {} for the packet header C++ Library'.format(name, kind),
        ' *',
        ' * This file is automatically generated by the library code generator.',
        ' * Do not edit this file manually.',
        ' **/',
        '',  # trailing newline after the closing comment
    ]
    output.write('\n'.join(banner))
def write_comment(output, text):
    """Emit *text* as a C-style block comment, wrapping long text.

    Fixes two defects in the original: wrapped lines were written without a
    newline between them (so multi-line comments were concatenated onto one
    line), and continuation lines were missing the ' * ' prefix because
    subsequent_indent was not set on the TextWrapper.
    """
    wrapper = textwrap.TextWrapper(initial_indent=" * ", subsequent_indent=" * ")
    output.write('/**\n')
    output.write('\n'.join(wrapper.wrap(text)))
    output.write('\n **/\n\n')
def write_cr(output, count=None):
    """Write one or more newlines; a falsy count means exactly one."""
    output.write('\n' * (count or 1))

def to_cpp_name(name):
    """Lower-case a protocol name for use as a C++ identifier / file name."""
    return name.lower()
class cpp_indenter():
    """Wraps a writable stream, prefixing every write with one indent level."""
    def __init__(self, output):
        self.output = output
    def write(self, s):
        # Prepend the indent; nesting indenters compounds the indentation.
        self.output.write('    ' + s)
class cpp_scope():
    """Context manager emitting a brace-delimited C++ scope.

    On entry writes the optional header text plus '{'; on exit writes '}'.
    Yields a cpp_indenter so scope contents are indented one level.
    """
    def __init__(self, output, text = None):
        self.output = output
        self.text = text
    def __enter__(self):
        if self.text:
            # Avoid a doubled brace when the caller's text already ends in '{'.
            if self.text[-1] != '{':
                line = (self.text + '\n{\n')
            else:
                line = (self.text + '\n')
            self.output.write(line)
        else:
            self.output.write('{\n')
        return (cpp_indenter(self.output))
    def __exit__(self, type, value, traceback):
        self.output.write('}\n')
class semi_scope():
    """Like cpp_scope, but closes with '};' — for struct/enum definitions."""
    def __init__(self, output, name):
        self.output = output
        self.name = name
    def __enter__(self):
        self.output.write('{}\n'.format(self.name))
        self.output.write('{\n')
        return cpp_indenter(self.output)
    def __exit__(self, type, value, traceback):
        self.output.write('};\n')
class cpp_namespace():
    """Context manager emitting 'namespace <ns> { ... }' around its body.

    Unlike cpp_scope, the body is NOT indented (namespace contents are
    written at column 0, per common C++ convention).
    """
    def __init__(self, output, ns):
        self.output = output
        self.ns = ns
    def __enter__(self):
        self.output.write('namespace {} {{\n'.format(self.ns))
        write_cr(self.output)
        return self.output
    def __exit__(self, type, value, traceback):
        write_cr(self.output)
        self.output.write('}\n')
class cpp_header_guard():
    """Context manager wrapping output in #ifndef/#define/#endif guards.

    The guard macro is derived from *name* via the HEADER_GUARD template.
    """
    def __init__(self, output, name):
        self.guard = HEADER_GUARD.format(name.upper())
        self.output = output
    def __enter__(self):
        self.output.write('#ifndef {}\n'.format(self.guard))
        self.output.write('#define {}\n'.format(self.guard))
        write_cr(self.output)
        return self.output
    def __exit__(self, type, value, traceback):
        write_cr(self.output)
        self.output.write('#endif /* {} */\n'.format(self.guard))
def get_length_in_bits(field):
    """Return a field's width in bits: explicit 'length' for scalar formats,
    fixed widths for address formats."""
    fmt = field.get('format', 'field')
    if fmt in ('field', 'enumeration', 'number'):
        return field['length']
    fixed_widths = {'ipv4': 32, 'mac': 48, 'ipv6': 128}
    if fmt in fixed_widths:
        return fixed_widths[fmt]
    assert False, 'Unrecognized format'
def make_field_mask(offset, nb_bits, width):
    """Return a *width*-bit mask with *nb_bits* ones starting *offset* bits
    from the most-significant bit."""
    assert offset + nb_bits <= width
    ones = (1 << nb_bits) - 1
    return ones << (width - offset - nb_bits)
def octets_to_unsigned_int_type(octets):
    """Smallest C unsigned integer type able to hold *octets* bytes (max 8)."""
    assert octets <= 8
    for limit, type_name in ((1, 'uint8_t'), (2, 'uint16_t'), (4, 'uint32_t')):
        if octets <= limit:
            return type_name
    return 'uint64_t'

def field_to_unsigned_int_type(field):
    """Map a 'type::endian::field<N>' type string to its uintN_t equivalent."""
    octets = int(re.match('.+endian::field<(\d+)>', field).group(1))
    return octets_to_unsigned_int_type(octets)
def get_cpp_type(name, props, nb_bits):
    """Map a YAML field description onto the C++ storage type for the struct.

    *name* is unused but kept for call-site symmetry with related helpers.
    """
    fmt = props.get('format', 'field')
    if fmt in ('field', 'enumeration'):
        assert nb_bits % 8 == 0, 'Number of bits must be a multiple of 8'
        return 'type::endian::field<{:d}>'.format(nb_bits // 8)
    if fmt == 'number':
        assert nb_bits % 8 == 0, 'Number of bits must be a multiple of 8'
        assert nb_bits <= 64, 'Number of bits exceeds machine type'
        return 'type::endian::number<{}>'.format(octets_to_unsigned_int_type(nb_bits // 8))
    address_types = {
        'ipv4': 'type::ipv4_address',
        'ipv6': 'type::ipv6_address',
        'mac': 'type::mac_address',
    }
    if fmt in address_types:
        return address_types[fmt]
    assert False, 'Unrecognized format'
def get_cpp_function_type(struct, field, props):
    """Return the C++ type used in getter/setter signatures for *field*.

    Enumerated fields map to a plain enum (unique values) or bit_flags;
    endian fields map to bool (1-bit) or the smallest uint; endian numbers
    map to their underlying uint; everything else (address types) is passed
    by const reference.

    Removed a commented-out duplicate of the field<N> return (dead code).
    """
    if 'values' in props:
        if props['values']['unique']:
            return 'enum {}::{}_value'.format(struct, field)
        return 'type::bit_flags<{}::{}_value>'.format(struct, field)
    cpp_type = props['cpp_type']
    match = re.match('type::endian::field<(\d+)>', cpp_type)
    if match:
        # Single-bit fields read/write as bool; wider fields as uintN_t.
        return 'bool' if props['length'] == 1 else octets_to_unsigned_int_type(int(match.group(1)))
    match = re.match('type::endian::number<(\w+)>', cpp_type)
    if match:
        return match.group(1)
    return 'const {}&'.format(props['cpp_type'])
def get_cpp_base_type(struct, field, props):
    """Return the underlying C++ value type for a field.

    *struct* and *field* are unused but kept for call-site symmetry.
    """
    cpp_type = props['cpp_type']
    field_match = re.match('type::endian::field<(\d+)>', cpp_type)
    if field_match:
        return octets_to_unsigned_int_type(int(field_match.group(1)))
    number_match = re.match('type::endian::number<(\w+)>', cpp_type)
    return number_match.group(1) if number_match else cpp_type
def get_getter_declaration(struct, field, props):
    """Build the C++ getter signature, e.g. 'uint8_t get_ipv4_ttl(const ipv4& header) noexcept'."""
    return '{0} get_{1}_{2}(const {1}& header) noexcept'.format(
        get_cpp_function_type(struct, field, props),
        struct,
        field)
def is_reference(cpp_type):
    """True when the C++ type string denotes a reference (ends with '&')."""
    return cpp_type[-1:] == '&'

def to_lvalue(ref_type):
    """Convert a (possibly const) reference type string to its '&&' form."""
    assert is_reference(ref_type), 'must be reference type'
    match = re.search('(const)?\s+(.*)&', ref_type)
    return '{}&&'.format(match.group(2).strip())

def is_lvalue(cpp_type):
    """True when the C++ type string ends with '&&'."""
    return cpp_type[-2:] == '&&'
def needs_cast(cpp_type):
    """True when setter/getter values require a static_cast (flags, enums, bool)."""
    return any(token in cpp_type for token in ('bit_flags', 'enum', 'bool'))

def is_number(cpp_type):
    """True when the storage type is a type::endian::number<...>."""
    return 'endian::number' in cpp_type
def get_setter_declaration(struct, field, arg_type):
    """Build the C++ setter signature for *field* of *struct*."""
    return ('void set_{s}_{f}({s}& header, {t} value) noexcept'
            .format(s=struct, f=field, t=arg_type))
def get_field_translate_declaration(name, namespace = None):
    """Signature for the name -> field_name lookup; *namespace* prefixes the
    function name when generating the out-of-class definition."""
    cpp_name = to_cpp_name(name)
    ns = '{}::'.format(namespace) if namespace else ''
    return 'enum {0}::field_name {1}get_field_name(std::string_view name) noexcept'.format(cpp_name, ns)
def get_field_type_declaration(name, namespace = None):
    """Signature for the field -> std::type_info lookup.

    NOTE(review): *name* is accepted but unused; only *namespace* affects
    the output — confirm whether that is intentional.
    """
    ns = '{}::'.format(namespace) if namespace else ''
    return 'const std::type_info& {0}get_field_type({0}field_name field) noexcept'.format(ns)
def get_defaults_setter_declaration(name, namespace = None):
    """Signature for the free function applying field defaults.

    NOTE(review): *namespace* is accepted (and passed by one caller) but
    unused here — the generated function is free, not a struct member.
    """
    cpp_name = to_cpp_name(name)
    return 'void set_{0}_defaults({0}& header) noexcept'.format(cpp_name)
def generate_field_data(fields):
    """Transform the YAML field map into per-field generation metadata.

    Consecutive fields are accumulated until their combined width reaches a
    byte boundary; that group shares one storage member (struct_name). Each
    output entry records its bit length, storage member, C++ type, optional
    default/multipleOf/values metadata and, for packed groups, the mask and
    shift needed to extract the field from the shared member. Exits the
    process with an error if a packed group mixes numeric and non-numeric
    fields.
    """
    out_fields = dict()
    loop_fields = dict()
    for name, values in fields.items():
        loop_fields[name] = values
        loop_bits = sum(get_length_in_bits(v) for v in loop_fields.values())
        # Keep accumulating until the group ends on a byte boundary.
        if loop_bits % 8 != 0:
            continue
        # If combining fields, make sure all fields have the same type
        formats = set(map(lambda v: v['format'] if 'format' in v else 'field', loop_fields.values()))
        if 'number' in formats and len(formats) > 1:
            sys.stderr.write('Combined fields ({}) cannot contain a numeric field\n'.format(
                ', '.join(loop_fields.keys())))
            sys.exit(1)
        # The shared storage member is named after all fields in the group.
        loop_name = '_'.join(loop_fields.keys())
        offset = 0
        for name, props in loop_fields.items():
            field_bits = get_length_in_bits(props)
            out_fields[name] = {
                'length': field_bits,
                'struct_name': loop_name,
                'cpp_type': get_cpp_type(name, props, loop_bits),
            }
            if 'default' in props:
                out_fields[name]['default'] = props['default']
            if 'multipleOf' in props:
                out_fields[name]['multipleOf'] = props['multipleOf']
            if 'items' in props:
                value_tuples = list()
                for item in props['items']:
                    value_tuples.append(next(iter(item.items())))
                out_fields[name]['values'] = {
                    'items': value_tuples,
                    'unique': props['uniqueItems'] if 'uniqueItems' in props else False
                }
            # Packed groups need mask/shift to address individual fields.
            if (len(loop_fields.keys()) > 1):
                out_fields[name]['mask'] = make_field_mask(offset, field_bits, loop_bits)
                out_fields[name]['shift'] = loop_bits - field_bits - offset
                offset += field_bits
        loop_fields = dict()
    return out_fields
def get_struct_tuples(field_data):
    """Unique (cpp_type, struct_name) pairs, preserving first-seen order."""
    seen = dict.fromkeys(
        (props['cpp_type'], props['struct_name']) for props in field_data.values())
    return list(seen)
def get_includes(data):
    """Return the sorted set of project headers the generated code requires,
    derived from each field's storage type."""
    headers = set()
    for props in data.values():
        cpp_type = props['cpp_type']
        addr_match = re.match('(\w+)::(\w+)_address', cpp_type)
        if addr_match:
            # Address types pull in their dedicated header (ipv4/ipv6/mac).
            headers.add(os.path.join(INCLUDE_BASE_DIR,
                                     '{}_address.hpp'.format(addr_match.group(2))))
        elif cpp_type.find('endian') >= 0:
            # Non-unique enumerations additionally need the bit-flags helper.
            if 'values' in props and not props['values']['unique']:
                headers.add(os.path.join(INCLUDE_BASE_DIR,
                                         'enum_flags.hpp'))
            headers.add(os.path.join(INCLUDE_BASE_DIR, 'endian.hpp'))
        else:
            assert False, 'unhandled cpp type: {}'.format(cpp_type)
    return sorted(headers)
def has_defaults(data):
    """Return True if any field definition carries a 'default' value.

    Replaces the original sum(list(map(lambda ...))) > 0 construction with
    the equivalent, idiomatic any() over a generator.
    """
    return any('default' in props for props in data.values())
"""
C++ header writing functions
"""
def write_header_struct(output, name, data):
    """Emit the C++ struct definition for protocol *name*: static metadata,
    the field_name enum, any per-field value enums, the storage members, and
    the static member-function declarations."""
    with semi_scope(output, 'struct {}'.format(name)) as s:
        # Generate some useful static variables
        s.write('static constexpr size_t protocol_field_count = {};\n'.format(
            len(data.keys())
        ))
        s.write('static constexpr uint16_t protocol_length = {};\n'.format(
            sum(field['length'] for field in data.values()) // 8
        ))
        s.write('static constexpr std::string_view protocol_name = "{}";\n'.format(
            name
        ))
        # Generate an enum for each field
        write_cr(output)
        with semi_scope(s, 'enum class field_name') as e:
            e.write('none,\n')
            for field in data.keys():
                e.write('{},\n'.format(field))
        # Generate any enums we might need
        for field, props in data.items():
            if 'values' in props:
                write_cr(output)
                with semi_scope(s, 'enum class {}_value'.format(field)) as e:
                    for enum, value in props['values']['items']:
                        e.write('{} = 0x{:x},\n'.format(enum, value))
        write_cr(output)
        for kind, field_name in get_struct_tuples(data):
            s.write('{} {};\n'.format(kind, field_name))
        # Generate static functions
        write_cr(output)
        s.write('static {};\n'.format(get_field_translate_declaration(name)))
        s.write('static {};\n'.format(get_field_type_declaration(name)))
        write_cr(output)
        s.write('template <typename Value>\n')
        s.write('void set_field(enum field_name field, Value value) noexcept;\n')
def write_template_setter(output, name, data):
    """Emit the templated set_field() member definition: a switch over
    field_name that forwards to the per-field setters when the value type
    is convertible."""
    cpp_name = to_cpp_name(name)
    output.write('template <typename Value>\n')
    output.write('void {0}::set_field(enum {0}::field_name field, Value value) noexcept\n'.format(cpp_name))
    with cpp_scope(output) as f:
        with cpp_scope(f, 'switch (field) {') as s:
            for field, props in data.items():
                arg_type = get_cpp_function_type(cpp_name, field, props)
                base_type = get_cpp_base_type(cpp_name, field, props)
                s.write('case {0}::field_name::{1}:\n'.format(cpp_name, field))
                s.write('    if constexpr (std::is_convertible_v<Value, {0}>) {{\n'.format(base_type))
                # Enum/flag/bool targets need an explicit cast before the call.
                if needs_cast(arg_type):
                    s.write('        set_{0}_{1}(*this, static_cast<{2}>(value));\n'.format(
                        cpp_name, field, arg_type))
                else:
                    s.write('        set_{0}_{1}(*this, value);\n'.format(
                        cpp_name, field))
                s.write('    }\n')
                s.write('    break;\n')
            s.write('default:\n')
            s.write('    break; /* no-op */\n')
def write_cpp_header_contents(output, name, data):
    """Emit the header body for protocol *name*: the struct, then getter,
    setter and generic set_field declarations/definitions."""
    cpp_name = to_cpp_name(name)
    write_header_struct(output, cpp_name, data)
    write_cr(output)
    write_comment(output, '{} get functions'.format(name))
    for field, props in data.items():
        output.write('{};\n'.format(get_getter_declaration(cpp_name, field, props)))
    write_cr(output)
    write_comment(output, '{} set functions'.format(name))
    if has_defaults(data):
        output.write('{};\n'.format(get_defaults_setter_declaration(name)))
    for field, props in data.items():
        arg_type = get_cpp_function_type(cpp_name, field, props)
        output.write('{};\n'.format(get_setter_declaration(cpp_name, field, arg_type)))
        # Reference parameters also get an rvalue-reference overload.
        if is_reference(arg_type):
            output.write('{};\n'.format(get_setter_declaration(cpp_name, field, to_lvalue(arg_type))))
    write_cr(output)
    write_comment(output, '{} generic functions'.format(name))
    write_template_setter(output, name, data)
def maybe_write_enum_declaration(output, name, data):
    """Emit declare_libpacket_enum_flags(...) for every non-unique value
    enum, enabling bitwise operators on those enum types."""
    cpp_name = to_cpp_name(name)
    enums = list(filter(lambda item: 'values' in item[1] and not item[1]['values']['unique'],
                        data.items()))
    if len(enums):
        write_cr(output)
        for name, props in enums:
            output.write('declare_libpacket_enum_flags({}::{}::{}_value);\n'.format(
                HEADER_NAMESPACE, cpp_name, name
            ))
"""
C++ implementation writing functions
"""
def generate_getter_impl(output, struct, field, props):
    """Emit the C++ getter definition for one field: direct return for
    references, .load() for numbers, and mask/shift/scale extraction for
    packed endian fields."""
    return_type = get_cpp_function_type(struct, field, props)
    output.write('{}\n'.format(get_getter_declaration(struct, field, props)))
    output.write('{\n')
    if is_reference(return_type):
        output.write('    return (header.{});\n'.format(props['struct_name']))
    elif is_number(props['cpp_type']):
        # NOTE(review): return_type is passed but unused by this format string.
        output.write('    return (header.{}.load());\n'.format(props['struct_name'], return_type))
    else:
        # Load as the raw unsigned type when a cast is needed at the end.
        if needs_cast(return_type):
            to_load = field_to_unsigned_int_type(props['cpp_type'])
        else:
            to_load = return_type
        if 'mask' in props:
            output.write('    auto tmp = header.{}.load<{}>() & 0x{:x};\n'.format(
                props['struct_name'], to_load, props['mask']))
            if 'shift' in props and props['shift']:
                output.write('    tmp >>= {};\n'.format(props['shift']))
        else:
            output.write('    auto tmp = header.{}.load<{}>();\n'.format(props['struct_name'], to_load))
        # Scale stored units back up (e.g. words -> bytes).
        if 'multipleOf' in props:
            output.write('    tmp *= {};\n'.format(props['multipleOf']))
        if needs_cast(return_type):
            output.write('    return (static_cast<{}>(tmp));\n'.format(return_type))
        else:
            output.write('    return (tmp);\n')
    output.write('}\n')
def generate_setter_impl(output, struct, field, props, arg_type):
    """Emit the C++ setter definition for one field: rvalue overloads forward
    to the lvalue setter; references/numbers assign directly; packed endian
    fields are cast, scaled, shifted and read-modify-write merged."""
    output.write('{}\n'.format(get_setter_declaration(struct, field, arg_type)))
    output.write('{\n')
    if is_lvalue(arg_type):
        # The && overload just delegates to the const& overload.
        output.write('    set_{0}_{1}(header, value);\n'.format(struct, field))
    elif is_reference(arg_type) or is_number(props['cpp_type']):
        output.write('    header.{} = value;\n'.format(props['struct_name']))
    else:
        var_name = 'value'
        int_type = field_to_unsigned_int_type(props['cpp_type'])
        if needs_cast(arg_type):
            # bit_flags wraps its raw value in a .value member.
            if arg_type.find('bit_flag') >= 0:
                var_name = 'value.value'
            output.write('    auto tmp = static_cast<{}>({});\n'.format(int_type, var_name))
            var_name = 'tmp'
        # Scale caller units down to stored units (e.g. bytes -> words).
        if 'multipleOf' in props:
            output.write('    {} /= {};\n'.format(var_name, props['multipleOf']))
        if 'mask' in props:
            if 'shift' in props and props['shift']:
                output.write('    {} <<= {};\n'.format(var_name, props['shift']))
            # Read-modify-write: merge the new bits under the field's mask.
            output.write('    header.{0}.store(static_cast<{2}>((({3} & 0x{1:x}) | (header.{0}.load<{2}>() & ~0x{1:x}))));\n'.format(
                props['struct_name'], props['mask'],
                field_to_unsigned_int_type(props['cpp_type']), var_name))
        else:
            output.write('    header.{}.store({});\n'.format(props['struct_name'], var_name))
    output.write('}\n')
def write_assoc_array_template(output):
    """Emit the constexpr associative_array<Key, Value>(...) helper template
    used by the generated field-name lookup."""
    template='''
    template <typename Key, typename Value, typename... Pairs>
    constexpr auto associative_array(Pairs&&... pairs)
        -> std::array<std::pair<Key, Value>, sizeof... (pairs)>
    {
        return {{std::forward<Pairs>(pairs)...}};
    }
    '''
    output.write(textwrap.dedent(template).lstrip())
def generate_field_translate_impl(output, name, data):
    """Emit get_field_name(): a constexpr name->field_name table plus a
    linear search over it."""
    write_assoc_array_template(output)
    write_cr(output)
    cpp_name = to_cpp_name(name)
    with cpp_scope(output, get_field_translate_declaration(name, cpp_name)) as f:
        f.write('constexpr auto field_names = associative_array<std::string_view, {0}::field_name>(\n'
                .format(cpp_name))
        lines = list()
        for field in data.keys():
            lines.append('    std::pair("{1}", {0}::field_name::{1})'.format(cpp_name, field))
        f.write('{});\n'.format(',\n    '.join(lines)))
        write_cr(output)
        f.write('auto cursor = std::begin(field_names), end = std::end(field_names);\n')
        with cpp_scope(f, 'while (cursor != end) {') as loop:
            loop.write('if (cursor->first == name) return (cursor->second);\n')
            loop.write('cursor++;\n')
        write_cr(output)
        # Unknown names map to the sentinel field_name::none.
        f.write('return ({0}::field_name::none);\n'.format(cpp_name))
def generate_field_type_impl(output, name, data):
    """Emit get_field_type(): a switch mapping each field_name to the
    typeid of its base C++ type (typeid(nullptr) for unknown fields)."""
    cpp_name = to_cpp_name(name)
    with cpp_scope(output, get_field_type_declaration(name, cpp_name)) as f:
        with cpp_scope(f, 'switch (field) {') as s:
            for field, props in data.items():
                base_type = get_cpp_base_type(cpp_name, field, props)
                s.write('case {0}::field_name::{1}:\n'.format(cpp_name, field))
                s.write('    return (typeid({}));\n'.format(base_type))
            s.write('default:\n')
            s.write('    return (typeid(nullptr));\n')
def generate_default_setter_impl(output, name, data):
    """Emit set_<name>_defaults(): one setter call per field with a default."""
    cpp_name = to_cpp_name(name)
    with cpp_scope(output, get_defaults_setter_declaration(name, cpp_name)) as f:
        for field, props in data.items():
            if 'default' not in props:
                continue
            f.write('set_{0}_{1}(header, {2});\n'.format(cpp_name, field, props['default']))
def generate_getters(struct, data):
    """NOTE(review): `generate_getter` is not defined anywhere in this module
    (the real worker is `generate_getter_impl`, which also requires an output
    stream this function does not take). No caller is visible either — this
    function appears to be dead and would raise NameError if invoked; confirm
    and remove, or rewire it to generate_getter_impl."""
    for field, props in data.items():
        generate_getter(struct, field, props)
def generate_setters(output, struct, field, props):
    """Emit the setter definition(s) for one field; reference parameters
    additionally get an rvalue-reference overload."""
    arg_type = get_cpp_function_type(struct, field, props)
    generate_setter_impl(output, struct, field, props, arg_type)
    if is_reference(arg_type):
        write_cr(output)
        generate_setter_impl(output, struct, field, props, to_lvalue(arg_type))
def write_cpp_implementation_contents(output, name, data):
    """Emit the .cpp body for protocol *name*: getter definitions, setter
    definitions (plus defaults setter when any field has one), and the
    field-name/field-type translation functions.

    Removed the unused local `skip_cr` left over in the original.
    """
    cpp_name = to_cpp_name(name)
    write_comment(output, '{} getter implementations'.format(name))
    for field, props in data.items():
        generate_getter_impl(output, cpp_name, field, props)
        write_cr(output)
    write_comment(output, '{} setter implementations'.format(name))
    if has_defaults(data):
        generate_default_setter_impl(output, name, data)
        write_cr(output)
    # Separate setters with a blank line, but not after the last one.
    nb_items = len(data)
    for idx, (field, props) in enumerate(data.items()):
        generate_setters(output, cpp_name, field, props)
        if idx + 1 < nb_items:
            write_cr(output)
    write_cr(output)
    write_comment(output, '{} field translate implementation'.format(name))
    generate_field_translate_impl(output, name, data)
    write_cr(output)
    generate_field_type_impl(output, name, data)
"""
Top level implementation functions
"""
def write_cpp_header(output, name, fields):
    """Write a complete generated .hpp for protocol *name*: banner, includes,
    and the namespaced struct/function declarations inside a header guard."""
    cpp_name = to_cpp_name(name)
    data = generate_field_data(fields)
    with cpp_header_guard(output, cpp_name):
        write_file_header(output, name, 'header')
        write_cr(output)
        # <type_traits> is needed by the templated set_field definition.
        output.write('#include <type_traits>\n')
        for include in get_includes(data):
            output.write('#include "{}"\n'.format(include))
        write_cr(output)
        with cpp_namespace(output, HEADER_NAMESPACE):
            write_cpp_header_contents(output, name, data)
            maybe_write_enum_declaration(output, name, data)
def write_cpp_implementation(output, name, fields, header):
    """Write a complete generated .cpp for protocol *name*, including the
    matching generated *header*."""
    cpp_name = to_cpp_name(name)
    data = generate_field_data(fields)
    write_file_header(output, name, 'implementation')
    write_cr(output)
    output.write('#include "{}"\n'.format(header))
    write_cr(output)
    with cpp_namespace(output, HEADER_NAMESPACE):
        write_cpp_implementation_contents(output, name, data)
"""
Begin script proper
"""
def main():
    """Parse CLI arguments, load the YAML protocol definitions, and write a
    generated .hpp/.cpp pair per protocol into the output directory."""
    parser = argparse.ArgumentParser(description="Generate C++ headers from yaml definitions")
    parser.add_argument('--infile',
                        nargs='?',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help="input YAML file containing header definition")
    parser.add_argument('--outdir',
                        nargs='?',
                        type=pathlib.Path,
                        default=os.getcwd(),
                        help="output directory to write generated files")
    args = parser.parse_args()
    # Fail fast on an unusable output directory.
    if not os.path.isdir(args.outdir):
        sys.stderr.write('Output directory, {}, does not exist\n'.format(args.outdir))
        sys.exit(1)
    if not os.access(args.outdir, os.W_OK):
        sys.stderr.write('Output directory, {}, is not writable\n'.format(args.outdir))
        sys.exit(1)
    obj = yaml.load(args.infile, Loader=yaml.FullLoader)
    # Top-level YAML keys are protocol names; each maps to a 'fields' map.
    for name, data in obj.items():
        file_root = to_cpp_name(name)
        header = file_root + '.hpp'
        fields = data['fields']
        with open(os.path.join(args.outdir, header), 'w') as header_out:
            write_cpp_header(header_out, name, fields)
        with open(os.path.join(args.outdir, file_root + '.cpp'), 'w') as impl_out:
            write_cpp_implementation(impl_out, name, fields, header)
if __name__ == "__main__":
    main()
| 32.215364 | 133 | 0.601703 |
6431222561a1b7fc69b06f0cef26846d9aa4c4f4 | 1,121 | bzl | Python | tests/src_utils_tests.bzl | cgrindel/rules_swiftformat | 4ee27436efbd88f8a072e555d6b5e0384c23721d | [
"Apache-2.0"
] | 1 | 2021-11-02T22:28:10.000Z | 2021-11-02T22:28:10.000Z | tests/src_utils_tests.bzl | cgrindel/rules_swiftformat | 4ee27436efbd88f8a072e555d6b5e0384c23721d | [
"Apache-2.0"
] | 17 | 2021-10-09T19:33:51.000Z | 2022-03-07T16:17:51.000Z | tests/src_utils_tests.bzl | cgrindel/rules_swiftformat | 4ee27436efbd88f8a072e555d6b5e0384c23721d | [
"Apache-2.0"
] | null | null | null | """Tests for src_utils module."""
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load("//swiftformat:defs.bzl", "src_utils")
def _is_label_test(ctx):
env = unittest.begin(ctx)
asserts.true(env, src_utils.is_label("//Sources/Foo"))
asserts.true(env, src_utils.is_label(":Foo"))
asserts.true(env, src_utils.is_label("//Sources/Foo:bar"))
asserts.false(env, src_utils.is_label("Bar.swift"))
asserts.false(env, src_utils.is_label("path/to/Bar.swift"))
return unittest.end(env)
is_label_test = unittest.make(_is_label_test)
def _is_path_test(ctx):
env = unittest.begin(ctx)
asserts.true(env, src_utils.is_path("Bar.swift"))
asserts.true(env, src_utils.is_path("path/to/Bar.swift"))
asserts.false(env, src_utils.is_path("//Sources/Foo"))
asserts.false(env, src_utils.is_path(":Foo"))
asserts.false(env, src_utils.is_path("//Sources/Foo:bar"))
return unittest.end(env)
is_path_test = unittest.make(_is_path_test)
def src_utils_test_suite():
return unittest.suite(
"src_utils_tests",
is_label_test,
is_path_test,
)
| 29.5 | 63 | 0.695807 |
8b09cf4d791c0173749419e41934712d6553e33a | 989 | py | Python | var/spack/repos/builtin/packages/sas/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-05-24T15:23:12.000Z | 2020-05-24T15:23:12.000Z | var/spack/repos/builtin/packages/sas/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 6 | 2022-02-26T11:44:34.000Z | 2022-03-12T12:14:50.000Z | var/spack/repos/builtin/packages/sas/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-06T11:04:37.000Z | 2020-03-06T11:04:37.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sas(CMakePackage):
    """SAS (Static Analysis Suite) is a powerful tool for running static
    analysis on C++ code."""
    homepage = "https://github.com/dpiparo/SAS"
    url = "https://github.com/dpiparo/SAS/archive/0.1.3.tar.gz"
    version('0.2.0', sha256='a369e56f8edc61dbf59ae09dbb11d98bc05fd337c5e47e13af9c913bf7bfc538')
    version('0.1.4', sha256='9b2a3436efe3c8060ee4882f3ed37d848ee79a63d6055a71a23fad6409559f40')
    version('0.1.3', sha256='93c3194bb7d518c215e79436bfb43304683832b3cc66bfc838f6195ce4574943')
    depends_on('python@2.7:')
    depends_on('llvm@3.5:')
    depends_on('cmake@2.8:', type='build')
    def cmake_args(self):
        # Point the build at the LLVM installation Spack resolved.
        args = [
            '-DLLVM_DEV_DIR=%s' % self.spec['llvm'].prefix
        ]
        return args
| 34.103448 | 95 | 0.701719 |
4306dacbafa60704f07c505bdf331f464fa6e8c5 | 742 | py | Python | ct-ep100/v1.0.x/ja/autogen-openapi-generator/python/test/test_alert_target.py | y2kblog/poe-webapi-sensor-api | 7c21c88e4a7f74f7bc09c5d4dfc9ff352a98d458 | [
"MIT"
] | null | null | null | ct-ep100/v1.0.x/ja/autogen-openapi-generator/python/test/test_alert_target.py | y2kblog/poe-webapi-sensor-api | 7c21c88e4a7f74f7bc09c5d4dfc9ff352a98d458 | [
"MIT"
] | null | null | null | ct-ep100/v1.0.x/ja/autogen-openapi-generator/python/test/test_alert_target.py | y2kblog/poe-webapi-sensor-api | 7c21c88e4a7f74f7bc09c5d4dfc9ff352a98d458 | [
"MIT"
] | null | null | null | """
PoE対応 WebAPI CO2センサ API仕様
\"Try it out\"機能は、API仕様を製品と同一ネットワーク上のローカルPCにダウンロードしブラウザで開くことで利用できます。 # noqa: E501
The version of the OpenAPI document: 1.0.x
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ct_ep100_client
from ct_ep100_client.model.alert_target import AlertTarget
class TestAlertTarget(unittest.TestCase):
    """AlertTarget unit test stubs (auto-generated by openapi-generator)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testAlertTarget(self):
        """Test AlertTarget"""
        # FIXME: construct object with mandatory attributes with example values
        # model = AlertTarget() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| 20.611111 | 87 | 0.681941 |
e4331de6a2d9b126a63e55d898f688c2f482d013 | 1,266 | py | Python | python/test/function/test_maximum_scalar.py | tasugi/nnabla | cf54d64cc0448c8ea63b2a8e9a7999963f3c169e | [
"Apache-2.0"
] | 1 | 2018-10-27T11:45:43.000Z | 2018-10-27T11:45:43.000Z | python/test/function/test_maximum_scalar.py | tasugi/nnabla | cf54d64cc0448c8ea63b2a8e9a7999963f3c169e | [
"Apache-2.0"
] | null | null | null | python/test/function/test_maximum_scalar.py | tasugi/nnabla | cf54d64cc0448c8ea63b2a8e9a7999963f3c169e | [
"Apache-2.0"
] | 1 | 2020-08-19T08:32:51.000Z | 2020-08-19T08:32:51.000Z | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
# All contexts (e.g. CPU/CUDA) that implement the MaximumScalar function.
ctxs = list_context('MaximumScalar')
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("val", [0.5, 1, 2])
def test_maximum_scalar_forward_backward(seed, val, ctx, func_name):
    """Check F.maximum_scalar forward/backward against np.maximum as reference."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Scale by 2 so values straddle the tested scalar thresholds.
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    function_tester(rng, F.maximum_scalar, np.maximum, inputs,
                    func_args=[val],
                    ctx=ctx, func_name=func_name)
| 37.235294 | 74 | 0.733807 |
cd0041c512292d83d35013273371751a1ca02944 | 1,963 | py | Python | tests/commands/test_boards.py | ysoyipek/platformio-core | 1b2e410f12bd7770d6415264d750e2fd63f697b7 | [
"Apache-2.0"
] | null | null | null | tests/commands/test_boards.py | ysoyipek/platformio-core | 1b2e410f12bd7770d6415264d750e2fd63f697b7 | [
"Apache-2.0"
] | null | null | null | tests/commands/test_boards.py | ysoyipek/platformio-core | 1b2e410f12bd7770d6415264d750e2fd63f697b7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from platformio.commands.boards import cli as cmd_boards
from platformio.commands.platform import platform_search as cmd_platform_search
def test_board_json_output(clirunner, validate_cliresult):
    """`pio boards mbed --json-output` must emit a JSON list of boards,
    at least one of which supports the mbed framework."""
    cli_result = clirunner.invoke(cmd_boards, ["mbed", "--json-output"])
    validate_cliresult(cli_result)
    payload = json.loads(cli_result.output)
    assert isinstance(payload, list)
    assert any(["mbed" in board['frameworks'] for board in payload])
def test_board_raw_output(clirunner, validate_cliresult):
    """Plain-text board search for 'espidf' must mention espressif32."""
    cli_result = clirunner.invoke(cmd_boards, ["espidf"])
    validate_cliresult(cli_result)
    assert "espressif32" in cli_result.output
def test_board_options(clirunner, validate_cliresult):
    """Every board record must carry the required keys and reference a
    platform that the platform search actually knows about."""
    required_opts = set(
        ["fcpu", "frameworks", "id", "mcu", "name", "platform"])
    # Fetch the set of available platforms first.
    search = clirunner.invoke(cmd_platform_search, ["--json-output"])
    validate_cliresult(search)
    found_platforms = json.loads(search.output)
    assert isinstance(found_platforms, list)
    assert len(found_platforms)
    platform_names = [entry['name'] for entry in found_platforms]
    # Then cross-check every mbed board against them.
    boards_result = clirunner.invoke(cmd_boards, ["mbed", "--json-output"])
    validate_cliresult(boards_result)
    for board in json.loads(boards_result.output):
        assert required_opts <= set(board)
        assert board['platform'] in platform_names
| 36.351852 | 79 | 0.738665 |
2b71591838a076fbbb757011b26ca8ad21672823 | 3,027 | py | Python | woeip/apps/air_quality/models.py | kdoby/woeip | 036d56dd7e031ca40583d4e551871c80adc36be0 | [
"MIT"
] | null | null | null | woeip/apps/air_quality/models.py | kdoby/woeip | 036d56dd7e031ca40583d4e551871c80adc36be0 | [
"MIT"
] | null | null | null | woeip/apps/air_quality/models.py | kdoby/woeip | 036d56dd7e031ca40583d4e551871c80adc36be0 | [
"MIT"
] | null | null | null | import os.path as op
from django.contrib.gis.db.models import LineStringField, PointField
from django.db import models
from django_extensions.db.models import TimeStampedModel
from woeip.apps.core.models import User
class Route(models.Model):
    """A named GIS line-string path."""
    # Human-readable identifier; enforced unique at the DB level.
    name = models.CharField(max_length=256, unique=True)
    # Geometry of the route as an ordered sequence of points.
    path = LineStringField()
    def __str__(self):
        return self.name
class Device(models.Model):
    """A physical measurement device; Sensor rows reference it via FK."""
    name = models.CharField(max_length=256)
    manufacturer = models.CharField(max_length=256)
    serial_number = models.CharField(max_length=256)
    model_number = models.CharField(max_length=256)
    # Date of the device's last calibration.
    calibration_date = models.DateField()
    firmware_version = models.CharField(max_length=256)
    def __str__(self):
        return f"{self.name} {self.model_number} {self.serial_number}"
class Sensor(models.Model):
    """A sensor is something that measures something, i.e., it produces a
    single measurement value at a time.

    A sensor belongs to at most one :class:`Device`; the link is nulled
    when the device is deleted (``on_delete=SET_NULL``).
    """
    name = models.CharField(max_length=256)
    unit_choices = (('mg/m3', 'mg/m3'), ('ppm', 'ppm'), ('g/m3', 'g/m3'), ('PM10', 'PM10'),
                    ('PM2.5', 'PM2.5'),
                    ('μg/m3', 'μg/m3'), ('latlong', 'latitude/longitude'))
    unit = models.CharField(max_length=256, choices=unit_choices,
                            help_text="Measurement unit, e.g., mg/m3, ppm, etc.")
    device = models.ForeignKey(Device, on_delete=models.SET_NULL, blank=True, null=True)
    def __str__(self):
        # Fix: `device` is nullable (SET_NULL / blank=True), so the
        # previous unconditional `self.device.name` raised AttributeError
        # for sensors whose device was deleted or never set.
        device_name = self.device.name if self.device else "no device"
        return f"{self.name} ({device_name})"
class Session(models.Model):
    """A single air quality outing. Can link to several SessionData, e.g., raw data files."""
    date_collected = models.DateTimeField()
    route = models.ForeignKey(Route, on_delete=models.SET_NULL, blank=True, null=True)
    collected_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True)
    def __str__(self):
        # NOTE(review): renders the literal "None" when collected_by is unset.
        return f"{self.date_collected} {self.collected_by}"
class SessionData(TimeStampedModel):
    """The raw data files generated during a session.

    Assumes one general sensor file and one GPS file. Multiple sensors can
    be linked to one session — one SessionData row per sensor, enforced by
    the (sensor, session) unique constraint below.
    """
    sensor_file = models.FileField(upload_to='sensor_files', default="")
    gps_file = models.FileField(upload_to='gps_files', default="")
    sensor = models.ForeignKey(Sensor, on_delete=models.SET_NULL, blank=True, null=True)
    session = models.ForeignKey(Session, on_delete=models.CASCADE, blank=True, null=True)
    uploaded_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True)
    class Meta:
        # At most one raw-file record per sensor per session.
        unique_together = ('sensor', 'session')
    def __str__(self):
        name = op.basename(self.sensor_file.name)
        return name
class Data(models.Model):
    """One air-quality measurement row; all sessions share this table.

    Rows for a single outing can be extracted by filtering on "session".
    """
    # Owning collection session; rows are removed with their session (CASCADE).
    session = models.ForeignKey(Session, on_delete=models.CASCADE)
    value = models.FloatField()
    time = models.DateTimeField()
    # Measurement location as a GIS point.
    latlon = PointField()
10310ec5c8e235cd47457cd317163d7765c2e10d | 2,840 | py | Python | Models.py | shivgahlout/SDCT-AuxNet-theta-DCT-Augmented-Stain-Deconvolutional-CNN-with-Auxiliary-Classifier-for-Cancer | c44536e6a88a90adbe3759da36ec8991db899245 | [
"MIT"
] | 1 | 2021-07-01T23:00:01.000Z | 2021-07-01T23:00:01.000Z | Models.py | shivgahlout/SDCT-AuxNet-theta-DCT-Augmented-Stain-Deconvolutional-CNN-with-Auxiliary-Classifier-for-Cancer | c44536e6a88a90adbe3759da36ec8991db899245 | [
"MIT"
] | null | null | null | Models.py | shivgahlout/SDCT-AuxNet-theta-DCT-Augmented-Stain-Deconvolutional-CNN-with-Auxiliary-Classifier-for-Cancer | c44536e6a88a90adbe3759da36ec8991db899245 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from sd_layer import *
class BasicConv(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    Weights are (re)initialized in __init__ via _initialize_weights().
    """
    def __init__(self, in_f, out_f, kernel_size, stride=1, padding=1):
        super(BasicConv, self).__init__()
        self.conv = nn.Conv2d(in_f, out_f, kernel_size, stride, padding)
        self.bn = nn.BatchNorm2d(out_f)
        self.relu = nn.ReLU()
        self._initialize_weights()
    def forward(self, inputs):
        # Standard conv -> batch-norm -> ReLU pipeline.
        out = self.relu(self.bn(self.conv(inputs)))
        return out
    def _initialize_weights(self):
        # He-style init for conv weights (std = sqrt(2 / fan_out)).
        # NOTE(review): `math` is not among this file's visible imports;
        # presumably re-exported by `from sd_layer import *` — verify.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                # Dead branch here (BasicConv registers no ConvTranspose2d);
                # kept from a shared initialization template.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # Dead branch here for the same reason as above.
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
class network(nn.Module):
    """Stain-deconvolution front end, conv stack, and Gram-matrix
    ("bilinear") pooling feeding a 2-class linear classifier.
    """
    def __init__(self, image_name, gpu_no):
        super(network, self).__init__()
        # Trainable DCT-based stain-deconvolution preprocessing layer.
        self.sd_layer = sd_layer_pytorch_modular_dct_no_threshold_trainable(image_name, gpu_no)
        layers = []
        layers.append(BasicConv(3,16,5,2,1))
        layers.append(BasicConv(16,16,3,1,1))
        layers.append(nn.MaxPool2d(2))
        layers.append(BasicConv(16,32,3,1,1))
        layers.append(nn.MaxPool2d(2))
        layers.append(BasicConv(32,48,3,1,1))
        layers.append(nn.MaxPool2d(2))
        layers.append(BasicConv(48,64,3,1,1))
        layers.append(nn.MaxPool2d(2))
        layers.append(BasicConv(64,64,3,1,1))
        self.layers = nn.Sequential(*layers)
        self.gpu_no = gpu_no
        # Classifier over the flattened 64x64 channel Gram matrix.
        self.linear = nn.Linear(64**2, 2)
        self._initialize_weights()
    def _initialize_weights(self):
        # He-style init for convs, N(0, 0.01) for the linear head.
        # NOTE(review): `math` presumably comes from `from sd_layer import *`.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def forward(self, inputs):
        # Preprocess and move onto the configured GPU.
        out = self.sd_layer(inputs).cuda(self.gpu_no)
        out = self.layers(out)
        # Channel Gram matrix: (B, C, H*W) x (B, H*W, C) -> (B, C, C).
        out_t = out.view(out.shape[0], out.shape[1],-1).transpose(1,2)
        out = out.view(out.shape[0], out.shape[1], -1)
        out = torch.bmm(out,out_t)
        out = out.view(out.shape[0], -1)
        # Signed square root (epsilon for gradient stability), then L2 norm.
        out = torch.sign(out) * torch.sqrt( torch.abs(out) + 1e-5)
        out = torch.nn.functional.normalize(out)
        out = self.linear(out)
        out = F.log_softmax(out, dim=1)
        return out
| 32.272727 | 89 | 0.684859 |
f44445d57009d496ba1c85798bc4501ad6d8eac6 | 536 | py | Python | balsam/service/schedulers/__init__.py | felker/balsam | 2a4d2f217daa6265d31a6574ee0228ebedc6381c | [
"BSD-Source-Code"
] | 29 | 2019-03-26T20:54:59.000Z | 2021-02-12T20:25:35.000Z | balsam/service/schedulers/__init__.py | felker/balsam | 2a4d2f217daa6265d31a6574ee0228ebedc6381c | [
"BSD-Source-Code"
] | 25 | 2019-03-27T18:44:12.000Z | 2021-03-16T18:05:02.000Z | balsam/service/schedulers/__init__.py | felker/balsam | 2a4d2f217daa6265d31a6574ee0228ebedc6381c | [
"BSD-Source-Code"
] | 18 | 2019-08-13T19:43:07.000Z | 2021-02-24T22:48:57.000Z | from django.conf import settings
from importlib import import_module
from balsam.service.schedulers import JobEnvironment, JobTemplate
# Resolve the scheduler backend named in Django settings to the module
# balsam.service.schedulers.<name> and instantiate it at import time.
_schedClass = settings.SCHEDULER_CLASS.strip()
_temp = import_module('balsam.service.schedulers.'+_schedClass)
scheduler = _temp.new_scheduler()
# Job environment bound to the active scheduler instance.
JobEnv = JobEnvironment.JobEnvironment(scheduler)
# Location and name of the batch-job script template.
template_path = settings.BALSAM_HOME
template_name = settings.JOB_TEMPLATE
script_template = JobTemplate.ScriptTemplate(template_path, template_name)
# Public API of this package.
__all__ = ['scheduler', 'JobEnv', 'script_template']
| 35.733333 | 74 | 0.833955 |
5e547915d1a0a164a8e4ad8667109fb32aeff05a | 626 | py | Python | hackathon/migrations/0010_award.py | calixo888/hackcollab | e3250b658420e84b6c106bde3f84523e4347e4df | [
"MIT"
] | null | null | null | hackathon/migrations/0010_award.py | calixo888/hackcollab | e3250b658420e84b6c106bde3f84523e4347e4df | [
"MIT"
] | 7 | 2020-02-12T02:29:34.000Z | 2022-02-10T09:12:06.000Z | hackathon/migrations/0010_award.py | calixo888/hackcollab | e3250b658420e84b6c106bde3f84523e4347e4df | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-12-03 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the hackathon ``Award`` model (title, description, team_id)."""
    dependencies = [
        ('hackathon', '0009_teamsubmission'),
    ]
    operations = [
        migrations.CreateModel(
            name='Award',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('description', models.TextField()),
                # Stored as a plain char field, not a ForeignKey.
                ('team_id', models.CharField(max_length=15)),
            ],
        ),
    ]
| 27.217391 | 114 | 0.567093 |
ade78540040d16ac98b41bc475b91646e8a2798c | 9,165 | py | Python | setup.py | RomRuben/corbel-py | 12cc6c6b202f1d246f5af36253d7c3c03e616d41 | [
"MIT"
] | null | null | null | setup.py | RomRuben/corbel-py | 12cc6c6b202f1d246f5af36253d7c3c03e616d41 | [
"MIT"
] | null | null | null | setup.py | RomRuben/corbel-py | 12cc6c6b202f1d246f5af36253d7c3c03e616d41 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
# Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
def check_output_compat(cmd_args, *args, **kwargs):
    """Minimal backport of ``subprocess.check_output`` for Python 2.6.

    :param cmd_args: command argument list passed to ``Popen``
    :return: captured stdout bytes
    :raises subprocess.CalledProcessError: on a non-zero exit status
    """
    proc = subprocess.Popen(
        cmd_args, *args,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
    out, err = proc.communicate()
    if proc.returncode != 0:
        # Fix: the original raised CalledProcessError(args), passing the
        # wrapper's extra-positional tuple as the return code and omitting
        # the command entirely.  The constructor is (returncode, cmd);
        # the `output` keyword is left out for Python 2.6 compatibility.
        raise subprocess.CalledProcessError(proc.returncode, cmd_args)
    return out

# Install the shim only on interpreters that lack check_output (< 2.7).
if 'check_output' not in dir(subprocess):
    subprocess.check_output = check_output_compat
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
# Optional dependency: colorama enables colored console output on Windows.
try:
    import colorama
    colorama.init()  # Initialize colorama on Windows
except ImportError:
    # Don't require colorama just for running paver tasks. This allows us to
    # run `paver install' without requiring the user to first have colorama
    # installed.
    pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))
# Constants
CODE_DIRECTORY = 'corbel'            # package managed by this setup script
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
#     from corbel import metadata
#
# However, when we do this, we also import `corbel/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
# NOTE(review): `imp` is deprecated (removed in Python 3.12);
# importlib.util.spec_from_file_location is the modern replacement.
metadata = imp.load_source(
    'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
# Miscellaneous helper functions
def get_project_files():
    """Retrieve a list of project files, ignoring hidden files.

    :return: list of project files (sorted when git is available)
    :rtype: :class:`list`
    """
    if is_git_project() and has_git():
        return get_git_project_files()
    project_files = []
    for top, subdirs, files in os.walk('.'):
        # Prune hidden directories in place so os.walk does not descend
        # into them.  Fix: the original removed entries while iterating
        # over the same list, which skips the element following each
        # removed one (two consecutive hidden directories -> the second
        # survived).  Slice assignment mutates the list os.walk holds
        # without that bug.
        subdirs[:] = [d for d in subdirs if not d.startswith('.')]
        for f in files:
            if not f.startswith('.'):
                project_files.append(os.path.join(top, f))
    return project_files
def is_git_project():
    """Return True when the current directory contains a ``.git`` directory."""
    git_metadata_dir = '.git'
    return os.path.isdir(git_metadata_dir)
def has_git():
    """Return True when a ``git`` executable can be found on the PATH."""
    return spawn.find_executable("git") is not None
def get_git_project_files():
    """Retrieve all non-ignored files, including untracked ones, but
    excluding files with uncommitted deletions.

    :return: sorted list of git project files
    :rtype: :class:`list`
    """
    tracked_or_new = git_ls_files(
        '--cached',            # everything recorded in the index
        '--others',            # untracked files
        '--exclude-standard')  # honour .gitignore and friends
    pending_deletes = git_ls_files('--deleted')
    # Sets have arbitrary order; sort so tools like flake8 get a
    # well-defined, stable file order.
    return sorted(tracked_or_new - pending_deletes)
def git_ls_files(*cmd_args):
    """Run ``git ls-files`` with the given extra arguments.

    :return: set of file names (as bytes, one per output line)
    :rtype: :class:`set`
    """
    command = ['git', 'ls-files'] + list(cmd_args)
    return set(subprocess.check_output(command).splitlines())
def print_success_message(message):
    """Print *message* to STDOUT, in green when colorama is available.

    :param message: the message to print
    :type message: :class:`str`
    """
    try:
        import colorama
    except ImportError:
        print(message)
    else:
        print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
def print_failure_message(message):
    """Print *message* to STDERR, in red when colorama is available.

    :param message: the message to print
    :type message: :class:`str`
    """
    try:
        import colorama
    except ImportError:
        print(message, file=sys.stderr)
    else:
        print(colorama.Fore.RED + message + colorama.Fore.RESET,
              file=sys.stderr)
def read(filename):
    """Return the contents of *filename*, resolved relative to this module.

    :param filename: file path
    :type filename: :class:`str`
    :return: the file's content
    :rtype: :class:`str`
    """
    full_path = os.path.join(os.path.dirname(__file__), filename)
    with open(full_path) as handle:
        return handle.read()
def _lint():
    """Run flake8 over the project sources and return its exit code."""
    # flake8 offers no stable in-process API here, so shell out to it.
    # git output is bytes, hence the byte-string suffix match.
    python_sources = [path for path in get_project_files()
                      if path.endswith(b'.py')]
    exit_code = subprocess.call(
        ['flake8',
         '--max-complexity=10',
         '--hang-closing',
         '--format=pylint',
         '--exclude=setup.py,pavement.py,conf.py'] + python_sources)
    if exit_code == 0:
        print_success_message('No style errors')
    return exit_code
def _test():
    """Run the unit tests (plus doctests on modules in TESTS_DIRECTORY).

    :return: exit code
    """
    # Imported lazily so merely loading this module never requires pytest;
    # see pytest's setuptools-integration good practices.
    import pytest
    pytest_args = list(PYTEST_FLAGS)
    pytest_args.append(TESTS_DIRECTORY)
    return pytest.main(pytest_args)
def _test_all():
    """Run lint followed by the test suite.

    :return: combined exit code (0 only when both passed)
    """
    lint_status = _lint()
    test_status = _test()
    return lint_status + test_status
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
    """``python setup.py test`` entry point that runs lint plus tests."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # These are fake, and just set to appease distutils and setuptools.
        self.test_suite = True
        self.test_args = []
    def run_tests(self):
        # Propagate the combined lint+test status as the process exit code.
        raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
    python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
    name=metadata.package,
    version=metadata.version,
    author=metadata.authors[0],
    author_email=metadata.emails[0],
    maintainer=metadata.authors[0],
    maintainer_email=metadata.emails[0],
    url=metadata.url,
    description=metadata.description,
    long_description=read('README.rst'),
    # Find a list of classifiers here:
    # <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
    classifiers=[
        'Development Status :: 1 - Planning',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Documentation',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Software Distribution',
    ],
    packages=find_packages(exclude=(TESTS_DIRECTORY,)),
    install_requires=[
        # your module dependencies
    ] + python_version_specific_requires,
    # Allow tests to be run with `python setup.py test'.
    tests_require=[
        'pytest==2.5.1',
        'mock==1.0.1',
        'flake8==2.1.0',
    ],
    cmdclass={'test': TestAllCommand},
    zip_safe=False,  # don't use eggs
    entry_points={
        'console_scripts': [
            'corbel_cli = corbel.main:entry_point'
        ],
        # if you have a gui, use this
        # 'gui_scripts': [
        #     'corbel_gui = corbel.gui:entry_point'
        # ]
    }
)
def main():
    """Invoke setuptools with the dictionary assembled above."""
    setup(**setup_dict)
if __name__ == '__main__':
    main()
| 31.603448 | 113 | 0.657501 |
aa7e43fed13c706b5a4d1a42e29e2910ed3e9323 | 534 | py | Python | plotly/validators/scattergl/marker/line/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/scattergl/marker/line/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/scattergl/marker/line/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    # Machine-generated validator for `scattergl.marker.line.color`;
    # manual edits here are normally overwritten by the code generator.
    def __init__(
        self,
        plotly_name='color',
        parent_name='scattergl.marker.line',
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=True,  # per-point color arrays are allowed
            edit_type='calc',
            role='style',
            colorscale_path='scattergl.marker.line.colorscale',
            **kwargs
        )
e29e2ef84116672d63fb9fac3d32a892f9056b14 | 355 | py | Python | database.py | sixfwa/fastapi-jwt | 23d1b1d96c56e9cf91265494d4edf77ee505a8b7 | [
"MIT"
] | 1 | 2022-03-26T16:34:10.000Z | 2022-03-26T16:34:10.000Z | database.py | sixfwa/fastapi-jwt | 23d1b1d96c56e9cf91265494d4edf77ee505a8b7 | [
"MIT"
] | null | null | null | database.py | sixfwa/fastapi-jwt | 23d1b1d96c56e9cf91265494d4edf77ee505a8b7 | [
"MIT"
] | null | null | null | import sqlalchemy as _sql
import sqlalchemy.ext.declarative as _declarative
import sqlalchemy.orm as _orm
# SQLite database file stored next to the application.
DATABASE_URL = "sqlite:///./database.db"
# check_same_thread=False lets the SQLite connection be used from threads
# other than the one that created it.
engine = _sql.create_engine(DATABASE_URL, connect_args={"check_same_thread": False})
# Session factory; commits and flushes are explicit.
SessionLocal = _orm.sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Declarative base class that all ORM models inherit from.
Base = _declarative.declarative_base()
6335b8ed8665abc5d24b3d73ebff1697109716cb | 2,313 | py | Python | venv/Lib/site-packages/pygments/lexers/sieve.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 9 | 2019-05-29T23:50:28.000Z | 2021-01-29T20:51:05.000Z | venv/Lib/site-packages/pygments/lexers/sieve.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 5 | 2021-02-27T21:31:47.000Z | 2021-04-05T21:49:38.000Z | venv/Lib/site-packages/pygments/lexers/sieve.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.sieve
~~~~~~~~~~~~~~~~~~~~~
Lexer for Sieve file format.
https://tools.ietf.org/html/rfc5228
https://tools.ietf.org/html/rfc5173
https://tools.ietf.org/html/rfc5229
https://tools.ietf.org/html/rfc5230
https://tools.ietf.org/html/rfc5232
https://tools.ietf.org/html/rfc5235
https://tools.ietf.org/html/rfc5429
https://tools.ietf.org/html/rfc8580
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Literal, String, Text, Punctuation, Keyword
__all__ = ["SieveLexer"]
class SieveLexer(RegexLexer):
    """
    Lexer for sieve format.
    """
    name = 'Sieve'
    filenames = ['*.siv', '*.sieve']
    aliases = ['sieve']
    # Single 'root' state plus a 'text' state for "text:" multiline blocks,
    # which end at a lone '.' at the start of a line (RFC 5228).
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'[();,{}\[\]]', Punctuation),
            # import:
            (r'(?i)require',
             Keyword.Namespace),
            # tags:
            (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|count|days|detail|domain|fcc|flags|from|handle|importance|is|localpart|length|lowerfirst|lower|matches|message|mime|options|over|percent|quotewildcard|raw|regex|specialuse|subject|text|under|upperfirst|upper|value)',
             bygroups(Name.Tag, Name.Tag)),
            # tokens:
            (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|ereject|exists|false|fileinto|if|hasflag|header|keep|notify_method_capability|notify|not|redirect|reject|removeflag|setflag|size|spamtest|stop|string|true|vacation|virustest)',
             Name.Builtin),
            (r'(?i)set',
             Keyword.Declaration),
            # number (optionally with a K/M/G size suffix):
            (r'([0-9.]+)([kmgKMG])?',
             bygroups(Literal.Number, Literal.Number)),
            # comment:
            (r'#.*$',
             Comment.Single),
            (r'/\*.*\*/',
             Comment.Multiline),
            # string:
            (r'"[^"]*?"',
             String),
            # text block:
            (r'text:',
             Name.Tag, 'text'),
        ],
        'text': [
            (r'[^.].*?\n', String),
            (r'^\.', Punctuation, "#pop"),
        ]
    }
| 33.042857 | 293 | 0.555123 |
c5ac393592b434da034303f258c52ed80f7104b0 | 3,899 | py | Python | alipay/aop/api/request/AlipayTradeRefundRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayTradeRefundRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayTradeRefundRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayTradeRefundModel import AlipayTradeRefundModel
class AlipayTradeRefundRequest(object):
    """Request wrapper for the ``alipay.trade.refund`` API method.

    Machine-generated SDK code: holds the request payload (biz model or
    biz content) plus transport-level attributes, and serializes them to
    the parameter dict sent to the gateway via :meth:`get_params`.
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayTradeRefundModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayTradeRefundModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Build the flat parameter dict sent to the gateway.

        Payload is serialized as compact, sorted JSON with non-ASCII
        characters preserved.  NOTE(review): biz_model is stored under the
        P_BIZ_CONTENT constant while biz_content uses the literal
        'biz_content' key — presumably equivalent; verify in ParamConstants.
        """
        params = dict()
        params[P_METHOD] = 'alipay.trade.refund'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this API has none."""
        multipart_params = dict()
        return multipart_params
| 26.889655 | 148 | 0.638625 |
165ec793228e3706f5d2ae6985104660e7099965 | 49 | py | Python | TrainingRestnet18withTinyImagenetDataset/poc/__init__.py | csharpshooter/DeepLearning | c1d20660c32076468970f7376931e1fcd0d2644e | [
"MIT"
] | null | null | null | TrainingRestnet18withTinyImagenetDataset/poc/__init__.py | csharpshooter/DeepLearning | c1d20660c32076468970f7376931e1fcd0d2644e | [
"MIT"
] | null | null | null | TrainingRestnet18withTinyImagenetDataset/poc/__init__.py | csharpshooter/DeepLearning | c1d20660c32076468970f7376931e1fcd0d2644e | [
"MIT"
] | null | null | null | from .gradcamexperiment import GradcamExperiment
| 24.5 | 48 | 0.897959 |
909730ab9952881cfff6ed4415009286c86b65bc | 954 | py | Python | Sorting/HeapSort.py | alstn2468/Python_Data_Structure | 7a092c2b7a3ac5fbc735b51ce8d639ed7c2ecfce | [
"MIT"
] | 2 | 2020-08-10T09:31:13.000Z | 2020-08-11T07:03:18.000Z | Sorting/HeapSort.py | SikSo1897/Python_Data_Structure | 7a092c2b7a3ac5fbc735b51ce8d639ed7c2ecfce | [
"MIT"
] | null | null | null | Sorting/HeapSort.py | SikSo1897/Python_Data_Structure | 7a092c2b7a3ac5fbc735b51ce8d639ed7c2ecfce | [
"MIT"
] | 4 | 2019-03-19T06:59:01.000Z | 2020-09-02T04:38:22.000Z | # HeapSort.py
'''
힙정렬(HeapSort)
O(nlogn)
'''
def heapify(list, n, i):
    """Sift list[i] down so the subtree rooted at i satisfies the
    max-heap property, assuming both child subtrees already do.

    NOTE: the parameter shadows the builtin ``list``; the name is kept
    for backward compatibility with existing callers.
    """
    largest = i
    l = 2 * i + 1  # left child index
    r = 2 * i + 2  # right child index
    if l < n and list[i] < list[l]:
        largest = l
    if r < n and list[largest] < list[r]:
        largest = r
    if largest != i:
        # Swap the larger child up and continue sifting down.
        list[i], list[largest] = list[largest], list[i]
        heapify(list, n, largest)

def HeapSort(list):
    """In-place ascending heap sort. O(n log n) time, O(1) extra space.

    Fix: the heap-building loop previously ran ``range(n, -1, -1)``;
    indices >= n//2 are leaves (and index n is out of range entirely),
    so those iterations were useless no-ops.  Starting from the last
    internal node removes them without changing the result.
    """
    n = len(list)
    for i in range(n // 2 - 1, -1, -1):
        heapify(list, n, i)
    for i in range(n - 1, 0, -1):
        # Move the current maximum to its final slot, shrink the heap,
        # then restore the heap property over the remaining prefix.
        list[i], list[0] = list[0], list[i]
        heapify(list, i, 0)
if __name__ == '__main__' :
    # Declare the demo list to sort.
    # NOTE: the name 'list' shadows the builtin here.
    list = [1, 3, 2, 4, 5, 7, 6, 9, 8]
    # Print the list before sorting.
    print('- Before Sorting -')
    print(list)
    '''
    - Before Sorting -
    [1, 3, 2, 4, 5, 7, 6, 9, 8]
    '''
    HeapSort(list)
    # Print the list after sorting.
    print('- After Sorting -')
    print(list)
    '''
    - After Sorting -
    [1, 2, 3, 4, 5, 6, 7, 8, 9]
    '''
| 17.035714 | 55 | 0.458071 |
3eed15dd52a3de94b43c3dac3334383ceb8e9902 | 1,100 | py | Python | geotrek/core/migrations/0014_auto_20200228_1755.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/core/migrations/0014_auto_20200228_1755.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/core/migrations/0014_auto_20200228_1755.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2020-02-28 16:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make date_insert/date_update auto-managed timestamps on the
    Path and Topology models (auto_now_add / auto_now respectively)."""
    dependencies = [
        ('core', '0013_auto_20200211_1011'),
    ]
    operations = [
        migrations.AlterField(
            model_name='path',
            name='date_insert',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Insertion date'),
        ),
        migrations.AlterField(
            model_name='path',
            name='date_update',
            field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
        ),
        migrations.AlterField(
            model_name='topology',
            name='date_insert',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Insertion date'),
        ),
        migrations.AlterField(
            model_name='topology',
            name='date_update',
            field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
        ),
    ]
| 30.555556 | 97 | 0.610909 |
87cf2c5da49348aafedd64b7e1fa1cff7d55fa0d | 1,096 | py | Python | fluiddyn/io/test/test_txt.py | fluiddyn/fluiddyn | 04d125cb4590da7d5db80dac1d20577ce012a005 | [
"CECILL-B"
] | 20 | 2018-05-07T13:40:13.000Z | 2022-03-30T10:40:42.000Z | fluiddyn/io/test/test_txt.py | fluiddyn/fluiddyn | 04d125cb4590da7d5db80dac1d20577ce012a005 | [
"CECILL-B"
] | null | null | null | fluiddyn/io/test/test_txt.py | fluiddyn/fluiddyn | 04d125cb4590da7d5db80dac1d20577ce012a005 | [
"CECILL-B"
] | 1 | 2018-11-14T20:41:14.000Z | 2018-11-14T20:41:14.000Z | """
Test txt functions
==================
"""
import os
import unittest
from shutil import rmtree
from ..txt import quantities_from_txt_file, save_quantities_in_txt_file
txt_example = """
zekfzlnfk
zklflzefk
1 3. 1
2 5.
3 4. 9
"""
t_example = ([1, 2, 3], [3.0, 5.0, 4.0], [1, 9])
class TestTxt(unittest.TestCase):
"""Test fluiddyn.io.txt module."""
def setUp(self):
self._work_dir = "test_fluiddyn_io_txt"
if not os.path.exists(self._work_dir):
os.mkdir(self._work_dir)
os.chdir(self._work_dir)
self.path_in = "path_in"
with open(self.path_in, "w") as f:
f.write(txt_example)
def tearDown(self):
os.chdir("..")
rmtree(self._work_dir)
def test_txt(self):
t = quantities_from_txt_file(self.path_in)
for i, l_example in enumerate(t_example):
for j, a in enumerate(l_example):
self.assertEqual(a, t[i][j])
t = ([1, 2, 3],) * 3
save_quantities_in_txt_file("path_out", t)
if __name__ == "__main__":
unittest.main()
| 17.967213 | 71 | 0.588504 |
fdf0ee3868b9c195bd53b54fa0cb50f272b4bbd1 | 1,441 | py | Python | Metrics/Show these in Context-Space.py | laerm0/wei-glyphs-scripts | d2e53004805847722f4ba07072fa50400a7cc4d6 | [
"Apache-2.0"
] | 1 | 2020-06-11T00:29:50.000Z | 2020-06-11T00:29:50.000Z | Metrics/Show these in Context-Space.py | laerm0/wei-glyphs-scripts | d2e53004805847722f4ba07072fa50400a7cc4d6 | [
"Apache-2.0"
] | null | null | null | Metrics/Show these in Context-Space.py | laerm0/wei-glyphs-scripts | d2e53004805847722f4ba07072fa50400a7cc4d6 | [
"Apache-2.0"
] | null | null | null | #MenuTitle: Show these in Context (Space separated items)
# -*- coding: utf-8 -*-
__doc__="""
Show selected items, each separated by /space, in spacing context in a new tab.
"""
import GlyphsApp
import kernMakerFunc
reload(kernMakerFunc)
from kernMakerFunc import kernMaker
# Glyphs.clearLog()
Font = Glyphs.font
Doc = Glyphs.currentDocument
selectedLayers = Font.selectedLayers
editString = ""
# Get the name of each selected glyph and insert a '/space\n/space' for new line character instead (/space added to slit this into it's own item)
namesOfSelectedGlyphs = ''.join([ "/%s" % l.parent.name if hasattr(l.parent, 'name') else '/space\n/space' for l in selectedLayers ])
# namesOfSelectedGlyphs = ''.join([ "/%s" % l.parent.name for l in selectedLayers if hasattr(l.parent, 'name')])
originalCharString = ''.join([ "/%s" % l.parent.name if hasattr(l.parent, 'name') else '\n' for l in selectedLayers ])
editList = namesOfSelectedGlyphs.split('/space')
# Removed blank items which were added as a result of filtering out new line characters
editList = filter(None, editList)
print editList
for eachItem in editList:
print eachItem, type(eachItem)
if eachItem == u"\n":
editString += "\n"
else:
editString += kernMaker(eachItem)
editString += "\n"
editString = "{0}\n{1}".format(originalCharString, editString)
# print editString
# callAfter( Doc.windowController().addTabWithString_, editString )
Font.newTab(editString) | 34.309524 | 145 | 0.734906 |
11f0ead1c32c5b8e182ef257932489114b82701e | 8,234 | py | Python | src/_stories/failures.py | gtors/stories | 0614624f472151f20617afa4e6c4a0af9b409b6d | [
"BSD-2-Clause"
] | null | null | null | src/_stories/failures.py | gtors/stories | 0614624f472151f20617afa4e6c4a0af9b409b6d | [
"BSD-2-Clause"
] | null | null | null | src/_stories/failures.py | gtors/stories | 0614624f472151f20617afa4e6c4a0af9b409b6d | [
"BSD-2-Clause"
] | null | null | null | from _stories.compat import Enum
from _stories.compat import EnumMeta
from _stories.exceptions import FailureProtocolError
# Data type.
def check_data_type(failures):
    """Validate a story failure-protocol declaration.

    Accepted protocols are ``None`` (no protocol), an ``Enum`` class, or a
    collection (list/tuple/set/frozenset) of strings.  Anything else raises
    ``FailureProtocolError``.
    """
    if failures is None:
        return
    if isinstance(failures, EnumMeta):
        return
    if isinstance(failures, (list, tuple, set, frozenset)):
        if all(isinstance(failure, str) for failure in failures):
            return
    message = wrong_type_template.format(failures=failures)
    raise FailureProtocolError(message)


def failures_representation(failures):
    """Render a failure protocol as a comma separated string of reprs."""
    if isinstance(failures, EnumMeta):
        return ", ".join(repr(member) for member in failures.__members__.values())
    if isinstance(failures, (list, tuple, set, frozenset)):
        return ", ".join(repr(failure) for failure in failures)
    if failures is None:
        return "None"


def collection_contains(reason, failures):
    # Plain membership test for string-collection protocols.
    return reason in failures


def collection_compare(a, b):
    # String reasons match by equality.
    return a == b


def enumeration_contains(reason, failures):
    # Enum reasons must be Enum members whose name is declared on the protocol.
    return isinstance(reason, Enum) and reason.name in failures.__members__


def enumeration_compare(a, b):
    # Enum members are matched by name rather than identity.
    return a.name == b.name
# Execute.
def make_exec_protocol(failures):
    """Build the execution-time protocol checker for a story's failures."""
    if isinstance(failures, EnumMeta):
        return NotNullExecProtocol(failures, enumeration_contains)
    if isinstance(failures, (list, tuple, set, frozenset)):
        return NotNullExecProtocol(failures, collection_contains)
    if failures is None:
        return NullExecProtocol()


class NullExecProtocol(object):
    """Checker used when a story declares no failure protocol."""

    def check_return_statement(self, method, reason):
        """Without a protocol, ``Failure()`` must carry no reason at all."""
        if not reason:
            return
        message = null_protocol_template.format(
            reason=reason,
            cls=method.__self__.__class__.__name__,
            method=method.__name__,
        )
        raise FailureProtocolError(message)


class DisabledNullExecProtocol(NullExecProtocol):
    """Null-protocol variant for compositions mixing protocol types.

    Bare ``Failure()`` is forbidden here; reasons still go through the
    parent class check.
    """

    def check_return_statement(self, method, reason):
        if not reason:
            message = disabled_null_template.format(
                cls=method.__self__.__class__.__name__, method=method.__name__
            )
            raise FailureProtocolError(message)
        super(DisabledNullExecProtocol, self).check_return_statement(method, reason)


class NotNullExecProtocol(object):
    """Checker used when a story declares explicit failure reasons."""

    def __init__(self, failures, contains_func):
        self.failures = failures
        self.contains_func = contains_func

    def check_return_statement(self, method, reason):
        """``Failure(reason)`` must carry one of the declared reasons."""
        if not reason:
            message = null_reason_template.format(
                available=failures_representation(self.failures),
                cls=method.__self__.__class__.__name__,
                method=method.__name__,
            )
            raise FailureProtocolError(message)
        if self.contains_func(reason, self.failures):
            return
        message = wrong_reason_template.format(
            reason=reason,
            available=failures_representation(self.failures),
            cls=method.__self__.__class__.__name__,
            method=method.__name__,
        )
        raise FailureProtocolError(message)
# Run.
def make_run_protocol(failures, cls_name, method_name):
    """Build the run-time protocol checker for ``cls_name.method_name``."""
    if isinstance(failures, EnumMeta):
        return NotNullRunProtocol(
            cls_name, method_name, failures, enumeration_contains, enumeration_compare
        )
    if isinstance(failures, (list, tuple, set, frozenset)):
        return NotNullRunProtocol(
            cls_name, method_name, failures, collection_contains, collection_compare
        )
    if failures is None:
        return NullRunProtocol(cls_name, method_name)


class NullRunProtocol(object):
    """Run protocol for stories declared without failure reasons."""

    def __init__(self, cls_name, method_name):
        self.cls_name = cls_name
        self.method_name = method_name

    def check_failed_because_argument(self, reason):
        """Always an error: 'failed_because' needs a failure protocol."""
        message = null_summary_template.format(
            cls=self.cls_name, method=self.method_name
        )
        raise FailureProtocolError(message)


class NotNullRunProtocol(object):
    """Run protocol for stories declared with explicit failure reasons."""

    def __init__(self, cls_name, method_name, failures, contains_func, compare_func):
        self.cls_name = cls_name
        self.method_name = method_name
        self.failures = failures
        self.contains_func = contains_func
        self.compare_func = compare_func

    def check_failed_because_argument(self, reason):
        """Reject reasons that are not part of the declared protocol."""
        if self.contains_func(reason, self.failures):
            return
        message = wrong_summary_template.format(
            reason=reason,
            available=failures_representation(self.failures),
            cls=self.cls_name,
            method=self.method_name,
        )
        raise FailureProtocolError(message)

    def compare_failed_because_argument(self, argument, failure_reason):
        """Tell whether *argument* matches the actual failure reason."""
        return self.compare_func(argument, failure_reason)
# Wrap.
def combine_failures(
    first_failures,
    first_cls_name,
    first_method_name,
    second_failures,
    second_cls_name,
    second_method_name,
):
    """Merge the failure protocols of a story and its substory.

    ``None`` acts as an identity element.  Two Enum protocols merge into a
    new Enum carrying the union of member names (first protocol's order and
    name win).  Two collection protocols merge into a list with the second
    protocol's extra reasons appended.  Mixing Enum and collection protocols
    raises ``FailureProtocolError``.
    """
    if first_failures is None:
        return second_failures
    if second_failures is None:
        return first_failures
    if isinstance(first_failures, EnumMeta) and isinstance(second_failures, EnumMeta):
        combined_names = list(first_failures.__members__.keys()) + [
            failure
            for failure in second_failures.__members__.keys()
            if failure not in first_failures.__members__.keys()
        ]
        return Enum(first_failures.__name__, ",".join(combined_names))
    if isinstance(first_failures, (list, tuple, set, frozenset)) and isinstance(
        second_failures, (list, tuple, set, frozenset)
    ):
        # BUG FIX: the original used ``first_failures + [...]``, which raises
        # TypeError when the protocol is a tuple/set/frozenset (those do not
        # concatenate with a list) even though check_data_type admits them.
        # Normalising to a list first keeps list protocols byte-identical and
        # makes the other collection kinds work.
        return list(first_failures) + [
            failure for failure in second_failures if failure not in first_failures
        ]
    message = type_error_template.format(
        cls=first_cls_name,
        method=first_method_name,
        available=failures_representation(first_failures),
        other_cls=second_cls_name,
        other_method=second_method_name,
        other_available=failures_representation(second_failures),
    )
    raise FailureProtocolError(message)
def maybe_disable_null_protocol(methods, reasons):
    """Swap NullExecProtocol entries for the disabled variant.

    When the composition declares failure *reasons*, bare ``Failure()`` is no
    longer acceptable anywhere, so every plain NullExecProtocol is replaced
    by a DisabledNullExecProtocol.  Without reasons the methods list is
    returned untouched.
    """
    if reasons is None:
        return methods
    disabled = DisabledNullExecProtocol()
    result = []
    for method, contract, protocol in methods:
        if type(protocol) is NullExecProtocol:
            protocol = disabled
        result.append((method, contract, protocol))
    return result
# Messages.
# Raised by check_data_type for an unsupported protocol declaration.
wrong_type_template = """
Unexpected type for story failure protocol: {failures!r}
""".strip()

# Failure(reason) used with a reason outside the declared protocol.
wrong_reason_template = """
Failure({reason!r}) failure reason is not allowed by current protocol.
Available failures are: {available}
Function returned value: {cls}.{method}
""".strip()

# Bare Failure() used while an explicit protocol is declared.
null_reason_template = """
Failure() can not be used in a story with failure protocol.
Available failures are: {available}
Function returned value: {cls}.{method}
Use one of them as Failure() argument.
""".strip()

# Failure(reason) used while no protocol is declared.
null_protocol_template = """
Failure({reason!r}) can not be used in a story without failure protocol.
Function returned value: {cls}.{method}
Use 'failures' story method to define failure protocol.
""".strip()

# 'failed_because' called with a reason outside the protocol.
wrong_summary_template = """
'failed_because' method got argument mismatching failure protocol: {reason!r}
Available failures are: {available}
Story returned result: {cls}.{method}
""".strip()

# 'failed_because' called on a story declared without a protocol.
null_summary_template = """
'failed_because' method can not be used with story defined without failure protocol.
Story returned result: {cls}.{method}
Use 'failures' story method to define failure protocol.
""".strip()

# Story and substory protocols use incompatible container kinds.
type_error_template = """
Story and substory failure protocols has incompatible types:
Story method: {cls}.{method}
Story failure protocol: {available}
Substory method: {other_cls}.{other_method}
Substory failure protocol: {other_available}
""".strip()

# Bare Failure() inside a composition that mixed protocol types
# (see DisabledNullExecProtocol).
disabled_null_template = """
Failure() can not be used in a story composition.
Different types of failure protocol were used in parent and substory definitions.
Function returned value: {cls}.{method}
Use 'failures' story method to define failure protocol.
""".strip()
| 28.590278 | 88 | 0.681321 |
e942ca19ebbe7f849985928133e793af9608a3f5 | 1,785 | py | Python | src/datafinder/gui/admin/__init__.py | schlauch/DataFinder | 958fda4f3064f9f6b2034da396a20ac9d9abd52f | [
"BSD-3-Clause"
] | 9 | 2016-05-25T06:12:52.000Z | 2021-04-30T07:22:48.000Z | src/datafinder/gui/admin/__init__.py | schlauch/DataFinder | 958fda4f3064f9f6b2034da396a20ac9d9abd52f | [
"BSD-3-Clause"
] | 6 | 2016-03-29T13:38:18.000Z | 2017-01-18T15:57:42.000Z | src/datafinder/gui/admin/__init__.py | schlauch/DataFinder | 958fda4f3064f9f6b2034da396a20ac9d9abd52f | [
"BSD-3-Clause"
] | 7 | 2016-06-15T12:01:22.000Z | 2022-03-05T08:50:25.000Z | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the administration client.
"""

# Keyword placeholder substituted by the version control system on commit.
__version__ = "$Revision-Id:$"
| 40.568182 | 73 | 0.747339 |
c40e9b0e89dfb18b40f1d5d75b08ed644759708d | 9,409 | py | Python | scripts/deployment/deploy_testnet.py | demmojo/curve-dao-contracts | 6922cd98c7403cc7c6302f5379194c5418c5cb66 | [
"MIT"
] | 1 | 2021-07-22T16:28:56.000Z | 2021-07-22T16:28:56.000Z | scripts/deployment/deploy_testnet.py | demmojo/curve-dao-contracts | 6922cd98c7403cc7c6302f5379194c5418c5cb66 | [
"MIT"
] | null | null | null | scripts/deployment/deploy_testnet.py | demmojo/curve-dao-contracts | 6922cd98c7403cc7c6302f5379194c5418c5cb66 | [
"MIT"
] | 1 | 2021-03-07T15:13:49.000Z | 2021-03-07T15:13:49.000Z | # Testnet deployment script
import json
import time
from brownie import (
ERC20,
ERC20CRV,
ERC20LP,
CurvePool,
CurveRewards,
GaugeController,
LiquidityGauge,
LiquidityGaugeReward,
Minter,
PoolProxy,
VestingEscrow,
VotingEscrow,
accounts,
web3,
)
from web3 import middleware
from web3.gas_strategies.time_based import fast_gas_price_strategy as gas_strategy
USE_STRATEGIES = False  # Needed for the ganache-cli tester which doesn't like middlewares
# Target chain uses proof-of-authority, so inject the geth POA middleware.
POA = True

# Account performing every deployment (must be available to brownie).
DEPLOYER = "0xFD3DeCC0cF498bb9f54786cb65800599De505706"
# Aragon agent that receives ownership of the deployed contracts.
ARAGON_AGENT = "0x22D61abd46F14D40Ca9bF8eDD9445DCF29208589"

# Amount of each token (18 decimals) sent to every distribution address.
DISTRIBUTION_AMOUNT = 10 ** 6 * 10 ** 18
DISTRIBUTION_ADDRESSES = [
    "0x39415255619783A2E71fcF7d8f708A951d92e1b6",
    "0x6cd85bbb9147b86201d882ae1068c67286855211",
]
# Recipients of the vesting escrow funding below.
VESTING_ADDRESSES = ["0x6637e8531d68917f5Ec31D6bA5fc80bDB34d9ef1"]

ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"

# Number of confirmations to wait for on every transaction.
CONFS = 1
def repeat(f, *args, max_attempts=100):
    """
    Call ``f(*args)``, retrying on ``KeyError``.

    Works around geth sporadically not broadcasting a transaction, which
    surfaces as a ``KeyError`` from the client layer.

    BUG FIX: the original retried forever, so a persistent error made the
    deployment hang silently.  Retries are now bounded and the last error is
    re-raised so failures become visible.

    :param f: callable to invoke (typically a brownie deploy/transfer)
    :param args: positional arguments forwarded to ``f``
    :param max_attempts: maximum number of tries before giving up
    :return: whatever ``f`` returns
    :raises KeyError: the last error, if every attempt failed
    """
    if max_attempts < 1:
        raise ValueError("max_attempts must be >= 1")
    last_error = None
    for _ in range(max_attempts):
        try:
            return f(*args)
        except KeyError as exc:
            last_error = exc
    raise last_error
def save_abi(contract, name):
    """Dump *contract*'s ABI as JSON to ``<name>.abi`` in the cwd."""
    out_path = "%s.abi" % name
    with open(out_path, "w") as abi_file:
        json.dump(contract.abi, abi_file)
def deploy_erc20s_and_pool(deployer):
    """Deploy two test ERC20 coins, an LP token and a 2-coin CurvePool.

    Mints test balances, distributes both coins to DISTRIBUTION_ADDRESSES,
    hands pool ownership to the Aragon agent, and returns
    ``[lp_token, coin_a]`` for use by the caller.
    """
    # Two mock stablecoins with large testing balances minted to the deployer.
    coin_a = repeat(ERC20.deploy, "Coin A", "USDA", 18, {"from": deployer, "required_confs": CONFS})
    repeat(
        coin_a._mint_for_testing, 10 ** 9 * 10 ** 18, {"from": deployer, "required_confs": CONFS},
    )
    coin_b = repeat(ERC20.deploy, "Coin B", "USDB", 18, {"from": deployer, "required_confs": CONFS})
    repeat(
        coin_b._mint_for_testing, 10 ** 9 * 10 ** 18, {"from": deployer, "required_confs": CONFS},
    )

    # LP token for the pool; the pool is set as its only minter.
    lp_token = repeat(
        ERC20LP.deploy, "Some pool", "cPool", 18, 0, {"from": deployer, "required_confs": CONFS},
    )
    save_abi(lp_token, "lp_token")

    # CurvePool(coins, lp_token, A=100, fee=4e6).
    pool = repeat(
        CurvePool.deploy,
        [coin_a, coin_b],
        lp_token,
        100,
        4 * 10 ** 6,
        {"from": deployer, "required_confs": CONFS},
    )
    save_abi(pool, "curve_pool")
    repeat(lp_token.set_minter, pool, {"from": deployer, "required_confs": CONFS})

    # registry = repeat(
    #     Registry.deploy, [ZERO_ADDRESS] * 4, {"from": deployer, "required_confs": CONFS}
    # )
    # save_abi(registry, "registry")

    # Seed every distribution address with both coins.
    for account in DISTRIBUTION_ADDRESSES:
        repeat(
            coin_a.transfer,
            account,
            DISTRIBUTION_AMOUNT,
            {"from": deployer, "required_confs": CONFS},
        )
        repeat(
            coin_b.transfer,
            account,
            DISTRIBUTION_AMOUNT,
            {"from": deployer, "required_confs": CONFS},
        )

    # Hand pool ownership to the Aragon agent (two-step transfer).
    repeat(
        pool.commit_transfer_ownership, ARAGON_AGENT, {"from": deployer, "required_confs": CONFS},
    )
    repeat(pool.apply_transfer_ownership, {"from": deployer, "required_confs": CONFS})

    # repeat(
    #     registry.commit_transfer_ownership,
    #     ARAGON_AGENT,
    #     {"from": deployer, "required_confs": CONFS},
    # )
    # repeat(registry.apply_transfer_ownership, {"from": deployer, "required_confs": CONFS})

    return [lp_token, coin_a]
def main():
    """Deploy the full Curve DAO testnet stack.

    Order: web3 middleware setup, mock coins + pool, rewards gauge against a
    hard-coded minter, a second coin/pool set via deploy_erc20s_and_pool, the
    CRV token, voting escrow, gauge controller, minter, gauges, pool proxy and
    vesting escrow; ownership is progressively transferred to ARAGON_AGENT.
    """
    # Optional gas/caching middlewares (disabled for ganache-cli) and POA
    # middleware for the target chain.
    if USE_STRATEGIES:
        web3.eth.setGasPriceStrategy(gas_strategy)
        web3.middleware_onion.add(middleware.time_based_cache_middleware)
        web3.middleware_onion.add(middleware.latest_block_based_cache_middleware)
        web3.middleware_onion.add(middleware.simple_cache_middleware)
    if POA:
        web3.middleware_onion.inject(middleware.geth_poa_middleware, layer=0)

    deployer = accounts.at(DEPLOYER)

    # deploy pools and gauges
    # NOTE(review): this first coin/LP/pool section duplicates what
    # deploy_erc20s_and_pool() does again further down — presumably
    # intentional to get two pools on the testnet; confirm.
    coin_a = repeat(ERC20.deploy, "Coin A", "USDA", 18, {"from": deployer, "required_confs": CONFS})
    repeat(
        coin_a._mint_for_testing, 10 ** 9 * 10 ** 18, {"from": deployer, "required_confs": CONFS},
    )
    coin_b = repeat(ERC20.deploy, "Coin B", "USDB", 18, {"from": deployer, "required_confs": CONFS})
    repeat(
        coin_b._mint_for_testing, 10 ** 9 * 10 ** 18, {"from": deployer, "required_confs": CONFS},
    )
    lp_token = repeat(
        ERC20LP.deploy, "Some pool", "cPool", 18, 0, {"from": deployer, "required_confs": CONFS},
    )
    save_abi(lp_token, "lp_token")
    pool = repeat(
        CurvePool.deploy,
        [coin_a, coin_b],
        lp_token,
        100,
        4 * 10 ** 6,
        {"from": deployer, "required_confs": CONFS},
    )
    save_abi(pool, "curve_pool")
    repeat(lp_token.set_minter, pool, {"from": deployer, "required_confs": CONFS})
    repeat(
        coin_a.transfer,
        "0x6cd85bbb9147b86201d882ae1068c67286855211",
        DISTRIBUTION_AMOUNT,
        {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        coin_b.transfer,
        "0x6cd85bbb9147b86201d882ae1068c67286855211",
        DISTRIBUTION_AMOUNT,
        {"from": deployer, "required_confs": CONFS},
    )

    # Rewards contract funded with coin_a, plus a reward gauge pointing at a
    # hard-coded minter address.  NOTE(review): confirm that address is the
    # intended pre-deployed minter on this testnet.
    contract = repeat(
        CurveRewards.deploy, lp_token, coin_a, {"from": accounts[0], "required_confs": CONFS},
    )
    repeat(
        contract.setRewardDistribution, accounts[0], {"from": accounts[0], "required_confs": CONFS},
    )
    repeat(
        coin_a.transfer, contract, 100e18, {"from": accounts[0], "required_confs": CONFS},
    )
    liquidity_gauge_rewards = repeat(
        LiquidityGaugeReward.deploy,
        lp_token,
        "0xbE45e0E4a72aEbF9D08F93E64701964d2CC4cF96",
        contract,
        coin_a,
        {"from": deployer, "required_confs": CONFS},
    )

    # Second coin/pool set; everything below uses these deployments.
    coins = deploy_erc20s_and_pool(deployer)
    lp_token = coins[0]
    coin_a = coins[1]

    # CRV DAO token and the vote-escrow wrapper, controlled by Aragon.
    token = repeat(
        ERC20CRV.deploy, "Curve DAO Token", "CRV", 18, {"from": deployer, "required_confs": CONFS},
    )
    save_abi(token, "token_crv")
    escrow = repeat(
        VotingEscrow.deploy,
        token,
        "Vote-escrowed CRV",
        "veCRV",
        "veCRV_0.99",
        {"from": deployer, "required_confs": CONFS},
    )
    save_abi(escrow, "voting_escrow")
    repeat(
        escrow.changeController, ARAGON_AGENT, {"from": deployer, "required_confs": CONFS},
    )

    # Distribute CRV to the test accounts.
    for account in DISTRIBUTION_ADDRESSES:
        repeat(
            token.transfer,
            account,
            DISTRIBUTION_AMOUNT,
            {"from": deployer, "required_confs": CONFS},
        )

    # Gauge controller + minter + plain liquidity gauge.
    gauge_controller = repeat(
        GaugeController.deploy, token, escrow, {"from": deployer, "required_confs": CONFS},
    )
    save_abi(gauge_controller, "gauge_controller")
    minter = repeat(
        Minter.deploy, token, gauge_controller, {"from": deployer, "required_confs": CONFS},
    )
    save_abi(minter, "minter")
    liquidity_gauge = repeat(
        LiquidityGauge.deploy, lp_token, minter, {"from": deployer, "required_confs": CONFS},
    )
    save_abi(liquidity_gauge, "liquidity_gauge")

    # Second rewards contract + reward gauge, this time against the freshly
    # deployed minter.
    contract = repeat(
        CurveRewards.deploy, lp_token, coin_a, {"from": accounts[0], "required_confs": CONFS},
    )
    repeat(
        contract.setRewardDistribution, accounts[0], {"from": accounts[0], "required_confs": CONFS},
    )
    repeat(
        coin_a.transfer, contract, 100e18, {"from": accounts[0], "required_confs": CONFS},
    )
    liquidity_gauge_rewards = repeat(
        LiquidityGaugeReward.deploy,
        lp_token,
        minter,
        contract,
        coin_a,
        {"from": deployer, "required_confs": CONFS},
    )

    repeat(token.set_minter, minter, {"from": deployer, "required_confs": CONFS})

    # Register gauge types/weights: type 0 "Liquidity", type 1
    # "LiquidityRewards", each with one gauge at weight 1e18.
    repeat(
        gauge_controller.add_type, b"Liquidity", {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        gauge_controller.change_type_weight,
        0,
        10 ** 18,
        {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        gauge_controller.add_gauge,
        liquidity_gauge,
        0,
        10 ** 18,
        {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        gauge_controller.add_type, b"LiquidityRewards", {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        gauge_controller.change_type_weight,
        1,
        10 ** 18,
        {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        gauge_controller.add_gauge,
        liquidity_gauge_rewards,
        1,
        10 ** 18,
        {"from": deployer, "required_confs": CONFS},
    )

    # Transfer controller and escrow ownership to the Aragon agent.
    repeat(
        gauge_controller.commit_transfer_ownership,
        ARAGON_AGENT,
        {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        gauge_controller.apply_transfer_ownership, {"from": deployer, "required_confs": CONFS},
    )
    repeat(
        escrow.commit_transfer_ownership, ARAGON_AGENT, {"from": deployer, "required_confs": CONFS},
    )
    repeat(escrow.apply_transfer_ownership, {"from": deployer, "required_confs": CONFS})

    repeat(PoolProxy.deploy, {"from": deployer, "required_confs": CONFS})

    # Vesting escrow: starts in ~5 minutes, funded with 1000 CRV for the
    # single vesting address (remaining 9 slots zeroed out).
    vesting = repeat(
        VestingEscrow.deploy,
        token,
        time.time() + 300,
        "1628364267",
        False,
        {"from": deployer, "required_confs": CONFS},
    )
    save_abi(vesting, "vesting")
    repeat(token.approve, vesting, 1000e18, {"from": deployer, "required_confs": CONFS})
    repeat(
        vesting.fund,
        VESTING_ADDRESSES + ["0x0000000000000000000000000000000000000000"] * 9,
        [1000e18] + [0] * 9,
        {"from": deployer, "required_confs": CONFS},
    )
db4a0248e151a8ad8b1394d2cbe5e868ee7116a1 | 94 | py | Python | biovis_media_extension/convertors/__init__.py | go-choppy/choppy-report | f233ba3b2eaaa9af8936b736ede25233a043dde5 | [
"MIT"
] | 1 | 2019-07-02T08:37:30.000Z | 2019-07-02T08:37:30.000Z | biovis_media_extension/convertors/__init__.py | go-choppy/choppy-report | f233ba3b2eaaa9af8936b736ede25233a043dde5 | [
"MIT"
] | null | null | null | biovis_media_extension/convertors/__init__.py | go-choppy/choppy-report | f233ba3b2eaaa9af8936b736ede25233a043dde5 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
# Registry of internal convertors; empty by default and filled in elsewhere.
internal_convertors = dict()
aea969de4f7973681f0ea655b28ca7dbf6e60396 | 2,069 | py | Python | tests/regressiontests/null_fk_ordering/tests.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.5/tests/regressiontests/null_fk_ordering/tests.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.5/tests/regressiontests/null_fk_ordering/tests.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | from __future__ import absolute_import
from django.test import TestCase
from .models import Author, Article, SystemInfo, Forum, Post, Comment
class NullFkOrderingTests(TestCase):
    def test_ordering_across_null_fk(self):
        """
        Regression test for #7512
        ordering across nullable Foreign Keys shouldn't exclude results
        """
        author_1 = Author.objects.create(name='Tom Jones')
        author_2 = Author.objects.create(name='Bob Smith')
        # Unused bindings dropped; only the rows themselves matter here.
        Article.objects.create(title='No author on this article')
        Article.objects.create(author=author_1, title='This article written by Tom Jones')
        Article.objects.create(author=author_2, title='This article written by Bob Smith')

        # We can't compare results directly (since different databases sort NULLs to
        # different ends of the ordering), but we can check that all results are
        # returned.
        # assertEqual (rather than assertTrue on a boolean) reports the actual
        # count on failure.
        self.assertEqual(len(list(Article.objects.all())), 3)

        s = SystemInfo.objects.create(system_name='System Info')
        f = Forum.objects.create(system_info=s, forum_name='First forum')
        p = Post.objects.create(forum=f, title='First Post')
        Comment.objects.create(post=p, comment_text='My first comment')
        Comment.objects.create(comment_text='My second comment')
        s2 = SystemInfo.objects.create(system_name='More System Info')
        f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
        p2 = Post.objects.create(forum=f2, title='Second Post')
        Comment.objects.create(comment_text='Another first comment')
        Comment.objects.create(post=p2, comment_text='Another second comment')

        # We have to test this carefully. Some databases sort NULL values before
        # everything else, some sort them afterwards. So we extract the ordered list
        # and check the length. Before the fix, this list was too short (some values
        # were omitted).
        self.assertEqual(len(list(Comment.objects.all())), 4)
82479bfdafd34ce36b9ce0e1e18d5e62e25e681f | 3,836 | py | Python | tests/test_annotate.py | pwwang/pyppl_annotate | 6eb9a51e3acef2bc3b79b33eec572d6b66b01f9d | [
"MIT"
] | 1 | 2021-09-09T22:12:31.000Z | 2021-09-09T22:12:31.000Z | tests/test_annotate.py | pwwang/pyppl_annotate | 6eb9a51e3acef2bc3b79b33eec572d6b66b01f9d | [
"MIT"
] | null | null | null | tests/test_annotate.py | pwwang/pyppl_annotate | 6eb9a51e3acef2bc3b79b33eec572d6b66b01f9d | [
"MIT"
] | 1 | 2021-09-10T00:16:48.000Z | 2021-09-10T00:16:48.000Z | import pytest
from diot import Diot
import pyppl_annotate as pan
from pyppl.proc import Proc
# Sections may start with an implicit description (case 1) or an explicit
# @description tag (case 2); both must parse to the same dict.
# NOTE(review): indentation inside these triple-quoted fixtures appears to
# have been lost in this copy — confirm against the original test file.
@pytest.mark.parametrize('text,expect', [
    ("""\
desc1
desc2
@section1:
sec1
subsec
sec2
""", dict(description = """\
desc1
desc2
""", section1 = """\
sec1
subsec
sec2
""")),
    ("""\
@description:
desc1
desc2
@section1:
sec1
subsec
sec2
""", dict(description = """\
desc1
desc2
""", section1 = """\
sec1
subsec
sec2
"""))
])
def test_parse_sections(text, expect):
    # _sections_parser splits annotation text into named sections.
    assert pan._sections_parser(text) == expect
# Option lines look like "name (type): desc. Default: value"; untyped options
# get type '' and default None.
@pytest.mark.parametrize('text, expect', [
    ("""\
infile (file): description. Default: 123
- subdesc1
- subdesc2
infile2: someother description
""", {
        'infile': {'type': 'file', 'desc': 'description.\n - subdesc1\n - subdesc2\n', 'default': '123'},
        'infile2': {'type': '', 'desc': 'someother description\n', 'default': None}
    })
])
def test_options_parser(text, expect):
    assert pan._options_parser(text) == expect
def test_options_parser_error():
    """Inputs without 'name:' must raise; a bare 'name:' line must parse."""
    with pytest.raises(ValueError):
        pan._options_parser("""\
abc""")
    # BUG FIX: the original line was a bare comparison
    #   pan._options_parser("""abc: """) == {...}
    # whose result was discarded, so it asserted nothing.  Only the parts we
    # can state with confidence are pinned here; NOTE(review): the originally
    # written expectation (empty default/desc) contradicts
    # test_options_parser, so desc/default are deliberately not asserted.
    parsed = pan._options_parser("""abc: """)
    assert set(parsed) == {'abc'}
    assert parsed['abc']['type'] == ''
    with pytest.raises(ValueError):
        pan._options_parser("a")
@pytest.mark.parametrize('text, proc, expect', [
("infile: abc", Proc(input = 'infile:file'), {'infile': {'type': 'file', 'default': None, 'desc': 'abc\n'}}),
("infile: abc", Proc(input = {'infile:file': [1]}), {'infile': {'type': 'file', 'default': None, 'desc': 'abc\n'}}),
("invar: abc", Proc(input = {'invar': [1]}), {'invar': {'type': 'var', 'default': None, 'desc': 'abc\n'}}),
("", Proc(input = {'invar': [1]}), {'invar': {'type': 'var', 'default': '', 'desc': ''}})
])
def test_input_formatter(text, proc, expect):
assert pan._input_formatter(text, proc) == expect
# Output declarations carry their default in the proc's output spec.
@pytest.mark.parametrize('text,proc,expect', [
    ('outfile (var): a', Proc(output = 'outfile:file:abc'), {'outfile': {'type': 'file', 'default': 'abc', 'desc': 'a\n'}}),
    ('', Proc(output = 'outfile:file:abc'), {'outfile': {'type': 'file', 'default': 'abc', 'desc': ''}}),
])
def test_output_formatter(text, proc, expect):
    assert pan._output_formatter(text, proc) == expect


# Args take their type from the annotation or from the runtime value.
@pytest.mark.parametrize('text,proc,expect', [
    ('params (Diot): ddd', Proc(args = Diot(params = {'a': 1})), {'params': {'type': 'Diot', 'desc': 'ddd\n', 'default': {'a': 1}}}),
    ('', Proc(args = Diot(params = {'a': 1})), {'params': {'type': 'Diot', 'desc': '', 'default': {'a': 1}}}),
    ('params: ddd', Proc(args = Diot(params = {'a': 1})), {'params': {'type': 'Diot', 'desc': 'ddd\n', 'default': {'a': 1}}}),
])
def test_args_formatter(text, proc, expect):
    assert pan._args_formatter(text, proc) == expect


def test_config_formatter():
    # Only config keys mentioned in the annotation text are documented.
    p = Proc()
    p.config.report_template = 'abc'
    assert pan._config_formatter('', p) == {}
    assert pan._config_formatter('report_template:', p) == {'report_template': {'type': 'str', 'desc': '\n', 'default': 'abc'}}


def test_annotate():
    # Annotate exposes the description plus named sections, optionally
    # post-processed by a callable.  NOTE(review): indentation inside this
    # fixture appears lost in this copy — confirm against the original.
    anno = pan.Annotate("""
@description:
desc1
desc2
@sec1:
sec1
- subsec1
""", Proc(id = 'pAnnotate'))
    assert anno.description == "desc1\ndesc2\n"
    assert anno.section('sec') is None
    assert anno.section('sec1') == "sec1\n\t- subsec1\n"
    assert anno.section('sec1', lambda x: x.splitlines()) == ["sec1", "\t- subsec1"]


def test_hook():
    # proc_init parses the raw annotate string into an Annotate object.
    pHook = Proc(config = Diot(annotate = """
abc
"""))
    pan.proc_init(pHook)
    assert pHook.config.annotate.description == 'abc\n'


def test_input():
    # Empty sections yield empty dicts; input picks up the proc's input spec.
    pInput = Proc()
    pan.proc_init(pInput)
    pInput.config.annotate = 'x\n@input:\n@output:\n@config:\n@args:'
    pInput.input = {'a': [1]}
    assert pInput.config.annotate.description == 'x\n'
    assert pInput.config.annotate.input == {'a': {'default': '',
                                                  'desc': '',
                                                  'type': 'var'}}
    assert pInput.config.annotate.output == {}
    assert pInput.config.annotate.config == {}
    assert pInput.config.annotate.args == {}
3bd86ee66f29f779f85aae44cb08b2a151cda0d6 | 30,854 | py | Python | airflow/models/dagrun.py | kevin0120/airflow | fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd | [
"Apache-2.0"
] | 1 | 2021-03-03T07:00:02.000Z | 2021-03-03T07:00:02.000Z | airflow/models/dagrun.py | kevin0120/airflow | fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd | [
"Apache-2.0"
] | 36 | 2021-11-26T00:08:49.000Z | 2021-11-26T00:09:33.000Z | airflow/models/dagrun.py | kevin0120/airflow | fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd | [
"Apache-2.0"
] | 3 | 2020-06-30T02:38:17.000Z | 2022-01-19T06:14:08.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import TYPE_CHECKING, Any, Iterable, List, NamedTuple, Optional, Tuple, Union
from sqlalchemy import (
Boolean,
Column,
DateTime,
Index,
Integer,
PickleType,
String,
UniqueConstraint,
and_,
func,
or_,
)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship, synonym
from sqlalchemy.orm.session import Session
from airflow import settings
from airflow.configuration import conf as airflow_conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models.base import ID_LEN, Base
from airflow.models.taskinstance import TaskInstance as TI
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_states import SCHEDULEABLE_STATES
from airflow.utils import callback_requests, timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime, nulls_first, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
if TYPE_CHECKING:
from airflow.models.dag import DAG
class TISchedulingDecision(NamedTuple):
    """Type of return for DagRun.task_instance_scheduling_decisions"""

    # All task instances that were considered in this decision.
    tis: List[TI]
    # Subset that can be scheduled now.
    schedulable_tis: List[TI]
    # Whether any TI state changed while deciding — NOTE(review): presumably
    # set by task_instance_scheduling_decisions; confirm there.
    changed_tis: bool
    # TIs not yet in a finished state.
    unfinished_tasks: List[TI]
    # TIs already finished.
    finished_tasks: List[TI]
class DagRun(Base, LoggingMixin):
    """
    DagRun describes an instance of a Dag. It can be created
    by the scheduler (for regular runs) or by an external trigger
    """

    __tablename__ = "dag_run"

    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN))
    execution_date = Column(UtcDateTime, default=timezone.utcnow)
    start_date = Column(UtcDateTime, default=timezone.utcnow)
    end_date = Column(UtcDateTime)
    # Backing column for the ``state`` synonym declared further down.
    _state = Column('state', String(50), default=State.RUNNING)
    run_id = Column(String(ID_LEN))
    # ID of the job that created this run, if any.
    creating_job_id = Column(Integer)
    external_trigger = Column(Boolean, default=True)
    run_type = Column(String(50), nullable=False)
    conf = Column(PickleType)
    # When a scheduler last attempted to schedule TIs for this DagRun
    last_scheduling_decision = Column(UtcDateTime)
    # Hash of the DAG at run-creation time — NOTE(review): presumably used to
    # detect serialized-DAG staleness; confirm at call sites.
    dag_hash = Column(String(32))

    # Transient reference to the DAG object; set by callers, not the ORM.
    dag = None

    __table_args__ = (
        Index('dag_id_state', dag_id, _state),
        UniqueConstraint('dag_id', 'execution_date'),
        UniqueConstraint('dag_id', 'run_id'),
        Index('idx_last_scheduling_decision', last_scheduling_decision),
    )

    task_instances = relationship(
        TI,
        primaryjoin=and_(TI.dag_id == dag_id, TI.execution_date == execution_date),  # type: ignore
        foreign_keys=(dag_id, execution_date),
        backref=backref('dag_run', uselist=False),
    )

    # Default cap on DagRuns examined per scheduler loop (configurable via
    # [scheduler] max_dagruns_per_loop_to_schedule).
    DEFAULT_DAGRUNS_TO_EXAMINE = airflow_conf.getint(
        'scheduler',
        'max_dagruns_per_loop_to_schedule',
        fallback=20,
    )
def __init__(
    self,
    dag_id: Optional[str] = None,
    run_id: Optional[str] = None,
    execution_date: Optional[datetime] = None,
    start_date: Optional[datetime] = None,
    external_trigger: Optional[bool] = None,
    conf: Optional[Any] = None,
    state: Optional[str] = None,
    run_type: Optional[str] = None,
    dag_hash: Optional[str] = None,
    creating_job_id: Optional[int] = None,
):
    """Create a DagRun row object (not yet persisted).

    :param dag_id: ID of the DAG this run belongs to
    :param run_id: identifier of this run, unique per DAG
    :param execution_date: logical (schedule) date of the run
    :param start_date: when the run actually started
    :param external_trigger: True when triggered from outside the scheduler
    :param conf: run-level configuration; normalised to an empty dict
    :param state: initial run state
    :param run_type: run type value (e.g. scheduled/manual/backfill)
    :param dag_hash: hash of the DAG at creation time
    :param creating_job_id: ID of the job that created this run
    """
    self.dag_id = dag_id
    self.run_id = run_id
    self.execution_date = execution_date
    self.start_date = start_date
    self.external_trigger = external_trigger
    # Normalise a missing conf to {} so callers can always index it.
    self.conf = conf or {}
    self.state = state
    self.run_type = run_type
    self.dag_hash = dag_hash
    self.creating_job_id = creating_job_id
    super().__init__()
def __repr__(self):
    """Human-readable representation used in logs and the REPL."""
    template = (
        '<DagRun {dag_id} @ {execution_date}: {run_id}, externally triggered: {external_trigger}>'
    )
    return template.format(
        dag_id=self.dag_id,
        execution_date=self.execution_date,
        run_id=self.run_id,
        external_trigger=self.external_trigger,
    )
def get_state(self):
return self._state
def set_state(self, state):
    """Set the run state, stamping ``end_date`` on entry to a finished state."""
    if self._state == state:
        # Redundant set: leave end_date untouched.
        return
    self._state = state
    # A finished state fixes end_date to "now"; any other state clears it.
    self.end_date = timezone.utcnow() if state in State.finished else None
@declared_attr
def state(self):
    """
    SQLAlchemy synonym mapping ``state`` onto the ``_state`` column.

    Note: ``declared_attr`` calls this with the *class*, so ``self`` here is
    the DagRun class and ``self.get_state`` / ``self.set_state`` are the plain
    functions used to build the descriptor's property.
    """
    return synonym('_state', descriptor=property(self.get_state, self.set_state))
@provide_session
def refresh_from_db(self, session: Session = None):
    """
    Reload this dagrun's ``id`` and ``state`` from the database.

    :param session: database session
    :type session: Session
    """
    # Cast both sides of the execution_date comparison to plain DateTime so
    # the lookup matches regardless of backend timestamp precision.
    exec_date = func.cast(self.execution_date, DateTime)
    fresh_run = (
        session.query(DagRun)
        .filter(
            DagRun.dag_id == self.dag_id,
            func.cast(DagRun.execution_date, DateTime) == exec_date,
            DagRun.run_id == self.run_id,
        )
        .one()
    )
    self.id = fresh_run.id
    self.state = fresh_run.state
@classmethod
def next_dagruns_to_examine(
    cls,
    session: Session,
    max_number: Optional[int] = None,
):
    """
    Return the next DagRuns that the scheduler should attempt to schedule.

    This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
    query, you should ensure that any scheduling decisions are made in a single transaction -- as soon as
    the transaction is committed it will be unlocked.

    :param session: SQLAlchemy session used for the locked query
    :param max_number: cap on rows returned; defaults to ``DEFAULT_DAGRUNS_TO_EXAMINE``
    :rtype: list[airflow.models.DagRun]
    """
    from airflow.models.dag import DagModel

    if max_number is None:
        max_number = cls.DEFAULT_DAGRUNS_TO_EXAMINE

    # TODO: Bake this query, it is run _A lot_
    # Running, non-backfill runs of active, unpaused DAGs; runs never examined
    # (NULL last_scheduling_decision) sort first, then oldest decisions.
    query = (
        session.query(cls)
        .filter(cls.state == State.RUNNING, cls.run_type != DagRunType.BACKFILL_JOB)
        .join(
            DagModel,
            DagModel.dag_id == cls.dag_id,
        )
        .filter(
            DagModel.is_paused.is_(False),
            DagModel.is_active.is_(True),
        )
        .order_by(
            nulls_first(cls.last_scheduling_decision, session=session),
            cls.execution_date,
        )
    )

    if not settings.ALLOW_FUTURE_EXEC_DATES:
        query = query.filter(DagRun.execution_date <= func.now())

    # Row-lock the selected rows; skip_locked lets a concurrent scheduler skip
    # rows another scheduler already holds instead of blocking on them.
    return with_row_locks(
        query.limit(max_number), of=cls, session=session, **skip_locked(session=session)
    )
@staticmethod
@provide_session
def find(
    dag_id: Optional[Union[str, List[str]]] = None,
    run_id: Optional[str] = None,
    execution_date: Optional[Union[datetime, List[datetime]]] = None,
    state: Optional[str] = None,
    external_trigger: Optional[bool] = None,
    no_backfills: bool = False,
    run_type: Optional[DagRunType] = None,
    session: Session = None,
    execution_start_date: Optional[datetime] = None,
    execution_end_date: Optional[datetime] = None,
) -> List["DagRun"]:
    """
    Returns a set of dag runs for the given search criteria.

    :param dag_id: the dag_id or list of dag_id to find dag runs for
    :type dag_id: str or list[str]
    :param run_id: defines the run id for this dag run
    :type run_id: str
    :param run_type: type of DagRun
    :type run_type: airflow.utils.types.DagRunType
    :param execution_date: the execution date
    :type execution_date: datetime.datetime or list[datetime.datetime]
    :param state: the state of the dag run
    :type state: str
    :param external_trigger: whether this dag run is externally triggered
    :type external_trigger: bool
    :param no_backfills: return no backfills (True), return all (False).
        Defaults to False
    :type no_backfills: bool
    :param session: database session
    :type session: sqlalchemy.orm.session.Session
    :param execution_start_date: dag run that was executed from this date
    :type execution_start_date: datetime.datetime
    :param execution_end_date: dag run that was executed until this date
    :type execution_end_date: datetime.datetime
    """
    DR = DagRun

    # Each filter below is applied only when its criterion was supplied.
    qry = session.query(DR)
    dag_ids = [dag_id] if isinstance(dag_id, str) else dag_id
    if dag_ids:
        qry = qry.filter(DR.dag_id.in_(dag_ids))
    if run_id:
        qry = qry.filter(DR.run_id == run_id)
    if execution_date:
        # Accepts either a single datetime or a list of datetimes.
        if isinstance(execution_date, list):
            qry = qry.filter(DR.execution_date.in_(execution_date))
        else:
            qry = qry.filter(DR.execution_date == execution_date)
    if execution_start_date and execution_end_date:
        qry = qry.filter(DR.execution_date.between(execution_start_date, execution_end_date))
    elif execution_start_date:
        qry = qry.filter(DR.execution_date >= execution_start_date)
    elif execution_end_date:
        qry = qry.filter(DR.execution_date <= execution_end_date)
    if state:
        qry = qry.filter(DR.state == state)
    # ``is not None`` (not truthiness): external_trigger=False is a real filter.
    if external_trigger is not None:
        qry = qry.filter(DR.external_trigger == external_trigger)
    if run_type:
        qry = qry.filter(DR.run_type == run_type)
    if no_backfills:
        qry = qry.filter(DR.run_type != DagRunType.BACKFILL_JOB)

    return qry.order_by(DR.execution_date).all()
@staticmethod
def generate_run_id(run_type: DagRunType, execution_date: datetime) -> str:
    """Generate Run ID based on Run Type and Execution Date"""
    # e.g. "scheduled__2021-01-01T00:00:00+00:00"
    return "{}__{}".format(run_type, execution_date.isoformat())
@provide_session
def get_task_instances(self, state=None, session=None) -> Iterable[TI]:
    """
    Return the task instances for this dag run.

    :param state: optional single state or collection of states to filter by;
        the collection may contain ``None`` to match TIs with a NULL state
    :param session: SQLAlchemy ORM session
    """
    tis = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.execution_date == self.execution_date,
    )

    if state:
        if isinstance(state, str):
            tis = tis.filter(TI.state == state)
        else:
            # this is required to deal with NULL values
            # (SQL ``IN`` never matches NULL, so None needs IS NULL handling)
            if None in state:
                if all(x is None for x in state):
                    tis = tis.filter(TI.state.is_(None))
                else:
                    not_none_state = [s for s in state if s]
                    tis = tis.filter(or_(TI.state.in_(not_none_state), TI.state.is_(None)))
            else:
                tis = tis.filter(TI.state.in_(state))

    # For a partial DAG, only return TIs of tasks that belong to the subset.
    if self.dag and self.dag.partial:
        tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
    return tis.all()
@provide_session
def get_task_instance(self, task_id: str, session: Session = None) -> Optional[TI]:
    """
    Return the task instance with the given ``task_id`` for this dag run,
    or ``None`` if it does not exist.

    :param task_id: the task id
    :type task_id: str
    :param session: Sqlalchemy ORM Session
    :type session: Session
    """
    query = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.execution_date == self.execution_date,
        TI.task_id == task_id,
    )
    return query.first()
def get_dag(self) -> "DAG":
    """
    Return the DAG associated with this DagRun.

    :raises AirflowException: if no DAG object was attached via ``self.dag``
    :return: DAG
    """
    if self.dag:
        return self.dag
    raise AirflowException(f"The DAG (.dag) for {self} needs to be set")
@provide_session
def get_previous_dagrun(self, state: Optional[str] = None, session: Session = None) -> Optional['DagRun']:
    """The previous DagRun, if there is one"""
    query = session.query(DagRun).filter(
        DagRun.dag_id == self.dag_id,
        DagRun.execution_date < self.execution_date,
    )
    if state is not None:
        query = query.filter(DagRun.state == state)
    # Most recent run strictly before this one.
    return query.order_by(DagRun.execution_date.desc()).first()
@provide_session
def get_previous_scheduled_dagrun(self, session: Session = None) -> Optional['DagRun']:
    """The previous, SCHEDULED DagRun, if there is one"""
    # Ask the DAG for the schedule slot immediately before this run's
    # execution date, then look up the run occupying that slot.
    previous_schedule = self.get_dag().previous_schedule(self.execution_date)
    query = session.query(DagRun).filter(
        DagRun.dag_id == self.dag_id,
        DagRun.execution_date == previous_schedule,
    )
    return query.first()
@provide_session
def update_state(
    self, session: Session = None, execute_callbacks: bool = True
) -> Tuple[List[TI], Optional[callback_requests.DagCallbackRequest]]:
    """
    Determines the overall state of the DagRun based on the state
    of its TaskInstances.

    :param session: Sqlalchemy ORM Session
    :type session: Session
    :param execute_callbacks: Should dag callbacks (success/failure, SLA etc) be invoked
        directly (default: true) or recorded as a pending request in the ``callback`` property
    :type execute_callbacks: bool
    :return: Tuple containing tis that can be scheduled in the current loop & `callback` that
        needs to be executed
    """
    # Callback to execute in case of Task Failures
    callback: Optional[callback_requests.DagCallbackRequest] = None

    start_dttm = timezone.utcnow()
    self.last_scheduling_decision = start_dttm

    with Stats.timer(f"dagrun.dependency-check.{self.dag_id}"):
        dag = self.get_dag()
        info = self.task_instance_scheduling_decisions(session)

        tis = info.tis
        schedulable_tis = info.schedulable_tis
        changed_tis = info.changed_tis
        finished_tasks = info.finished_tasks
        unfinished_tasks = info.unfinished_tasks

        none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
        none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks)

        if unfinished_tasks and none_depends_on_past and none_task_concurrency:
            # small speed up
            # NOTE: are_runnable_tasks is only bound under this guard; the
            # deadlock branch below re-checks the same conditions before
            # reading it, so it is never referenced unbound.
            are_runnable_tasks = (
                schedulable_tis
                or self._are_premature_tis(unfinished_tasks, finished_tasks, session)
                or changed_tis
            )

    # The run's terminal state is decided by its leaf tasks only.
    leaf_task_ids = {t.task_id for t in dag.leaves}
    leaf_tis = [ti for ti in tis if ti.task_id in leaf_task_ids]

    # if all roots finished and at least one failed, the run failed
    if not unfinished_tasks and any(leaf_ti.state in State.failed_states for leaf_ti in leaf_tis):
        self.log.error('Marking run %s failed', self)
        self.set_state(State.FAILED)
        if execute_callbacks:
            dag.handle_callback(self, success=False, reason='task_failure', session=session)
        elif dag.has_on_failure_callback:
            # Defer the callback: record a request for the caller to execute.
            callback = callback_requests.DagCallbackRequest(
                full_filepath=dag.fileloc,
                dag_id=self.dag_id,
                execution_date=self.execution_date,
                is_failure_callback=True,
                msg='task_failure',
            )

    # if all leaves succeeded and no unfinished tasks, the run succeeded
    elif not unfinished_tasks and all(leaf_ti.state in State.success_states for leaf_ti in leaf_tis):
        self.log.info('Marking run %s successful', self)
        self.set_state(State.SUCCESS)
        if execute_callbacks:
            dag.handle_callback(self, success=True, reason='success', session=session)
        elif dag.has_on_success_callback:
            callback = callback_requests.DagCallbackRequest(
                full_filepath=dag.fileloc,
                dag_id=self.dag_id,
                execution_date=self.execution_date,
                is_failure_callback=False,
                msg='success',
            )

    # if *all tasks* are deadlocked, the run failed
    elif unfinished_tasks and none_depends_on_past and none_task_concurrency and not are_runnable_tasks:
        self.log.error('Deadlock; marking run %s failed', self)
        self.set_state(State.FAILED)
        if execute_callbacks:
            dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session)
        elif dag.has_on_failure_callback:
            callback = callback_requests.DagCallbackRequest(
                full_filepath=dag.fileloc,
                dag_id=self.dag_id,
                execution_date=self.execution_date,
                is_failure_callback=True,
                msg='all_tasks_deadlocked',
            )

    # finally, if the roots aren't done, the dag is still running
    else:
        self.set_state(State.RUNNING)

    # Both metric helpers no-op while the run is still RUNNING, so these
    # calls only emit once a terminal state was just reached.
    self._emit_true_scheduling_delay_stats_for_finished_state(finished_tasks)
    self._emit_duration_stats_for_finished_state()

    session.merge(self)

    return schedulable_tis, callback
@provide_session
def task_instance_scheduling_decisions(self, session: Session = None) -> TISchedulingDecision:
    """
    Inspect this run's task instances and work out which are schedulable.

    Side effect: TIs whose task no longer exists in the DAG are marked
    REMOVED (and the change flushed) while being examined.

    :param session: Sqlalchemy ORM Session
    :return: TISchedulingDecision bundling all / schedulable / changed /
        unfinished / finished task instances
    """
    schedulable_tis: List[TI] = []
    changed_tis = False

    # Include SHUTDOWN on top of the regular task states.
    tis = list(self.get_task_instances(session=session, state=State.task_states + (State.SHUTDOWN,)))
    self.log.debug("number of tis tasks for %s: %s task(s)", self, len(tis))
    for ti in tis:
        try:
            # Attach the task object; TIs need it for dependency checks below.
            ti.task = self.get_dag().get_task(ti.task_id)
        except TaskNotFound:
            self.log.warning(
                "Failed to get task '%s' for dag '%s'. Marking it as removed.", ti, ti.dag_id
            )
            ti.state = State.REMOVED
            session.flush()

    unfinished_tasks = [t for t in tis if t.state in State.unfinished]
    finished_tasks = [t for t in tis if t.state in State.finished]
    if unfinished_tasks:
        scheduleable_tasks = [ut for ut in unfinished_tasks if ut.state in SCHEDULEABLE_STATES]
        self.log.debug("number of scheduleable tasks for %s: %s task(s)", self, len(scheduleable_tasks))
        schedulable_tis, changed_tis = self._get_ready_tis(scheduleable_tasks, finished_tasks, session)

    return TISchedulingDecision(
        tis=tis,
        schedulable_tis=schedulable_tis,
        changed_tis=changed_tis,
        unfinished_tasks=unfinished_tasks,
        finished_tasks=finished_tasks,
    )
def _get_ready_tis(
    self,
    scheduleable_tasks: List[TI],
    finished_tasks: List[TI],
    session: Session,
) -> Tuple[List[TI], bool]:
    """
    Filter ``scheduleable_tasks`` down to those whose dependencies are met.

    :return: (ready task instances, whether any not-ready TI changed state
        during the dependency check)
    """
    old_states = {}
    ready_tis: List[TI] = []
    changed_tis = False

    if not scheduleable_tasks:
        return ready_tis, changed_tis

    # Check dependencies
    for st in scheduleable_tasks:
        old_state = st.state
        if st.are_dependencies_met(
            dep_context=DepContext(flag_upstream_failed=True, finished_tasks=finished_tasks),
            session=session,
        ):
            ready_tis.append(st)
        else:
            # Remember the pre-check state of not-ready TIs so we can detect
            # whether the dependency check itself mutated them (e.g. via
            # flag_upstream_failed).
            old_states[st.key] = old_state

    # Check if any ti changed state
    tis_filter = TI.filter_for_tis(old_states.keys())
    if tis_filter is not None:
        # Re-read from the DB and compare against the remembered states.
        fresh_tis = session.query(TI).filter(tis_filter).all()
        changed_tis = any(ti.state != old_states[ti.key] for ti in fresh_tis)

    return ready_tis, changed_tis
def _are_premature_tis(
    self,
    unfinished_tasks: List[TI],
    finished_tasks: List[TI],
    session: Session,
) -> bool:
    """
    Return True if any unfinished task would be runnable once its retry or
    reschedule waiting period is ignored, i.e. it is merely "premature"
    (waiting out a timer) rather than truly blocked.
    """
    # there might be runnable tasks that are up for retry and for some reason(retry delay, etc) are
    # not ready yet so we set the flags to count them in
    # any() short-circuits on the first premature TI, like an early return.
    return any(
        ut.are_dependencies_met(
            dep_context=DepContext(
                flag_upstream_failed=True,
                ignore_in_retry_period=True,
                ignore_in_reschedule_period=True,
                finished_tasks=finished_tasks,
            ),
            session=session,
        )
        for ut in unfinished_tasks
    )
def _emit_true_scheduling_delay_stats_for_finished_state(self, finished_tis):
    """
    Emit the true scheduling delay stat: the time between the expected start
    of this DagRun (``dag.following_schedule(execution_date)``) and the moment
    its first task instance actually started.

    Called from ``update_state`` when the run reaches a completed status
    (success or failure).  The emitted data may contain outliers (e.g. when
    the first task was cleared, so the second task's start_date is used), but
    those can be filtered on the stats side through the dashboards tooling.

    Note, the stat will only be emitted if the DagRun is a scheduler triggered
    one (i.e. external_trigger is False).

    :param finished_tis: finished task instances of this run; the earliest
        ``start_date`` among them is taken as the actual start
    """
    if self.state == State.RUNNING:
        return
    if self.external_trigger:
        return
    if not finished_tis:
        return

    try:
        dag = self.get_dag()

        # Without a following schedule there is no expected start to compare to.
        # (Was inconsistently reading self.dag here; use the checked local.)
        if not dag.schedule_interval or dag.schedule_interval == "@once":
            return

        # Earliest actually-started TI; min() instead of a full sort, and
        # guard against *no* TI having a start_date (previously an IndexError
        # swallowed by the broad except below).
        first_start_date = min(
            (ti.start_date for ti in finished_tis if ti.start_date), default=None
        )
        if first_start_date is None:
            return

        # dag.following_schedule calculates the expected start datetime for a
        # scheduled dagrun: a daily flow for execution date 1/1/20 actually
        # runs on 1/2/20 hh:mm:ss, which is what ti.start_date compares to.
        true_delay = first_start_date - dag.following_schedule(self.execution_date)
        if true_delay.total_seconds() > 0:
            Stats.timing(f'dagrun.{dag.dag_id}.first_task_scheduling_delay', true_delay)
    except Exception:
        # Metrics are best-effort: never let stats emission break scheduling.
        # log.exception keeps the traceback (the old f-string warning lost it).
        self.log.exception('Failed to record first_task_scheduling_delay metric')
def _emit_duration_stats_for_finished_state(self):
    """Emit a dagrun duration timing metric once the run is SUCCESS or FAILED."""
    if self.state == State.RUNNING:
        return
    if self.start_date is None:
        self.log.warning('Failed to record duration of %s: start_date is not set.', self)
        return
    if self.end_date is None:
        self.log.warning('Failed to record duration of %s: end_date is not set.', self)
        return

    duration = self.end_date - self.start_date
    # Map terminal state to its metric name; other states emit nothing.
    metric_by_state = {
        State.SUCCESS: f'dagrun.duration.success.{self.dag_id}',
        State.FAILED: f'dagrun.duration.failed.{self.dag_id}',
    }
    metric = metric_by_state.get(self.state)
    if metric is not None:
        Stats.timing(metric, duration)
@provide_session
def verify_integrity(self, session: Session = None):
    """
    Verifies the DagRun by checking for removed tasks or tasks that are not in the
    database yet. It will set state to removed or add the task if required.

    :param session: Sqlalchemy ORM Session
    :type session: Session
    """
    from airflow.settings import task_instance_mutation_hook

    dag = self.get_dag()
    tis = self.get_task_instances(session=session)

    # check for removed or restored tasks
    task_ids = set()
    for ti in tis:
        # Give the deployment's mutation hook a chance to adjust each TI.
        task_instance_mutation_hook(ti)
        task_ids.add(ti.task_id)
        task = None
        try:
            task = dag.get_task(ti.task_id)
        except AirflowException:
            if ti.state == State.REMOVED:
                pass  # ti has already been removed, just ignore it
            elif self.state != State.RUNNING and not dag.partial:
                # Only mark REMOVED once the run is no longer running and the
                # DAG is complete (not a partial subset of tasks).
                self.log.warning("Failed to get task '%s' for dag '%s'. Marking it as removed.", ti, dag)
                Stats.incr(f"task_removed_from_dag.{dag.dag_id}", 1, 1)
                ti.state = State.REMOVED

        # A task that reappeared in the DAG after its TI was marked REMOVED
        # is reset to NONE so it can be scheduled again.
        should_restore_task = (task is not None) and ti.state == State.REMOVED
        if should_restore_task:
            self.log.info("Restoring task '%s' which was previously removed from DAG '%s'", ti, dag)
            Stats.incr(f"task_restored_to_dag.{dag.dag_id}", 1, 1)
            ti.state = State.NONE
        session.merge(ti)

    # check for missing tasks
    for task in dag.task_dict.values():
        # Skip tasks that have not started yet, except for backfill runs.
        if task.start_date > self.execution_date and not self.is_backfill:
            continue

        if task.task_id not in task_ids:
            Stats.incr(f"task_instance_created-{task.task_type}", 1, 1)
            ti = TI(task, self.execution_date)
            task_instance_mutation_hook(ti)
            session.add(ti)

    try:
        session.flush()
    except IntegrityError as err:
        # Flush can hit duplicates (presumably TIs created concurrently by
        # another process); log and roll back instead of failing the loop.
        self.log.info(str(err))
        self.log.info(
            'Hit IntegrityError while creating the TIs for ' f'{dag.dag_id} - {self.execution_date}.'
        )
        self.log.info('Doing session rollback.')
        # TODO[HA]: We probably need to savepoint this so we can keep the transaction alive.
        session.rollback()
@staticmethod
def get_run(session: Session, dag_id: str, execution_date: datetime) -> Optional['DagRun']:
    """
    Get a single DAG Run.

    Note: the query filters on ``external_trigger == False``, so only
    scheduler-created runs are ever returned by this method.

    :param session: Sqlalchemy ORM Session
    :type session: Session
    :param dag_id: DAG ID
    :type dag_id: unicode
    :param execution_date: execution date
    :type execution_date: datetime
    :return: DagRun corresponding to the given dag_id and execution date
        if one exists. None otherwise.
    :rtype: airflow.models.DagRun
    """
    return (
        session.query(DagRun)
        .filter(
            DagRun.dag_id == dag_id,
            DagRun.external_trigger == False,  # noqa
            DagRun.execution_date == execution_date,
        )
        .first()
    )
@property
def is_backfill(self) -> bool:
    """Whether this run was created as a backfill (``run_type == BACKFILL_JOB``)."""
    return self.run_type == DagRunType.BACKFILL_JOB
@classmethod
@provide_session
def get_latest_runs(cls, session=None) -> List['DagRun']:
    """Returns the latest DagRun for each DAG"""
    # Subquery: newest execution_date per dag_id.
    latest_run_dates = (
        session.query(cls.dag_id, func.max(cls.execution_date).label('execution_date'))
        .group_by(cls.dag_id)
        .subquery()
    )
    # Join back to fetch the full row for each (dag_id, max execution_date).
    join_condition = and_(
        cls.dag_id == latest_run_dates.c.dag_id,
        cls.execution_date == latest_run_dates.c.execution_date,
    )
    return session.query(cls).join(latest_run_dates, join_condition).all()
@provide_session
def schedule_tis(self, schedulable_tis: Iterable[TI], session: Session = None) -> int:
    """
    Set the given task instances in to the scheduled state.

    Each element of ``schedulable_tis`` should have it's ``task`` attribute already set.

    Any DummyOperator without callbacks is instead set straight to the success state.

    All the TIs should belong to this DagRun, but this code is in the hot-path, this is not checked -- it
    is the caller's responsibility to call this function only with TIs from a single dag run.

    :param schedulable_tis: task instances ready to be scheduled
    :param session: Sqlalchemy ORM Session
    :return: number of task-instance rows updated
    """
    # Get list of TI IDs that do not need to executed, these are
    # tasks using DummyOperator and without on_execute_callback / on_success_callback
    dummy_ti_ids = []
    schedulable_ti_ids = []
    for ti in schedulable_tis:
        if (
            ti.task.inherits_from_dummy_operator
            and not ti.task.on_execute_callback
            and not ti.task.on_success_callback
        ):
            dummy_ti_ids.append(ti.task_id)
        else:
            schedulable_ti_ids.append(ti.task_id)

    count = 0

    if schedulable_ti_ids:
        # Bulk UPDATE to SCHEDULED; synchronize_session=False skips syncing
        # in-memory objects, which is safe here and faster on the hot path.
        count += (
            session.query(TI)
            .filter(
                TI.dag_id == self.dag_id,
                TI.execution_date == self.execution_date,
                TI.task_id.in_(schedulable_ti_ids),
            )
            .update({TI.state: State.SCHEDULED}, synchronize_session=False)
        )

    # Tasks using DummyOperator should not be executed, mark them as success
    if dummy_ti_ids:
        count += (
            session.query(TI)
            .filter(
                TI.dag_id == self.dag_id,
                TI.execution_date == self.execution_date,
                TI.task_id.in_(dummy_ti_ids),
            )
            .update(
                {
                    TI.state: State.SUCCESS,
                    TI.start_date: timezone.utcnow(),
                    TI.end_date: timezone.utcnow(),
                    TI.duration: 0,
                },
                synchronize_session=False,
            )
        )

    return count
| 38.957071 | 110 | 0.613891 |
e0359a24992e4416ad0d17d3a9d4364fc129ed6f | 16,642 | py | Python | oscar_ecomenv/Lib/site-packages/faker/providers/job/fr_FR/__init__.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | oscar_ecomenv/Lib/site-packages/faker/providers/job/fr_FR/__init__.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | 6 | 2020-06-05T18:44:19.000Z | 2022-01-13T00:48:56.000Z | oscar_ecomenv/Lib/site-packages/faker/providers/job/fr_FR/__init__.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as BaseProvider
# Source: ONISEP
# data.gouv.fr/s/resources/liste-des-metiers-onisep/20150112-173428/metier.csv
class Provider(BaseProvider):
jobs=["Accessoiriste",
"Accompagnateur de moyenne montagne",
"Accompagnateur de tourisme équestre",
"Accompagnateur de voyages",
"Acheteur",
"Acheteur d'espaces publicitaires",
"Acousticien",
"Actuaire",
"Adjoint",
"Administrateur de base de données",
"Administrateur de biens",
"Administrateur de logiciels de laboratoire",
"Administrateur de mission humanitaire",
"Administrateur de réseau",
"Administrateur de spectacle",
"Administrateur judiciaire ",
"Agenceur de cuisines et salles de bains",
"Agent",
"Agent artistique",
"Agent d'assurances ",
"Agent de développement des énergies renouvelables",
"Agent de développement local",
"Agent de développement touristique",
"Agent de la surveillance SNCF",
"Agent de propreté et d'hygiène",
"Agent de propreté urbaine",
"Agent de sécurité",
"Agent de service commercial train",
"Agent des méthodes",
"Agent de transit",
"Agriculteur",
"Agronome",
"Aide médico-psychologique",
"Aide-soignant",
"Ajusteur",
"Ambulancier",
"Analyste de crédit",
"Analyste financier",
"Anatomiste",
"Anesthésiste-réanimateur",
"Animalier de laboratoire",
"Animateur",
"Animateur 2D et 3D",
"Animateur d'activités physiques et sportives",
"Animateur de bassin versant",
"Animateur de radio et de télévision",
"Animateur du patrimoine",
"Animateur nature",
"Antiquaire",
"Apiculteur",
"Aquaculteur",
"Arboriculteur",
"Architecte",
"Architecte d'intérieur",
"Architecte naval ",
"Architecte produit industriel",
"Architecte réseau",
"Architecte web",
"Archiviste",
"Archéologue",
"Aérodynamicien",
"Aromaticien",
"Artiste de cirque",
"Ascensoriste",
"Assistant",
"Assistant",
"Assistant",
"Assistant de gestion en PME",
"Assistant dentaire",
"Assistant de service social",
"Assistant en ressources humaines",
"Astrophysicien",
"Attaché",
"Attaché",
"Attaché d'administration",
"Attaché de presse",
"Attaché de recherche clinique",
"Audioprothésiste",
"Auditeur externe",
"Auditeur interne",
"Auteur interprète",
"Auxiliaire de puériculture",
"Auxiliaire de vie sociale",
"Auxiliaire spécialisé vétérinaire",
"Avocat",
"Bactériologiste",
"Barman ",
"Batelier",
"Bibliothécaire",
"Bijoutier",
"Bio-informaticien",
"Biologiste en environnement",
"Biologiste médical",
"Biostatisticien",
"Ébéniste",
"Botaniste",
"Bottier",
"Boucher",
"Boulanger",
"Brodeur",
"Bronzier",
"Cadreur",
"Caissier",
"Canalisateur",
"Carreleur-mosaïste",
"Carrossier ",
"Cartographe",
"Caviste",
"Chanteur",
"Charcutier",
"Chargé",
"Chargé d'affaires en génie climatique",
"Chargé d'affaires en génie mécanique",
"Chargé de clientèle banque",
"Chargé de communication interne",
"Chargé de pharmacovigilance",
"Chargé de production",
"Chargé de projet événementiel",
"Chargé de recherche en acoustique musicale",
"Chargé de recherche et développement déchets",
"Chargé de référencement web",
"Chargé des méthodes outils et qualité en informatique",
"Chargé des relations publiques",
"Chargé de veille législative et réglementaire",
"Chargé d'études économiques",
"Chargé d'études en aménagement",
"Chargé d'études en marketing",
"Chargé d'études en valorisation agricole des déchets",
"Chargé d'études média",
"Chargé d'études naturalistes",
"Chargé d'études ressources humaines",
"Charpentier bois",
"Charpentier métallique",
"Chaudronnier",
"Chauffeur de taxi",
"Chef comptable",
"Chef de chantier",
"Chef de chantier en installations électriques",
"Chef de cultures légumières",
"Chef de fabrication des industries graphiques",
"Chef de mission humanitaire",
"Chef de produit marketing",
"Chef de produit touristique",
"Chef de projet biodiversité",
"Chef de projet démantèlement nucléaire",
"Chef de projet informatique",
"Chef de projet multimédia",
"Chef de projet éolien",
"Chef de projet sites et sols pollués",
"Chef de projet territorial",
"Chef de publicité",
"Chef de rayon",
"Chef de station de traitement des eaux",
"Chef des ventes",
"Chef d'exploitation d'usine d'incinération",
"Chef monteur",
"Chercheur en biologie",
"Chercheur en biologie du sport",
"Chercheur en chimie",
"Chercheur en physique ",
"Chirurgien",
"Chirurgien-dentiste",
"Éclairagiste",
"Clerc d'huissier",
"Climatologue",
"Coffreur",
"Cogniticien",
"Coiffeur",
"Collaborateur de notaire",
"Collecteur de fonds",
"Comédien",
"Commerçant en alimentation",
"Commercial export",
"Commissaire de police",
"Commissaire-priseur",
"Comptable",
"Concepteur",
"Concepteur de jeux vidéo",
"Concepteur de niveaux de jeu web",
"Concepteur multimédia",
"Conducteur",
"Conducteur de bus ou d'autocar",
"Conducteur de ligne de production alimentaire",
"Conducteur de machines agricoles",
"Conducteur de machines à imprimer ",
"Conducteur de machines à papier ",
"Conducteur de métro",
"Conducteur d'engins de travaux publics",
"Conducteur de scierie",
"Conducteur de train / TGV",
"Conducteur de travaux",
"Économe de flux",
"Économiste de la construction",
"Conseiller agricole",
"Conseiller d'éducation",
"Conseiller d'insertion et de probation",
"Conseiller d'orientation-psychologue",
"Conseiller en économie sociale et familiale",
"Conseiller en environnement",
"Conseiller en fusion-acquisition",
"Conseiller en génétique",
"Conseiller en insertion sociale et professionnelle",
"Conseiller en salle de remise en forme",
"Conseiller en séjours",
"Conseiller en voyages",
"Conseiller espace info-énergie",
"Conservateur du patrimoine",
"Consignataire de navire",
"Constructeur de routes",
"Consultant",
"Consultant en solutions intégrées",
"Consultant en validation",
"Consultant green IT",
"Contrôleur",
"Contrôleur",
"Contrôleur de gestion",
"Contrôleur de performances",
"Contrôleur technique automobile",
"Convoyeur de fonds",
"Coordonnateur d'études cliniques",
"Cordiste",
"Cordonnier",
"Correcteur",
"Costumier",
"Courtier",
"Couvreur",
"Céramiste",
"Credit manager",
"Cuisinier",
"Danseur",
"Déclarant en douane",
"Décolleteur",
"Décorateur scénographe",
"Designer",
"Designer d'interaction",
"Designer sonore",
"Dessinateur BTP",
"Dessinateur de BD",
"Dessinateur en construction mécanique",
"Directeur",
"Directeur artistique",
"Directeur d'agence bancaire",
"Directeur de centre pénitentiaire",
"Directeur de création",
"Directeur de la photographie",
"Directeur de magasin à grande surface",
"Directeur de restaurant",
"Directeur d'hôpital",
"Directeur d'hôtel",
"Directeur d'office de tourisme",
"Éditeur",
"Diététicien",
"Déménageur",
"Démographe",
"Documentaliste",
"Domoticien",
"Dépanneur en électroménager",
"Dresseur d'animaux",
"Éducateur",
"Éducateur",
"Éducateur",
"Éducateur de jeunes enfants",
"Éducateur de la protection judiciaire de la jeunesse",
"Développeur économique",
"Développeur d'applications mobiles",
"Développeur humanitaire",
"Développeur informatique",
"Employé de pressing",
"Employé de restaurant",
"Encadreur",
"Enquêteur privé",
"Enseignant",
"Enseignant",
"Enseignant d'art",
"Enseignant de la conduite automobile et de la sécurité routière",
"Enseignant humanitaire",
"Entraîneur ",
"Entraîneur de chevaux",
"Ergonome",
"Ergothérapeute",
"Esthéticien",
"Ethnologue",
"Expert automobile",
"Expert bilan carbone",
"Expert-comptable",
"Expert en sécurité informatique",
"Expert immobilier",
"Façadier",
"Facteur",
"Facteur d'instruments",
"Façonnier des industries graphiques",
"Femme de chambre ",
"Ferronnier d'art",
"Fiscaliste",
"Fleuriste",
"Formateur d'adultes",
"Formateur en informatique",
"Formulateur",
"Frigoriste",
"Garde ",
"Garde à cheval",
"Gardien de la paix",
"Gardien de police municipale",
"Garçon de café",
"Gendarme",
"Gestionnaire de contrats d'assurance",
"Gestionnaire de contrats informatiques",
"Gestionnaire de données cliniques",
"Gestionnaire de patrimoine",
"Gestionnaire de portefeuille",
"Généticien",
"Géochimiste",
"Géographe",
"Géologue",
"Géologue minier",
"Géologue modélisateur",
"Géomaticien",
"Géomètre-topographe",
"Géophysicien",
"Géotechnicien",
"Géothermicien",
"Gouvernante",
"Gérant de restauration collective",
"Graphiste",
"Greffier",
"Grutier",
"Guichetier",
"Guide conférencier des villes et pays d'art et d'histoire",
"Guide de haute montagne",
"Guide-interprète",
"Halieute",
"Histologiste",
"Horloger",
"Horticulteur",
"Hot liner",
"Hôte d'accueil",
"Hôtesse de l'air ",
"Huissier de justice",
"Hydraulicien",
"Hydrogéologue",
"Hydrologue",
"Iconographe",
"Illustrateur",
"Infirmier",
"Infirmier humanitaire",
"Informaticien",
" ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur",
"Ingénieur analyste de l'air",
"Ingénieur brevets",
"Ingénieur calcul",
"Ingénieur chimiste",
"Ingénieur chimiste en développement analytique",
"Ingénieur cloud computing",
"Ingénieur d'application",
"Ingénieur de la police technique et scientifique",
"Ingénieur du BTP",
"Ingénieur du son",
"Ingénieur efficacité énergétique du bâtiment",
"Ingénieur en aéronautique",
"Ingénieur en automatisme",
"Ingénieur en construction automobile",
"Ingénieur en construction navale",
"Ingénieur en génie climatique",
"Ingénieur en informatique",
"Ingénieur en électronique numérique",
"Ingénieur en mécanique",
"Ingénieur en mécanique",
"Ingénieur en métrologie",
"Ingénieur en énergie solaire",
"Ingénieur en production et expérimentations végétales",
"Ingénieur en R et D en énergies renouvelables",
"Ingénieur environnement",
"Ingénieur environnement et risques industriels",
"Ingénieur essais",
"Ingénieur fluides, énergies, réseaux, environnement",
"Ingénieur forage",
"Ingénieur gaz",
"Ingénieur hydroécologue",
"Ingénieur hydrogéomorphologue",
"Ingénieur maintenance aéronautique",
"Ingénieur métallurgiste",
"Ingénieur méthodes mécaniques",
"Ingénieur nucléaire",
"Ingénieur plasturgiste",
"Ingénieur procédés en chimie",
"Ingénieur process aval",
"Ingénieur production dans les biotechnologies",
"Ingénieur production en mécanique",
"Ingénieur radioprotection",
"Ingénieur recherche et développement en agroalimentaire",
"Ingénieur réservoir",
"Ingénieur structures",
"Ingénieur système",
"Ingénieur systèmes embarqués",
"Ingénieur technico-commerciale en électronique",
"Ingénieur technico-commercial en chimie",
"Ingénieur textile",
"Ingénieur télécoms et réseaux",
"Ingénieur études et développement en logiciels de simulation",
"Inspecteur",
"Inspecteur de banque",
"Inspecteur du permis de conduire et de la sécurité routière",
"Installateur en télécoms",
"Intégrateur web",
"Journaliste",
"Journaliste reporter d'images ",
"Juge des enfants",
"Juge d'instance",
"Juge d'instruction",
"Juriste d'entreprise",
"Juriste droit de l'environnement",
"Juriste en droit social",
"Juriste en propriété intellectuelle",
"Lad-jockey, lad-driver",
"Élagueur",
"Électricien",
"Électromécanicien ",
"Électronicien automobile",
"Éleveur",
"Libraire",
"Linguiste",
"Logisticien",
"Machiniste constructeur ou plateau",
"Magasinier cariste",
"Magistrat",
"Manager de risques",
"Mandataire judiciaire",
"Manipulateur en électroradiologie médicale",
"Maçon",
"Maquettiste",
"Maraîcher",
"Maréchal",
"Marchandiseur",
"Marin de commerce",
"Marin pêcheur",
"Maroquinier",
"Masseur-kinésithérapeute",
"Matelot",
"Maître-chien",
"Maître d'hôtel",
"Maître nageur sauveteur",
"Mécanicien",
"Mécanicien",
"Mécanicien bateaux",
"Mécanicien d'entretien d'avion",
"Mécanicien en matériel agricole",
"Mécanicien moto",
"Mécatronicien",
"Médecin généraliste",
"Médecin humanitaire",
"Médecin spécialiste",
"Médiateur",
"Menuisier",
"Microbiologiste",
"Microtechnicien",
"Militaire du rang ",
"Militaire technicien",
"Mixeur",
"Modiste",
"Modéliste",
"Moniteur",
"Moniteur de ski",
"Moniteur d'équitation",
"Monteur",
"Monteur en installations thermiques et climatiques",
"Monteur en réseaux de distribution électrique",
"Mouleur",
"Météorologiste",
"Musicien",
"Neurobiologiste",
"Notaire",
"Océanologue",
"Oenologue",
"Officier",
"Officier de gendarmerie",
"Officier de la marine marchande",
"Officier de l'armée de l'air",
"Officier de l'armée de terre",
"Officier de marine",
"Officier de police",
"Opérateur de fabrication de produits alimentaires",
"Opérateur de raffinerie",
"Opérateur en traitement des matériaux",
"Opérateur prépresse",
"Opérateur sur machine à commande numérique",
"Opticien",
"Optronicien",
"Orfèvre",
"Orthophoniste",
"Orthoprothésiste",
"Orthoptiste",
"Ostéopathe",
"Ouvrier",
"Ouvrier agricole",
"Ouvrier paysagiste",
"Palefrenier",
"Paléontologue",
"Parfumeur",
"Paysagiste",
"Pédiatre",
"Pédicure-podologue",
"Pédologue",
"Peintre en bâtiment",
"Pharmacien",
"Pharmacien dans l'industrie",
"Photographe",
"Pilote de ligne",
"Pilote de ligne automatisée ",
"Plombier",
"Plâtrier",
"Podo-orthésiste",
"Professeur dans l'enseignement agricole",
"Professeur d'éducation physique et sportive",
"Professeur de collège et de lycée",
"Professeur de lycée professionnel ou technique ",
"Professeur de maths ou de physique-chimie",
"Professeur de musique et de danse",
"Professeur des écoles ",
"Professeur documentaliste",
"Projectionniste",
"Prothésiste dentaire",
"Préparateur en pharmacie",
"Psychanalyste",
"Psychologue",
"Psychomotricien",
"Pâtissier",
"Puériculteur",
"Qualiticien",
"Réceptionniste",
"Rédacteur",
"Rédacteur en chef",
"Rédacteur on line",
"Relieur",
"Reporter-photographe",
"Responsable assurance qualité",
"Responsable biométrie",
"Responsable de fabrication en agroalimentaire",
"Responsable de formation",
"Responsable de laboratoire de contrôle en biologie",
"Responsable de la collecte des déchets ménagers",
"Responsable de la promotion des ventes",
"Responsable de la rémunération",
"Responsable de plate-forme biotechnologique",
"Responsable de projets culturels",
"Responsable de réseau d'assainissement",
"Responsable de réseau eau potable",
"Responsable de scierie",
"Responsable de site de traitement des déchets",
"Responsable des ressources humaines",
"Responsable d'ordonnancement",
"Responsable du back-office",
"Responsable du recrutement",
"Responsable du service après-vente",
"Responsable du soutien logistique intégré",
"Responsable micro",
"Responsable qualité en agroalimentaire",
"Restaurateur d'oeuvres d'art",
"Régisseur cinéma",
"Régisseur de spectacles",
"Régleur",
"Rudologue",
"Sage-femme ",
"Salesman",
"Sapeur",
"Scénariste",
"Scripte",
"Secrétaire",
"Secrétaire administratif",
"Secrétaire d'édition",
"Secrétaire de rédaction",
"Secrétaire des Affaires étrangères",
"Secrétaire juridique",
"Secrétaire médical",
"Sellier",
"Serrurier",
"Sociologue",
"Soigneur",
"Solier-moquettiste",
"Sommelier",
"Soudeur",
"Souffleur de verre ",
"Souscripteur",
"Sous-officier de l'armée de l'air",
"Sous-officier de l'armée de terre",
"Spécialiste des affaires réglementaires en chimie",
"Sportif de haut niveau",
"Sérigraphe",
"Staffeur-ornemaniste",
"Statisticien",
"Styliste ",
"Substitut du procureur",
"Surveillant de centre pénitentiaire",
"Syndic de copropriété",
"Tailleur",
"Tailleur de pierre",
"Étalagiste",
"Étanchéiste",
"Tapissier d'ameublement ",
"Technicien ",
"Technicien ",
"Technicien",
"Technicien",
"Technicien",
"Technicien automobile",
"Technicien biologiste",
"Technicien chimiste",
"Technicien céramiste",
"Technicien d'analyses biomédicales",
"Technicien de fabrication de mobilier et de menuiserie",
"Technicien de forge",
"Technicien de l'intervention sociale et familiale",
"Technicien de maintenance en génie climatique",
"Technicien de maintenance en informatique",
"Technicien de maintenance industrielle",
"Technicien de police technique et scientifique",
"Technicien des industries du verre",
"Technicien d'essais",
"Technicien d'exploitation de l'eau",
"Technicien d'exploitation du réseau gaz",
"Technicien d'intervention clientèle gaz",
"Technicien en automatismes",
"Technicien en engins de travaux publics",
"Technicien en lignes haute tension ",
"Technicien en métrologie",
"Technicien en optique de précision",
"Technicien en traitement des déchets",
"Technicien nucléaire",
"Technicien paysagiste",
"Technicien plasturgiste",
"Technicien télécoms et réseaux",
"Technico-commercial en agroalimentaire",
"Techniverrier",
"Teinturier",
"Testeur en informatique",
"Télévendeur",
"Toiletteur d'animaux",
"Tonnelier",
"Trader",
"Traducteur-interprète",
"Urbaniste",
"Veilleur stratégique",
"Vendeur-conseil en matériel agricole",
"Vendeur en animalerie",
"Vendeur en fournitures automobiles",
"Vendeur en magasin",
"Vendeur en micro-informatique et multimédia",
"Verrier au chalumeau",
"Visiteur",
"Viticulteur",
"Vitrailliste",
"Volcanologue",
"Vétérinaire",
"Webdesigner",
"Webmestre",
"Zoologiste"]
| 24.728083 | 78 | 0.766915 |
748b86687a2fbc17ac64107176d78e96231bebbb | 25,826 | py | Python | python/resq/slo/policy.py | NetSys/resq | 6bfa19ee3d39b4a9e75c98c91435f270289d0955 | [
"MIT"
] | 13 | 2018-03-30T04:56:34.000Z | 2022-03-17T02:09:06.000Z | python/resq/slo/policy.py | NetSys/resq | 6bfa19ee3d39b4a9e75c98c91435f270289d0955 | [
"MIT"
] | 1 | 2018-10-16T16:07:04.000Z | 2019-05-30T11:15:17.000Z | python/resq/slo/policy.py | NetSys/resq | 6bfa19ee3d39b4a9e75c98c91435f270289d0955 | [
"MIT"
] | 4 | 2019-01-25T02:18:47.000Z | 2022-03-21T08:47:46.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
import glob
from gurobipy import *
import math
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from numpy import array
from pprint import pprint
from resq.profile import Profile
from resq.run import Run
from resq.nf import NFManager
class Policy:
@staticmethod
def evaluate(nr_cores_per_node=8):
result = []
for fout in glob.glob('sla/core-%d/*.out' % nr_cores_per_node):
fin = fout.replace('out', 'in')
with open(fin) as f:
terms = eval(f.read())
with open(fout) as f:
sol = eval(f.read())
methods = {
'offline.': 'CAT-Offline',
'online_cat.': 'CAT-Online',
'online_e2_cat.': 'CAT-Equal',
'online_predict_': 'Predict',
'online_e2.': 'E2'
}
method = None
for key in methods:
if key in fout:
method = methods[key]
break
categories = {
'all': 'All',
'insensitive': 'Insensitive',
'sensitive': 'Sensitive',
}
category = None
for key in categories:
if fout.startswith('sla/core-%d/%s' %
(nr_cores_per_node, key)):
category = categories[key]
print('Evaluating E2 violations for %s' % fout)
violations = [0 for t in terms]
for node in sol:
nf_list = [terms[idx]['app'] for (idx, cache) in node]
print(nf_list)
run = Run(nf_list)
if not run.is_done:
print('WARNING: do not have the results for this...')
continue
num = len(nf_list)
pps = run.pps_normalized_full_cbm
rtt = rtt_95_normalized_full_cbm
for i in xrange(num):
term_idx = node[i][0]
if pps[i] < terms[idx]['xput'] or (
not rtt[i].isnan() and rtt[i] > terms[idx]['rtt']):
violations[term_idx] = 1
print('Violations: %d/%d' % (sum(violations), len(violations)))
result.append((category, method, len(sol)))
pprint(sorted(result))
'''
width = 0.25
pos = list(range(3))
fig, ax = plt.subplots(figsize=(10,5))
plt.bar(pos, )
'''
'''
def xput(term, cache):
pivot = slos[term]['pivot']
coeff = slos[term]['coeff']
util = slos[term]['util']
util = util if util else 1
max_xput = 0
if cache <= pivot:
max_xput = coeff[0][0] * cache + coeff[0][1]
else:
max_xput = coeff[1][0] * cache + coeff[1][1]
return util * max_xput
for fout in glob.glob('sla/*.out'):
name = fout.replace('.out', '')
with open(fout) as f:
sol = eval(f.read())
with open(fout.replace('.out', '.in')) as f:
slos = eval(f.read())
terms = range(len(slos))
term_pps = [0] * len(terms)
target_pps = [slo['xput'] for slo in slos]
nr_cores = sum([len(node) for node in sol])
nr_nodes = len(sol)
for node in sol:
term_idx = [i[0] for i in node]
apps = [slos[term]['app'] for term in term_idx]
utils = [slos[term]['util'] for term in term_idx]
e = None
print(sorted(zip(apps, utils)))
continue
for i, term in enumerate(term_idx):
pps_mean = np.mean(it.compress(e.pps, e.apps == app))
term_pps[term] += e.pps_normalized_full_cbm[i]
continue
nr_violations = 0
dist = []
for i, (observed, target) in enumerate(zip(term_pps, target_pps)):
if observed < target:
e = 100 * (target - observed) / target
dist.append(e)
# print('%s: slo %d violated by %f percent' % (name, i, e))
nr_violations += 1
violations = {}
for n in [2, 5, 10, 15, 20, 30, 40, 50]:
nr_violations = len([i for i in dist if i > n])
violations[n] = 100 * nr_violations / nr_slos
nr_instances_lb = [int(math.ceil(slos[term]['xput'] / xput(term, max_cache[app])))
for term in terms]
nr_instances_ub = [int(math.ceil(slos[term]['xput'] / xput(term, 1)))
for term in terms]
nr_nodes_lb = int(math.ceil(sum(nr_instances_lb) / float(nr_cores_per_node)))
print('%s & %d & %d & %d & %d & %2.2f & %2.2f & %2.2f & %2.2f & %2.2f\\\\' % (name, nr_slos, nr_cores, nr_nodes, nr_nodes_lb, violations[2], violations[5], violations[10], violations[15], violations[20]))
'''
    @staticmethod
    def generate_slos(rtt=100, xput=90, nr_slos=60):
        """Build per-category SLO workloads from profiled NF pipelets.

        Args:
            rtt: target 95th-percentile RTT for every SLO term.
            xput: target normalized throughput for every SLO term.
            nr_slos: approximate number of SLO terms per category (rounded
                down to a multiple of the category's app count).

        Returns:
            Dict mapping a '<category>-<n>-rtt<r>-xput<x>' label to a list of
            SLO dicts with keys: app, coeff, pivot, util, max_cache, rtt,
            xput.
        """
        # NF pipelets grouped by LLC sensitivity.
        apps = {
            'sensitive': ['mon', 'ip_131k', 'mazunat', 'snort'],
            'insensitive': ['efficuts_32k', 'firewall_250', 'ipsec'],
            'neutral': ['suricata', 'wanopt']
        }
        # Each category repeats its app list to reach ~nr_slos terms;
        # 'all' draws evenly from every category (including 'neutral').
        slos = {
            'insensitive-%d-rtt%d-xput%d' % (nr_slos, rtt, xput):
            apps['insensitive'] * int(nr_slos / len(apps['insensitive'])),
            'sensitive-%d-rtt%d-xput%d' % (nr_slos, rtt, xput):
            apps['sensitive'] * int(nr_slos / len(apps['sensitive'])),
            'all-%d-rtt%d-xput%d' % (nr_slos, rtt, xput):
            [app for l in apps.values() for app in l] *
            int(nr_slos / sum([len(a) for a in apps.values()]))
        }
        for type_ in slos.keys():
            d = []
            for app in slos[type_]:
                # Profile supplies the piecewise-linear xput/cache model
                # (coeff, pivot, max_cache) and the utilization cap that
                # keeps the RTT target — TODO confirm rtt_to_utilization
                # returns a percentage (it is divided by 100 below).
                profile = Profile(pipelet=app, traffic='u100k_60')
                coeff, pivot, max_cache = profile.l3_xput_approx()
                util = profile.find().rtt_to_utilization(rtt)
                d.append({
                    'app': app,
                    'coeff': coeff,
                    'pivot': pivot,
                    # Falsy util (None/0) means no cap: run at full rate.
                    'util': util / 100 if util else 1,
                    'max_cache': max_cache,
                    'rtt': rtt,
                    'xput': xput
                })
                print(app, util)
            # Replace the app-name list with the materialized SLO dicts.
            slos[type_] = d
        return slos
    @staticmethod
    def online_predict_nsdi12(slos, nr_cores_per_node=16, xput=90):
        """First-fit placement guided by the NSDI'12 contention predictor.

        Sorts SLOs by their solo cache demand (largest first) and adds each
        instance to the first node where the predicted per-NF throughput of
        the resulting co-location stays above `xput`.

        Args:
            slos: list of SLO dicts ('app', 'coeff', 'util', 'max_cache', ...).
            nr_cores_per_node: cores available per node.
            xput: minimum predicted normalized throughput per co-located NF.

        Returns:
            List of nodes, each a list of (term_position, cache_ways) pairs.
        """
        # NOTE(review): both inner helpers are currently unused (xput_max's
        # only call site is the commented-out app_num line below).
        def xput_min(app):
            return slos[app]['util'] * (
                slos[app]['coeff'][0][0] * 1 + slos[app]['coeff'][0][1])
        def xput_max(app):
            return slos[app]['util'] * (
                slos[app]['coeff'][1][0] * slos[app]['max_cache'] +
                slos[app]['coeff'][1][1])
        terms = []
        for app in slos:
            app_name = app['app']
            # min_l3ways: smallest LLC allocation meeting the xput target.
            profile = Profile(pipelet=app_name, traffic='u100k_60')
            app_cache = profile.find().min_l3ways(app['xput'])
            #app_num = int(math.ceil(app['xput'] / xput_max(app)))
            terms.append((app_name, app_cache))
        terms.sort(key=lambda t: t[1], reverse=True)
        nodes = []
        i = 0
        for name, cache in terms:
            done = False
            for node in nodes:
                if len(node) >= nr_cores_per_node:
                    continue
                # NOTE(review): node entries store i, the position in the
                # *sorted* term list, but slos[idx] below indexes the
                # original slos order — these disagree whenever sorting
                # reorders terms. Verify against evaluate(), which treats
                # node entries as slos indices.
                pipelets = [slos[idx]['app']
                            for idx, app_cache in node] + [name]
                traffics = ['u100k_60' for _ in pipelets]
                #print(pipelets)
                #print(traffics)
                run = Run(pipelets=pipelets, traffics=traffics)
                new_pps = run.pps_predict_nsdi12_normalized_full_cbm
                if (np.array(new_pps) > xput).all():
                    done = True
                    node.append((i, cache))
                    break
            if not done:
                # No existing node can host this instance: open a new one.
                nodes.append([(i, cache)])
            i += 1
        return nodes
'''
@staticmethod
def online_cat(slos, nr_cores_per_node=16, nr_ways=18):
def xput_min(app):
return slos[app]['util'] * (slos[app]['coeff'][0][0] * 1 + slos[app]['coeff'][0][1])
def xput_max(app):
return slos[app]['util'] * (slos[app]['coeff'][1][0] * slos[app]['max_cache'] + slos[app]['coeff'][1][1])
terms = []
for app in slos:
app_name = app['app']
profile = Profile(pipelet=app_name, traffic='u100k_60')
app_cache = profile.find().min_l3ways(app['xput'])
#app_num = int(math.ceil(app['xput'] / xput_max(app)))
terms.append((app_name, app_cache))
terms.sort(key=lambda t: t[1], reverse=True)
nodes = []
i = 0
for name, cache in terms:
done = False
for node in nodes:
remaining_cores = nr_cores_per_node - len(node)
remaining_cache = nr_ways - sum([core[1] for core in node])
if remaining_cores >= 1 and remaining_cache >= cache:
node.append((i, cache))
done = True
break
if not done:
nodes.append([(i, cache)])
i += 1
return nodes
'''
@staticmethod
def online_cat_binpack(slos, nr_cores_per_node=16, nr_ways=18):
def xput(slo, cache):
pivot = slo['pivot']
if cache < pivot:
return slo['util'] * (
slo['coeff'][0][0] * cache + slo['coeff'][0][1])
else:
return slo['util'] * (
slo['coeff'][1][0] * cache + slo['coeff'][1][1])
terms = []
for i, slo in enumerate(slos):
profile = Profile(pipelet=slo['app'], traffic='u100k_60')
target_xput = slo['xput']
cache_line = nr_ways / nr_cores_per_node
K = int(math.ceil(target_xput / xput(slo, cache_line)))
caches = [1] * K
xputs = [xput(slo, c) for c in caches]
done = sum(xputs) > target_xput
while not done:
for i in range(K):
caches[i] += 1
xputs[i] = xput(slo, caches[i])
if sum(xputs) > target_xput:
done = True
break
terms.append((i, caches))
nodes = []
for idx, caches in terms:
num = len(caches)
while num > 0:
done = False
for node in nodes:
used_cache = sum([c for _, c in node])
if len(node) < nr_cores_per_node and used_cache + caches[
num - 1] <= nr_ways:
node.append((idx, caches[num - 1]))
num -= 1
done = True
break
if not done:
nodes.append([(idx, caches[num - 1])])
num -= 1
return nodes
@staticmethod
def online_cat_greedy(slos, nr_cores_per_node=16, nr_ways=18):
def xput(app, cache):
pivot = slos[app]['pivot']
if cache < pivot:
return slos[app]['util'] * (slos[app]['coeff'][0][0] * cache +
slos[app]['coeff'][0][1])
else:
return slos[app]['util'] * (slos[app]['coeff'][1][0] * cache +
slos[app]['coeff'][1][1])
terms = []
for app in slos:
app_name = app['app']
profile = Profile(pipelet=app_name, traffic='u100k_60')
app_xput = app['xput']
terms.append((app_name, app_xput))
terms.sort(key=lambda t: t[1], reverse=True)
nodes = [[]]
i = 0
for name, app_xput in terms:
profile = Profile(pipelet=app_name, traffic='u100k_60')
cache_line = nr_ways / nr_cores_per_node
num = int(math.ceil(app_xput / xput(i, cache_line)))
while num > 0:
if len(nodes[-1]) < nr_cores_per_node:
nodes[-1].append((i, cache_line))
else:
nodes.append([(i, cache_line)])
num -= 1
i += 1
return nodes
@staticmethod
def online_e2(slos, nr_cores_per_node=16):
def xput_min(app):
return slos[app]['util'] * (
slos[app]['coeff'][0][0] * 1 + slos[app]['coeff'][0][1])
def xput_max(app):
return slos[app]['util'] * (
slos[app]['coeff'][1][0] * slos[app]['max_cache'] +
slos[app]['coeff'][1][1])
terms = []
for app in slos:
app_name = app['app']
profile = Profile(pipelet=app_name, traffic='u100k_60')
app_cache = profile.find().min_l3ways(app['xput'])
terms.append((app_name, app_cache))
terms.sort(key=lambda t: t[1], reverse=True)
nodes = [[]]
i = 0
for name, cache in terms:
if len(nodes[-1]) < nr_cores_per_node:
nodes[-1].append((i, cache))
else:
nodes.append([(i, cache)])
i += 1
return nodes
    @staticmethod
    def offline(slos, nr_cores_per_node=16, cache_size=18):
        """Solve the offline placement as a Gurobi MIP minimizing node count.

        Decision variables: per-instance cache ways C_{ij} (integer),
        instance-to-node affinity I_{ijk} (binary), segment selectors
        lambda_{ijm} for the two-piece linear xput model, and node-active
        flags N_k. The objective minimizes the number of active nodes
        subject to per-term throughput targets and per-node core/cache
        budgets.

        Args:
            slos: list of SLO dicts with 'coeff', 'pivot', 'util',
                'max_cache', 'xput'.
            nr_cores_per_node: cores available per node.
            cache_size: LLC ways available per node.

        Returns:
            List of nodes, each a list of (term, cache_ways) tuples, or
            None when the model is infeasible/unbounded.
        """
        def terminator(m, where):
            # Early-stop callback: after 60 s accept any incumbent within a
            # 10% optimality gap; after 1200 s relax the gap to 30%.
            if where == GRB.Callback.MIP:
                time = m.cbGet(GRB.Callback.RUNTIME)
                best = m.cbGet(GRB.Callback.MIP_OBJBST)
                if time > 60 and best < GRB.INFINITY:
                    # nodecnt = m.cbGet(GRB.callback.MIP_NODCNT)
                    # solcnt = m.cbGet(GRB.callback.MIP_SOLCNT)
                    objbst = m.cbGet(GRB.callback.MIP_OBJBST)
                    objbnd = m.cbGet(GRB.callback.MIP_OBJBND)
                    if abs(objbst - objbnd) < 0.10 * (1.0 + abs(objbst)):
                        m.terminate()
                    elif time > 1200 and abs(objbst - objbnd) < 0.30 * (
                            1.0 + abs(objbst)):
                        m.terminate()
        # Throughput of one instance at the minimum (1 way) and maximum
        # (max_cache ways) allocations; used only for instance-count bounds.
        def xput_min(term):
            return slos[term]['util'] * (
                slos[term]['coeff'][0][0] + slos[term]['coeff'][0][1])
        def xput_max(term):
            return slos[term]['util'] * (
                slos[term]['coeff'][1][0] * slos[term]['max_cache'] +
                slos[term]['coeff'][1][1])
        def xput(term, instance):
            # Gurobi linear expression for instance throughput; the selector
            # variables pick which of the two model segments applies.
            # C_{ij}
            cache_var = cache[term, instance]
            # for all m, lambda_{ijm}
            coeff_sel = [coeff_select[term, instance, i] for i in range(2)]
            coeff = slos[term]['coeff']
            util = slos[term]['util']
            util = util if util else 1.0
            max_xput = coeff_sel[0] * (coeff[0][0] * cache_var + coeff[0][1]) + \
                coeff_sel[1] * (coeff[1][0] * cache_var + coeff[1][1])
            print(term, instance, util, max_xput)
            # The 0.03 margin guards against the model slightly
            # over-predicting achievable throughput.
            return (util - 0.03) * max_xput
        m = Model("policy2a")
        terms = range(len(slos))
        nr_instances_lb = [int(math.ceil(slos[term]['xput'] / xput_max(term)))
                           for term in terms]
        nr_instances_ub = [int(math.ceil(slos[term]['xput'] / xput_min(term)))
                           for term in terms]
        nr_nodes_lb = int(math.ceil(sum(nr_instances_lb) / nr_cores_per_node))
        nr_nodes_ub = int(math.ceil(sum(nr_instances_ub) / nr_cores_per_node))
        print('nodes lb: %d, ub: %d' % (nr_nodes_lb, nr_nodes_ub))
        nodes = range(nr_nodes_ub)
        instances = [list(range(i)) for i in nr_instances_ub]
        # lambda_{ijm}
        coeff_select = {(term, instance, i):
                        m.addVar(vtype=GRB.BINARY,
                                 name='CSEL_%d_%d_%d' % (term, instance, i))
                        for term in terms for instance in instances[term]
                        for i in range(2)}
        # C_{ij}
        cache = {(term, instance):
                 m.addVar(lb=0,
                          ub=slos[term]['max_cache'], vtype=GRB.INTEGER,
                          name='C_%d_%d' % (term, instance))
                 for term in terms for instance in instances[term]}
        # I_{ijk}
        affinity = {(term, instance, node):
                    m.addVar(vtype=GRB.BINARY,
                             name='I_%d_%d_%d' % (term, instance, node))
                    for term in terms for instance in instances[term]
                    for node in nodes}
        # N_k
        node_active = {node: m.addVar(vtype=GRB.BINARY,
                                      name='N_%d' % (node))
                       for node in nodes}
        # Integrate new variables
        m.update()
        # forall k, \sum_{i,j} I_{ijk}
        node_cores = {node: quicksum(affinity[term, instance, node]
                                     for term in terms
                                     for instance in instances[term])
                      for node in nodes}
        # \sum_{i,j} C_{ij} I_{ijk}
        node_cache = {
            node:
            quicksum(cache[term, instance] * affinity[term, instance, node]
                     for term in terms for instance in instances[term])
            for node in nodes
        }
        # forall i, \sum_{jk} I_{ijk}
        nr_instances = {term: quicksum(affinity[term, instance, node]
                                       for node in nodes
                                       for instance in instances[term])
                        for term in terms}
        # forall i,j, \sum_{k} I_{ijk}
        instance_active = {(term, instance):
                           quicksum(affinity[term, instance, node]
                                    for node in nodes)
                           for term in terms for instance in instances[term]}
        # forall i, \sum_j T_{ij}
        term_xput = {term: quicksum(xput(term, instance)
                                    for instance in instances[term])
                     for term in terms}
        # \sum_k N_k
        nr_nodes_active = quicksum(node_active.values())
        m.setObjective(nr_nodes_active, GRB.MINIMIZE)
        #m.addConstr(nr_nodes_active >= nr_nodes_lb, name='nr_nodes_active_lb')
        for term in terms:
            # target throughput constraint
            m.addConstr(term_xput[term] >= slos[term]['xput'],
                        name='term_xput_%d' % term)
            # bounds on number of instances just to speed things up
            m.addConstr(nr_instances[term] >= nr_instances_lb[term],
                        name='nr_instances_lb_%d' % term)
            m.addConstr(nr_instances[term] <= nr_instances_ub[term],
                        name='nr_instances_ub_%d' % term)
            for inst in instances[term]:
                # each instance is active on at most one node
                m.addSOS(GRB.SOS_TYPE1,
                         [affinity[term, inst, node] for node in nodes])
                # if instance_active one of the coeff_select's are 1, otherwise both are 0
                m.addConstr(
                    coeff_select[term, inst, 0] + coeff_select[term, inst, 1] -
                    instance_active[term, inst] == 0,
                    name='sum_coeff_select_%d_%d' % (term, inst))
                # following conditions for piecewise linear model of xput/cache curve
                # if 1 < cache < pivot then first approx is used otherwise (pivot < cache < max) the second.
                m.addConstr(cache[term, inst] <= coeff_select[term, inst, 0] *
                            slos[term]['pivot'] + coeff_select[term, inst, 1] *
                            slos[term]['max_cache'], name='cache_ub_%d_%d' %
                            (term, inst))
                m.addConstr(cache[term, inst] >= coeff_select[term, inst, 1] *
                            slos[term]['pivot'] + coeff_select[term, inst, 0] *
                            1, name='cache_lb_%d_%d' % (term, inst))
        for node in nodes:
            # node is active iff at least one core on it is active
            m.addConstr(
                node_cores[node] - nr_cores_per_node * node_active[node] <= 0,
                name='node_active1_%d' % node)
            m.addConstr(node_cores[node] - node_active[node] >= 0,
                        name='node_active2_%d' % node)
            m.addConstr(node_cores[node] <= nr_cores_per_node,
                        name='node_cores_%d' % node)
            m.addConstr(node_cache[node] <= cache_size,
                        name='node_cache_%d' % node)
        #m.update()
        #m.write('sla/policy2a.lp')
        m.optimize(terminator)
        if m.status == GRB.status.OPTIMAL:
            print('Optimal objective: %g' % m.objVal)
        elif m.status == GRB.status.INF_OR_UNBD:
            print('m is infeasible or unbounded')
            return None
        elif m.status == GRB.status.INFEASIBLE:
            print('m is infeasible')
            return None
        elif m.status == GRB.status.UNBOUNDED:
            print('m is unbounded')
            return None
        else:
            print('Optimization ended with status %d' % m.status)
        for term in terms:
            x = term_xput[term].getValue()
            cores = sum(instance_active[term, inst].getValue()
                        for inst in instances[term])
            print('term %-15s: %d cores, %-5.3g >= %-5.3g' %
                  (term, cores, x, slos[term]['xput']))
        result = []
        for node in nodes:
            # Report instances assigned to this node (affinity > 0.5 reads
            # the binary variable's solved value).
            x = ['%d ways-%s-%d' %
                 (cache[term, inst].x, slos[term]['app'], inst)
                 for term in terms for inst in instances[term]
                 if affinity[term, inst, node].x > 0.5]
            r = [(term, cache[term, inst].x)
                 for term in terms for inst in instances[term]
                 if affinity[term, inst, node].x > 0.5]
            if x != []:
                print('node %d: %s' % (node, x))
                result.append(r)
        print('min_number_of_nodes: %g' % m.objVal)
        return result
@staticmethod
def run_online_predict_nsdi12(slos, desc, nr_cores_per_node):
outfile = 'sla/%s-online_predict_nsdi12.out' % desc
infile = 'sla/%s-online_predict_nsdi12.in' % desc
start = time.time()
r = Policy.online_predict_nsdi12(slos,
nr_cores_per_node=nr_cores_per_node)
print('** %s-online_predict_nsdi12 took: %d' %
(desc, time.time() - start))
if r:
with open(outfile, 'w') as f:
pprint(r, f)
with open(infile, 'w') as f:
pprint(slos, f)
@staticmethod
def run_online_e2(slos, desc, nr_cores_per_node):
outfile = 'sla/%s-online_e2.out' % desc
infile = 'sla/%s-online_e2.in' % desc
start = time.time()
r = Policy.online_e2(slos, nr_cores_per_node=nr_cores_per_node)
print('** %s-online_e2 took: %d' % (desc, time.time() - start))
if r:
with open(outfile, 'w') as f:
pprint(r, f)
with open(infile, 'w') as f:
pprint(slos, f)
@staticmethod
def run_online_cat_greedy(slos, desc, nr_cores_per_node):
outfile = 'sla/%s-online_cat_greedy.out' % desc
infile = 'sla/%s-online_cat_greedy.in' % desc
start = time.time()
r = Policy.online_cat_greedy(slos, nr_cores_per_node=nr_cores_per_node)
print('** %s-online_cat_greedy took: %d' % (desc, time.time() - start))
if r:
with open(outfile, 'w') as f:
pprint(r, f)
with open(infile, 'w') as f:
pprint(slos, f)
@staticmethod
def run_online_cat_binpack(slos, desc, nr_cores_per_node):
outfile = 'sla/%s-online_cat_binpack.out' % desc
infile = 'sla/%s-online_cat_binpack.in' % desc
start = time.time()
r = Policy.online_cat_binpack(slos,
nr_cores_per_node=nr_cores_per_node)
print('** %s-online_cat_binpack took: %d' %
(desc, time.time() - start))
if r:
with open(outfile, 'w') as f:
pprint(r, f)
with open(infile, 'w') as f:
pprint(slos, f)
@staticmethod
def run_offline(slos, desc, nr_cores_per_node):
outfile = 'sla/%s-offline.out' % desc
infile = 'sla/%s-offline.in' % desc
start = time.time()
r = Policy.offline(slos, nr_cores_per_node=nr_cores_per_node)
print('** %s-offline took: %d' % (desc, time.time() - start))
if r:
with open(outfile, 'w') as f:
pprint(r, f)
with open(infile, 'w') as f:
pprint(slos, f)
@staticmethod
def run():
slos_all = Policy.generate_slos(nr_slos=200)
nr_cores_per_node = 9
for k, slos in slos_all.items():
print("solving: %s" % k)
Policy.run_offline(slos, k, nr_cores_per_node)
Policy.run_online_e2(slos, k, nr_cores_per_node)
Policy.run_online_cat_binpack(slos, k, nr_cores_per_node)
Policy.run_online_cat_greedy(slos, k, nr_cores_per_node)
Policy.run_online_predict_nsdi12(slos, k, nr_cores_per_node)
#Policy.evaluate()
| 39.30898 | 216 | 0.487261 |
c040232bdea3ff576e96d8df2eede9a7e1ccd56f | 10,540 | py | Python | tools/api-key-rotation/api_key_rotation_checker/main.py | marcosgm/professional-services | f332b425c2f3b6538ebf65afda7e67de3bed1b3d | [
"Apache-2.0"
] | 2,116 | 2017-05-18T19:33:05.000Z | 2022-03-31T13:34:48.000Z | tools/api-key-rotation/api_key_rotation_checker/main.py | hyuatpc/professional-services | e5c811a8752e91fdf9f959a0414931010b0ea1ba | [
"Apache-2.0"
] | 548 | 2017-05-20T05:05:35.000Z | 2022-03-28T16:38:12.000Z | tools/api-key-rotation/api_key_rotation_checker/main.py | hyuatpc/professional-services | e5c811a8752e91fdf9f959a0414931010b0ea1ba | [
"Apache-2.0"
] | 1,095 | 2017-05-19T00:02:36.000Z | 2022-03-31T05:21:39.000Z | #!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This tool checks each GCP project in your GCP Organization
for the existence of API keys. Each key is compared
to a custom rotation period (defaults to 90).
The output consists of two groupings of API keys:
- One group are keys with rotation periods older than
the approved rotation period.
- The second group are keys with rotation periods under
the approved rotation period.
"""
import subprocess
import requests # pylint: disable=import-error
import googleapiclient.discovery # pylint: disable=import-error
from datetime import datetime, timedelta
from dateutil import parser as date_parser # pylint: disable=import-error
from google.api_core import exceptions # pylint: disable=import-error
from googleapiclient.discovery_cache.base import Cache # pylint: disable=import-error
import logging
import sys
import argparse
from dataclasses import dataclass
from dataclasses_json import dataclass_json # pylint: disable=import-error
def x_days_ago(rotation_period):
    """
    Compute the cutoff datetime `rotation_period` days before now.

    Args:
        rotation_period - The rotation period in days (default is 90)

    Returns:
        A naive datetime marking the rotation cutoff.
    """
    cutoff = datetime.now() - timedelta(days=rotation_period)
    logging.info("%s days ago was %s \n", rotation_period, cutoff)
    return cutoff
def create_project_list(service):
    """
    Collect every GCP project visible to the caller.

    Pages through the Cloud Resource Manager projects.list API until the
    service reports no further pages.

    Args:
        service - A resource manager service created via create_service()

    Returns:
        A list of project resource dicts.
    """
    logging.info(
        "Getting GCP project list. This may take a minute.. \n")
    all_projects = []
    # Walk every results page; list_next returns None after the last page.
    page_request = service.projects().list()
    while page_request is not None:
        page = page_request.execute(num_retries=3)
        all_projects.extend(page.get("projects", []))
        page_request = service.projects().list_next(page_request, page)
    return all_projects
def key_analysis(projects, rotation_period):
    """
    Performs our rotation analysis on the available API keys.

    Fetches each project's API keys over REST, compares every key's
    creation date against the rotation cutoff, and logs the keys that do
    and do not need rotation.

    Args:
        projects - A list of GCP projects and their metadata
        rotation_period - The rotation period in days (default is 90)
    """
    logging.info(
        "Grabbing keys and performing analysis for a rotation periods of %s days.. \n", rotation_period) # pylint: disable = line-too-long
    # Get the date x (default 90) days ago
    rotation_date = x_days_ago(rotation_period)
    # Generates an access token
    # for our API requests
    access_token = create_token()
    # This variable is used to hold our keys depending on their creation date
    keys_needing_rotation=[]
    keys_not_needing_rotation=[]
    # For each project, extract the project ID
    for project in projects:
        project_id = project["projectId"]
        try:
            # Use the project ID and access token to find
            # the API keys for each project
            # NOTE(review): this is the v1 apikeys endpoint; confirm it is
            # still served (v2 is the current surface).
            apikeys = requests.get(
                f"https://apikeys.googleapis.com/v1/projects/{project_id}/apiKeys/", # pylint: disable = line-too-long
                params={"access_token": access_token}
            ).json()
        except exceptions.PermissionDenied:
            # Skip projects the caller cannot inspect.
            continue
        # If API keys exist, proceed
        if "keys" in apikeys:
            # Access our nested keys
            # so we can iterate through the list
            apikeys = apikeys["keys"]
            # For each key in our dictionaries
            # (API keys are dictionaries)
            for apikey in apikeys:
                # Google removed the "createdBy" field
                # so only legacy keys have it
                if "createdBy" in apikey:
                    # Create our API key object
                    # if it has "createdBy"
                    key_object = ApiKey(
                        apikey["keyId"],
                        apikey["displayName"],
                        apikey["createdBy"],
                        apikey["createTime"],
                        project_id)
                else:
                    # Create our API key object
                    # if it does NOT have "createdBy"
                    key_object = ApiKey(
                        apikey["keyId"],
                        apikey["displayName"],
                        "None",
                        apikey["createTime"],
                        project_id)
                # We need to convert
                # our creation time for comparison
                converted_creation_date = time_convert(key_object)
                # Extract API Key ID for logging
                key_id = key_object.key_id
                # If our key is older than x days (default 90)
                # based on our compare_dates function
                # add api key to appropriate variable container
                logging.info("Checking API key: %s creation date.. \n", key_id)
                # Convert to JSON for logging
                key_object_json = key_object.to_json()
                if compare_dates(converted_creation_date, rotation_date):
                    keys_needing_rotation.append(key_object_json)
                else:
                    keys_not_needing_rotation.append(key_object_json)
    # Format our API keys
    # (rebinding the lists to newline-joined strings for the log output)
    keys_needing_rotation = "\n".join(keys_needing_rotation)
    keys_not_needing_rotation = "\n".join(keys_not_needing_rotation)
    # Once analysis is complete for all keys,
    # log results
    if keys_needing_rotation:
        logging.warning(
            "Found API keys older than %s days. Please rotate: \n%s \n", rotation_period, keys_needing_rotation) # pylint: disable = line-too-long
    if keys_not_needing_rotation:
        logging.info(
            "The following API key(s) are not older than %s days: \n%s", rotation_period, keys_not_needing_rotation) # pylint: disable = line-too-long
def create_token():
    """
    Mint a short-lived OAuth2 access token via the gcloud CLI.

    As of March 2021, Google's API Keys SDK is in a private alpha and not
    accessible for public usage
    (https://cloud.google.com/sdk/gcloud/reference/alpha/services/api-keys),
    so this tool shells out to gcloud and uses the bearer token with raw
    REST calls until the SDK is usable by the public.

    Returns:
        The access token string, stripped of trailing whitespace.

    Raises:
        subprocess.CalledProcessError: if gcloud exits non-zero.
    """
    completed = subprocess.run(
        # Argv list: no shell involvement needed for a fixed command.
        ["gcloud", "auth", "print-access-token"],
        shell=False,
        check=True,
        stdout=subprocess.PIPE,
        universal_newlines=True)
    # Bug fix: gcloud terminates its output with a newline; previously the
    # raw stdout (token + "\n") was passed on as the "access_token" query
    # parameter. Strip it so the token can be used verbatim.
    return completed.stdout.strip()
def compare_dates(converted_creation_date, rotation_date):
    """
    Report whether an API key's creation date precedes the rotation cutoff.

    Args:
        converted_creation_date - datetime the key was created.
        rotation_date - datetime "rotation_period" days ago (default 90).
            Example: 2020-09-18 13:38:52.943663

    Returns:
        True when the key is older than the cutoff (needs rotation),
        otherwise False.
    """
    # A creation date strictly earlier than the cutoff means the key is due.
    return converted_creation_date < rotation_date
def create_service():
    """
    Creates the GCP Cloud Resource Manager v1 API client.

    Returns:
        A googleapiclient service object for cloudresourcemanager.
    """
    # The custom cache suppresses googleapiclient's noisy discovery-cache
    # log entries (see MemoryCache below).
    return googleapiclient.discovery.build(
        "cloudresourcemanager",
        "v1",
        cache=MemoryCache())
def time_convert(key_object):
    """
    Convert an API key's createTime to a datetime at day precision.

    Grabs the date by taking everything before the "T".
    Example value: 2021-03-15T15:24:39.553722 -> 2021-03-15

    Args:
        key_object - The API key object (only .create_time is read)

    Returns:
        A datetime for the key's creation date (midnight, naive).
    """
    # Extract our creation time from our API key
    create_time = key_object.create_time
    # Grab the date portion of the RFC3339 timestamp
    date_create = create_time.split("T")[0]
    # Improvement: the date part is plain ISO "YYYY-MM-DD", so the stdlib
    # parser suffices — the third-party dateutil dependency is no longer
    # needed here.
    converted_create_time = datetime.fromisoformat(date_create)
    return converted_create_time
@dataclass_json  # adds to_json()/from_json(), used when logging keys
@dataclass
class ApiKey:
    """
    GCP API key record used throughout this script.

    Args:
        key_id - The API key ID
        display_name - The API key display name
        created_by - The user who created the API key (deprecated; "None"
            when the API omits the field)
        create_time - The creation date/time of the API key (RFC3339 string)
        project_id - The GCP project where the API key lives
    """
    key_id: str
    display_name: str
    created_by: str
    create_time: str
    project_id: str
class MemoryCache(Cache):
    """
    In-memory discovery-document cache for googleapiclient.

    Passing this to discovery.build() avoids the noisy log entries emitted
    when the default file-based cache cannot be used.
    """
    # Class-level store: shared by every instance for the process lifetime.
    _CACHE = {}
    def get(self, url):
        """Return the cached document for url, or None."""
        return MemoryCache._CACHE.get(url)
    def set(self, url, content):
        """Cache the document content under url."""
        MemoryCache._CACHE[url] = content
def main(args):
    """Entry point: wires the helpers together.

    Args:
        args: Parsed CLI arguments (or their defaults).
    """
    # Rotation window in days, taken from the "rotation_period" argument.
    days = args.rotation_period
    # Build the resource manager service.
    service = create_service()
    # Every GCP project visible with the caller's permissions.
    projects = create_project_list(service)
    # Analyze the API keys of each project against the rotation window.
    key_analysis(projects, days)
if __name__ == "__main__":
# Configure our logging
logging.basicConfig(
format="%(message)s",
stream=sys.stdout,
level=logging.INFO)
# Creates our argument parser
parser = argparse.ArgumentParser(
description="Find API keys in your GCP \
org and check for keys older \
than \"rotation_period\".")
# Add the rotation period in days to the command line
parser.add_argument(
"rotation_period",
type=int,
nargs="?",
default="90",
help="The rotation period (in days) \
to check your API keys against. \
Default is 90 days.")
# Parse our arguments
args = parser.parse_args()
main(args)
| 30.550725 | 150 | 0.645351 |
88cc10bbfc1cbe5dbee1765885cf7b717a380735 | 1,822 | py | Python | samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateTensorboardTimeSeries
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_async]
from google.cloud import aiplatform_v1
async def sample_update_tensorboard_time_series():
    """Update a TensorboardTimeSeries via the async AI Platform client
    and print the server response."""
    # Create a client
    client = aiplatform_v1.TensorboardServiceAsyncClient()
    # Initialize request argument(s)
    # display_name and value_type are the only fields populated here.
    tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
    tensorboard_time_series.display_name = "display_name_value"
    tensorboard_time_series.value_type = "BLOB_SEQUENCE"
    request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest(
        tensorboard_time_series=tensorboard_time_series,
    )
    # Make the request
    response = await client.update_tensorboard_time_series(request=request)
    # Handle the response
    print(response)
# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_async]
| 36.44 | 97 | 0.789791 |
65c1d37f49b21fda86202a5e4616917bc72a97ce | 407 | py | Python | src/core/migrations/0126_auto_20191220_1844.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0126_auto_20191220_1844.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0126_auto_20191220_1844.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2019-12-20 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Reference.url optional
    # (blank=True, null=True) with a 500-character limit.
    dependencies = [
        ('core', '0125_auto_20191208_0843'),
    ]
    operations = [
        migrations.AlterField(
            model_name='reference',
            name='url',
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
    ]
1a8b5b81084bc69184e78290368aa341e8aa5093 | 65,886 | py | Python | CPAC/qc/utils.py | ccraddock/C-PAC | bc94baaa2aa83926b47f20d51f7044bc3b810273 | [
"BSD-3-Clause"
] | null | null | null | CPAC/qc/utils.py | ccraddock/C-PAC | bc94baaa2aa83926b47f20d51f7044bc3b810273 | [
"BSD-3-Clause"
] | null | null | null | CPAC/qc/utils.py | ccraddock/C-PAC | bc94baaa2aa83926b47f20d51f7044bc3b810273 | [
"BSD-3-Clause"
] | 1 | 2017-02-21T18:16:06.000Z | 2017-02-21T18:16:06.000Z | import commands
import numpy as np
import matplotlib
import pkg_resources as p
matplotlib.use('Agg')
import os
import nipype.pipeline.engine as pe
from nipype.interfaces import afni
import nipype.interfaces.utility as util
def append_to_files_in_dict_way(list_files, file_):
    """Combine files so that each resource appears exactly once.

    Parameters
    ----------
    list_files : list
        paths of text files to be rewritten in place

    file_ : string
        path of the reference text file whose lines are merged in

    Returns
    -------
    None

    Notes
    -----
    Rewrites every file in ``list_files`` so it finally contains the
    union of its own lines and the lines of ``file_``, each line
    appearing exactly once (line order is not guaranteed).
    """
    # Collect the unique lines of the reference file.
    one_dict = {}
    with open(file_, 'r') as f_1:
        for line in f_1:
            line = line.rstrip('\r\n')
            if line not in one_dict:
                one_dict[line] = 1

    for f_ in list_files:
        with open(f_, 'r') as f_2:
            lines = [line.rstrip('\r\n') for line in f_2.readlines()]

        # Lines unique to this file ...
        two_dict = {}
        for line in lines:
            if line not in one_dict:
                two_dict[line] = 1
        # ... plus every line of the reference file = the union.
        for key in one_dict:
            if key not in two_dict:
                two_dict[key] = 1

        # BUG FIX: the original wrote with the Python-2-only
        # ``print >> f_2`` statement and ended with ``f_2.close`` (no
        # parentheses), so the handle was never explicitly closed.
        # ``write(key + '\n')`` emits the same bytes and the ``with``
        # block guarantees the file is flushed and closed.
        with open(f_, 'w') as f_2:
            for key in two_dict:
                f_2.write(key + '\n')
def first_pass_organizing_files(qc_path):
    """First pass at organizing the qc txt files.

    Parameters
    ----------
    qc_path : string
        existing path of qc_files_here directory

    Returns
    -------
    None

    Notes
    -----
    Combines files with the same strategy.  This first pass merges file
    names where one normalized strategy name is a substring of another;
    the merged file is removed afterwards.
    """
    import os
    from CPAC.qc.utils import append_to_files_in_dict_way

    if not os.path.exists(qc_path):
        os.makedirs(qc_path)

    strat_dict = {}
    for file_ in sorted(os.listdir(qc_path), reverse=True):
        if '.txt' not in file_:
            continue
        file_ = os.path.join(qc_path, file_)

        # Normalize the file name into a strategy key.
        str_ = os.path.basename(file_)
        str_ = str_.replace('qc_', '')
        str_ = str_.replace('scan_', '')
        str_ = str_.replace('.txt', '')
        str_ = str_.replace('____', '_')
        str_ = str_.replace('___', '_')
        str_ = str_.replace('__', '_')

        # Move the fwhm component after the hp/lp component so keys for
        # the same strategy compare equal regardless of suffix order.
        if '_hp_' in str_ and '_fwhm_' in str_ and \
                not ('_bandpass_freqs_' in str_):
            str_, fwhm_val = str_.split('_fwhm_')
            fwhm_val = '_fwhm_' + fwhm_val
            str_, hp_lp_ = str_.split('_hp_')
            hp_lp_ = '_hp_' + hp_lp_
            str_ = str_ + fwhm_val + hp_lp_

        # BUG FIX: the original tested ``strat_dict.keys() == []``, which
        # is always False on Python 3 (dict_keys never equals a list);
        # ``not strat_dict`` is equivalent on Python 2 and correct on both.
        if not strat_dict:
            strat_dict[str_] = [file_]
        else:
            flag_ = 0
            for key_ in strat_dict.keys():
                if str_ in key_:
                    # This strategy is a substring of an existing one:
                    # merge the file into the superset strategy.
                    append_to_files_in_dict_way(strat_dict[key_], file_)
                    flag_ = 1
            if flag_ == 1:
                # The contents were merged; drop the redundant file.
                os.system('rm -f %s' % file_)
            else:
                strat_dict[str_] = [file_]
def second_pass_organizing_files(qc_path):
    """Second Pass at organizing qc txt files.

    Parameters
    ----------
    qc_path : string
        existing path of qc_files_here directory

    Returns
    -------
    None

    Notes
    -----
    Combines files with same strategy. combines files for derivative
    falff , alff with others
    """
    import os
    from CPAC.qc.utils import append_to_files_in_dict_way
    qc_files = os.listdir(qc_path)
    strat_dict = {}
    # NOTE(review): got_hp_lp and got_bp are assigned but never used.
    got_hp_lp = 0
    got_bp = 0
    for file_ in sorted(qc_files, reverse=True):
        # Only the qc text files are of interest here.
        if not ('.txt' in file_):
            continue
        str_ = file_
        file_ = os.path.join(qc_path, file_)
        # Normalize the file name into a strategy key.
        str_ = str_.replace('qc_scan_', '')
        str_ = str_.replace('.txt', '')
        str_ = str_.replace('____', '_')
        str_ = str_.replace('___', '_')
        str_ = str_.replace('__', '_')
        fwhm_val_ = ''
        # organize all derivatives excluding alff falff
        if '_bandpass_freqs_' in str_:
            if not str_ in strat_dict:
                strat_dict[str_] = [file_]
            else:
                # NOTE(review): a bare ``raise`` with no active exception
                # does not re-raise anything useful; it raises a
                # "no active exception" error at runtime.
                print 'Error: duplicate keys for files in QC 2nd file_org ' \
                          'pass: %s %s' % (strat_dict[str_], file_)
                raise
        # organize alff falff
        elif ('_hp_' in str_) and ('_lp_' in str_):
            key_ = ''
            key_1 = ''
            hp_lp_ = ''
            # Split the name into a base key (and the fwhm component, if
            # any) so alff/falff files can match an existing strategy key.
            if '_fwhm_' in str_:
                key_1 = ''
                key_, hp_lp_ = str_.split('_hp_')
                ignore, fwhm_val_ = hp_lp_.split('_fwhm_')
                hp_lp_ = '_hp_' + ignore
                key_1 = '_fwhm_' + fwhm_val_
            else:
                key_, hp_lp_ = str_.split('_hp_')
                hp_lp_ = '_hp_' + hp_lp_
            flag_ = 0
            for key in strat_dict.keys():
                if (key_ in key) and (key_1 in key):
                    # Merge this file into the matching strategy, then
                    # rename that strategy's file to carry the hp/lp tag.
                    append_to_files_in_dict_way(strat_dict[key], file_)
                    str_ = strat_dict[key][0].replace('.txt', '')
                    new_fname = str_ + hp_lp_ + '.txt'
                    os.system('mv %s %s' %(strat_dict[key][0], new_fname))
                    # NOTE(review): deleting from strat_dict while
                    # iterating .keys() is only safe on Python 2, where
                    # keys() returns a list snapshot.
                    del strat_dict[key]
                    flag_ = 1
            if flag_ == 1:
                # The file was merged into an existing strategy; drop it.
                os.system('rm -f %s' % file_)
            else:
                if not str_ in strat_dict:
                    strat_dict[str_] = [file_]
                else:
                    print 'Error: duplicate keys for files in QC 2nd file_org ' \
                              'pass: %s %s' % (strat_dict[str_], file_)
                    raise
def organize(dict_, all_ids, png_, new_dict):
    """Organize pngs according to their IDs into the new_dict dictionary.

    Parameters
    ----------
    dict_ : dictionary
        mapping of png id no -> png type (montage/plot/hist)

    all_ids : list
        list of all png id numbers seen so far (mutated in place)

    png_ : string
        path to a png

    new_dict : dictionary
        mapping of id -> list of matching png paths (mutated in place)

    Returns
    -------
    all_ids : list
        the (possibly extended) list of png id nos
    """
    for id_no, png_type in dict_.items():
        # A png belongs to an id when its path contains that id's type.
        if png_type in png_:
            # setdefault replaces the original's append-then-copy dance
            # with a single in-place append (observable result identical).
            new_dict.setdefault(id_no, []).append(png_)
            if id_no not in all_ids:
                all_ids.append(id_no)
    return all_ids
def grp_pngs_by_id(pngs_, qc_montage_id_a, qc_montage_id_s, qc_plot_id, qc_hist_id):
    """Group png paths by their qc ids.

    Parameters
    ----------
    pngs_ : list
        all png paths

    qc_montage_id_a : dictionary
        axial montage ids; key: id no, value: png type

    qc_montage_id_s : dictionary
        sagittal montage ids; key: id no, value: png type

    qc_plot_id : dictionary
        plot png ids; key: id no, value: png type

    qc_hist_id : dictionary
        histogram png ids; key: id no, value: png type

    Returns
    -------
    dict_a, dict_s, dict_hist, dict_plot : dictionaries
        each maps an id no to the list of matching png paths
        (axial montages, sagittal montages, histograms, plots)

    all_ids : list
        every id no that matched at least one png
    """
    from CPAC.qc.utils import organize

    dict_a, dict_s, dict_hist, dict_plot = {}, {}, {}, {}
    all_ids = []

    # For each png, try every id map in the same order the original
    # code did (axial, sagittal, plot, histogram).
    buckets = ((qc_montage_id_a, dict_a),
               (qc_montage_id_s, dict_s),
               (qc_plot_id, dict_plot),
               (qc_hist_id, dict_hist))
    for png_ in pngs_:
        for id_map, grouped in buckets:
            all_ids = organize(id_map, all_ids, png_, grouped)

    # Return shallow copies, matching the original interface.
    return (dict(dict_a), dict(dict_s), dict(dict_hist), dict(dict_plot),
            list(all_ids))
def add_head(f_html_, f_html_0, f_html_1, name=None):
    """Write HTML Headers to various html files.

    Parameters
    ----------
    f_html_ : string
        path to main html file

    f_html_0 : string
        path to navigation bar html file

    f_html_1 : string
        path to html file contaning pngs and plots

    name : string, optional
        scan/strategy identifier echoed into the content page header

    Returns
    -------
    None
    """
    # NOTE(review): despite the docstring, these arguments are open file
    # objects, not path strings -- ``.name`` and ``print >>`` are used on
    # them below.
    # Main file: a two-column frameset pointing at nav and content files.
    print >>f_html_, "<html>"
    print >>f_html_, "<head>"
    print >>f_html_, "<title>C-PAC QC</title>"
    print >>f_html_, "</head>"
    print >>f_html_, ""
    print >>f_html_, "<frameset cols=\"20%,80%\">"
    print >>f_html_, ""
    print >>f_html_, "    <frame src=\"%s\" name=\"menu\"><frame src=\"%s" \
                     "\" name=\"content\">" \
                     "</frameset>" %(f_html_0.name, f_html_1.name)
    print >>f_html_, ""
    print >>f_html_, "</html>"
    # Navigation bar: stylesheets, logo and the TOC list opening tag
    # (entries are appended later by feed_line_nav).
    print >>f_html_0, "<html>"
    print >>f_html_0, "<link href=\"%s\" rel=\"stylesheet\" " \
                      "media=\"screen\">"%(p.resource_filename('CPAC',"GUI/resources/html/_static/nature.css"))
    print >>f_html_0, "<link href=\"%s\" rel=\"stylesheet\" " \
                      "media=\"screen\">"%(p.resource_filename('CPAC',"GUI/resources/html/_static/pygments.css"))
    print >>f_html_0, "<head>"
    print >>f_html_0, "<base target=\"content\">"
    print >>f_html_0, "</head>"
    print >>f_html_0, "<body bgcolor = \"#FFFF00\">"
    print >>f_html_0, "<div>"
    print >>f_html_0, "<div class=\"sphinxsidebarwrapper\">"
    print >>f_html_0, "<p class=\"logo\"><a href=\"" \
                      "https://fcp-indi.github.io\" target=\"website\">"
    print >>f_html_0, "<p style = \"font-family: 'Times-New-Roman'\">"
    print >>f_html_0, "<img class=\"logo\" src=\"%s\" " \
                      "alt=\"Logo\"/>"%(p.resource_filename('CPAC', "GUI/resources/html/_static/cpac_logo.jpg"))
    print >>f_html_0, "</a></p>"
    print >>f_html_0, "<h3>Table Of Contents</h3>"
    print >>f_html_0, "<ul>"
    # Content page: stylesheet link, the 'reverse' anchor (TOP link
    # target) and, when given, the scan/strategy banner.
    print >>f_html_1, '<link href="default.css" rel="stylesheet" ' \
                      'type="text/css" />'
    print >>f_html_1, "<html>"
    print >>f_html_1, "</style>"
    print >>f_html_1, "<body>"
    print >>f_html_1, "<a name='reverse'>"
    if name:
        print >>f_html_1, "<br><h1>C-PAC Visual Data Quality Control " \
                          "Interface</h1>"
        print >>f_html_1, "<h3>C-PAC Website: <a href=\"" \
                          "https://fcp-indi.github.io/\" target=" \
                          "\"website\">https://fcp-indi.github.io</a>" \
                          "<br><br>"
        print >>f_html_1, "C-PAC Support Forum: <a href=\"" \
                          "https://groups.google.com/forum/#!forum" \
                          "/cpax_forum\" target=\"forum\">" \
                          "https://groups.google.com/forum/#!forum/" \
                          "cpax_forum</a>"
        print >>f_html_1, "<hr><br>Scan and strategy identifiers:" \
                          "<br>{0}".format(name)
        print >>f_html_1, "</h3><br>"
def add_tail(f_html_, f_html_0, f_html_1):
    """Write closing HTML tags to the qc html files.

    Parameters
    ----------
    f_html_ : file object
        main html file; nothing is appended here because its markup is
        written completely by ``add_head``

    f_html_0 : file object
        navigation bar html file

    f_html_1 : file object
        html file contaning pngs and plots

    Returns
    -------
    None
    """
    # PORTABILITY FIX: the original used the Python-2-only ``print >>``
    # statement; plain ``write`` calls emit exactly the same bytes and
    # also work on Python 3.
    for tag in ("</ul>", "</div>", "</div>", "</body>", "</html>"):
        f_html_0.write(tag + "\n")
    for tag in ("</body>", "</html>"):
        f_html_1.write(tag + "\n")
# Ordered label rules for the navigation bar.  Each entry is
# (substring_match, key, label): substring_match=True reproduces the
# original's ``image_name.find(key) > -1`` checks, False the exact
# ``==`` checks.  Rules are applied in order and the LAST match wins,
# which is exactly what the original chain of sequential reassignments
# did.
_NAV_LABELS = (
    (False, 'skullstrip_vis', 'Visual Result of Skull Strip'),
    (False, 'csf_gm_wm', 'Grey Matter, White Matter & CSF'),
    (False, 'snr', 'Signal to Noise Ratio'),
    (True, 'snr_hist', 'Histogram of Signal to Noise Ratio'),
    (True, 'mni_normalized', 'MNI Edge Overlapped on Normalized Anatomical'),
    (False, 'mean_func_with_t1_edge', 'T1 Edge Overlapped on Mean Functional Image'),
    (False, 'mean_func_with_mni_edge', 'MNI Edge Overlapped on Mean Functional Image'),
    (True, 'movement_trans_plot', 'Head Displacement Plot'),
    (True, 'movement_rot_plot', 'Head Rotation Plot'),
    (True, 'fd_plot', 'Framewise Displacement Plot'),
    (False, 'sca_roi_smooth', 'Seed-based Correlation Analysis'),
    (False, 'sca_roi_smooth_hist', 'Histogram of Seed-based Correlation Analysis'),
    (False, 'centrality_smooth', 'Network Centrality'),
    (False, 'centrality_smooth_hist', 'Histogram of Network Centrality'),
    (False, 'temporal_dual_regression_smooth', 'Temporal Dual Regression'),
    (False, 'temporal_dual_regression_smooth_hist', 'Histogram of Temporal Dual Regression'),
    (False, 'vmhc_smooth', 'Voxel-Mirrored Homotopic Connectivity'),
    (False, 'vmhc_smooth_hist', 'Histogram of Voxel-Mirrored Homotopic Connectivity'),
    (False, 'reho_smooth', 'Regional Homogeneity'),
    (False, 'reho_smooth_hist', 'Histogram of Regional Homogeneity'),
    (False, 'alff_smooth', 'Amplitude of Low-Frequency Fluctuation'),
    (False, 'alff_smooth_hist', 'Histogram of Amplitude of Low-Frequency Fluctuation'),
    (False, 'falff_smooth', 'fractional Amplitude of Low-Frequency Fluctuation'),
    (False, 'falff_smooth_hist', 'Histogram of fractional Amplitude of Low-Frequency Fluctuation'),
)


def feed_line_nav(id_, image_name, anchor, f_html_0, f_html_1):
    """Write one table-of-contents entry to the navigation bar html file.

    Parameters
    ----------
    id_ : string
        id of the image (unused; kept for interface compatibility)

    image_name : string
        name of image

    anchor : string
        anchor id of the image

    f_html_0 : file object
        navigation bar html file

    f_html_1 : file object
        html file contaning pngs and plots; only its ``name`` is used to
        build the link target

    Returns
    -------
    None
    """
    # Fall back to the raw image name when no rule matches.
    image_readable = image_name
    for substring, key, label in _NAV_LABELS:
        if (substring and image_name.find(key) > -1) or \
                (not substring and image_name == key):
            image_readable = label
    # PORTABILITY FIX: Python-2-only ``print >>`` replaced by an
    # equivalent ``write`` emitting identical bytes.
    f_html_0.write("<li><a href='%s#%s'> %s </a></li>\n"
                   % (f_html_1.name, anchor, image_readable))
# Ordered label rules for the content page; (substring_match, key,
# label), applied in order with the LAST match winning -- this mirrors
# the original sequential if-chain exactly (substring_match=True is the
# ``find(key) > -1`` form, False the exact ``==`` form).
_BODY_LABELS = (
    (False, 'skullstrip_vis', 'Visual Result of Skull Strip'),
    (False, 'csf_gm_wm', 'Grey Matter, White Matter & CSF'),
    (False, 'snr', 'Signal to Noise Ratio'),
    (True, 'snr_hist', 'Histogram of Signal to Noise Ratio'),
    (True, 'mni_normalized', 'MNI Edge Overlapped on Normalized Anatomical'),
    (False, 'mean_func_with_t1_edge', 'T1 Edge Overlapped on Mean Functional Image'),
    (False, 'mean_func_with_mni_edge', 'MNI Edge Overlapped on Mean Functional Image'),
    (True, 'movement_trans_plot', 'Head Displacement Plot'),
    (True, 'movement_rot_plot', 'Head Rotation Plot'),
    (True, 'fd_plot', 'Framewise Displacement Plot'),
    (False, 'sca_roi_smooth', 'Seed-based Correlation Analysis'),
    (False, 'sca_roi_smooth_hist', 'Histogram of Seed-based Correlation Analysis'),
    (False, 'centrality_smooth', 'Network Centrality'),
    (False, 'centrality_smooth_hist', 'Histogram of Network Centrality'),
    (False, 'temporal_dual_regression_smooth', 'Temporal Dual Regression'),
    (False, 'temporal_dual_regression_smooth_hist', 'Histogram of Temporal Dual Regression'),
    (False, 'vmhc_smooth', 'Voxel-Mirrored Homotopic Connectivity'),
    (False, 'vmhc_smooth_hist', 'Histogram of Voxel-Mirrored Homotopic Connectivity'),
    (False, 'reho_smooth', 'Regional Homogeneity'),
    (False, 'reho_smooth_hist', 'Histogram of Regional Homogeneity'),
    (False, 'alff_smooth', 'Amplitude of Low-Frequency Fluctuation'),
    (False, 'alff_smooth_hist', 'Histogram of Amplitude of Low-Frequency Fluctuation'),
    (False, 'falff_smooth', 'fractional Amplitude of Low-Frequency Fluctuation'),
    (False, 'falff_smooth_hist', 'Histogram of fractional Amplitude of Low-Frequency Fluctuation'),
)


def feed_line_body(image_name, anchor, image, f_html_1):
    """Embed one image (with heading and TOP link) into the content page.

    Parameters
    ----------
    image_name : string
        name of image

    anchor : string
        anchor id of the image

    image : string
        path to the image

    f_html_1 : file object
        html file contaning pngs and plots

    Returns
    -------
    None
    """
    # Fall back to the raw image name when no rule matches.
    image_readable = image_name
    for substring, key, label in _BODY_LABELS:
        if (substring and image_name.find(key) > -1) or \
                (not substring and image_name == key):
            image_readable = label
    # PORTABILITY FIX: Python-2-only ``print >>`` replaced by ``write``
    # calls producing byte-identical output.
    f_html_1.write("<h3><a name='%s'>%s</a> <a href='#reverse'>TOP</a></h3>\n"
                   % (anchor, image_readable))
    f_html_1.write("<br><img src='%s', alt='%s'>\n" % (image, image_readable))
def get_map_id(str_, id_):
"""Returns the proper map name given identifier for it.
Parameters
----------
str_ : string
string containing text for identifier
id_ : string
string for identifier
Returns
-------
map_id : string
proper name for a map
"""
map_id = None
'''
id_: centrality_
str_: degree_centrality_binarize_99_1mm_centrality_outputs_a.png
str_ post-split: degree_centrality_binarize_99_1mm_centrality_outputs
180515-20:46:14,382 workflow ERROR:
[!] Error: The QC interface page generator ran into a problem.
Details: too many values to unpack
'''
# so whatever goes into "type_" and then "map_id" becomes the "Map: "
# Mask: should be the ROI nifti, but right now it's the nuisance strat...
# Measure: should be eigenvector binarize etc., but it's just "centrality_outputs"
if 'centrality' in id_ or 'lfcd' in id_:
# TODO: way too reliant on a very specific string format
# TODO: needs re-factoring
str_ = str_.split('_a.png')[0]
type_, str_ = str_.rsplit(id_, 1)
if "_99_1mm_" in type_:
type_ = type_.replace("_99_1mm_", "")
map_id = type_
'''
str_ = str_.split('_')[0]
type_ = type_.replace('_', '')
map_id = '_'.join([type_, id_, str_])
'''
return map_id
else:
str_ = str_.split(id_)[1]
str_ = str_.split('_')[0]
map_id = '_'.join([id_, str_])
return map_id
def get_map_and_measure(png_a):
    """Extract the map name and measure name from a png path.

    Parameters
    ----------
    png_a : string
        path of the png

    Returns
    -------
    map_name : string or None
        proper name for the map (None when no known marker is present)

    measure_name : string or None
        proper name for the measure (taken from the directory layout)
    """
    import os
    from CPAC.qc.utils import get_map_id

    # Smoothed outputs ('_fwhm_' in the path) live one directory deeper,
    # so the measure name is then the grandparent directory's basename.
    parent_dir = os.path.dirname(png_a)
    if '_fwhm_' in png_a:
        measure_name = os.path.basename(os.path.dirname(parent_dir))
    else:
        measure_name = os.path.basename(parent_dir)

    fname = os.path.basename(png_a)
    map_name = None
    # Same marker/prefix pairs and order as the original if-chain; a
    # later matching marker overrides an earlier one.
    for marker, prefix in (('sca_tempreg', 'maps_roi_'),
                           ('sca_roi', 'ROI_'),
                           ('dr_tempreg', 'temp_reg_map_'),
                           ('centrality', 'centrality_')):
        if marker in png_a:
            map_name = get_map_id(fname, prefix)
    return map_name, measure_name
def feed_lines_html(id_, dict_a, dict_s, dict_hist, dict_plot,
                    qc_montage_id_a, qc_montage_id_s, qc_plot_id, qc_hist_id,
                    f_html_0, f_html_1):
    """Write HTML Tags to various html files and embeds images.

    Writes, for one qc id, the navigation entries and the image/plot
    markup into the navbar and content html files.

    Parameters
    ----------
    id_ : string
        the qc id being rendered

    dict_a : dictionary
        dictionary of axial montages key : id no
        value is list of paths to axial montages

    dict_s : dictionary
        dictionary of sagittal montages key : id no
        value is list of paths to sagittal montages

    dict_plot : dictionary
        dictionary of plot pngs key : id no
        value is list of paths to plots

    dict_hist : dictionary
        dictionary of histogram pngs key : id no
        value is list of paths to histogram pngs

    qc_montage_id_a : dictionary
        dictionary of axial montages key : id no
        value is list of png types

    qc_montage_id_s : dictionary
        dictionary of sagittal montages key : id no
        value is list of png types

    qc_plot_id : dictionary
        dictionary of plot pngs key : id no
        value is list of png types

    qc_hist_id : dictionary
        dictionary of histogram pngs key : id no
        value is list of png types

    f_html_0 : file object
        navigation bar html file

    f_html_1 : file object
        html file contaning pngs and plots

    Returns
    -------
    None
    """
    from CPAC.qc.utils import feed_line_nav
    from CPAC.qc.utils import feed_line_body
    from CPAC.qc.utils import get_map_and_measure
    if id_ in dict_a:
        # Sort so axial/sagittal/histogram lists line up index-by-index.
        dict_a[id_] = sorted(dict_a[id_])
        dict_s[id_] = sorted(dict_s[id_])
        if id_ in dict_hist:
            dict_hist[id_] = sorted(dict_hist[id_])
        idxs = len(dict_a[id_])
        for idx in range(0, idxs):
            png_a = dict_a[id_][idx]
            png_s = dict_s[id_][idx]
            png_h = None
            if id_ in dict_hist:
                png_h = dict_hist[id_][idx]
            measure_name = None
            map_name = None
            # Multiple pngs for one id means per-map outputs; recover
            # the map and measure names from the png path.
            if idxs > 1:
                map_name, measure_name = get_map_and_measure(png_a)
            id_a = str(id_)
            id_s = str(id_) + '_s'
            id_h = str(id_) + '_' + str(id_)
            image_name_a = None
            image_name_h = None
            image_name_a_nav = qc_montage_id_a[id_].replace('_a', '')
            if id_ in qc_hist_id:
                image_name_h_nav = qc_hist_id[id_]
            if map_name is not None:
                image_name_a = 'Measure: ' + qc_montage_id_a[id_].replace('_a', '') + ' Mask: ' + measure_name + ' Map: ' + map_name
                if id_ in qc_hist_id:
                    image_name_h = 'Measure: ' + qc_hist_id[id_] + ' Mask:'+ measure_name + ' Map: ' + map_name
            else:
                image_name_a = qc_montage_id_a[id_].replace('_a', '')
                if id_ in qc_hist_id:
                    image_name_h = qc_hist_id[id_]
            # Anchors after the first index get a per-index suffix so
            # each montage/histogram has a unique anchor on the page.
            if idx != 0:
                id_a = '_'.join([id_a, str(idx), 'a'])
                id_s = '_'.join([id_s, str(idx), 's'])
                id_h = '_'.join([id_h, str(idx), 'h' ])
            if idx == 0:
                # NOTE(review): this whole chain computes image_readable
                # but the value is never used afterwards -- feed_line_nav
                # receives image_name_a_nav and derives its own label.
                if image_name_a_nav == 'skullstrip_vis':
                    image_readable = 'Visual Result of Skull Strip'
                if image_name_a_nav == 'csf_gm_wm':
                    image_readable = 'Grey Matter, White Matter & CSF'
                if image_name_a_nav == 'snr':
                    image_readable = 'Signal to Noise Ratio'
                if image_name_a_nav == 'snr_hist':
                    image_readable = 'Histogram of Signal to Noise Ratio'
                if image_name_a_nav == 'mean_func_with_t1_edge':
                    image_readable = 'T1 Edge Overlapped on Mean Functional Image'
                if image_name_a_nav == 'mean_func_with_mni_edge':
                    image_readable = 'MNI Edge Overlapped on Mean Functional Image'
                if image_name_a_nav == 'movement_trans_plot':
                    image_readable = 'Head Displacement Plot'
                if image_name_a_nav == 'movement_rot_plot':
                    image_readable = 'Head Rotation Plot'
                if image_name_a_nav == 'fd_plot':
                    image_readable = 'Framewise Displacement Plot'
                if image_name_a_nav == 'sca_roi_smooth':
                    image_readable = 'Seed-based Correlation Analysis'
                if image_name_a_nav == 'sca_roi_smooth_hist':
                    image_readable = 'Histogram of Seed-based Correlation Analysis'
                if image_name_a_nav == 'centrality_smooth':
                    image_readable = 'Network Centrality'
                if image_name_a_nav == 'centrality_smooth_hist':
                    image_readable = 'Histogram of Network Centrality'
                if image_name_a_nav == 'temporal_dual_regression_smooth':
                    image_readable = 'Temporal Dual Regression'
                if image_name_a_nav == 'temporal_dual_regression_smooth_hist':
                    image_readable = 'Histogram of Temporal Dual Regression'
                if image_name_a_nav == 'vmhc_smooth':
                    image_readable = 'Voxel-Mirrored Homotopic Connectivity'
                if image_name_a_nav == 'vmhc_smooth_hist':
                    image_readable = 'Histogram of Voxel-Mirrored Homotopic Connectivity'
                if image_name_a_nav == 'reho_smooth':
                    image_readable = 'Regional Homogeneity'
                if image_name_a_nav == 'reho_smooth_hist':
                    image_readable = 'Histogram of Regional Homogeneity'
                if image_name_a_nav == 'alff_smooth':
                    image_readable = 'Amplitude of Low-Frequency Fluctuation'
                if image_name_a_nav == 'alff_smooth_hist':
                    image_readable = 'Histogram of Amplitude of Low-Frequency Fluctuation'
                if image_name_a_nav == 'falff_smooth':
                    image_readable = 'fractional Amplitude of Low-Frequency Fluctuation'
                if image_name_a_nav == 'falff_smooth_hist':
                    image_readable = 'Histogram of fractional Amplitude of Low-Frequency Fluctuation'
                # Navigation entries are only written once per id.
                feed_line_nav(id_, image_name_a_nav, id_a, f_html_0, f_html_1)
            feed_line_body(image_name_a, id_a, png_a, f_html_1)
            feed_line_body('', id_s, png_s, f_html_1)
            if id_ in dict_hist.keys():
                if idx == 0:
                    feed_line_nav(id_, image_name_h_nav, id_h, f_html_0,
                                  f_html_1)
                feed_line_body(image_name_h, id_h, png_h, f_html_1)
    # Plots have exactly one png per id.
    if id_ in dict_plot:
        id_a = str(id_)
        image_name = qc_plot_id[id_]
        png_a = dict_plot[id_][0]
        feed_line_nav(id_, image_name, id_a, f_html_0, f_html_1)
        feed_line_body(image_name, id_a, png_a, f_html_1)
def make_page(file_, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
              qc_hist_id):
    """Convert a 'qc_files_here' text file in the CPAC output directory into
    a QC HTML page.

    Parameters
    ----------
    file_ : string
        path to qc path file

    qc_montage_id_a : dictionary
        dictionary of axial montages key : id no
        value is list of png types

    qc_montage_id_s : dictionary
        dictionary of sagittal montages key : id no
        value is list of png types

    qc_plot_id : dictionary
        dictionary of plot pngs key : id no
        value is list of png types

    qc_hist_id : dictionary
        dictionary of histogram pngs key : id no
        value is list of png types

    Returns
    -------
    None
    """
    import os
    from CPAC.qc.utils import grp_pngs_by_id, add_head, add_tail, \
        feed_lines_html
    # Each line of the qc text file is the path to one png.
    with open(file_, 'r') as f:
        pngs_ = [line.rstrip('\r\n') for line in f.readlines()]
    html_f_name = file_.replace('.txt', '')
    html_f_name = html_f_name.replace("'", "")
    html_f_name_0 = html_f_name + '_navbar.html'
    html_f_name_1 = html_f_name + '_page.html'
    # TODO: this is a temporary patch until the completed QC interface is
    # TODO: implemented
    # pop the combined (navbar + content) page back into the output directory
    # and give it a more obvious name
    html_f_name = "{0}.html".format(html_f_name.replace("qc_scan",
                                                        "QC-interface_scan"))
    html_f_name = html_f_name.replace("/qc_files_here", "")
    # NOTE(review): files are opened 'wb' but written with str data via
    # ``print >>`` in the helpers -- Python 2 semantics.
    f_html_ = open(html_f_name, 'wb')
    f_html_0 = open(html_f_name_0, 'wb')
    f_html_1 = open(html_f_name_1, 'wb')
    # Group png paths by qc id for rendering.
    dict_a, dict_s, dict_hist, dict_plot, all_ids = \
        grp_pngs_by_id(pngs_, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
                       qc_hist_id)
    qc_path_file_id = os.path.basename(html_f_name).replace(".html", "")

    add_head(f_html_, f_html_0, f_html_1, qc_path_file_id)

    for id_ in sorted(all_ids):
        feed_lines_html(id_, dict_a, dict_s, dict_hist, dict_plot,
                        qc_montage_id_a, qc_montage_id_s, qc_plot_id,
                        qc_hist_id, f_html_0, f_html_1)

    add_tail(f_html_, f_html_0, f_html_1)

    f_html_.close()
    f_html_0.close()
    f_html_1.close()
def make_qc_pages(qc_path, qc_montage_id_a, qc_montage_id_s, qc_plot_id, qc_hist_id):
    """Generate one QC HTML file per text file in 'qc_files_here'.

    Parameters
    ----------
    qc_path : string
        path to the qc_files_here directory

    qc_montage_id_a : dictionary
        axial montage ids; key: id no, value: png type

    qc_montage_id_s : dictionary
        sagittal montage ids; key: id no, value: png type

    qc_plot_id : dictionary
        plot png ids; key: id no, value: png type

    qc_hist_id : dictionary
        histogram png ids; key: id no, value: png type

    Returns
    -------
    None
    """
    import os
    from CPAC.qc.utils import make_page

    # Every .txt file in the qc directory becomes one html page.
    for entry in os.listdir(qc_path):
        if entry.endswith('.txt'):
            make_page(os.path.join(qc_path, entry), qc_montage_id_a,
                      qc_montage_id_s, qc_plot_id, qc_hist_id)
def generateQCPages(qc_path, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
                    qc_hist_id):
    """Populate the QC HTML files with the images created during the run.

    Runs after the pipeline is over.

    Parameters
    ----------
    qc_path : string
        path to the qc_files_here directory

    qc_montage_id_a : dictionary
        axial montage ids; key: id no, value: png type

    qc_montage_id_s : dictionary
        sagittal montage ids; key: id no, value: png type

    qc_plot_id : dictionary
        plot png ids; key: id no, value: png type

    qc_hist_id : dictionary
        histogram png ids; key: id no, value: png type

    Returns
    -------
    None
    """
    from CPAC.qc.utils import first_pass_organizing_files, \
        second_pass_organizing_files
    from CPAC.qc.utils import make_qc_pages

    # Pass 1: merge files that share a preprocessing strategy.
    first_pass_organizing_files(qc_path)
    # Pass 2: merge across the bandpass / hp_lp / smoothing iterables.
    second_pass_organizing_files(qc_path)
    # Finally render one html page per remaining qc text file.
    make_qc_pages(qc_path, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
                  qc_hist_id)
def afni_edge(in_file):
    """Run AFNI 3dedge3 on the input file - temporary function until the
    interface issue in Nipype is sorted out.

    Returns the path of the edge file written into the current working
    directory; raises Exception (with the attempted command) on failure.
    """
    src = os.path.abspath(in_file)
    # Output goes to CWD with an '_edge' tag inserted before the
    # '.nii'/'.nii.gz' extension.
    edge_name = os.path.basename(src).replace(".nii", "_edge.nii")
    out_file = os.path.join(os.getcwd(), edge_name)

    cmd_string = ["3dedge3", "-input", src, "-prefix", out_file]
    try:
        subprocess.check_output(cmd_string)
    except Exception as e:
        raise Exception("\n\n[!] Something went wrong with AFNI 3dedge3 while "
                        "creating the an overlay for the QA pages.\n\nError details: "
                        "{0}\n\nAttempted command: {1}"
                        "\n\n".format(e, " ".join(cmd_string)))

    return out_file
def make_edge(wf_name='create_edge'):
    """Build a workflow that traces the edges of a scan image.

    Parameters
    ----------
    wf_name : string
        name for the generated workflow

    Returns
    -------
    wf : nipype workflow
        workflow exposing ``inputspec.file_`` (path to the scan) and
        ``outputspec.new_fname`` (path to the edge file)
    """
    wf = pe.Workflow(name=wf_name)

    # Identity nodes form the workflow's public input/output interface.
    input_node = pe.Node(util.IdentityInterface(fields=['file_']),
                         name='inputspec')
    output_node = pe.Node(util.IdentityInterface(fields=['new_fname']),
                          name='outputspec')

    # afni_edge runs as a Function node; the imports list supplies the
    # modules it needs inside the node's execution context.
    edge_node = pe.Node(util.Function(input_names=['in_file'],
                                      output_names=['out_file'],
                                      function=afni_edge,
                                      imports=["import os",
                                               "import subprocess"]),
                        name='afni_3dedge3')

    wf.connect(input_node, 'file_', edge_node, 'in_file')
    wf.connect(edge_node, 'out_file', output_node, 'new_fname')
    return wf
def gen_func_anat_xfm(func_, ref_, xfm_, interp_):
    """Transform functional file (std dev) into anatomical space.

    Parameters
    ----------
    func_ : string
        functional scan
    ref_ : string
        path to reference file
    xfm_ : string
        path to transformation mat file
    interp_ : string
        interpolation measure string

    Returns
    -------
    new_fname : string
        path to the transformed scan
    """
    new_fname = os.path.join(os.getcwd(), 'std_dev_anat.nii.gz')
    # FSL applywarp writes the transformed volume to new_fname
    cmd = ['applywarp',
           '--ref={0}'.format(ref_),
           '--in={0}'.format(func_),
           '--out={0}'.format(new_fname),
           '--premat={0}'.format(xfm_),
           '--interp={0}'.format(interp_)]
    subprocess.check_output(cmd)
    return new_fname
def gen_snr(std_dev, mean_func_anat):
    """Generate SNR file.

    Parameters
    ----------
    std_dev : string
        path to std dev file in anat space
    mean_func_anat : string
        path to mean functional scan in anatomical space

    Returns
    -------
    new_fname : string
        path to the snr file
    """
    new_fname = os.path.join(os.getcwd(), 'snr.nii.gz')
    # SNR is voxelwise mean / std dev, computed with AFNI 3dcalc
    cmd = ['3dcalc',
           '-a', '{0}'.format(std_dev),
           '-b', '{0}'.format(mean_func_anat),
           '-expr', 'b/a',
           '-prefix', '{0}'.format(new_fname)]
    subprocess.check_output(cmd)
    return new_fname
def cal_snr_val(measure_file):
    """Calculate average snr value for snr image.

    Parameters
    ----------
    measure_file : string
        path to input nifti file

    Returns
    -------
    avg_snr_file : string
        a text file storing the average snr value
    """
    data = nb.load(measure_file).get_data()
    data_flat = data.flatten()
    # only positive voxels contribute to the mean SNR
    data_no0 = data_flat[data_flat > 0]
    snr_val = ma.mean(data_no0)
    avg_snr_file = os.path.join(os.getcwd(), 'average_snr_file.txt')
    # BUG FIX: the file was previously opened twice ("f = open(...)"
    # immediately followed by "with open(...)"), leaking the first handle;
    # open it exactly once via the context manager.
    with open(avg_snr_file, 'wt') as f:
        f.write(str(snr_val) + '\n')
    return avg_snr_file
def gen_std_dev(mask_, func_):
    """Generate std dev file.

    Parameters
    ----------
    mask_ : string
        path to whole brain mask file
    func_ : string
        path to functional scan

    Returns
    -------
    new_fname : string
        path to standard deviation file
    """
    new_fname = os.path.join(os.getcwd(), 'std_dev.nii.gz')
    # temporal standard deviation within the mask, via AFNI 3dTstat
    cmd = ["3dTstat", "-stdev",
           "-mask", "{0}".format(mask_),
           "-prefix", "{0}".format(new_fname),
           "{0}".format(func_)]
    subprocess.check_output(cmd)
    return new_fname
def drange(min_, max_):
    """Generate a list of evenly spaced float values in a specified range.

    Parameters
    ----------
    min_ : float
        Min value
    max_ : float
        Max value

    Returns
    -------
    range_ : list
        floats from min_ to max_ (inclusive) in steps of
        (max_ - min_) / 8, each rounded to three decimal places
    """
    step = float(max_ - min_) / 8.0
    values = []
    current = min_
    while current <= max_:
        # round first, then re-parse the 3-decimal string representation
        values.append(float("{0:.3f}".format(round(current, 3))))
        current += step
    return values
def gen_plot_png(arr, measure, ex_vol=None):
    """Generate Motion FD Plot. Shows which volumes were dropped.

    Parameters
    ----------
    arr : list
        Frame wise Displacements
    measure : string
        Label of the Measure
    ex_vol : list
        Volumes excluded

    Returns
    -------
    png_name : string
        path to the generated plot png
    """
    matplotlib.rcParams.update({'font.size': 8})

    arr = np.loadtxt(arr)

    if ex_vol:
        try:
            ex_vol = np.genfromtxt(ex_vol, delimiter=',', dtype=int)
            ex_vol = ex_vol[ex_vol > 0]
        except Exception:
            # best-effort: an unreadable exclusion file means nothing excluded
            ex_vol = []
    else:
        ex_vol = []

    # drop the first (reference) volume and keep only in-range exclusions
    arr = arr[1:]
    del_el = [x for x in ex_vol if x < len(arr)]
    ex_vol = np.array(del_el)

    fig = pyplot.figure(figsize=(10, 6))
    # BUG FIX: the original used Python 2's "xrange", which is a NameError
    # on Python 3; "range" works on both.
    pyplot.plot(range(len(arr)), arr, '-')
    fig.suptitle('%s plot with Mean %s = %0.4f' % (measure, measure,
                 arr.mean()))
    if measure == 'FD' and len(ex_vol) > 0:
        # mark the excluded volumes in red with annotated coordinates
        pyplot.scatter(ex_vol, arr[ex_vol], c="red", zorder=2)
        for x in ex_vol:
            pyplot.annotate('( %d , %0.3f)' % (x, arr[x]), xy=(x, arr[x]),
                            arrowprops=dict(facecolor='black', shrink=0.0))

    pyplot.xlabel('Volumes')
    pyplot.ylabel('%s' % measure)
    png_name = os.path.join(os.getcwd(), '%s_plot.png' % measure)
    fig.savefig(os.path.join(os.getcwd(), png_name))
    pyplot.close()
    matplotlib.rcdefaults()
    return png_name
def gen_motion_plt(motion_parameters):
    """
    Function to Generate Matplotlib plot for motion.
    Separate plots for Translation and Rotation are generated.

    Parameters
    ----------
    motion_parameters : string
        Motion Parameters file

    Returns
    -------
    translation_plot : string
        path to translation plot
    rotation_plot : string
        path to rotation plot
    """
    png_name1 = 'motion_trans_plot.png'
    png_name2 = 'motion_rot_plot.png'
    data = np.loadtxt(motion_parameters)
    # rows become parameters (x, y, z, roll, pitch, yaw), columns volumes
    data_t = data.T

    # BUG FIX: Axes.set_color_cycle was removed in matplotlib 2.x;
    # set_prop_cycle is the supported replacement.
    plt.gca().set_prop_cycle(color=['red', 'green', 'blue'])
    plt.plot(data_t[0])
    plt.plot(data_t[1])
    plt.plot(data_t[2])
    plt.legend(['x', 'y', 'z'], loc='upper right')
    plt.ylabel('Translation (mm)')
    plt.xlabel('Volume')
    plt.savefig(os.path.join(os.getcwd(), png_name1))
    plt.close()

    # rotation rows are assumed to be in radians - convert to degrees
    # (vectorized replacement for the original per-element math.degrees loop)
    data_t[3:6] = np.degrees(data_t[3:6])

    plt.gca().set_prop_cycle(color=['red', 'green', 'blue'])
    plt.plot(data_t[3])
    plt.plot(data_t[4])
    plt.plot(data_t[5])
    plt.legend(['roll', 'pitch', 'yaw'], loc='upper right')
    plt.ylabel('Rotation (degrees)')
    plt.xlabel('Volume')
    plt.savefig(os.path.join(os.getcwd(), png_name2))
    plt.close()

    translation_plot = os.path.join(os.getcwd(), png_name1)
    rotation_plot = os.path.join(os.getcwd(), png_name2)
    return translation_plot, rotation_plot
def gen_histogram(measure_file, measure):
    """Generates Histogram Image of intensities for a given input nifti file.

    Parameters
    ----------
    measure_file : string
        path to input nifti file (or a list of such paths)
    measure : string
        Name of the measure label in the plot

    Returns
    -------
    hist_path : string
        Path to the generated histogram png (a list of paths when
        measure_file is a list)
    """
    from CPAC.qc.utils import make_histogram
    import os

    if not isinstance(measure_file, list):
        return make_histogram(measure_file, measure)

    base_label = measure
    hist_path = []
    for file_ in measure_file:
        measure = base_label
        # filename with up to two extensions (.nii.gz) stripped
        root = os.path.basename(
            os.path.splitext(os.path.splitext(file_)[0])[0])
        # derive a per-ROI/per-map label from the filename for known measures
        if 'sca_roi' in measure.lower():
            measure = 'sca_ROI_' + root.split('ROI_')[1].split('_')[0]
        if 'sca_tempreg' in measure.lower():
            measure = ('sca_mult_regression_maps_ROI_'
                       + root.split('z_maps_roi_')[1].split('_')[0])
        if 'dr_tempreg' in measure.lower():
            measure = ('dual_regression_map_'
                       + root.split('temp_reg_map_')[1].split('_')[0])
        if 'centrality' in measure.lower():
            type_, rest = root.split('centrality_')
            measure = type_ + 'centrality_' + rest.split('_')[0]
        hist_path.append(make_histogram(file_, measure))
    return hist_path
def make_histogram(measure_file, measure):
    """
    Generates Histogram Image of intensities for a given input
    nifti file.

    Parameters
    ----------
    measure_file : string
        path to input nifti file
    measure : string
        Name of the measure label in the plot

    Returns
    -------
    hist_path : string
        Path to the generated histogram png
    """
    from matplotlib import pyplot
    import numpy as np
    import nibabel as nb
    import os

    data = nb.load(measure_file).get_data()
    intensities = data.flatten(order='F')
    # bin the non-zero intensities and plot counts at each bin's midpoint
    counts, bin_edges = np.histogram(intensities[intensities != 0], bins=100)
    bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])

    fig = pyplot.figure()
    fig.suptitle('%s intensity plot' % measure)
    pyplot.plot(bin_centers, counts, '-')
    pyplot.xlabel('intensity')
    pyplot.ylabel('# of voxels')

    png_name = os.path.join(os.getcwd(), '%s_hist_plot.png' % measure)
    fig.savefig(os.path.join(os.getcwd(), png_name))
    pyplot.close()
    return os.path.join(os.getcwd(), png_name)
def drop_percent_(measure_file, percent_):
    """
    Zeros out voxels in measure files whose intensity doesnt fall in percent_
    of voxel intensities

    Parameters
    ----------
    measure_file : string
        Input nifti file
    percent_ : percentage of the voxels to keep

    Returns
    -------
    modified_measure_file : string
        measure_file with 1 - percent_ voxels zeroed out
    """
    import nibabel as nb
    import numpy as np
    import os
    import subprocess

    img = nb.load(measure_file)
    data = img.get_data()

    # BUG FIX: the Python 2-only "commands" module was removed in Python 3;
    # subprocess.getoutput is the drop-in replacement.
    max_val = float(subprocess.getoutput(
        'fslstats %s -P %f' % (measure_file, percent_)))

    # zero every positive voxel at or above the percentile cutoff
    # (vectorized replacement for the original triple voxel loop)
    data[(data > 0.0) & (data >= max_val)] = 0.0

    save_img = nb.Nifti1Image(data, header=img.get_header(),
                              affine=img.get_affine())
    f_name = os.path.basename(
        os.path.splitext(os.path.splitext(measure_file)[0])[0])

    if '.nii.gz' in measure_file:
        ext = '.nii.gz'
    else:
        ext = '.nii'

    # the intermediate file deliberately carries an extra underscore so it
    # does not collide with the final (header-corrected) output
    saved_name = '%s_%d_%s' % (f_name, percent_, ext)
    saved_name_correct_header = '%s_%d%s' % (f_name, percent_, ext)
    save_img.to_filename(saved_name)

    # rewrite through AFNI 3dcalc so the output carries a clean header
    subprocess.getoutput("3dcalc -a %s -expr 'a' -prefix %s"
                         % (saved_name, saved_name_correct_header))

    modified_measure_file = os.path.join(os.getcwd(),
                                         saved_name_correct_header)
    return modified_measure_file
def get_spacing(across, down, dimension):
    """
    Get Spacing in slices to be selected for montage
    display varying in given dimension

    Parameters
    ----------
    across : integer
        # images placed horizontally in montage
    down : integer
        # images stacked vertically in montage
    dimension : integer
        # of slices available along the chosen axis

    Returns
    -------
    space : integer
        # of images to skip before displaying next one
    """
    slots = across * down
    space = 10
    if slots * space > dimension:
        # montage would overshoot the volume: shrink the gap until it fits
        while slots * space > dimension:
            space -= 1
    else:
        # montage would undershoot: widen the gap until it spans the volume
        while slots * space < dimension:
            space += 1
    return space
def determine_start_and_end(data, direction, percent):
    """
    Determine start slice and end slice in data file in
    given direction with at least threshold percent of voxels
    at start and end slices.

    Parameters
    ----------
    data : numpy array
        loaded 3D image data
    direction : string
        axial or sagittal
    percent : float
        percent(from total) of non zero voxels at starting and ending slice

    Returns
    -------
    start : integer
        Index of starting slice
    end : integer
        Index of the last slice
    """
    x_dim, _, z_dim = data.shape
    # a slice "counts" when its non-zero voxels exceed this share of the total
    thresh = percent * float(np.count_nonzero(data))

    if 'axial' in direction:
        axis_len = z_dim

        def slice_count(idx):
            return np.count_nonzero(data[:, :, idx])
    else:
        axis_len = x_dim

        def slice_count(idx):
            return np.count_nonzero(data[idx, :, :])

    # scan inward from the far end, then from the near end
    end = axis_len - 1
    while end > 0:
        if float(slice_count(end)) > thresh:
            break
        end -= 1
    start = 0
    while start < end:
        if float(slice_count(start)) > thresh:
            break
        start += 1
    return start, end
def montage_axial(overlay, underlay, png_name, cbar_name):
    """Draws Montage using overlay on Anatomical brain in Axial Direction,
    calls make_montage_axial.

    Parameters
    ----------
    overlay : string
        Nifti file (or a list of them)
    underlay : string
        Nifti for Anatomical Brain
    cbar_name : string
        name of the cbar
    png_name : string
        Proposed name of the montage plot

    Returns
    -------
    png_name : Path to generated PNG (a list for list input)
    """
    if not isinstance(overlay, list):
        return make_montage_axial(overlay, underlay, png_name, cbar_name)

    pngs = []
    for ov in overlay:
        # prefix each output name with the overlay's extension-less basename
        stem = os.path.basename(
            os.path.splitext(os.path.splitext(ov)[0])[0])
        pngs.append(make_montage_axial(ov, underlay,
                                       stem + '_' + png_name, cbar_name))
    return pngs
def make_montage_axial(overlay, underlay, png_name, cbar_name):
    """
    Draws Montage using overlay on Anatomical brain in Axial Direction

    Parameters
    ----------
    overlay : string
        Nifi file
    underlay : string
        Nifti for Anatomical Brain
    cbar_name : string
        name of the cbar
    png_name : string
        Proposed name of the montage plot

    Returns
    -------
    png_name : Path to generated PNG
    """
    import os
    import matplotlib
    matplotlib.rcParams.update({'font.size': 5})
    import matplotlib.cm as cm
    try:
        from mpl_toolkits.axes_grid1 import ImageGrid
    except ImportError:
        from mpl_toolkits.axes_grid import ImageGrid
    import matplotlib.pyplot as plt
    import nibabel as nb
    import numpy as np
    from CPAC.qc.utils import determine_start_and_end, get_spacing

    Y = nb.load(underlay).get_data()
    X = nb.load(overlay).get_data()
    X = X.astype(np.float32)
    Y = Y.astype(np.float32)

    # edge/visibility overlays are binarized to a single display intensity
    if 'skull_vis' in png_name:
        X[X < 20.0] = 0.0
    if 'skull_vis' in png_name or \
            't1_edge_on_mean_func_in_t1' in png_name or \
            'MNI_edge_on_mean_func_mni' in png_name:
        max_ = np.nanmax(np.abs(X.flatten()))
        X[X != 0.0] = max_

    z1, z2 = determine_start_and_end(Y, 'axial', 0.0001)
    spacing = get_spacing(6, 3, z2 - z1)
    x, y, z = Y.shape
    fig = plt.figure(1)
    max_ = np.max(np.abs(Y))

    # statistical maps get a shared colorbar; plain overlays do not
    if ('snr' in png_name) or ('reho' in png_name) or \
            ('vmhc' in png_name) or ('sca_' in png_name) or \
            ('alff' in png_name) or ('centrality' in png_name) or \
            ('dr_tempreg' in png_name):
        grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                         aspect=True, cbar_mode="single", cbar_pad=0.2,
                         direction="row")
    else:
        grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                         aspect=True, direction="row")

    # first pass: anatomical underlay in greyscale
    zz = z1
    for i in range(6*3):
        if zz >= z2:
            break
        try:
            im = grid[i].imshow(np.rot90(Y[:, :, zz]), cmap=cm.Greys_r)
        except IndexError as e:
            # TODO: send this to the logger instead
            print("\n[!] QC Interface: Had a problem with creating the "
                  "axial montage for {0}\n\nDetails:{1}"
                  "\n".format(png_name, e))
        zz += spacing

    # second pass: semi-transparent overlay (zeros hidden as NaN)
    x, y, z = X.shape
    X[X == 0.0] = np.nan
    max_ = np.nanmax(np.abs(X.flatten()))

    zz = z1
    im = None
    for i in range(6*3):
        if zz >= z2:
            break
        try:
            # BUG FIX: the original compared strings with "is" (identity),
            # which is unreliable; compare with "==".  The 'red_to_blue'
            # and 'green' branches were identical and have been merged.
            if cbar_name == 'red_to_blue' or cbar_name == 'green':
                vmin, vmax = 0, max_
            else:
                vmin, vmax = -max_, max_
            im = grid[i].imshow(np.rot90(X[:, :, zz]),
                                cmap=cm.get_cmap(cbar_name), alpha=0.82,
                                vmin=vmin, vmax=vmax)
        except IndexError as e:
            # TODO: send this to the logger instead
            print("\n[!] QC Interface: Had a problem with creating the "
                  "axial montage for {0}\n\nDetails:{1}"
                  "\n".format(png_name, e))

        grid[i].axes.get_xaxis().set_visible(False)
        grid[i].axes.get_yaxis().set_visible(False)
        zz += spacing

    cbar = grid.cbar_axes[0].colorbar(im)

    if 'snr' in png_name:
        cbar.ax.set_yticks(drange(0, max_))
    elif ('reho' in png_name) or ('vmhc' in png_name) or \
            ('sca_' in png_name) or ('alff' in png_name) or \
            ('centrality' in png_name) or ('dr_tempreg' in png_name):
        cbar.ax.set_yticks(drange(-max_, max_))

    plt.axis("off")
    png_name = os.path.join(os.getcwd(), png_name)
    plt.savefig(png_name, dpi=200, bbox_inches='tight')
    plt.close()
    matplotlib.rcdefaults()
    return png_name
def montage_sagittal(overlay, underlay, png_name, cbar_name):
    """
    Draws Montage using overlay on Anatomical brain in Sagittal Direction
    calls make_montage_sagittal

    Parameters
    ----------
    overlay : string
        Nifti file (or a list of them)
    underlay : string
        Nifti for Anatomical Brain
    cbar_name : string
        name of the cbar
    png_name : string
        Proposed name of the montage plot

    Returns
    -------
    png_name : Path to generated PNG (a list for list input)
    """
    if not isinstance(overlay, list):
        return make_montage_sagittal(overlay, underlay, png_name, cbar_name)

    pngs = []
    for ov in overlay:
        # prefix each output name with the overlay's extension-less basename
        stem = os.path.basename(
            os.path.splitext(os.path.splitext(ov)[0])[0])
        pngs.append(make_montage_sagittal(ov, underlay,
                                          stem + '_' + png_name, cbar_name))
    return pngs
def make_montage_sagittal(overlay, underlay, png_name, cbar_name):
    """
    Draws Montage using overlay on Anatomical brain in Sagittal Direction

    Parameters
    ----------
    overlay : string
        Nifi file
    underlay : string
        Nifti for Anatomical Brain
    cbar_name : string
        name of the cbar
    png_name : string
        Proposed name of the montage plot

    Returns
    -------
    png_name : Path to generated PNG
    """
    from CPAC.qc.utils import determine_start_and_end, get_spacing
    import matplotlib
    import os
    import numpy as np
    matplotlib.rcParams.update({'font.size': 5})
    try:
        from mpl_toolkits.axes_grid1 import ImageGrid
    except ImportError:
        from mpl_toolkits.axes_grid import ImageGrid
    import matplotlib.cm as cm
    import matplotlib.pyplot as plt
    import nibabel as nb

    Y = nb.load(underlay).get_data()
    X = nb.load(overlay).get_data()
    X = X.astype(np.float32)
    Y = Y.astype(np.float32)

    # edge/visibility overlays are binarized to a single display intensity
    if 'skull_vis' in png_name:
        X[X < 20.0] = 0.0
    if 'skull_vis' in png_name or \
            't1_edge_on_mean_func_in_t1' in png_name or \
            'MNI_edge_on_mean_func_mni' in png_name:
        max_ = np.nanmax(np.abs(X.flatten()))
        X[X != 0.0] = max_

    x1, x2 = determine_start_and_end(Y, 'sagittal', 0.0001)
    spacing = get_spacing(6, 3, x2 - x1)
    x, y, z = Y.shape
    fig = plt.figure(1)
    max_ = np.max(np.abs(Y))

    # statistical maps get a shared colorbar; plain overlays do not
    if ('snr' in png_name) or ('reho' in png_name) or \
            ('vmhc' in png_name) or ('sca_' in png_name) or \
            ('alff' in png_name) or ('centrality' in png_name) or \
            ('dr_tempreg' in png_name):
        grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                         aspect=True, cbar_mode="single", cbar_pad=0.5,
                         direction="row")
    else:
        grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                         aspect=True, cbar_mode="None", direction="row")

    # first pass: anatomical underlay in greyscale
    xx = x1
    for i in range(6*3):
        if xx >= x2:
            break
        try:
            im = grid[i].imshow(np.rot90(Y[xx, :, :]), cmap=cm.Greys_r)
        except IndexError as e:
            # TODO: send this to the logger instead
            print("\n[!] QC Interface: Had a problem with creating the "
                  "sagittal montage for {0}\n\nDetails:{1}"
                  "\n".format(png_name, e))
        grid[i].get_xaxis().set_visible(False)
        grid[i].get_yaxis().set_visible(False)
        xx += spacing

    # second pass: semi-transparent overlay (zeros hidden as NaN)
    x, y, z = X.shape
    X[X == 0.0] = np.nan
    max_ = np.nanmax(np.abs(X.flatten()))
    xx = x1
    for i in range(6*3):
        if xx >= x2:
            break
        im = None
        try:
            # BUG FIX: the original compared strings with "is" (identity),
            # which is unreliable; compare with "==".  The 'red_to_blue'
            # and 'green' branches were identical and have been merged.
            if cbar_name == 'red_to_blue' or cbar_name == 'green':
                vmin, vmax = 0, max_
            else:
                vmin, vmax = -max_, max_
            im = grid[i].imshow(np.rot90(X[xx, :, :]),
                                cmap=cm.get_cmap(cbar_name), alpha=0.82,
                                vmin=vmin, vmax=vmax)
        except IndexError as e:
            # TODO: send this to the logger instead
            print("\n[!] QC Interface: Had a problem with creating the "
                  "sagittal montage for {0}\n\nDetails:{1}"
                  "\n".format(png_name, e))
        xx += spacing

    cbar = grid.cbar_axes[0].colorbar(im)

    if 'snr' in png_name:
        cbar.ax.set_yticks(drange(0, max_))
    elif ('reho' in png_name) or ('vmhc' in png_name) or \
            ('sca_' in png_name) or ('alff' in png_name) or \
            ('centrality' in png_name) or ('dr_tempreg' in png_name):
        cbar.ax.set_yticks(drange(-max_, max_))

    plt.axis("off")
    png_name = os.path.join(os.getcwd(), png_name)
    plt.savefig(png_name, dpi=200, bbox_inches='tight')
    plt.close()
    matplotlib.rcdefaults()
    return png_name
def montage_gm_wm_csf_axial(overlay_csf, overlay_wm, overlay_gm, underlay, png_name):
    """
    Draws Montage using GM WM and CSF overlays on Anatomical brain in Axial
    Direction

    Parameters
    ----------
    overlay_csf : string
        Nifi file CSF MAP
    overlay_wm : string
        Nifti file WM MAP
    overlay_gm : string
        Nifti file GM MAP
    underlay : string
        Nifti for Anatomical Brain
    png_name : string
        Proposed name of the montage plot

    Returns
    -------
    png_name : Path to generated PNG
    """
    Y = nb.load(underlay).get_data()
    z1, z2 = determine_start_and_end(Y, 'axial', 0.0001)
    spacing = get_spacing(6, 3, z2 - z1)
    X_csf = nb.load(overlay_csf).get_data()
    X_wm = nb.load(overlay_wm).get_data()
    X_gm = nb.load(overlay_gm).get_data()
    X_csf = X_csf.astype(np.float32)
    X_wm = X_wm.astype(np.float32)
    X_gm = X_gm.astype(np.float32)
    Y = Y.astype(np.float32)

    # binarize each tissue map to a single display intensity
    max_csf = np.nanmax(np.abs(X_csf.flatten()))
    X_csf[X_csf != 0.0] = max_csf
    max_wm = np.nanmax(np.abs(X_wm.flatten()))
    X_wm[X_wm != 0.0] = max_wm
    max_gm = np.nanmax(np.abs(X_gm.flatten()))
    X_gm[X_gm != 0.0] = max_gm
    x, y, z = Y.shape
    fig = plt.figure(1)
    max_ = np.max(np.abs(Y))

    # BUG FIX: the original called an undefined name "ImageGrid1" and relied
    # on the resulting NameError (caught by a bare except) to fall back to
    # ImageGrid; call ImageGrid directly.
    grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                     aspect=True, cbar_mode="None", direction="row")

    # first pass: anatomical underlay in greyscale
    zz = z1
    for i in range(6*3):
        if zz >= z2:
            break
        im = grid[i].imshow(np.rot90(Y[:, :, zz]), cmap=cm.Greys_r)
        zz += spacing

    # second pass: stack the three tissue overlays (zeros hidden as NaN)
    x, y, z = X_csf.shape
    X_csf[X_csf == 0.0] = np.nan
    X_wm[X_wm == 0.0] = np.nan
    X_gm[X_gm == 0.0] = np.nan

    zz = z1
    im = None
    for i in range(6*3):
        if zz >= z2:
            break
        im = grid[i].imshow(np.rot90(X_csf[:, :, zz]), cmap=cm.get_cmap('green'), alpha=0.82, vmin=0, vmax=max_csf)
        im = grid[i].imshow(np.rot90(X_wm[:, :, zz]), cmap=cm.get_cmap('blue'), alpha=0.82, vmin=0, vmax=max_wm)
        im = grid[i].imshow(np.rot90(X_gm[:, :, zz]), cmap=cm.get_cmap('red'), alpha=0.82, vmin=0, vmax=max_gm)

        grid[i].axes.get_xaxis().set_visible(False)
        grid[i].axes.get_yaxis().set_visible(False)
        zz += spacing

    cbar = grid.cbar_axes[0].colorbar(im)

    plt.axis("off")
    png_name = os.path.join(os.getcwd(), png_name)
    plt.savefig(png_name, dpi=200, bbox_inches='tight')
    plt.close()
    return png_name
def montage_gm_wm_csf_sagittal(overlay_csf, overlay_wm, overlay_gm, underlay, png_name):
    """
    Draws Montage using GM WM and CSF overlays on Anatomical brain in Sagittal
    Direction

    Parameters
    ----------
    overlay_csf : string
        Nifi file CSF MAP
    overlay_wm : string
        Nifti file WM MAP
    overlay_gm : string
        Nifti file GM MAP
    underlay : string
        Nifti for Anatomical Brain
    png_name : string
        Proposed name of the montage plot

    Returns
    -------
    png_name : Path to generated PNG
    """
    Y = nb.load(underlay).get_data()
    x1, x2 = determine_start_and_end(Y, 'sagittal', 0.0001)
    spacing = get_spacing(6, 3, x2 - x1)
    X_csf = nb.load(overlay_csf).get_data()
    X_wm = nb.load(overlay_wm).get_data()
    X_gm = nb.load(overlay_gm).get_data()
    X_csf = X_csf.astype(np.float32)
    X_wm = X_wm.astype(np.float32)
    X_gm = X_gm.astype(np.float32)
    Y = Y.astype(np.float32)

    # binarize each tissue map to a single display intensity
    max_csf = np.nanmax(np.abs(X_csf.flatten()))
    X_csf[X_csf != 0.0] = max_csf
    max_wm = np.nanmax(np.abs(X_wm.flatten()))
    X_wm[X_wm != 0.0] = max_wm
    max_gm = np.nanmax(np.abs(X_gm.flatten()))
    X_gm[X_gm != 0.0] = max_gm
    x, y, z = Y.shape
    fig = plt.figure(1)
    max_ = np.max(np.abs(Y))

    # BUG FIX: the original called an undefined name "ImageGrid1" and relied
    # on the resulting NameError (caught by a bare except) to fall back to
    # ImageGrid; call ImageGrid directly.
    grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                     aspect=True, cbar_mode="None", direction="row")

    # first pass: anatomical underlay in greyscale
    zz = x1
    for i in range(6*3):
        if zz >= x2:
            break
        im = grid[i].imshow(np.rot90(Y[zz, :, :]), cmap=cm.Greys_r)
        zz += spacing

    # second pass: stack the three tissue overlays (zeros hidden as NaN)
    x, y, z = X_csf.shape
    X_csf[X_csf == 0.0] = np.nan
    X_wm[X_wm == 0.0] = np.nan
    X_gm[X_gm == 0.0] = np.nan

    zz = x1
    im = None
    for i in range(6*3):
        if zz >= x2:
            break
        im = grid[i].imshow(np.rot90(X_csf[zz, :, :]),
                            cmap=cm.get_cmap('green'), alpha=0.82, vmin=0,
                            vmax=max_csf)
        im = grid[i].imshow(np.rot90(X_wm[zz, :, :]),
                            cmap=cm.get_cmap('blue'), alpha=0.82, vmin=0,
                            vmax=max_wm)
        im = grid[i].imshow(np.rot90(X_gm[zz, :, :]),
                            cmap=cm.get_cmap('red'), alpha=0.82, vmin=0,
                            vmax=max_gm)

        grid[i].axes.get_xaxis().set_visible(False)
        grid[i].axes.get_yaxis().set_visible(False)
        zz += spacing

    cbar = grid.cbar_axes[0].colorbar(im)

    plt.axis("off")
    png_name = os.path.join(os.getcwd(), png_name)
    plt.savefig(png_name, dpi=200, bbox_inches='tight')
    plt.close()
    return png_name
def register_pallete(file_, cbar_name):
    """
    Registers color pallete to matplotlib

    Parameters
    ----------
    file_ : string
        file containing colors in hexadecimal formats in each line
    cbar_name : string
        Proposed name for the color bar

    Returns
    -------
    None
    """
    import matplotlib.colors as col
    import matplotlib.cm as cm

    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even if parsing fails.
    with open(file_, 'r') as f:
        colors_ = f.readlines()

    # the palette file lists colors top-to-bottom; the colormap wants them
    # in reverse order
    colors = [color.rstrip('\r\n') for color in reversed(colors_)]
    cmap3 = col.ListedColormap(colors, cbar_name)
    cm.register_cmap(cmap=cmap3)
def resample_1mm(file_):
    """
    Calls make_resample_1mm which resamples file to 1mm space

    Parameters
    ----------
    file_ : string
        path to the scan (or a list of paths)

    Returns
    -------
    new_fname : string
        path to 1mm resampled nifti file (a list of paths for list input)
    """
    if isinstance(file_, list):
        return [make_resample_1mm(single) for single in file_]
    return make_resample_1mm(file_)
def make_resample_1mm(file_):
    """
    Resamples input nifti file to 1mm space

    Parameters
    ----------
    file_ : string
        Input Nifti File

    Returns
    -------
    new_fname : string
        Input Nifti resampled to 1mm space
    """
    import os
    import subprocess

    # strip up to two extensions so "scan.nii.gz" keeps ".nii.gz" intact
    remainder, ext_ = os.path.splitext(file_)
    remainder, ext1_ = os.path.splitext(remainder)
    ext = ''.join([ext1_, ext_])

    new_fname = ''.join([remainder, '_1mm', ext])
    new_fname = os.path.join(os.getcwd(), os.path.basename(new_fname))

    # BUG FIX: the Python 2-only "commands" module was removed in Python 3;
    # subprocess.getoutput is the drop-in replacement.
    cmd = " 3dresample -dxyz 1.0 1.0 1.0 -prefix %s " \
          "-inset %s " % (new_fname, file_)
    subprocess.getoutput(cmd)
    return new_fname
| 28.338065 | 137 | 0.5756 |
febbc434a9d151ba12b67c97eb7ced62135d9788 | 815 | py | Python | tomviz/python/BinTiltSeriesByTwo.py | sankhesh/tomviz | 7116f4eb75b30534a24462f4ddfb1694fe41c308 | [
"BSD-3-Clause"
] | 284 | 2015-01-05T08:53:20.000Z | 2022-03-31T07:35:16.000Z | tomviz/python/BinTiltSeriesByTwo.py | sankhesh/tomviz | 7116f4eb75b30534a24462f4ddfb1694fe41c308 | [
"BSD-3-Clause"
] | 1,579 | 2015-03-19T15:56:44.000Z | 2022-03-21T11:29:04.000Z | tomviz/python/BinTiltSeriesByTwo.py | sankhesh/tomviz | 7116f4eb75b30534a24462f4ddfb1694fe41c308 | [
"BSD-3-Clause"
def transform(dataset):
    """Downsample tilt images by a factor of 2"""
    from tomviz import utils
    import scipy.ndimage
    import numpy as np
    import warnings

    source = dataset.active_scalars
    zoom_factors = (0.5, 0.5, 1)
    downsampled = np.empty(utils.zoom_shape(source, zoom_factors),
                           source.dtype, order='F')

    # Downsample the dataset x2 using order 1 spline (linear)
    warnings.filterwarnings('ignore', '.*output shape of zoom.*')
    scipy.ndimage.interpolation.zoom(source, zoom_factors,
                                     output=downsampled, order=1,
                                     mode='constant', cval=0.0,
                                     prefilter=False)

    # Set the result as the new scalars.
    dataset.active_scalars = downsampled
ed8d22a4785e477ff876801f2daa3479e978d87e | 10,069 | py | Python | Cura/Cura/plugins/UM3NetworkPrinting/src/Cloud/CloudApiClient.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | Cura/Cura/plugins/UM3NetworkPrinting/src/Cloud/CloudApiClient.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | Cura/Cura/plugins/UM3NetworkPrinting/src/Cloud/CloudApiClient.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import json
from json import JSONDecodeError
from time import time
from typing import Callable, List, Type, TypeVar, Union, Optional, Tuple, Dict, Any, cast
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QNetworkRequest, QNetworkReply, QNetworkAccessManager
from UM.Logger import Logger
from cura import UltimakerCloudAuthentication
from cura.API import Account
from .ToolPathUploader import ToolPathUploader
from ..Models.BaseModel import BaseModel
from ..Models.Http.CloudClusterResponse import CloudClusterResponse
from ..Models.Http.CloudError import CloudError
from ..Models.Http.CloudClusterStatus import CloudClusterStatus
from ..Models.Http.CloudPrintJobUploadRequest import CloudPrintJobUploadRequest
from ..Models.Http.CloudPrintResponse import CloudPrintResponse
from ..Models.Http.CloudPrintJobResponse import CloudPrintJobResponse
## The generic type variable used to document the methods below.
CloudApiClientModel = TypeVar("CloudApiClientModel", bound=BaseModel)
## The cloud API client is responsible for handling the requests and responses from the cloud.
# Each method should only handle models instead of exposing Any HTTP details.
class CloudApiClient:
# The cloud URL to use for this remote cluster.
ROOT_PATH = UltimakerCloudAuthentication.CuraCloudAPIRoot
CLUSTER_API_ROOT = "{}/connect/v1".format(ROOT_PATH)
CURA_API_ROOT = "{}/cura/v1".format(ROOT_PATH)
# In order to avoid garbage collection we keep the callbacks in this list.
_anti_gc_callbacks = [] # type: List[Callable[[], None]]
## Initializes a new cloud API client.
# \param account: The user's account object
# \param on_error: The callback to be called whenever we receive errors from the server.
def __init__(self, account: Account, on_error: Callable[[List[CloudError]], None]) -> None:
super().__init__()
self._manager = QNetworkAccessManager()
self._account = account
self._on_error = on_error
self._upload = None # type: Optional[ToolPathUploader]
## Gets the account used for the API.
@property
def account(self) -> Account:
return self._account
## Retrieves all the clusters for the user that is currently logged in.
# \param on_finished: The function to be called after the result is parsed.
def getClusters(self, on_finished: Callable[[List[CloudClusterResponse]], Any]) -> None:
url = "{}/clusters?status=active".format(self.CLUSTER_API_ROOT)
reply = self._manager.get(self._createEmptyRequest(url))
self._addCallback(reply, on_finished, CloudClusterResponse)
## Retrieves the status of the given cluster.
# \param cluster_id: The ID of the cluster.
# \param on_finished: The function to be called after the result is parsed.
def getClusterStatus(self, cluster_id: str, on_finished: Callable[[CloudClusterStatus], Any]) -> None:
url = "{}/clusters/{}/status".format(self.CLUSTER_API_ROOT, cluster_id)
reply = self._manager.get(self._createEmptyRequest(url))
self._addCallback(reply, on_finished, CloudClusterStatus)
## Requests the cloud to register the upload of a print job mesh.
# \param request: The request object.
# \param on_finished: The function to be called after the result is parsed.
def requestUpload(self, request: CloudPrintJobUploadRequest,
on_finished: Callable[[CloudPrintJobResponse], Any]) -> None:
url = "{}/jobs/upload".format(self.CURA_API_ROOT)
body = json.dumps({"data": request.toDict()})
reply = self._manager.put(self._createEmptyRequest(url), body.encode())
self._addCallback(reply, on_finished, CloudPrintJobResponse)
## Uploads a print job tool path to the cloud.
# \param print_job: The object received after requesting an upload with `self.requestUpload`.
# \param mesh: The tool path data to be uploaded.
# \param on_finished: The function to be called after the upload is successful.
# \param on_progress: A function to be called during upload progress. It receives a percentage (0-100).
# \param on_error: A function to be called if the upload fails.
def uploadToolPath(self, print_job: CloudPrintJobResponse, mesh: bytes, on_finished: Callable[[], Any],
on_progress: Callable[[int], Any], on_error: Callable[[], Any]):
self._upload = ToolPathUploader(self._manager, print_job, mesh, on_finished, on_progress, on_error)
self._upload.start()
# Requests a cluster to print the given print job.
# \param cluster_id: The ID of the cluster.
# \param job_id: The ID of the print job.
# \param on_finished: The function to be called after the result is parsed.
def requestPrint(self, cluster_id: str, job_id: str, on_finished: Callable[[CloudPrintResponse], Any]) -> None:
url = "{}/clusters/{}/print/{}".format(self.CLUSTER_API_ROOT, cluster_id, job_id)
reply = self._manager.post(self._createEmptyRequest(url), b"")
self._addCallback(reply, on_finished, CloudPrintResponse)
## Send a print job action to the cluster for the given print job.
# \param cluster_id: The ID of the cluster.
# \param cluster_job_id: The ID of the print job within the cluster.
# \param action: The name of the action to execute.
def doPrintJobAction(self, cluster_id: str, cluster_job_id: str, action: str,
data: Optional[Dict[str, Any]] = None) -> None:
body = json.dumps({"data": data}).encode() if data else b""
url = "{}/clusters/{}/print_jobs/{}/action/{}".format(self.CLUSTER_API_ROOT, cluster_id, cluster_job_id, action)
self._manager.post(self._createEmptyRequest(url), body)
## We override _createEmptyRequest in order to add the user credentials.
# \param url: The URL to request
# \param content_type: The type of the body contents.
def _createEmptyRequest(self, path: str, content_type: Optional[str] = "application/json") -> QNetworkRequest:
request = QNetworkRequest(QUrl(path))
if content_type:
request.setHeader(QNetworkRequest.ContentTypeHeader, content_type)
access_token = self._account.accessToken
if access_token:
request.setRawHeader(b"Authorization", "Bearer {}".format(access_token).encode())
return request
    ## Parses the given JSON network reply into a status code and a dictionary, handling unexpected errors as well.
    #  \param reply: The reply from the server.
    #  \return A tuple with a status code and a dictionary.
    @staticmethod
    def _parseReply(reply: QNetworkReply) -> Tuple[int, Dict[str, Any]]:
        status_code = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
        try:
            response = bytes(reply.readAll()).decode()
            return status_code, json.loads(response)
        except (UnicodeDecodeError, JSONDecodeError, ValueError) as err:
            # Malformed payloads are turned into a synthetic CloudError so callers
            # can treat transport failures and API errors uniformly. (Listing
            # JSONDecodeError is explicit; it already subclasses ValueError.)
            error = CloudError(code=type(err).__name__, title=str(err), http_code=str(status_code),
                               id=str(time()), http_status="500")
            Logger.logException("e", "Could not parse the stardust response: %s", error.toDict())
            return status_code, {"errors": [error.toDict()]}
## Parses the given models and calls the correct callback depending on the result.
# \param response: The response from the server, after being converted to a dict.
# \param on_finished: The callback in case the response is successful.
# \param model_class: The type of the model to convert the response to. It may either be a single record or a list.
def _parseModels(self, response: Dict[str, Any],
on_finished: Union[Callable[[CloudApiClientModel], Any],
Callable[[List[CloudApiClientModel]], Any]],
model_class: Type[CloudApiClientModel]) -> None:
if "data" in response:
data = response["data"]
if isinstance(data, list):
results = [model_class(**c) for c in data] # type: List[CloudApiClientModel]
on_finished_list = cast(Callable[[List[CloudApiClientModel]], Any], on_finished)
on_finished_list(results)
else:
result = model_class(**data) # type: CloudApiClientModel
on_finished_item = cast(Callable[[CloudApiClientModel], Any], on_finished)
on_finished_item(result)
elif "errors" in response:
self._on_error([CloudError(**error) for error in response["errors"]])
else:
Logger.log("e", "Cannot find data or errors in the cloud response: %s", response)
    ## Creates a callback function so that it includes the parsing of the response into the correct model.
    #  The callback is added to the 'finished' signal of the reply.
    #  \param reply: The reply that should be listened to.
    #  \param on_finished: The callback in case the response is successful. Depending on the endpoint it will be either
    #  a list or a single item.
    #  \param model: The type of the model to convert the response to.
    def _addCallback(self,
                     reply: QNetworkReply,
                     on_finished: Union[Callable[[CloudApiClientModel], Any],
                                        Callable[[List[CloudApiClientModel]], Any]],
                     model: Type[CloudApiClientModel]) -> None:
        def parse() -> None:
            # Drop ourselves from the keep-alive list first; after this the
            # closure may be garbage collected once it returns.
            self._anti_gc_callbacks.remove(parse)
            # Don't try to parse the reply if we didn't get one
            if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute) is None:
                return
            status_code, response = self._parseReply(reply)
            self._parseModels(response, on_finished, model)
        # Holding a reference to the closure prevents it from being garbage
        # collected before the reply's 'finished' signal actually fires.
        self._anti_gc_callbacks.append(parse)
        reply.finished.connect(parse)
| 54.722826 | 120 | 0.686364 |
d737d9c13ee7b3a3ca1bb4cf74af7726899cb69c | 1,356 | py | Python | DatabaseConnection/firebasecontroller.py | Varun8216889/Indra_Bot | e31643206c48a63b2cb934bc4c1de2f2f569ca30 | [
"MIT"
] | 13 | 2021-03-04T03:09:21.000Z | 2021-04-13T10:28:31.000Z | DatabaseConnection/firebasecontroller.py | Varun8216889/Indra_Bot | e31643206c48a63b2cb934bc4c1de2f2f569ca30 | [
"MIT"
] | 49 | 2021-02-21T10:37:06.000Z | 2021-04-21T16:03:56.000Z | DatabaseConnection/firebasecontroller.py | Varun8216889/Indra_Bot | e31643206c48a63b2cb934bc4c1de2f2f569ca30 | [
"MIT"
] | 36 | 2021-03-03T21:59:49.000Z | 2021-07-22T08:55:30.000Z | import pyrebase
import logging
from DatabaseConnection import CredentialHelper as credential
class firebase_controller(object):
    """Helper for reading service records from a Firebase Realtime Database
    via pyrebase.

    :param service: key of the database record this controller should fetch.
    """

    def __init__(self, service):
        super().__init__()
        self.service = service

    # this is to make a DatabaseConnection Connection with firebase DatabaseConnection service
    @staticmethod
    def database_connect():
        """Initialise the pyrebase app and return a database handle.

        NOTE(review): pyrebase normally raises rather than returning None; if
        the error branch were ever taken, ``firebase.database()`` below would
        still fail, so that branch is best-effort logging only.
        """
        firebase = pyrebase.initialize_app(credential.firebaseConfig)
        if firebase is None:
            logging.error("problem while connecting to server")
        else:
            logging.debug("database connected")
        return firebase.database()

    # this is to get the database data from the firebase DatabaseConnection
    def get_service_data(self):
        """Return the record whose key equals ``self.service``.

        Returns either a pyrebase record, or the string
        "service currently not available" when no record matches (unchanged
        contract). Leftover debug ``print`` calls and the dead ``flag``
        bookkeeping were removed; records are iterated directly instead of
        by index.
        """
        database = self.database_connect().get()
        for record in database.pyres:
            if record.key() == self.service:
                logging.debug("data fetched")
                return record
        logging.error("service not available")
        return "service currently not available"
# Demo entry point: construct a controller for a placeholder database name.
# (No query is actually issued here — this is for manual experimentation only.)
if __name__ == "__main__":
    data = " enter here your database name "
    db = firebase_controller(data)
| 30.818182 | 94 | 0.64233 |
f4637ee3d38c83db5d93d9441454b56e929dfcb1 | 12,004 | py | Python | reagent/test/prediction/test_predictor_wrapper.py | ananthsub/ReAgent | 92f223a135b8fbc0942a217acb117ad0935897a3 | [
"BSD-3-Clause"
] | 1 | 2021-05-03T15:18:58.000Z | 2021-05-03T15:18:58.000Z | reagent/test/prediction/test_predictor_wrapper.py | ananthsub/ReAgent | 92f223a135b8fbc0942a217acb117ad0935897a3 | [
"BSD-3-Clause"
] | null | null | null | reagent/test/prediction/test_predictor_wrapper.py | ananthsub/ReAgent | 92f223a135b8fbc0942a217acb117ad0935897a3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import random
import unittest
import reagent.models as models
import reagent.types as rlt
import torch
from reagent.model_utils.seq2slate_utils import Seq2SlateMode, Seq2SlateOutputArch
from reagent.models.seq2slate import Seq2SlateTransformerNet
from reagent.prediction.predictor_wrapper import (
ActorPredictorWrapper,
ActorWithPreprocessor,
DiscreteDqnPredictorWrapper,
DiscreteDqnWithPreprocessor,
ParametricDqnPredictorWrapper,
ParametricDqnWithPreprocessor,
Seq2SlatePredictorWrapper,
Seq2SlateWithPreprocessor,
)
from reagent.preprocessing.postprocessor import Postprocessor
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.test.prediction.test_prediction_utils import _cont_norm, _cont_action_norm
from reagent.test.prediction.test_prediction_utils import (
change_cand_size_slate_ranking,
)
def seq2slate_input_prototype_to_ranking_input(
    state_input_prototype,
    candidate_input_prototype,
    state_preprocessor,
    candidate_preprocessor,
):
    """Build a PreprocessedRankingInput from (value, presence) prototypes.

    The candidate tensors are flattened to (batch * candidates, dim) so the
    candidate preprocessor sees one row per candidate, then reshaped back to
    (batch, candidates, -1) before being packed together with the
    preprocessed state.
    """
    cand_values, cand_presence = candidate_input_prototype[0], candidate_input_prototype[1]
    batch_size, candidate_size, candidate_dim = cand_values.shape
    flat_rows = batch_size * candidate_size
    preprocessed_state = state_preprocessor(
        state_input_prototype[0], state_input_prototype[1]
    )
    preprocessed_candidates = candidate_preprocessor(
        cand_values.view(flat_rows, candidate_dim),
        cand_presence.view(flat_rows, candidate_dim),
    ).view(batch_size, candidate_size, -1)
    return rlt.PreprocessedRankingInput.from_tensors(
        state=preprocessed_state,
        src_seq=preprocessed_candidates,
    )
class TestPredictorWrapper(unittest.TestCase):
    """Checks that each predictor wrapper reproduces the output of calling the
    wrapped model together with its pre/post-processors directly."""

    def test_discrete_wrapper(self):
        # DiscreteDqnPredictorWrapper must echo the configured action names and
        # match the raw DQN evaluated on preprocessed state features.
        ids = range(1, 5)
        state_normalization_parameters = {i: _cont_norm() for i in ids}
        state_preprocessor = Preprocessor(state_normalization_parameters, False)
        action_dim = 2
        dqn = models.FullyConnectedDQN(
            state_dim=len(state_normalization_parameters),
            action_dim=action_dim,
            sizes=[16],
            activations=["relu"],
        )
        state_feature_config = rlt.ModelFeatureConfig(
            float_feature_infos=[
                rlt.FloatFeatureInfo(feature_id=i, name=f"feat_{i}") for i in ids
            ]
        )
        dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
            dqn, state_preprocessor, state_feature_config
        )
        action_names = ["L", "R"]
        wrapper = DiscreteDqnPredictorWrapper(
            dqn_with_preprocessor, action_names, state_feature_config
        )
        input_prototype = dqn_with_preprocessor.input_prototype()[0]
        output_action_names, q_values = wrapper(input_prototype)
        self.assertEqual(action_names, output_action_names)
        self.assertEqual(q_values.shape, (1, 2))
        # Cross-check against calling the DQN on the preprocessed input.
        state_with_presence = input_prototype.float_features_with_presence
        expected_output = dqn(rlt.FeatureData(state_preprocessor(*state_with_presence)))
        self.assertTrue((expected_output == q_values).all())

    def test_discrete_wrapper_with_id_list(self):
        # Same as above, but the model additionally consumes an id-list feature
        # ("A") through an EmbeddingBagConcat front-end.
        state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
        state_preprocessor = Preprocessor(state_normalization_parameters, False)
        action_dim = 2
        state_feature_config = rlt.ModelFeatureConfig(
            float_feature_infos=[
                rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5)
            ],
            id_list_feature_configs=[
                rlt.IdListFeatureConfig(
                    name="A", feature_id=10, id_mapping_name="A_mapping"
                )
            ],
            id_mapping_config={"A_mapping": rlt.IdMapping(ids=[0, 1, 2])},
        )
        embedding_concat = models.EmbeddingBagConcat(
            state_dim=len(state_normalization_parameters),
            model_feature_config=state_feature_config,
            embedding_dim=8,
        )
        dqn = models.Sequential(
            embedding_concat,
            rlt.TensorFeatureData(),
            models.FullyConnectedDQN(
                embedding_concat.output_dim,
                action_dim=action_dim,
                sizes=[16],
                activations=["relu"],
            ),
        )
        dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
            dqn, state_preprocessor, state_feature_config
        )
        action_names = ["L", "R"]
        wrapper = DiscreteDqnPredictorWrapper(
            dqn_with_preprocessor, action_names, state_feature_config
        )
        input_prototype = dqn_with_preprocessor.input_prototype()[0]
        output_action_names, q_values = wrapper(input_prototype)
        self.assertEqual(action_names, output_action_names)
        self.assertEqual(q_values.shape, (1, 2))
        # Rebuild the id-list features keyed by name (the model expects names,
        # the prototype is keyed by feature id).
        feature_id_to_name = {
            config.feature_id: config.name
            for config in state_feature_config.id_list_feature_configs
        }
        state_id_list_features = {
            feature_id_to_name[k]: v
            for k, v in input_prototype.id_list_features.items()
        }
        state_with_presence = input_prototype.float_features_with_presence
        expected_output = dqn(
            rlt.FeatureData(
                float_features=state_preprocessor(*state_with_presence),
                id_list_features=state_id_list_features,
            )
        )
        self.assertTrue((expected_output == q_values).all())

    def test_parametric_wrapper(self):
        # ParametricDqnPredictorWrapper returns a single "Q" output that must
        # match the critic applied to preprocessed (state, action) pairs.
        state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
        action_normalization_parameters = {i: _cont_norm() for i in range(5, 9)}
        state_preprocessor = Preprocessor(state_normalization_parameters, False)
        action_preprocessor = Preprocessor(action_normalization_parameters, False)
        dqn = models.FullyConnectedCritic(
            state_dim=len(state_normalization_parameters),
            action_dim=len(action_normalization_parameters),
            sizes=[16],
            activations=["relu"],
        )
        dqn_with_preprocessor = ParametricDqnWithPreprocessor(
            dqn,
            state_preprocessor=state_preprocessor,
            action_preprocessor=action_preprocessor,
        )
        wrapper = ParametricDqnPredictorWrapper(dqn_with_preprocessor)
        input_prototype = dqn_with_preprocessor.input_prototype()
        output_action_names, q_value = wrapper(*input_prototype)
        self.assertEqual(output_action_names, ["Q"])
        self.assertEqual(q_value.shape, (1, 1))
        expected_output = dqn(
            rlt.FeatureData(state_preprocessor(*input_prototype[0])),
            rlt.FeatureData(action_preprocessor(*input_prototype[1])),
        )
        self.assertTrue((expected_output == q_value).all())

    def test_actor_wrapper(self):
        # ActorPredictorWrapper output must match actor + action postprocessor.
        state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
        action_normalization_parameters = {
            i: _cont_action_norm() for i in range(101, 105)
        }
        state_preprocessor = Preprocessor(state_normalization_parameters, False)
        postprocessor = Postprocessor(action_normalization_parameters, False)
        # Test with FullyConnectedActor to make behavior deterministic
        actor = models.FullyConnectedActor(
            state_dim=len(state_normalization_parameters),
            action_dim=len(action_normalization_parameters),
            sizes=[16],
            activations=["relu"],
        )
        actor_with_preprocessor = ActorWithPreprocessor(
            actor, state_preprocessor, postprocessor
        )
        wrapper = ActorPredictorWrapper(actor_with_preprocessor)
        input_prototype = actor_with_preprocessor.input_prototype()
        action = wrapper(*input_prototype)
        self.assertEqual(action.shape, (1, len(action_normalization_parameters)))
        expected_output = postprocessor(
            actor(rlt.FeatureData(state_preprocessor(*input_prototype[0]))).action
        )
        self.assertTrue((expected_output == action).all())

    def validate_seq2slate_output(self, expected_output, wrapper_output):
        # Helper: compare a raw seq2slate RANK_MODE output against the
        # (probs, indices) tuple emitted by Seq2SlatePredictorWrapper.
        ranked_per_seq_probs, ranked_tgt_out_idx = (
            expected_output.ranked_per_seq_probs,
            expected_output.ranked_tgt_out_idx,
        )
        # -2 to offset padding symbol and decoder start symbol
        ranked_tgt_out_idx -= 2
        self.assertTrue(ranked_per_seq_probs == wrapper_output[0])
        self.assertTrue(torch.all(torch.eq(ranked_tgt_out_idx, wrapper_output[1])))

    def test_seq2slate_transformer_frechet_sort_wrapper(self):
        self._test_seq2slate_wrapper(
            model="transformer", output_arch=Seq2SlateOutputArch.FRECHET_SORT
        )

    def test_seq2slate_transformer_autoregressive_wrapper(self):
        self._test_seq2slate_wrapper(
            model="transformer", output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE
        )

    def _test_seq2slate_wrapper(self, model: str, output_arch: Seq2SlateOutputArch):
        # Shared body for the seq2slate wrapper tests: build a small ranking
        # model, wrap it, and verify the wrapper matches a direct RANK_MODE call
        # — first at the prototype candidate size, then at a random larger one.
        state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)}
        candidate_normalization_parameters = {i: _cont_norm() for i in range(101, 106)}
        state_preprocessor = Preprocessor(state_normalization_parameters, False)
        candidate_preprocessor = Preprocessor(candidate_normalization_parameters, False)
        candidate_size = 10
        slate_size = 4
        seq2slate = None
        if model == "transformer":
            seq2slate = Seq2SlateTransformerNet(
                state_dim=len(state_normalization_parameters),
                candidate_dim=len(candidate_normalization_parameters),
                num_stacked_layers=2,
                num_heads=2,
                dim_model=10,
                dim_feedforward=10,
                max_src_seq_len=candidate_size,
                max_tgt_seq_len=slate_size,
                output_arch=output_arch,
                temperature=0.5,
            )
        else:
            raise NotImplementedError(f"model type {model} is unknown")
        seq2slate_with_preprocessor = Seq2SlateWithPreprocessor(
            seq2slate, state_preprocessor, candidate_preprocessor, greedy=True
        )
        wrapper = Seq2SlatePredictorWrapper(seq2slate_with_preprocessor)
        (
            state_input_prototype,
            candidate_input_prototype,
        ) = seq2slate_with_preprocessor.input_prototype()
        wrapper_output = wrapper(state_input_prototype, candidate_input_prototype)
        ranking_input = seq2slate_input_prototype_to_ranking_input(
            state_input_prototype,
            candidate_input_prototype,
            state_preprocessor,
            candidate_preprocessor,
        )
        expected_output = seq2slate(
            ranking_input,
            mode=Seq2SlateMode.RANK_MODE,
            tgt_seq_len=candidate_size,
            greedy=True,
        )
        self.validate_seq2slate_output(expected_output, wrapper_output)
        # Test Seq2SlatePredictorWrapper can handle variable lengths of inputs
        random_length = random.randint(candidate_size + 1, candidate_size * 2)
        (
            state_input_prototype,
            candidate_input_prototype,
        ) = change_cand_size_slate_ranking(
            seq2slate_with_preprocessor.input_prototype(), random_length
        )
        wrapper_output = wrapper(state_input_prototype, candidate_input_prototype)
        ranking_input = seq2slate_input_prototype_to_ranking_input(
            state_input_prototype,
            candidate_input_prototype,
            state_preprocessor,
            candidate_preprocessor,
        )
        expected_output = seq2slate(
            ranking_input,
            mode=Seq2SlateMode.RANK_MODE,
            tgt_seq_len=random_length,
            greedy=True,
        )
        self.validate_seq2slate_output(expected_output, wrapper_output)
2a3b5bf3fa7b59fbf7a0aaea7979513ad8179237 | 1,201 | py | Python | tools/studio/cvra_studio/network/UavcanNode.py | romainreignier/robot-software | 9cd2ffedb5dec99e913d77b8b58b24d451510632 | [
"MIT"
] | null | null | null | tools/studio/cvra_studio/network/UavcanNode.py | romainreignier/robot-software | 9cd2ffedb5dec99e913d77b8b58b24d451510632 | [
"MIT"
] | null | null | null | tools/studio/cvra_studio/network/UavcanNode.py | romainreignier/robot-software | 9cd2ffedb5dec99e913d77b8b58b24d451510632 | [
"MIT"
] | null | null | null | import threading
import time
import uavcan
class UavcanNode:
    """Wrapper around a uavcan node that runs its processing loop on a
    background thread and guards (most) access with a reentrant lock."""

    def __init__(self, interface, node_id):
        # Handles returned by node.add_handler(), kept so they can be removed
        # later in _uavcan_exit().
        self.handlers = []
        # Reentrant lock guarding access to the underlying uavcan node.
        self.node_lock = threading.RLock()
        self.node = uavcan.make_node(interface, node_id=node_id)

    def add_handler(self, topic, callback):
        # Subscribe `callback` to messages of the given UAVCAN type.
        self.handlers.append(self.node.add_handler(topic, callback))

    def request(self, request, node_id, callback):
        # Send a service request to `node_id`; `callback` receives the reply.
        with self.node_lock:
            self.node.request(request, node_id, callback)

    def publish(self, msg, priority):
        # NOTE(review): unlike request() and _uavcan_thread(), this does not
        # take node_lock before touching the node — confirm broadcast() is safe
        # to call concurrently with spin().
        self.node.broadcast(msg, priority=priority)

    def publish_periodically(self, period, publish_cmd):
        # Schedule `publish_cmd` to run every `period` seconds on the node.
        self.node.periodic(period, publish_cmd)

    def spin(self):
        # Start the node's processing loop on a background thread.
        threading.Thread(target=self._uavcan_thread).start()

    def _uavcan_thread(self):
        # Pump the uavcan node forever; the short sleep is done with the lock
        # released so other methods (e.g. request()) can use the node.
        while True:
            try:
                with self.node_lock:
                    self.node.spin(0.1)
                time.sleep(0.01)
            except uavcan.UAVCANException as ex:
                print('Node error:', ex)
                # Errors are logged and the loop keeps running; the exit path
                # below is intentionally disabled.
                # self._uavcan_exit()
                # return

    def _uavcan_exit(self):
        # Unsubscribe every handler registered via add_handler().
        for handler in self.handlers:
            handler.remove()
| 28.595238 | 68 | 0.610325 |
57073f68bd7ba3323950704edbaa27f63f514fb7 | 3,886 | py | Python | argo/workflows/client/models/v1alpha1_executor_config.py | zgs225/argo-client-python | 2e49a0df9b4f8fc9e90f7808caf22819ff54166c | [
"Apache-2.0"
] | 75 | 2020-03-17T03:55:23.000Z | 2021-11-08T09:38:37.000Z | argo/workflows/client/models/v1alpha1_executor_config.py | zgs225/argo-client-python | 2e49a0df9b4f8fc9e90f7808caf22819ff54166c | [
"Apache-2.0"
] | 24 | 2020-04-18T13:02:36.000Z | 2021-10-20T09:01:23.000Z | argo/workflows/client/models/v1alpha1_executor_config.py | zgs225/argo-client-python | 2e49a0df9b4f8fc9e90f7808caf22819ff54166c | [
"Apache-2.0"
] | 26 | 2020-04-18T12:56:28.000Z | 2022-01-05T04:47:30.000Z | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v2.12.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1ExecutorConfig(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'service_account_name': 'str'
    }

    attribute_map = {
        'service_account_name': 'serviceAccountName'
    }

    def __init__(self, service_account_name=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1ExecutorConfig - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to the default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._service_account_name = None
        self.discriminator = None

        if service_account_name is not None:
            self.service_account_name = service_account_name

    @property
    def service_account_name(self):
        """Gets the service_account_name of this V1alpha1ExecutorConfig.  # noqa: E501

        ServiceAccountName specifies the service account name of the executor container.  # noqa: E501

        :return: The service_account_name of this V1alpha1ExecutorConfig.  # noqa: E501
        :rtype: str
        """
        return self._service_account_name

    @service_account_name.setter
    def service_account_name(self, service_account_name):
        """Sets the service_account_name of this V1alpha1ExecutorConfig.

        ServiceAccountName specifies the service account name of the executor container.  # noqa: E501

        :param service_account_name: The service_account_name of this V1alpha1ExecutorConfig.  # noqa: E501
        :type: str
        """

        self._service_account_name = service_account_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise any attribute (or container element) that
        # itself exposes a to_dict() method.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1ExecutorConfig):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1ExecutorConfig):
            return True

        return self.to_dict() != other.to_dict()
32dfc804b70e5450c6a81ca4ee4f928e0c9fa24e | 8,375 | py | Python | perturbations_torch/perturbations.py | nlml/perturbations-torch | 55a3c5ce0ec9c6ab3def4f015691f302a5dcd826 | [
"Apache-2.0"
] | 1 | 2021-11-20T23:46:18.000Z | 2021-11-20T23:46:18.000Z | perturbations_torch/perturbations.py | nlml/perturbations-torch | 55a3c5ce0ec9c6ab3def4f015691f302a5dcd826 | [
"Apache-2.0"
] | null | null | null | perturbations_torch/perturbations.py | nlml/perturbations-torch | 55a3c5ce0ec9c6ab3def4f015691f302a5dcd826 | [
"Apache-2.0"
] | null | null | null | import torch
import functools
from typing import Tuple
_GUMBEL = "gumbel"
_NORMAL = "normal"
SUPPORTED_NOISES = (_GUMBEL, _NORMAL)


def sample_noise_with_gradients(noise, shape):
    """Draw a noise tensor together with the gradient of its log-density.

    Args:
      noise: (str) one of SUPPORTED_NOISES ("gumbel" or "normal").
      shape: shape of the tensor to sample.

    Returns:
      A (samples, gradients) pair of tensors of the requested shape. For
      Gumbel noise the log-density gradient is 1 - exp(-x); for standard
      normal noise it equals the sample itself.

    Raises:
      ValueError: if the requested noise distribution is not supported.
    """
    if noise not in SUPPORTED_NOISES:
        raise ValueError(
            "{} noise is not supported. Use one of [{}]".format(
                noise, SUPPORTED_NOISES
            )
        )

    if noise == _GUMBEL:
        draws = torch.distributions.gumbel.Gumbel(0.0, 1.0).sample(shape)
        log_density_grad = 1 - torch.exp(-draws)
    else:  # _NORMAL — the only other member of SUPPORTED_NOISES.
        draws = torch.distributions.normal.Normal(0.0, 1.0).sample(shape)
        log_density_grad = draws
    return draws, log_density_grad
def perturbed(
    func=None, num_samples=1000, sigma=0.05, noise=_NORMAL, batched=True
):
    """Turns a function into a differentiable one via perturbations.

    The input function has to be the solution to a linear program for the trick
    to work. For instance the maximum function, the logical operators or the
    ranks can be expressed as solutions to some linear programs on some
    polytopes. If this condition is violated though, the result would not hold
    and there is no guarantee on the validity of the obtained gradients.

    This function can be used directly or as a decorator.

    Args:
      func: the function to be turned into a perturbed and differentiable one.
        Four I/O signatures for func are currently supported:
          If batched is True,
            (1) input [B, D1, ..., Dk], output [B, D1, ..., Dk], k >= 1
            (2) input [B, D1, ..., Dk], output [B], k >= 1
          If batched is False,
            (3) input [D1, ..., Dk], output [D1, ..., Dk], k >= 1
            (4) input [D1, ..., Dk], output [], k >= 1.
      num_samples: the number of samples to use for the expectation computation.
      sigma: the scale of the perturbation.
      noise: a string representing the noise distribution to be used to sample
        perturbations.
      batched: whether inputs to the perturbed function will have a leading
        batch dimension (True) or consist of a single example (False).
        Defaults to True.

    Returns:
      a function with the same signature as func but that can be back
      propagated.
    """
    # This is a trick to have the decorator work both with and without arguments.
    if func is None:
        return functools.partial(
            perturbed,
            num_samples=num_samples,
            sigma=sigma,
            noise=noise,
            batched=batched,
        )

    @functools.wraps(func)
    def wrapper(input_tensor, *args, **kwargs):
        def forward(input_tensor, *args, **kwargs):
            # A fresh autograd.Function subclass is created per call so that the
            # closure captures func/num_samples/sigma/noise/batched and any
            # extra *args/**kwargs for the forward evaluation.
            class PerturbedTch(torch.autograd.Function):
                @staticmethod
                def forward(ctx, input_tensor):
                    """
                    In the forward pass we receive a Tensor containing the input and return
                    a Tensor containing the output. ctx is a context object that can be used
                    to stash information for backward computation. You can cache arbitrary
                    objects for use in the backward pass using the ctx.save_for_backward method.
                    """
                    original_input_shape = input_tensor.shape
                    # save_for_backward only takes tensors, so the shape is
                    # stashed as a LongTensor.
                    orig_shape = torch.LongTensor(list(original_input_shape))
                    if batched:
                        if not len(original_input_shape) >= 2:
                            raise ValueError(
                                "Batched inputs must have at least rank two"
                            )
                    else:
                        input_tensor = input_tensor.unsqueeze(0)
                    input_shape = input_tensor.shape  # [B, D1, ... Dk], k >= 1
                    perturbed_input_shape = [num_samples] + list(input_shape)
                    noises = sample_noise_with_gradients(
                        noise, perturbed_input_shape
                    )
                    # Move the noise onto the input's device/dtype before use.
                    additive_noise, noise_gradient = [
                        noise.to(input_tensor.device).type(input_tensor.dtype)
                        for noise in noises
                    ]
                    perturbed_input = (
                        input_tensor.unsqueeze(0) + sigma * additive_noise
                    )
                    # [N, B, D1, ..., Dk] -> [NB, D1, ..., Dk].
                    flat_batch_dim_shape = [-1] + list(input_shape[1:])
                    perturbed_input = perturbed_input.view(
                        flat_batch_dim_shape
                    )
                    # Calls user-defined function in a perturbation agnostic manner.
                    perturbed_output = func(perturbed_input, *args, **kwargs)
                    # [NB, D1, ..., Dk] -> [N, B, D1, ..., Dk].
                    perturbed_input = perturbed_input.view(
                        perturbed_input_shape
                    )
                    # Either
                    # (Default case): [NB, D1, ..., Dk] -> [N, B, D1, ..., Dk]
                    # or
                    # (Full-reduce case) [NB] -> [N, B]
                    perturbed_output_shape = (
                        [num_samples] + [-1] + list(perturbed_output.shape[1:])
                    )
                    perturbed_output = perturbed_output.view(
                        perturbed_output_shape
                    )
                    # The perturbed expectation: average over the sample axis.
                    forward_output = perturbed_output.mean(0)
                    if not batched:  # Removes dummy batch dimension.
                        forward_output = forward_output[0]
                    ctx.save_for_backward(
                        orig_shape, noise_gradient, perturbed_output
                    )
                    return forward_output
                    # ctx.save_for_backward(original_input_shape)
                    # return input_tensor.clamp(min=0)

                @staticmethod
                def backward(ctx, dy):
                    original_input_shape, noise_gradient, perturbed_output = (
                        ctx.saved_tensors
                    )
                    # perturbed_input_shape = [num_samples] if batched else [num_samples, 1] + list(original_input_shape)
                    # perturbed_input_rank = len(perturbed_input_shape)
                    # Rank of the perturbed input: +1 for the sample axis, and
                    # +1 more for the dummy batch axis in the unbatched case.
                    perturbed_input_rank = len(original_input_shape) + (
                        1 if batched else 2
                    )
                    """Compute the gradient of the expectation via integration by parts."""
                    output, noise_grad = perturbed_output, noise_gradient
                    # Adds dummy feature/channel dimension internally.
                    if perturbed_input_rank > len(output.shape):
                        dy = dy.unsqueeze(-1)
                        output = output.unsqueeze(-1)
                    # Adds dummy batch dimension internally.
                    if not batched:
                        dy = dy.unsqueeze(0)
                    # Flattens [D1, ..., Dk] to a single feat dim [D].
                    flatten = lambda t: t.view(t.shape[0], t.shape[1], -1)
                    dy = dy.view(dy.shape[0], -1)  # (B, D)
                    output = flatten(output)  # (N, B, D)
                    noise_grad = flatten(noise_grad)  # (N, B, D)
                    # Score-function / integration-by-parts estimator:
                    # g = E[ grad_log_p(Z) * <f(x + sigma Z), dy> ] / sigma.
                    g = torch.einsum(
                        "nbd,nb->bd",
                        noise_grad,
                        torch.einsum("nbd,bd->nb", output, dy),
                    )
                    g /= sigma * num_samples
                    # The saved shape is a LongTensor; unpacking it element-wise
                    # restores the original input shape for the gradient.
                    return g.view(*original_input_shape)

            return PerturbedTch.apply(input_tensor)

        return forward(input_tensor, *args, **kwargs)

    return wrapper
756b52c3c57a618897f2c26cb77bc3c2bd7f79a2 | 66,378 | py | Python | Tools/gdb/libpython.py | holmanb/cpython | 9405a02f4c50e235d01d942bd91eb4bea2a86e96 | [
"0BSD"
] | 2 | 2021-06-06T08:11:15.000Z | 2021-06-14T11:25:48.000Z | Tools/gdb/libpython.py | holmanb/cpython | 9405a02f4c50e235d01d942bd91eb4bea2a86e96 | [
"0BSD"
] | 14 | 2020-03-12T01:10:53.000Z | 2022-01-01T14:00:53.000Z | Tools/gdb/libpython.py | holmanb/cpython | 9405a02f4c50e235d01d942bd91eb4bea2a86e96 | [
"0BSD"
] | 1 | 2021-11-20T00:02:09.000Z | 2021-11-20T00:02:09.000Z | #!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax
# compatible (2.6+ and 3.0+). See #19308.
from __future__ import print_function
import gdb
import os
import locale
import sys
if sys.version_info[0] >= 3:
unichr = chr
xrange = range
long = int
# Look up the gdb.Type for some standard types:
# Those need to be refreshed as types (pointer sizes) may change when
# gdb loads different executables, hence functions rather than module-level
# constants.
def _type_char_ptr():
    return gdb.lookup_type('char').pointer() # char*


def _type_unsigned_char_ptr():
    return gdb.lookup_type('unsigned char').pointer() # unsigned char*


def _type_unsigned_short_ptr():
    return gdb.lookup_type('unsigned short').pointer()


def _type_unsigned_int_ptr():
    return gdb.lookup_type('unsigned int').pointer()


def _sizeof_void_p():
    # Size of a pointer in the *inferior* process, not in the python running gdb.
    return gdb.lookup_type('void').pointer().sizeof
# value computed later, see PyUnicodeObjectPtr.proxy()
_is_pep393 = None
Py_TPFLAGS_HEAPTYPE = (1 << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
FRAME_INFO_OPTIMIZED_OUT = '(frame information optimized out)'
UNABLE_READ_INFO_PYTHON_FRAME = 'Unable to read information on python frame'
EVALFRAME = '_PyEval_EvalFrameDefault'
class NullPyObjectPtr(RuntimeError):
    """Error raised when a NULL (PyObject*) is encountered in the inferior."""
    pass
def safety_limit(val, limit=1000):
    """Clamp an integer read from the process being debugged.

    Data in the inferior process may be arbitrarily corrupt, so any size or
    length obtained from it is capped before being used to drive iteration in
    the gdb process (e.g. sizes of iterations, sizes of lists).

    :param val: integer value obtained from the debuggee.
    :param limit: maximum value to allow; defaults to the historical 1000.
    :return: min(val, limit)
    """
    return min(val, limit)
def safe_range(val):
    """Like range()/xrange(), but with the upper bound clamped by
    safety_limit() so corrupt data in the debuggee cannot make gdb iterate
    (nearly) forever."""
    upper_bound = safety_limit(int(val))
    return xrange(upper_bound)
if sys.version_info[0] >= 3:
    # On Python 3 every str is unicode; write straight through.
    def write_unicode(file, text):
        file.write(text)
else:
    def write_unicode(file, text):
        # Write a byte or unicode string to file. Unicode strings are encoded to
        # ENCODING encoding with 'backslashreplace' error handler to avoid
        # UnicodeEncodeError.
        if isinstance(text, unicode):
            text = text.encode(ENCODING, 'backslashreplace')
        file.write(text)
# os.fsencode() only exists on Python 3.2+; provide a fallback for older
# Pythons that emulates the "surrogateescape" round-tripping of filenames.
try:
    os_fsencode = os.fsencode
except AttributeError:
    def os_fsencode(filename):
        """Encode *filename* to the filesystem encoding, undoing
        surrogateescape (U+DC80..U+DCFF map back to raw bytes)."""
        if not isinstance(filename, unicode):
            return filename
        encoding = sys.getfilesystemencoding()
        if encoding == 'mbcs':
            # mbcs doesn't support surrogateescape
            return filename.encode(encoding)
        encoded = []
        for char in filename:
            # surrogateescape error handler
            if 0xDC80 <= ord(char) <= 0xDCFF:
                byte = chr(ord(char) - 0xDC00)
            else:
                byte = char.encode(encoding)
            encoded.append(byte)
        return ''.join(encoded)
class StringTruncated(RuntimeError):
    """Raised by TruncatedStringIO.write() when the length budget is hit."""
    pass
class TruncatedStringIO(object):
    '''A minimal write-only string buffer, similar to io.StringIO, except
    that exceeding the optional *maxlen* budget stores the still-fitting
    prefix and signals the overflow by raising StringTruncated.'''
    def __init__(self, maxlen=None):
        self._val = ''
        self.maxlen = maxlen
    def write(self, data):
        if self.maxlen:
            budget = self.maxlen - len(self._val)
            if len(data) > budget:
                # Keep whatever still fits, then signal the truncation:
                self._val += data[:budget]
                raise StringTruncated()
        self._val += data
    def getvalue(self):
        return self._val
class PyObjectPtr(object):
    """
    Class wrapping a gdb.Value that's either a (PyObject*) within the
    inferior process, or some subclass pointer e.g. (PyBytesObject*)
    There will be a subclass for every refined PyObject type that we care
    about.
    Note that at every stage the underlying pointer could be NULL, point
    to corrupt data, etc; this is the debugger, after all.
    """
    # C struct name this wrapper corresponds to; subclasses override it.
    _typename = 'PyObject'
    def __init__(self, gdbval, cast_to=None):
        """Wrap *gdbval*; if *cast_to* (a gdb.Type) is given, cast first."""
        if cast_to:
            self._gdbval = gdbval.cast(cast_to)
        else:
            self._gdbval = gdbval
    def field(self, name):
        '''
        Get the gdb.Value for the given field within the PyObject, coping with
        some python 2 versus python 3 differences.
        Various libpython types are defined using the "PyObject_HEAD" and
        "PyObject_VAR_HEAD" macros.
        In Python 2, this these are defined so that "ob_type" and (for a var
        object) "ob_size" are fields of the type in question.
        In Python 3, this is defined as an embedded PyVarObject type thus:
           PyVarObject ob_base;
        so that the "ob_size" field is located insize the "ob_base" field, and
        the "ob_type" is most easily accessed by casting back to a (PyObject*).
        '''
        if self.is_null():
            raise NullPyObjectPtr(self)
        if name == 'ob_type':
            pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
            return pyo_ptr.dereference()[name]
        if name == 'ob_size':
            pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
            return pyo_ptr.dereference()[name]
        # General case: look it up inside the object:
        return self._gdbval.dereference()[name]
    def pyop_field(self, name):
        '''
        Get a PyObjectPtr for the given PyObject* field within this PyObject,
        coping with some python 2 versus python 3 differences.
        '''
        return PyObjectPtr.from_pyobject_ptr(self.field(name))
    def write_field_repr(self, name, out, visited):
        '''
        Extract the PyObject* field named "name", and write its representation
        to file-like object "out"
        '''
        field_obj = self.pyop_field(name)
        field_obj.write_repr(out, visited)
    def get_truncated_repr(self, maxlen):
        '''
        Get a repr-like string for the data, but truncate it at "maxlen" bytes
        (ending the object graph traversal as soon as you do)
        '''
        out = TruncatedStringIO(maxlen)
        try:
            self.write_repr(out, set())
        except StringTruncated:
            # Truncation occurred:
            return out.getvalue() + '...(truncated)'
        # No truncation occurred:
        return out.getvalue()
    def type(self):
        """Return a PyTypeObjectPtr wrapping this object's ob_type."""
        return PyTypeObjectPtr(self.field('ob_type'))
    def is_null(self):
        """True if the wrapped pointer is NULL."""
        return 0 == long(self._gdbval)
    def is_optimized_out(self):
        '''
        Is the value of the underlying PyObject* visible to the debugger?
        This can vary with the precise version of the compiler used to build
        Python, and the precise version of gdb.
        See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
        PyEval_EvalFrameEx's "f"
        '''
        return self._gdbval.is_optimized_out
    def safe_tp_name(self):
        """Best-effort tp_name of this object's type; 'unknown' on failure."""
        try:
            ob_type = self.type()
            tp_name = ob_type.field('tp_name')
            return tp_name.string()
        # NullPyObjectPtr: NULL tp_name?
        # RuntimeError: Can't even read the object at all?
        # UnicodeDecodeError: Failed to decode tp_name bytestring
        except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
            return 'unknown'
    def proxyval(self, visited):
        '''
        Scrape a value from the inferior process, and try to represent it
        within the gdb process, whilst (hopefully) avoiding crashes when
        the remote data is corrupt.
        Derived classes will override this.
        For example, a PyIntObject* with ob_ival 42 in the inferior process
        should result in an int(42) in this process.
        visited: a set of all gdb.Value pyobject pointers already visited
        whilst generating this value (to guard against infinite recursion when
        visiting object graphs with loops). Analogous to Py_ReprEnter and
        Py_ReprLeave
        '''
        class FakeRepr(object):
            """
            Class representing a non-descript PyObject* value in the inferior
            process for when we don't have a custom scraper, intended to have
            a sane repr().
            """
            def __init__(self, tp_name, address):
                self.tp_name = tp_name
                self.address = address
            def __repr__(self):
                # For the NULL pointer, we have no way of knowing a type, so
                # special-case it as per
                # http://bugs.python.org/issue8032#msg100882
                if self.address == 0:
                    return '0x0'
                return '<%s at remote 0x%x>' % (self.tp_name, self.address)
        return FakeRepr(self.safe_tp_name(),
                        long(self._gdbval))
    def write_repr(self, out, visited):
        '''
        Write a string representation of the value scraped from the inferior
        process to "out", a file-like object.
        '''
        # Default implementation: generate a proxy value and write its repr
        # However, this could involve a lot of work for complicated objects,
        # so for derived classes we specialize this
        return out.write(repr(self.proxyval(visited)))
    @classmethod
    def subclass_from_type(cls, t):
        '''
        Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
        (PyTypeObject*), determine the corresponding subclass of PyObjectPtr
        to use
        Ideally, we would look up the symbols for the global types, but that
        isn't working yet:
          (gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
          Traceback (most recent call last):
            File "<string>", line 1, in <module>
          NotImplementedError: Symbol type not yet supported in Python scripts.
          Error while executing Python code.
        For now, we use tp_flags, after doing some string comparisons on the
        tp_name for some special-cases that don't seem to be visible through
        flags
        '''
        try:
            tp_name = t.field('tp_name').string()
            tp_flags = int(t.field('tp_flags'))
        # RuntimeError: NULL pointers
        # UnicodeDecodeError: string() fails to decode the bytestring
        except (RuntimeError, UnicodeDecodeError):
            # Handle any kind of error e.g. NULL ptrs by simply using the base
            # class
            return cls
        #print('tp_flags = 0x%08x' % tp_flags)
        #print('tp_name = %r' % tp_name)
        # Types whose identity isn't visible via tp_flags alone:
        name_map = {'bool': PyBoolObjectPtr,
                    'classobj': PyClassObjectPtr,
                    'NoneType': PyNoneStructPtr,
                    'frame': PyFrameObjectPtr,
                    'set' : PySetObjectPtr,
                    'frozenset' : PySetObjectPtr,
                    'builtin_function_or_method' : PyCFunctionObjectPtr,
                    'method-wrapper': wrapperobject,
                    }
        if tp_name in name_map:
            return name_map[tp_name]
        # Otherwise dispatch on the Py_TPFLAGS_*_SUBCLASS bits:
        if tp_flags & Py_TPFLAGS_HEAPTYPE:
            return HeapTypeObjectPtr
        if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
            return PyLongObjectPtr
        if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
            return PyListObjectPtr
        if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
            return PyTupleObjectPtr
        if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
            return PyBytesObjectPtr
        if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
            return PyUnicodeObjectPtr
        if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
            return PyDictObjectPtr
        if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
            return PyBaseExceptionObjectPtr
        #if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
        #    return PyTypeObjectPtr
        # Use the base class:
        return cls
    @classmethod
    def from_pyobject_ptr(cls, gdbval):
        '''
        Try to locate the appropriate derived class dynamically, and cast
        the pointer accordingly.
        '''
        try:
            p = PyObjectPtr(gdbval)
            cls = cls.subclass_from_type(p.type())
            return cls(gdbval, cast_to=cls.get_gdb_type())
        except RuntimeError:
            # Handle any kind of error e.g. NULL ptrs by simply using the base
            # class
            pass
        return cls(gdbval)
    @classmethod
    def get_gdb_type(cls):
        """gdb.Type for a pointer to this wrapper's C struct (_typename*)."""
        return gdb.lookup_type(cls._typename).pointer()
    def as_address(self):
        """The wrapped pointer as a Python integer."""
        return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
    # Wrapper for (PyVarObject*); used by field() to reach ob_size.
    _typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
    '''
    Stand-in proxy emitted when the object graph loops back on itself, so
    that cyclic data cannot cause infinite recursion while proxying.
    Analogous to the values emitted by the users of Py_ReprEnter and
    Py_ReprLeave.
    '''
    def __init__(self, rep):
        # The fixed placeholder text (e.g. '(...)', '[...]') to show.
        self._placeholder = rep
    def __repr__(self):
        return self._placeholder
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
    '''Shared helper used by the class proxies: emit a
    "<ClassName(attr=value, ...) at remote 0xADDR>" representation of an
    instance to the file-like object *out*.'''
    out.write('<')
    out.write(name)
    # Only render the attribute dict when we actually located one:
    if isinstance(pyop_attrdict, PyDictObjectPtr):
        out.write('(')
        for idx, (pyop_arg, pyop_val) in enumerate(pyop_attrdict.iteritems()):
            if idx:
                out.write(', ')
            out.write(pyop_arg.proxyval(visited))
            out.write('=')
            pyop_val.write_repr(out, visited)
        out.write(')')
    out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
    """Proxy for a class instance in the inferior process: records the class
    name, the (already-proxied) attribute dict when available, and the
    remote address."""
    def __init__(self, cl_name, attrdict, address):
        self.cl_name = cl_name
        self.attrdict = attrdict
        self.address = address
    def __repr__(self):
        # Without a usable attribute dict, show just the class and address:
        if not isinstance(self.attrdict, dict):
            return '<%s at remote 0x%x>' % (self.cl_name, self.address)
        kwargs = ', '.join("%s=%r" % (arg, val)
                           for arg, val in self.attrdict.items())
        return '<%s(%s) at remote 0x%x>' % (self.cl_name,
                                            kwargs, self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
    """Transliteration of CPython's _PyObject_VAR_SIZE macro: compute the
    allocated size of a variable-size object with *nitems* items, rounded
    up to pointer alignment, as a gdb.Value of type size_t."""
    # Lazily cache the size_t type on first use (types may change when gdb
    # loads a different executable):
    if _PyObject_VAR_SIZE._type_size_t is None:
        _PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')
    return ( ( typeobj.field('tp_basicsize') +
               nitems * typeobj.field('tp_itemsize') +
               (_sizeof_void_p() - 1)
             ) & ~(_sizeof_void_p() - 1)
           ).cast(_PyObject_VAR_SIZE._type_size_t)
_PyObject_VAR_SIZE._type_size_t = None
class HeapTypeObjectPtr(PyObjectPtr):
    """Wrapper for instances of heap-allocated (user-defined) classes;
    proxied by locating their attribute dictionary via tp_dictoffset."""
    _typename = 'PyObject'
    def get_attr_dict(self):
        '''
        Get the PyDictObject ptr representing the attribute dictionary
        (or None if there's a problem)
        '''
        try:
            typeobj = self.type()
            dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
            if dictoffset != 0:
                if dictoffset < 0:
                    # Negative tp_dictoffset: the dict pointer lives at the
                    # end of the variable-size object (see CPython's
                    # _PyObject_GetDictPtr).
                    type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
                    tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
                    if tsize < 0:
                        tsize = -tsize
                    size = _PyObject_VAR_SIZE(typeobj, tsize)
                    dictoffset += size
                    assert dictoffset > 0
                    assert dictoffset % _sizeof_void_p() == 0
                dictptr = self._gdbval.cast(_type_char_ptr()) + dictoffset
                PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
                dictptr = dictptr.cast(PyObjectPtrPtr)
                return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
        except RuntimeError:
            # Corrupt data somewhere; fail safe
            pass
        # Not found, or some kind of error:
        return None
    def proxyval(self, visited):
        '''
        Support for classes.
        Currently we just locate the dictionary using a transliteration to
        python of _PyObject_GetDictPtr, ignoring descriptors
        '''
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('<...>')
        visited.add(self.as_address())
        pyop_attr_dict = self.get_attr_dict()
        if pyop_attr_dict:
            attr_dict = pyop_attr_dict.proxyval(visited)
        else:
            attr_dict = {}
        tp_name = self.safe_tp_name()
        # Class:
        return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
    def write_repr(self, out, visited):
        """Write an InstanceProxy-style repr directly to *out*."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('<...>')
            return
        visited.add(self.as_address())
        pyop_attrdict = self.get_attr_dict()
        _write_instance_repr(out, visited,
                             self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
    """Local stand-in for an exception instance in the inferior process,
    carrying the exception's type name and its (proxied) args tuple."""
    def __init__(self, tp_name, args):
        self.tp_name = tp_name
        self.args = args
    def __repr__(self):
        return '{0}{1!r}'.format(self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
    within the process being debugged.
    """
    _typename = 'PyBaseExceptionObject'
    def proxyval(self, visited):
        """Proxy as a ProxyException built from tp_name and the args tuple."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('(...)')
        visited.add(self.as_address())
        arg_proxy = self.pyop_field('args').proxyval(visited)
        return ProxyException(self.safe_tp_name(),
                              arg_proxy)
    def write_repr(self, out, visited):
        """Write "TypeName(args...)" to *out*."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())
        out.write(self.safe_tp_name())
        self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
    instance within the process being debugged.
    """
    # No custom proxying; inherits the generic PyObjectPtr behavior.
    _typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
    """Proxy for a built-in function in the inferior (a PyCFunction whose
    m_self is NULL), identified by its ml_name."""
    def __init__(self, ml_name):
        self.ml_name = ml_name
    def __repr__(self):
        return "<built-in function {0}>".format(self.ml_name)
class BuiltInMethodProxy(object):
    """Proxy for a bound built-in method in the inferior: the ml_name plus
    the wrapped m_self object it is bound to."""
    def __init__(self, ml_name, pyop_m_self):
        self.ml_name = ml_name
        self.pyop_m_self = pyop_m_self
    def __repr__(self):
        tp_name = self.pyop_m_self.safe_tp_name()
        address = self.pyop_m_self.as_address()
        return ('<built-in method %s of %s object at remote 0x%x>'
                % (self.ml_name, tp_name, address))
class PyCFunctionObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyCFunctionObject*
    (see Include/methodobject.h and Objects/methodobject.c)
    """
    _typename = 'PyCFunctionObject'
    def proxyval(self, visited):
        """Proxy as a BuiltInFunctionProxy (unbound) or BuiltInMethodProxy
        (bound to m_self)."""
        m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
        try:
            ml_name = m_ml['ml_name'].string()
        except UnicodeDecodeError:
            ml_name = '<ml_name:UnicodeDecodeError>'
        pyop_m_self = self.pyop_field('m_self')
        if pyop_m_self.is_null():
            return BuiltInFunctionProxy(ml_name)
        else:
            return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
    within the process being debugged.
    """
    _typename = 'PyCodeObject'
    def addr2line(self, addrq):
        '''
        Get the line number for a given bytecode offset
        Analogous to PyCode_Addr2Line; translated from pseudocode in
        Objects/lnotab_notes.txt
        '''
        # co_linetable is proxied as a str of (addr_delta, line_delta) byte
        # pairs; decode it incrementally until we pass addrq.
        co_linetable = self.pyop_field('co_linetable').proxyval(set())
        # Initialize lineno to co_firstlineno as per PyCode_Addr2Line
        # not 0, as lnotab_notes.txt has it:
        lineno = int_from_int(self.field('co_firstlineno'))
        if addrq < 0:
            return lineno
        addr = 0
        for addr_incr, line_incr in zip(co_linetable[::2], co_linetable[1::2]):
            # 255 is the end-of-table sentinel for the address column:
            if addr_incr == 255:
                break
            addr += ord(addr_incr)
            line_delta = ord(line_incr)
            # Line deltas are stored biased by 128 (signed byte encoding):
            if line_delta == 128:
                line_delta = 0
            elif line_delta > 128:
                line_delta -= 256
            lineno += line_delta
            if addr > addrq:
                return lineno
        assert False, "Unreachable"
class PyDictObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
    within the process being debugged.
    """
    _typename = 'PyDictObject'
    def iteritems(self):
        '''
        Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
        analogous to dict.iteritems()
        '''
        keys = self.field('ma_keys')
        values = self.field('ma_values')
        entries, nentries = self._get_entries(keys)
        for i in safe_range(nentries):
            ep = entries[i]
            # Split-table dicts store values separately in ma_values;
            # combined tables store them in the entry itself.
            if long(values):
                pyop_value = PyObjectPtr.from_pyobject_ptr(values[i])
            else:
                pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
            # A NULL value marks an unused/deleted slot; skip it.
            if not pyop_value.is_null():
                pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
                yield (pyop_key, pyop_value)
    def proxyval(self, visited):
        """Proxy as a local dict of proxied keys/values."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('{...}')
        visited.add(self.as_address())
        result = {}
        for pyop_key, pyop_value in self.iteritems():
            proxy_key = pyop_key.proxyval(visited)
            proxy_value = pyop_value.proxyval(visited)
            result[proxy_key] = proxy_value
        return result
    def write_repr(self, out, visited):
        """Write a "{k: v, ...}" repr to *out*."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('{...}')
            return
        visited.add(self.as_address())
        out.write('{')
        first = True
        for pyop_key, pyop_value in self.iteritems():
            if not first:
                out.write(', ')
            first = False
            pyop_key.write_repr(out, visited)
            out.write(': ')
            pyop_value.write_repr(out, visited)
        out.write('}')
    def _get_entries(self, keys):
        """Return (entries-array gdb.Value, entry count) for the keys object,
        handling the pre-3.6 and 3.6+ PyDictKeysObject layouts."""
        dk_nentries = int(keys['dk_nentries'])
        dk_size = 1<<int(keys['dk_log2_size'])
        try:
            # <= Python 3.5
            return keys['dk_entries'], dk_size
        except RuntimeError:
            # >= Python 3.6
            pass
        # 3.6+: dk_entries follows the dk_indices array, whose element width
        # depends on the table size (1/2/4/8 bytes per index):
        if dk_size <= 0xFF:
            offset = dk_size
        elif dk_size <= 0xFFFF:
            offset = 2 * dk_size
        elif dk_size <= 0xFFFFFFFF:
            offset = 4 * dk_size
        else:
            offset = 8 * dk_size
        ent_addr = keys['dk_indices'].address
        ent_addr = ent_addr.cast(_type_unsigned_char_ptr()) + offset
        ent_ptr_t = gdb.lookup_type('PyDictKeyEntry').pointer()
        ent_addr = ent_addr.cast(ent_ptr_t)
        return ent_addr, dk_nentries
class PyListObjectPtr(PyObjectPtr):
    """Wrapper for (PyListObject*): a list in the inferior process."""
    _typename = 'PyListObject'
    def __getitem__(self, i):
        # Get the gdb.Value for the (PyObject*) with the given index:
        field_ob_item = self.field('ob_item')
        return field_ob_item[i]
    def proxyval(self, visited):
        """Proxy as a local list of proxied elements."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('[...]')
        visited.add(self.as_address())
        result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
                  for i in safe_range(int_from_int(self.field('ob_size')))]
        return result
    def write_repr(self, out, visited):
        """Write a "[...]" repr to *out*."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('[...]')
            return
        visited.add(self.as_address())
        out.write('[')
        for i in safe_range(int_from_int(self.field('ob_size'))):
            if i > 0:
                out.write(', ')
            element = PyObjectPtr.from_pyobject_ptr(self[i])
            element.write_repr(out, visited)
        out.write(']')
class PyLongObjectPtr(PyObjectPtr):
    """Wrapper for (PyLongObject*): an int in the inferior process."""
    _typename = 'PyLongObject'
    def proxyval(self, visited):
        '''
        Python's Include/longobjrep.h has this declaration:
           struct _longobject {
               PyObject_VAR_HEAD
               digit ob_digit[1];
           };
        with this description:
            The absolute value of a number is equal to
                 SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
            Negative numbers are represented with ob_size < 0;
            zero is represented by ob_size == 0.
        where SHIFT can be either:
            #define PyLong_SHIFT        30
            #define PyLong_SHIFT        15
        '''
        ob_size = long(self.field('ob_size'))
        if ob_size == 0:
            return 0
        ob_digit = self.field('ob_digit')
        # Determine the digit width from the inferior's 'digit' typedef:
        if gdb.lookup_type('digit').sizeof == 2:
            SHIFT = 15
        else:
            SHIFT = 30
        digits = [long(ob_digit[i]) * 2**(SHIFT*i)
                  for i in safe_range(abs(ob_size))]
        result = sum(digits)
        if ob_size < 0:
            result = -result
        return result
    def write_repr(self, out, visited):
        # Write this out as a Python 3 int literal, i.e. without the "L" suffix
        proxy = self.proxyval(visited)
        out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
    <bool> instances (Py_True/Py_False) within the process being debugged.
    """
    def proxyval(self, visited):
        # A bool is laid out exactly like an int; coerce the numeric value
        # recovered by the base class into a genuine Python bool.
        return bool(PyLongObjectPtr.proxyval(self, visited))
class PyNoneStructPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyObject* pointing to the
    singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
    """
    _typename = 'PyObject'
    def proxyval(self, visited):
        # None proxies as the local None, unconditionally.
        return None
# Indices of the globals/builtins entries within a frame's "specials"
# section, which sits right after the co_nlocalsplus variables in the
# frame's f_localsptr array (see PyFrameObjectPtr._f_globals/_f_builtins).
FRAME_SPECIALS_GLOBAL_OFFSET = 0
FRAME_SPECIALS_BUILTINS_OFFSET = 1
class PyFrameObjectPtr(PyObjectPtr):
    """Wrapper for (PyFrameObject*): a Python stack frame in the inferior."""
    _typename = 'PyFrameObject'
    def __init__(self, gdbval, cast_to=None):
        """Wrap the frame and eagerly cache its code object and the
        commonly-used fields (name, filename, lineno, lasti, locals info)."""
        PyObjectPtr.__init__(self, gdbval, cast_to)
        if not self.is_optimized_out():
            self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
            self.co_name = self.co.pyop_field('co_name')
            self.co_filename = self.co.pyop_field('co_filename')
            self.f_lineno = int_from_int(self.field('f_lineno'))
            self.f_lasti = int_from_int(self.field('f_lasti'))
            self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
            pnames = self.co.field('co_localsplusnames')
            self.co_localsplusnames = PyTupleObjectPtr.from_pyobject_ptr(pnames)
    def iter_locals(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the local variables of this frame
        '''
        if self.is_optimized_out():
            return
        f_localsplus = self.field('f_localsptr')
        for i in safe_range(self.co_nlocals):
            pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
            # NULL means the local is currently unbound; skip it.
            if pyop_value.is_null():
                continue
            pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_localsplusnames[i])
            yield (pyop_name, pyop_value)
    def _f_globals(self):
        """PyObjectPtr for the frame's globals dict, read from the
        "specials" section of f_localsptr."""
        f_localsplus = self.field('f_localsptr')
        nlocalsplus = int_from_int(self.co.field('co_nlocalsplus'))
        index = nlocalsplus + FRAME_SPECIALS_GLOBAL_OFFSET
        return PyObjectPtr.from_pyobject_ptr(f_localsplus[index])
    def iter_globals(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the global variables of this frame
        '''
        if self.is_optimized_out():
            return ()
        pyop_globals = self._f_globals()
        return pyop_globals.iteritems()
    def _f_builtins(self):
        """PyObjectPtr for the frame's builtins dict, read from the
        "specials" section of f_localsptr."""
        f_localsplus = self.field('f_localsptr')
        nlocalsplus = int_from_int(self.co.field('co_nlocalsplus'))
        index = nlocalsplus + FRAME_SPECIALS_BUILTINS_OFFSET
        return PyObjectPtr.from_pyobject_ptr(f_localsplus[index])
    def iter_builtins(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the builtin variables
        '''
        if self.is_optimized_out():
            return ()
        pyop_builtins = self._f_builtins()
        return pyop_builtins.iteritems()
    def get_var_by_name(self, name):
        '''
        Look for the named local variable, returning a (PyObjectPtr, scope) pair
        where scope is a string 'local', 'global', 'builtin'
        If not found, return (None, None)
        '''
        for pyop_name, pyop_value in self.iter_locals():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'local'
        for pyop_name, pyop_value in self.iter_globals():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'global'
        for pyop_name, pyop_value in self.iter_builtins():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'builtin'
        return None, None
    def filename(self):
        '''Get the path of the current Python source file, as a string'''
        if self.is_optimized_out():
            return FRAME_INFO_OPTIMIZED_OUT
        return self.co_filename.proxyval(set())
    def current_line_num(self):
        '''Get current line number as an integer (1-based)
        Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
        See Objects/lnotab_notes.txt
        '''
        if self.is_optimized_out():
            return None
        f_trace = self.field('f_trace')
        if long(f_trace) != 0:
            # we have a non-NULL f_trace:
            return self.f_lineno
        try:
            # f_lasti counts code units; the line table is addressed in
            # bytes, hence the *2:
            return self.co.addr2line(self.f_lasti*2)
        except Exception:
            # bpo-34989: addr2line() is a complex function, it can fail in many
            # ways. For example, it fails with a TypeError on "FakeRepr" if
            # gdb fails to load debug symbols. Use a catch-all "except
            # Exception" to make the whole function safe. The caller has to
            # handle None anyway for optimized Python.
            return None
    def current_line(self):
        '''Get the text of the current source line as a string, with a trailing
        newline character'''
        if self.is_optimized_out():
            return FRAME_INFO_OPTIMIZED_OUT
        lineno = self.current_line_num()
        if lineno is None:
            return '(failed to get frame line number)'
        filename = self.filename()
        try:
            with open(os_fsencode(filename), 'r') as fp:
                lines = fp.readlines()
        except IOError:
            return None
        try:
            # Convert from 1-based current_line_num to 0-based list offset
            return lines[lineno - 1]
        except IndexError:
            return None
    def write_repr(self, out, visited):
        """Write "Frame 0x..., for file F, line N, in NAME (locals...)"."""
        if self.is_optimized_out():
            out.write(FRAME_INFO_OPTIMIZED_OUT)
            return
        lineno = self.current_line_num()
        lineno = str(lineno) if lineno is not None else "?"
        out.write('Frame 0x%x, for file %s, line %s, in %s ('
                  % (self.as_address(),
                     self.co_filename.proxyval(visited),
                     lineno,
                     self.co_name.proxyval(visited)))
        first = True
        for pyop_name, pyop_value in self.iter_locals():
            if not first:
                out.write(', ')
            first = False
            out.write(pyop_name.proxyval(visited))
            out.write('=')
            pyop_value.write_repr(out, visited)
        out.write(')')
    def print_traceback(self):
        """Print one traceback-style 'File "...", line N, in name' entry to
        the gdb process's stdout."""
        if self.is_optimized_out():
            sys.stdout.write('  %s\n' % FRAME_INFO_OPTIMIZED_OUT)
            return
        visited = set()
        lineno = self.current_line_num()
        lineno = str(lineno) if lineno is not None else "?"
        sys.stdout.write('  File "%s", line %s, in %s\n'
                  % (self.co_filename.proxyval(visited),
                     lineno,
                     self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
    """Wrapper for (PySetObject*): a set or frozenset in the inferior."""
    _typename = 'PySetObject'
    @classmethod
    def _dummy_key(self):
        # The sentinel key CPython stores in deleted set slots.
        # (Note: historical quirk - the first parameter is spelled "self"
        # even though this is a classmethod.)
        return gdb.lookup_global_symbol('_PySet_Dummy').value()
    def __iter__(self):
        """Yield a PyObjectPtr for each live key in the hash table."""
        dummy_ptr = self._dummy_key()
        table = self.field('table')
        for i in safe_range(self.field('mask') + 1):
            setentry = table[i]
            key = setentry['key']
            # Skip empty (NULL) and deleted (dummy) slots:
            if key != 0 and key != dummy_ptr:
                yield PyObjectPtr.from_pyobject_ptr(key)
    def proxyval(self, visited):
        """Proxy as a local set (or frozenset) of proxied members."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
        visited.add(self.as_address())
        members = (key.proxyval(visited) for key in self)
        if self.safe_tp_name() == 'frozenset':
            return frozenset(members)
        else:
            return set(members)
    def write_repr(self, out, visited):
        # Emulate Python 3's set_repr
        tp_name = self.safe_tp_name()
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())
        # Python 3's set_repr special-cases the empty set:
        if not self.field('used'):
            out.write(tp_name)
            out.write('()')
            return
        # Python 3 uses {} for set literals:
        if tp_name != 'set':
            out.write(tp_name)
            out.write('(')
        out.write('{')
        first = True
        for key in self:
            if not first:
                out.write(', ')
            first = False
            key.write_repr(out, visited)
        out.write('}')
        if tp_name != 'set':
            out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
    """Wrapper for (PyBytesObject*): a bytes object in the inferior."""
    _typename = 'PyBytesObject'
    def __str__(self):
        # Read the raw bytes out of ob_sval as a (byte) string.
        field_ob_size = self.field('ob_size')
        field_ob_sval = self.field('ob_sval')
        char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr())
        return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
    def proxyval(self, visited):
        return str(self)
    def write_repr(self, out, visited):
        # Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
        # Get a PyStringObject* within the Python 2 gdb process:
        proxy = self.proxyval(visited)
        # Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
        # to Python 2 code:
        quote = "'"
        if "'" in proxy and not '"' in proxy:
            quote = '"'
        out.write('b')
        out.write(quote)
        for byte in proxy:
            if byte == quote or byte == '\\':
                out.write('\\')
                out.write(byte)
            elif byte == '\t':
                out.write('\\t')
            elif byte == '\n':
                out.write('\\n')
            elif byte == '\r':
                out.write('\\r')
            elif byte < ' ' or ord(byte) >= 0x7f:
                # Non-printable byte: emit a \xNN escape.
                out.write('\\x')
                out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
                out.write(hexdigits[ord(byte) & 0xf])
            else:
                out.write(byte)
        out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
    """Wrapper for (PyTupleObject*): a tuple in the inferior process."""
    _typename = 'PyTupleObject'
    def __getitem__(self, i):
        # Get the gdb.Value for the (PyObject*) with the given index:
        field_ob_item = self.field('ob_item')
        return field_ob_item[i]
    def proxyval(self, visited):
        """Proxy as a local tuple of proxied elements."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('(...)')
        visited.add(self.as_address())
        result = tuple(PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
                       for i in safe_range(int_from_int(self.field('ob_size'))))
        return result
    def write_repr(self, out, visited):
        """Write a "(...)" repr to *out*, with the 1-tuple trailing comma."""
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())
        out.write('(')
        for i in safe_range(int_from_int(self.field('ob_size'))):
            if i > 0:
                out.write(', ')
            element = PyObjectPtr.from_pyobject_ptr(self[i])
            element.write_repr(out, visited)
        if self.field('ob_size') == 1:
            out.write(',)')
        else:
            out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
    # Wrapper for (PyTypeObject*); inherits the generic behavior.
    _typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
# Pick an implementation of _unichr() based on the gdb-side Python build:
# on wide builds unichr() already handles any code point; on narrow builds
# code points above 0xFFFF must be emitted as a surrogate pair.
if sys.maxunicode >= 0x10000:
    _unichr = unichr
else:
    # Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
    def _unichr(x):
        if x < 0x10000:
            return unichr(x)
        x -= 0x10000
        ch1 = 0xD800 | (x >> 10)
        ch2 = 0xDC00 | (x & 0x3FF)
        return unichr(ch1) + unichr(ch2)
class PyUnicodeObjectPtr(PyObjectPtr):
    """Wrapper for (PyUnicodeObject*): a str in the inferior, supporting
    both the PEP 393 (3.3+) layout and the legacy Py_UNICODE layout."""
    _typename = 'PyUnicodeObject'
    def char_width(self):
        """sizeof(Py_UNICODE) in the inferior (2 or 4)."""
        _type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
        return _type_Py_UNICODE.sizeof
    def proxyval(self, visited):
        """Proxy as a local unicode string."""
        global _is_pep393
        # Detect the layout once, by probing for the PEP 393 'data' field:
        if _is_pep393 is None:
            fields = gdb.lookup_type('PyUnicodeObject').fields()
            _is_pep393 = 'data' in [f.name for f in fields]
        if _is_pep393:
            # Python 3.3 and newer
            may_have_surrogates = False
            compact = self.field('_base')
            ascii = compact['_base']
            state = ascii['state']
            is_compact_ascii = (int(state['ascii']) and int(state['compact']))
            if not int(state['ready']):
                # string is not ready
                field_length = long(compact['wstr_length'])
                may_have_surrogates = True
                field_str = ascii['wstr']
            else:
                field_length = long(ascii['length'])
                if is_compact_ascii:
                    # Character data immediately follows the ASCII header:
                    field_str = ascii.address + 1
                elif int(state['compact']):
                    # ...or the compact header, for non-ASCII compact strings:
                    field_str = compact.address + 1
                else:
                    field_str = self.field('data')['any']
                repr_kind = int(state['kind'])
                # kind is the per-character width in bytes (1, 2 or 4):
                if repr_kind == 1:
                    field_str = field_str.cast(_type_unsigned_char_ptr())
                elif repr_kind == 2:
                    field_str = field_str.cast(_type_unsigned_short_ptr())
                elif repr_kind == 4:
                    field_str = field_str.cast(_type_unsigned_int_ptr())
        else:
            # Python 3.2 and earlier
            field_length = long(self.field('length'))
            field_str = self.field('str')
            may_have_surrogates = self.char_width() == 2
        # Gather a list of ints from the Py_UNICODE array; these are either
        # UCS-1, UCS-2 or UCS-4 code points:
        if not may_have_surrogates:
            Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
        else:
            # A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
            # inferior process: we must join surrogate pairs.
            Py_UNICODEs = []
            i = 0
            limit = safety_limit(field_length)
            while i < limit:
                ucs = int(field_str[i])
                i += 1
                if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
                    Py_UNICODEs.append(ucs)
                    continue
                # This could be a surrogate pair.
                ucs2 = int(field_str[i])
                if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
                    continue
                code = (ucs & 0x03FF) << 10
                code |= ucs2 & 0x03FF
                code += 0x00010000
                Py_UNICODEs.append(code)
                i += 1
        # Convert the int code points to unicode characters, and generate a
        # local unicode instance.
        # This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
        result = u''.join([
            (_unichr(ucs) if ucs <= 0x10ffff else '\ufffd')
            for ucs in Py_UNICODEs])
        return result
    def write_repr(self, out, visited):
        # Write this out as a Python 3 str literal, i.e. without a "u" prefix
        # Get a PyUnicodeObject* within the Python 2 gdb process:
        proxy = self.proxyval(visited)
        # Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
        # to Python 2:
        if "'" in proxy and '"' not in proxy:
            quote = '"'
        else:
            quote = "'"
        out.write(quote)
        i = 0
        while i < len(proxy):
            ch = proxy[i]
            i += 1
            # Escape quotes and backslashes
            if ch == quote or ch == '\\':
                out.write('\\')
                out.write(ch)
            #  Map special whitespace to '\t', \n', '\r'
            elif ch == '\t':
                out.write('\\t')
            elif ch == '\n':
                out.write('\\n')
            elif ch == '\r':
                out.write('\\r')
            # Map non-printable US ASCII to '\xhh' */
            elif ch < ' ' or ch == 0x7F:
                out.write('\\x')
                out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
                out.write(hexdigits[ord(ch) & 0x000F])
            # Copy ASCII characters as-is
            elif ord(ch) < 0x7F:
                out.write(ch)
            # Non-ASCII characters
            else:
                ucs = ch
                ch2 = None
                if sys.maxunicode < 0x10000:
                    # If sizeof(Py_UNICODE) is 2 here (in gdb), join
                    # surrogate pairs before calling _unichr_is_printable.
                    if (i < len(proxy)
                    and 0xD800 <= ord(ch) < 0xDC00 \
                    and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
                        ch2 = proxy[i]
                        ucs = ch + ch2
                        i += 1
                # Unfortuately, Python 2's unicode type doesn't seem
                # to expose the "isprintable" method
                printable = _unichr_is_printable(ucs)
                if printable:
                    try:
                        ucs.encode(ENCODING)
                    except UnicodeEncodeError:
                        printable = False
                # Map Unicode whitespace and control characters
                # (categories Z* and C* except ASCII space)
                if not printable:
                    if ch2 is not None:
                        # Match Python 3's representation of non-printable
                        # wide characters.
                        code = (ord(ch) & 0x03FF) << 10
                        code |= ord(ch2) & 0x03FF
                        code += 0x00010000
                    else:
                        code = ord(ucs)
                    # Map 8-bit characters to '\\xhh'
                    if code <= 0xff:
                        out.write('\\x')
                        out.write(hexdigits[(code >> 4) & 0x000F])
                        out.write(hexdigits[code & 0x000F])
                    # Map 21-bit characters to '\U00xxxxxx'
                    elif code >= 0x10000:
                        out.write('\\U')
                        out.write(hexdigits[(code >> 28) & 0x0000000F])
                        out.write(hexdigits[(code >> 24) & 0x0000000F])
                        out.write(hexdigits[(code >> 20) & 0x0000000F])
                        out.write(hexdigits[(code >> 16) & 0x0000000F])
                        out.write(hexdigits[(code >> 12) & 0x0000000F])
                        out.write(hexdigits[(code >> 8) & 0x0000000F])
                        out.write(hexdigits[(code >> 4) & 0x0000000F])
                        out.write(hexdigits[code & 0x0000000F])
                    # Map 16-bit characters to '\uxxxx'
                    else:
                        out.write('\\u')
                        out.write(hexdigits[(code >> 12) & 0x000F])
                        out.write(hexdigits[(code >> 8) & 0x000F])
                        out.write(hexdigits[(code >> 4) & 0x000F])
                        out.write(hexdigits[code & 0x000F])
                else:
                    # Copy characters as-is
                    out.write(ch)
                    if ch2 is not None:
                        out.write(ch2)
        out.write(quote)
class wrapperobject(PyObjectPtr):
    """Support for printing method-wrapper objects (slot wrappers bound to
    an instance) without crashing on bad or partially-initialized pointers.
    """
    _typename = 'wrapperobject'

    def safe_name(self):
        """Return repr() of the wrapped slot's name, or a placeholder on error."""
        try:
            descr_name = self.field('descr')['d_base']['name'].string()
        except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
            return '<unknown name>'
        return repr(descr_name)

    def safe_tp_name(self):
        """Return the type name of the bound instance, or a placeholder on error."""
        try:
            return self.field('self')['ob_type']['tp_name'].string()
        except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
            return '<unknown tp_name>'

    def safe_self_addresss(self):
        """Return the hex address of the bound instance, or a placeholder.

        (The historical triple-'s' spelling is kept for compatibility.)
        """
        try:
            return '%#x' % long(self.field('self'))
        except (NullPyObjectPtr, RuntimeError):
            return '<failed to get self address>'

    def proxyval(self, visited):
        return ('<method-wrapper %s of %s object at %s>'
                % (self.safe_name(),
                   self.safe_tp_name(),
                   self.safe_self_addresss()))

    def write_repr(self, out, visited):
        out.write(self.proxyval(visited))
def int_from_int(gdbval):
    """Convert a gdb.Value holding an integer into a plain Python int."""
    return int(gdbval)
def stringify(val):
    """Return a one-line string form of *val* (currently just repr()).

    TODO: repr() puts everything on one line; pprint.pformat can be nicer
    but can lead to very long results. This function isolates that choice
    so it can be flipped in a single place.
    """
    one_line = True
    if one_line:
        return repr(val)
    from pprint import pformat
    return pformat(val)
class PyObjectPtrPrinter:
    "Prints a (PyObject*)"

    def __init__(self, gdbval):
        # Raw gdb.Value for the PyObject* we will render.
        self.gdbval = gdbval

    def to_string(self):
        """Render the pointed-to object, truncated to MAX_OUTPUT_LEN."""
        pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
        truncate = True
        if truncate:
            return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
        # Generating the full proxy value before stringifying it could be
        # expensive, so this path is disabled by default.
        return stringify(pyop.proxyval(set()))
def pretty_printer_lookup(gdbval):
    """gdb pretty-printer hook: return a printer for PyObject* values.

    Args:
        gdbval: a gdb.Value seen by gdb's printing machinery.

    Returns:
        A PyObjectPtrPrinter when gdbval is a pointer to one of the Python
        object structs we know how to render; None otherwise (gdb then
        falls back to its default formatting).
    """
    # Fix: the original bound a local named 'type', shadowing the builtin.
    ptr_type = gdbval.type.unqualified()
    if ptr_type.code != gdb.TYPE_CODE_PTR:
        return None

    target_name = str(ptr_type.target().unqualified())
    if target_name in ("PyObject", "PyFrameObject", "PyUnicodeObject",
                       "wrapperobject"):
        return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register(obj):
    """Install our pretty-printer lookup hook on *obj* (a gdb.Objfile).

    When obj is None (e.g. gdb has no current objfile), fall back to
    registering on the global gdb module instead.
    """
    if obj is None:
        obj = gdb
    # Wire up the pretty-printer
    obj.pretty_printers.append(pretty_printer_lookup)

register(gdb.current_objfile())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
    '''
    Wrapper for gdb.Frame, adding various methods
    '''

    def __init__(self, gdbframe):
        # The underlying gdb.Frame this object wraps.
        self._gdbframe = gdbframe

    def older(self):
        """Return the caller's Frame wrapper, or None at the oldest frame."""
        older = self._gdbframe.older()
        if older:
            return Frame(older)
        else:
            return None

    def newer(self):
        """Return the callee's Frame wrapper, or None at the newest frame."""
        newer = self._gdbframe.newer()
        if newer:
            return Frame(newer)
        else:
            return None

    def select(self):
        '''If supported, select this frame and return True; return False if unsupported

        Not all builds have a gdb.Frame.select method; seems to be present on
        Fedora 12 onwards, but absent on Ubuntu buildbot'''
        if not hasattr(self._gdbframe, 'select'):
            print ('Unable to select frame: '
                   'this build of gdb does not expose a gdb.Frame.select method')
            return False
        self._gdbframe.select()
        return True

    def get_index(self):
        '''Calculate index of frame, starting at 0 for the newest frame within
        this thread'''
        index = 0
        # Go down until you reach the newest frame:
        iter_frame = self
        while iter_frame.newer():
            index += 1
            iter_frame = iter_frame.newer()
        return index

    # We divide frames into:
    #   - "python frames":
    #       - "bytecode frames" i.e. PyEval_EvalFrameEx
    #       - "other python frames": things that are of interest from a python
    #         POV, but aren't bytecode (e.g. GC, GIL)
    #   - everything else

    def is_python_frame(self):
        '''Is this a _PyEval_EvalFrameDefault frame, or some other important
        frame? (see is_other_python_frame for what "important" means in this
        context)'''
        if self.is_evalframe():
            return True
        if self.is_other_python_frame():
            return True
        return False

    def is_evalframe(self):
        '''Is this a _PyEval_EvalFrameDefault frame?'''
        if self._gdbframe.name() == EVALFRAME:
            '''
            I believe we also need to filter on the inline
            struct frame_id.inline_depth, only regarding frames with
            an inline depth of 0 as actually being this function

            So we reject those with type gdb.INLINE_FRAME
            '''
            if self._gdbframe.type() == gdb.NORMAL_FRAME:
                # We have a _PyEval_EvalFrameDefault frame:
                return True
        return False

    def is_other_python_frame(self):
        '''Is this frame worth displaying in python backtraces?
        Examples:
          - waiting on the GIL
          - garbage-collecting
          - within a CFunction
        If it is, return a descriptive string
        For other frames, return False
        '''
        if self.is_waiting_for_gil():
            return 'Waiting for the GIL'

        if self.is_gc_collect():
            return 'Garbage-collecting'

        # Detect invocations of PyCFunction instances:
        frame = self._gdbframe
        caller = frame.name()
        if not caller:
            return False

        if (caller.startswith('cfunction_vectorcall_') or
            caller == 'cfunction_call'):
            arg_name = 'func'
            # Within that frame:
            #   "func" is the local containing the PyObject* of the
            #   PyCFunctionObject instance
            #   "f" is the same value, but cast to (PyCFunctionObject*)
            #   "self" is the (PyObject*) of the 'self'
            try:
                # Use the prettyprinter for the func:
                func = frame.read_var(arg_name)
                return str(func)
            except ValueError:
                return ('PyCFunction invocation (unable to read %s: '
                        'missing debuginfos?)' % arg_name)
            except RuntimeError:
                return 'PyCFunction invocation (unable to read %s)' % arg_name

        if caller == 'wrapper_call':
            arg_name = 'wp'
            try:
                func = frame.read_var(arg_name)
                return str(func)
            except ValueError:
                return ('<wrapper_call invocation (unable to read %s: '
                        'missing debuginfos?)>' % arg_name)
            except RuntimeError:
                return '<wrapper_call invocation (unable to read %s)>' % arg_name

        # This frame isn't worth reporting:
        return False

    def is_waiting_for_gil(self):
        '''Is this frame waiting on the GIL?'''
        # This assumes the _POSIX_THREADS version of Python/ceval_gil.h:
        name = self._gdbframe.name()
        # Fix: always return a bool.  Previously a nameless frame made this
        # implicitly return None, leaking an inconsistent type to callers
        # (harmless for truthiness tests, surprising for anything else).
        return bool(name) and name == 'take_gil'

    def is_gc_collect(self):
        '''Is this frame gc_collect_main() within the garbage-collector?'''
        return self._gdbframe.name() in ('collect', 'gc_collect_main')

    def get_pyop(self):
        """Return the PyFrameObjectPtr for this frame's "f" argument, or None.

        Falls back to the caller's "f" when gdb reports ours optimized out.
        """
        try:
            f = self._gdbframe.read_var('f')
            frame = PyFrameObjectPtr.from_pyobject_ptr(f)
            if not frame.is_optimized_out():
                return frame
            # gdb is unable to get the "f" argument of PyEval_EvalFrameEx()
            # because it was "optimized out". Try to get "f" from the frame
            # of the caller, PyEval_EvalCodeEx().
            orig_frame = frame
            caller = self._gdbframe.older()
            if caller:
                f = caller.read_var('f')
                frame = PyFrameObjectPtr.from_pyobject_ptr(f)
                if not frame.is_optimized_out():
                    return frame
            return orig_frame
        except ValueError:
            return None

    @classmethod
    def get_selected_frame(cls):
        """Wrap gdb's currently selected frame, or return None."""
        _gdbframe = gdb.selected_frame()
        if _gdbframe:
            return Frame(_gdbframe)
        return None

    @classmethod
    def get_selected_python_frame(cls):
        '''Try to obtain the Frame for the python-related code in the selected
        frame, or None'''
        try:
            frame = cls.get_selected_frame()
        except gdb.error:
            # No frame: Python didn't start yet
            return None

        while frame:
            if frame.is_python_frame():
                return frame
            frame = frame.older()

        # Not found:
        return None

    @classmethod
    def get_selected_bytecode_frame(cls):
        '''Try to obtain the Frame for the python bytecode interpreter in the
        selected GDB frame, or None'''
        frame = cls.get_selected_frame()

        while frame:
            if frame.is_evalframe():
                return frame
            frame = frame.older()

        # Not found:
        return None

    def print_summary(self):
        """Write a one-line '#<index> ...' summary of this frame to stdout."""
        if self.is_evalframe():
            pyop = self.get_pyop()
            if pyop:
                line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
                write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write(' %s\n' % line.strip())
            else:
                sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write('#%i %s\n' % (self.get_index(), info))
            else:
                sys.stdout.write('#%i\n' % self.get_index())

    def print_traceback(self):
        """Write this frame to stdout in Python-traceback style."""
        if self.is_evalframe():
            pyop = self.get_pyop()
            if pyop:
                pyop.print_traceback()
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write(' %s\n' % line.strip())
            else:
                sys.stdout.write(' (unable to read python frame information)\n')
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write(' %s\n' % info)
            else:
                sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
    '''List the current Python source code, if any

    Use
       py-list START
    to list at a different line number within the python source.

    Use
       py-list START, END
    to list a specific range of lines within the python source.
    '''
    def __init__(self):
        # Registers the "py-list" command with gdb.
        gdb.Command.__init__ (self,
                              "py-list",
                              gdb.COMMAND_FILES,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        import re
        start = None
        end = None
        # "py-list N": list ten lines starting at N.
        m = re.match(r'\s*(\d+)\s*', args)
        if m:
            # int() tolerates the whitespace captured by group(0).
            start = int(m.group(0))
            end = start + 10
        # "py-list N, M": explicit closed range; overrides the form above.
        m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
        if m:
            start, end = map(int, m.groups())
        # py-list requires an actual PyEval_EvalFrameEx frame:
        frame = Frame.get_selected_bytecode_frame()
        if not frame:
            print('Unable to locate gdb frame for python bytecode interpreter')
            return
        pyop = frame.get_pyop()
        if not pyop or pyop.is_optimized_out():
            print(UNABLE_READ_INFO_PYTHON_FRAME)
            return
        filename = pyop.filename()
        lineno = pyop.current_line_num()
        if lineno is None:
            print('Unable to read python frame line number')
            return
        # No explicit range given: center a 11-line window on the current line.
        if start is None:
            start = lineno - 5
            end = lineno + 5
        if start<1:
            start = 1
        try:
            f = open(os_fsencode(filename), 'r')
        except IOError as err:
            sys.stdout.write('Unable to open %s: %s\n'
                             % (filename, err))
            return
        with f:
            all_lines = f.readlines()
            # start and end are 1-based, all_lines is 0-based;
            # so [start-1:end] as a python slice gives us [start, end] as a
            # closed interval
            for i, line in enumerate(all_lines[start-1:end]):
                linestr = str(i+start)
                # Highlight current line:
                if i + start == lineno:
                    linestr = '>' + linestr
                sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
    '''Move up or down the stack (for the py-up/py-down command)'''
    frame = Frame.get_selected_python_frame()
    if not frame:
        print('Unable to locate python frame')
        return

    while frame:
        # Step towards the caller (up) or the callee (down).
        candidate = frame.older() if move_up else frame.newer()
        if not candidate:
            break
        if candidate.is_python_frame():
            # Result: select it and show a summary (even if select fails,
            # we are done searching).
            if candidate.select():
                candidate.print_summary()
            return
        frame = candidate

    if move_up:
        print('Unable to find an older python frame')
    else:
        print('Unable to find a newer python frame')
class PyUp(gdb.Command):
    'Select and print the python stack frame that called this one (if any)'

    def __init__(self):
        # Register the "py-up" command with gdb.
        gdb.Command.__init__(self, "py-up", gdb.COMMAND_STACK,
                             gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Walk towards older (caller) python frames.
        move_in_stack(move_up=True)
class PyDown(gdb.Command):
    'Select and print the python stack frame called by this one (if any)'

    def __init__(self):
        # Register the "py-down" command with gdb.
        gdb.Command.__init__(self, "py-down", gdb.COMMAND_STACK,
                             gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Walk towards newer (callee) python frames.
        move_in_stack(move_up=False)

# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
    PyUp()
    PyDown()
class PyBacktraceFull(gdb.Command):
    'Display the current python frame and all the frames within its call stack (if any)'

    def __init__(self):
        # Register the "py-bt-full" command with gdb.
        gdb.Command.__init__(self, "py-bt-full", gdb.COMMAND_STACK,
                             gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        current = Frame.get_selected_python_frame()
        if not current:
            print('Unable to locate python frame')
            return
        # Print a summary line for every python-related frame, newest first.
        while current:
            if current.is_python_frame():
                current.print_summary()
            current = current.older()

PyBacktraceFull()
class PyBacktrace(gdb.Command):
    'Display the current python frame and all the frames within its call stack (if any)'

    def __init__(self):
        # Register the "py-bt" command with gdb.
        gdb.Command.__init__(self, "py-bt", gdb.COMMAND_STACK,
                             gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        current = Frame.get_selected_python_frame()
        if not current:
            print('Unable to locate python frame')
            return

        sys.stdout.write('Traceback (most recent call first):\n')
        # Emit the python-level traceback, newest frame first.
        while current:
            if current.is_python_frame():
                current.print_traceback()
            current = current.older()

PyBacktrace()
class PyPrint(gdb.Command):
    'Look up the given python variable name, and print it'

    def __init__(self):
        # Register the "py-print" command with gdb.
        gdb.Command.__init__(self, "py-print", gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        name = str(args)

        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return

        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print(UNABLE_READ_INFO_PYTHON_FRAME)
            return

        pyop_var, scope = pyop_frame.get_var_by_name(name)
        if not pyop_var:
            print('%r not found' % name)
            return

        # Show which scope the name was found in, plus a truncated repr.
        print('%s %r = %s'
              % (scope, name, pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))

PyPrint()
class PyLocals(gdb.Command):
    'Look up the given python variable name, and print it'

    def __init__(self):
        # Register the "py-locals" command with gdb.
        gdb.Command.__init__(self, "py-locals", gdb.COMMAND_DATA,
                             gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        name = str(args)

        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return

        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print(UNABLE_READ_INFO_PYTHON_FRAME)
            return

        # Dump every local of the selected frame as "name = value".
        for pyop_name, pyop_value in pyop_frame.iter_locals():
            print('%s = %s'
                  % (pyop_name.proxyval(set()),
                     pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))

PyLocals()
| 33.575114 | 102 | 0.576938 |
bc6655c76acee5216c554a12d8bbeb5016b73f49 | 570 | py | Python | example/test_settings.py | imtapps/django-attachment | 8a56dd61ca950b24ad6f2c731009e02528f6c474 | [
"BSD-2-Clause"
] | null | null | null | example/test_settings.py | imtapps/django-attachment | 8a56dd61ca950b24ad6f2c731009e02528f6c474 | [
"BSD-2-Clause"
] | null | null | null | example/test_settings.py | imtapps/django-attachment | 8a56dd61ca950b24ad6f2c731009e02528f6c474 | [
"BSD-2-Clause"
] | null | null | null | from settings import *
SOUTH_TESTS_MIGRATE = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test_db',
}
}
PROJECT_APPS = ('attachments', )
try:
import django_jenkins
INSTALLED_APPS = INSTALLED_APPS + ('django_jenkins',)
JENKINS_TASKS = (
'django_jenkins.tasks.django_tests',
'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.run_pep8',
'django_jenkins.tasks.run_pyflakes',
'django_jenkins.tasks.with_coverage',
)
except ImportError:
pass | 20.357143 | 57 | 0.649123 |
55664403f4359e78b5a7667017e7698f335c90ea | 15,017 | py | Python | yaml_conversion/lib/google/appengine/api/yaml_builder.py | RichieEscarez/appengine-config-transformer | f893de9e9afb7aa28eb231de75ab2e3590279226 | [
"Apache-2.0"
] | 9 | 2016-07-27T22:50:32.000Z | 2020-01-13T19:32:09.000Z | yaml_conversion/lib/google/appengine/api/yaml_builder.py | RichieEscarez/appengine-config-transformer | f893de9e9afb7aa28eb231de75ab2e3590279226 | [
"Apache-2.0"
] | 5 | 2017-03-17T17:02:41.000Z | 2020-01-07T12:09:11.000Z | yaml_conversion/lib/google/appengine/api/yaml_builder.py | RichieEscarez/appengine-config-transformer | f893de9e9afb7aa28eb231de75ab2e3590279226 | [
"Apache-2.0"
] | 18 | 2016-01-26T21:06:26.000Z | 2021-10-09T15:02:43.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python2.4
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""PyYAML event builder handler
Receives events from YAML listener and forwards them to a builder
object so that it can construct a properly structured object.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
from yaml_conversion.lib.google.appengine.api import yaml_errors
from yaml_conversion.lib.google.appengine.api import yaml_listener
import yaml
# Token constants used by handler for keeping track of handler state.
# Each token names the structural scope currently open on the handler stack.
_TOKEN_DOCUMENT = 'document'  # top level of a YAML document
_TOKEN_SEQUENCE = 'sequence'  # inside a YAML sequence (list)
_TOKEN_MAPPING = 'mapping'    # inside a YAML mapping (dict)
_TOKEN_KEY = 'key'            # holding a mapping key, awaiting its value
# The closed set of valid structural tokens.
_TOKEN_VALUES = frozenset((
    _TOKEN_DOCUMENT,
    _TOKEN_SEQUENCE,
    _TOKEN_MAPPING,
    _TOKEN_KEY))
class Builder(object):
  """Abstract factory/assembler for turning YAML events into objects.

  Subclass this to define how parsed YAML becomes a concrete object model:
  BuilderHandler calls the Build*/End* methods as document, mapping and
  sequence scopes open and close, and MapTo/AppendTo to attach values to
  their parent containers.  All methods here are no-op hooks.
  """

  def BuildDocument(self):
    """Create and return the top-level object for a new YAML document.

    Called whenever the parser encounters the start of a document.  The
    object returned here is what BuilderHandler.GetResults() ultimately
    hands back to the user; its type is entirely up to the subclass.
    """

  def InitializeDocument(self, document, value):
    """Seed *document* with the scalar found at the document's top level.

    Invoked immediately after BuildDocument.  A *value* of None denotes
    the empty document.

    Args:
      document: Object previously returned by BuildDocument.
      value: Scalar value to initialize the document with.
    """

  def BuildMapping(self, top_value):
    """Create and return an object representing a new mapping.

    Called on StartMapping events.

    Args:
      top_value: The new mapping's parent -- an object previously returned
        by BuildMapping or BuildSequence.

    Returns:
      Instance representing a mapping in the target model.
    """

  def EndMapping(self, top_value, mapping):
    """Hook invoked when *mapping* (child of *top_value*) goes out of scope.

    Useful for clean up or end-of-scope validation.
    """

  def BuildSequence(self, top_value):
    """Create and return an object representing a new sequence.

    Called on StartSequence events.

    Args:
      top_value: The new sequence's parent -- an object previously returned
        by BuildMapping or BuildSequence.

    Returns:
      Instance representing a sequence in the target model.
    """

  def EndSequence(self, top_value, sequence):
    """Hook invoked when *sequence* (child of *top_value*) goes out of scope.

    Useful for clean up or end-of-scope validation.
    """

  def MapTo(self, subject, key, value):
    """Associate *key* -> *value* on the mapping object *subject*.

    Args:
      subject: Object returned by BuildMapping.
      key: Any scalar value usable as a key.
      value: Any kind of value.
    """

  def AppendTo(self, subject, value):
    """Append *value* to the sequence object *subject*.

    Args:
      subject: Object returned by BuildSequence.
      value: Any kind of value.
    """
class BuilderHandler(yaml_listener.EventHandler):
  """PyYAML event handler used to build objects.
  Maintains state information as it receives parse events so that object
  nesting is maintained. Uses provided builder object to construct and
  assemble objects as it goes.
  As it receives events from the YAML parser, it builds a stack of data
  representing structural tokens. As the scope of documents, mappings
  and sequences end, those token, value pairs are popped from the top of
  the stack so that the original scope can resume processing.
  A special case is made for the _KEY token. It represents a temporary
  value which only occurs inside mappings. It is immediately popped off
  the stack when it's associated value is encountered in the parse stream.
  It is necessary to do this because the YAML parser does not combine
  key and value information in to a single event.
  """
  def __init__(self, builder):
    """Initialization for builder handler.
    Args:
      builder: Instance of Builder class.
    Raises:
      ListenerConfigurationError when builder is not a Builder class.
    """
    if not isinstance(builder, Builder):
      # NOTE(review): the message says 'yaml_listener.Builder', but Builder
      # is defined in this module (yaml_builder) -- consider correcting.
      raise yaml_errors.ListenerConfigurationError(
          'Must provide builder of type yaml_listener.Builder')
    self._builder = builder
    # Stack of (token, value) pairs; None whenever no stream is open.
    self._stack = None
    # Cached alias for self._stack[-1] (or None when the stack is empty).
    self._top = None
    # One fully-built object per document seen in the stream.
    self._results = []
  def _Push(self, token, value):
    """Push values to stack at start of nesting.
    When a new object scope is beginning, will push the token (type of scope)
    along with the new objects value, the latter of which is provided through
    the various build methods of the builder.
    Args:
      token: Token indicating the type of scope which is being created; must
        belong to _TOKEN_VALUES.
      value: Value to associate with given token. Construction of value is
        determined by the builder provided to this handler at construction.
    """
    # _top is an easy to use reference to the top of the handler stack.
    self._top = (token, value)
    self._stack.append(self._top)
  def _Pop(self):
    """Pop values from stack at end of nesting.
    Called to indicate the end of a nested scope.
    Returns:
      Previously pushed value at the top of the stack.
    """
    assert self._stack != [] and self._stack is not None
    token, value = self._stack.pop()
    # Restore _top variable with previous values.
    if self._stack:
      self._top = self._stack[-1]
    else:
      self._top = None
    return value
  def _HandleAnchor(self, event):
    """Handle anchor attached to event.
    Currently will raise an error if anchor is used. Anchors are used to
    define a document wide tag to a given value (scalar, mapping or sequence).
    Args:
      event: Event which may have anchor property set.
    Raises:
      NotImplementedError if event attempts to use an anchor.
    """
    # TODO(user): Implement anchors and aliases.
    # If there is an anchor raise an error.
    if hasattr(event, 'anchor') and event.anchor is not None:
      raise NotImplementedError('Anchors not supported in this handler')
  def _HandleValue(self, value):
    """Handle given value based on state of parser
    This method handles the various values that are created by the builder
    at the beginning of scope events (such as mappings and sequences) or
    when a scalar value is received.
    Method is called when handler receives a parser, MappingStart or
    SequenceStart.
    Args:
      value: Value received as scalar value or newly constructed mapping or
        sequence instance.
    Raises:
      InternalError if the building process encounters an unexpected token.
      This is an indication of an implementation error in BuilderHandler.
    """
    token, top_value = self._top
    # If the last token was a key, it means that it is necessary
    # to insert the value in to a map.
    if token == _TOKEN_KEY:
      # Fetch the key (removing from the stack)
      key = self._Pop()
      # New values at top of stack
      mapping_token, mapping = self._top
      assert _TOKEN_MAPPING == mapping_token
      # Forward to builder for assembly
      self._builder.MapTo(mapping, key, value)
    # Parent object for new value is a mapping. It means that
    # this value that is passed in is a scalar and should
    # get placed on the stack as the key for the next value
    # from the parser.
    elif token == _TOKEN_MAPPING:
      self._Push(_TOKEN_KEY, value)
    # Parent is a sequence object. Append value to sequence.
    elif token == _TOKEN_SEQUENCE:
      self._builder.AppendTo(top_value, value)
    # Events received at the document level are sent to the
    # builder to initialize the actual document.
    elif token == _TOKEN_DOCUMENT:
      self._builder.InitializeDocument(top_value, value)
    else:
      raise yaml_errors.InternalError('Unrecognized builder token:\n%s' % token)
  def StreamStart(self, event, loader):
    """Initializes internal state of handler
    Args:
      event: Ignored.
    """
    assert self._stack is None
    self._stack = []
    self._top = None
    self._results = []
  def StreamEnd(self, event, loader):
    """Cleans up internal state of handler after parsing
    Args:
      event: Ignored.
    """
    # A balanced stream must have closed every document scope by now.
    assert self._stack == [] and self._top is None
    self._stack = None
  def DocumentStart(self, event, loader):
    """Build new document.
    Pushes new document on to stack.
    Args:
      event: Ignored.
    """
    assert self._stack == []
    self._Push(_TOKEN_DOCUMENT, self._builder.BuildDocument())
  def DocumentEnd(self, event, loader):
    """End of document.
    Args:
      event: Ignored.
    """
    assert self._top[0] == _TOKEN_DOCUMENT
    self._results.append(self._Pop())
  def Alias(self, event, loader):
    """Not implemented yet.
    Args:
      event: Ignored.
    """
    raise NotImplementedError('References not supported in this handler')
  def Scalar(self, event, loader):
    """Handle scalar value
    Since scalars are simple values that are passed directly in by the
    parser, handle like any value with no additional processing.
    Of course, key values will be handles specially. A key value is recognized
    when the top token is _TOKEN_MAPPING.
    Args:
      event: Event containing scalar value.
    """
    self._HandleAnchor(event)
    if event.tag is None and self._top[0] != _TOKEN_MAPPING:
      # Try to calculate what tag should be. Might be an implicit
      # type based on regex or path.
      try:
        tag = loader.resolve(yaml.nodes.ScalarNode,
                             event.value, event.implicit)
      except IndexError:
        # This exception might be thrown by PyYAML versions previous to
        # 3.05. In this event, set default mapping.
        tag = loader.DEFAULT_SCALAR_TAG
    else:
      tag = event.tag
    if tag is None:
      value = event.value
    else:
      # Do conversion of value to properly inferred type.
      node = yaml.nodes.ScalarNode(tag,
                                   event.value,
                                   event.start_mark,
                                   event.end_mark,
                                   event.style)
      value = loader.construct_object(node)
    self._HandleValue(value)
  def SequenceStart(self, event, loader):
    """Start of sequence scope
    Create a new sequence from the builder and then handle in the context
    of its parent.
    Args:
      event: SequenceStartEvent generated by loader.
      loader: Loader that generated event.
    """
    self._HandleAnchor(event)
    token, parent = self._top
    # If token is on stack, need to look one below it for real parent.
    if token == _TOKEN_KEY:
      token, parent = self._stack[-2]
    sequence = self._builder.BuildSequence(parent)
    self._HandleValue(sequence)
    self._Push(_TOKEN_SEQUENCE, sequence)
  def SequenceEnd(self, event, loader):
    """End of sequence.
    Args:
      event: Ignored
      loader: Ignored.
    """
    assert self._top[0] == _TOKEN_SEQUENCE
    end_object = self._Pop()
    top_value = self._top[1]
    self._builder.EndSequence(top_value, end_object)
  def MappingStart(self, event, loader):
    """Start of mapping scope.
    Create a mapping from builder and then handle in the context of its
    parent.
    Args:
      event: MappingStartEvent generated by loader.
      loader: Loader that generated event.
    """
    self._HandleAnchor(event)
    token, parent = self._top
    # If token is on stack, need to look one below it for real parent.
    # A KEY indicates that the parser is processing a mapping. Since
    # it is on the stack and will be removed by the _HandleValue it
    # is necessary to look for the enclosing mapping object below the
    # key on the stack.
    if token == _TOKEN_KEY:
      token, parent = self._stack[-2]
    mapping = self._builder.BuildMapping(parent)
    self._HandleValue(mapping)
    self._Push(_TOKEN_MAPPING, mapping)
  def MappingEnd(self, event, loader):
    """End of mapping
    Args:
      event: Ignored.
      loader: Ignored.
    """
    assert self._top[0] == _TOKEN_MAPPING
    end_object = self._Pop()
    top_value = self._top[1]
    self._builder.EndMapping(top_value, end_object)
  def GetResults(self):
    """Get results of document stream processing.
    This method can be invoked after fully parsing the entire YAML file
    to retrieve constructed contents of YAML file. Called after EndStream.
    Returns:
      A tuple of all document objects that were parsed from YAML stream.
    Raises:
      InternalError if the builder stack is not empty by the end of parsing.
    """
    if self._stack is not None:
      raise yaml_errors.InternalError('Builder stack is not empty.')
    return tuple(self._results)
| 32.156317 | 80 | 0.694546 |
98af15fdaa30f8d450a9e74188b55916a1fa1a65 | 9,917 | py | Python | core/modules/modul/build.py | michaelwang123/PaddleRec | 4feb0a7f962e918bdfa4f7289a9ddfd08d459824 | [
"Apache-2.0"
] | 2 | 2020-06-05T15:53:07.000Z | 2020-12-14T07:03:45.000Z | core/modules/modul/build.py | michaelwang123/PaddleRec | 4feb0a7f962e918bdfa4f7289a9ddfd08d459824 | [
"Apache-2.0"
] | null | null | null | core/modules/modul/build.py | michaelwang123/PaddleRec | 4feb0a7f962e918bdfa4f7289a9ddfd08d459824 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import paddle
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
import yaml
from paddlerec.core.model import Model
from paddlerec.core.utils import table
def create(config):
    """
    Create a model instance by config

    Args:
        config(dict) : desc model type and net; must contain 'mode' and,
            when mode == 'fluid', a 'layer_file' entry (either a .py model
            file or a YAML layer description).

    Return:
        Model Instance with its train net built, or None when
        config['mode'] is not 'fluid'.
    """
    model = None
    if config['mode'] == 'fluid':
        if config['layer_file'].endswith(".py"):
            # Fix: 'envs' was referenced without ever being imported, so this
            # branch raised NameError.  Import it locally (only this branch
            # needs it).
            from paddlerec.core.utils import envs
            # NOTE: 'lazy_instance_by_fliename' (sic) matches the helper's
            # actual spelling in paddlerec.core.utils.envs.
            model_class = envs.lazy_instance_by_fliename(config['layer_file'],
                                                         "Model")
            model = model_class(config)
        else:
            model = YamlModel(config)
        # NOTE(review): YamlModel defines train_net(); confirm the Model base
        # class actually provides train() before relying on this call.
        model.train()
    return model
class YamlModel(Model):
"""R
"""
def __init__(self, config):
"""R
"""
Model.__init__(self, config)
self._config = config
self._name = config['name']
f = open(config['layer_file'], 'r')
self._build_nodes = yaml.safe_load(f.read())
self._build_phase = ['input', 'param', 'summary', 'layer']
self._build_param = {
'layer': {},
'inner_layer': {},
'layer_extend': {},
'model': {}
}
self._inference_meta = {'dependency': {}, 'params': {}}
def train_net(self):
"""R
build a fluid model with config
Return:
modle_instance(dict)
train_program
startup_program
inference_param : all params name list
table: table-meta to ps-server
"""
for layer in self._build_nodes['layer']:
self._build_param['inner_layer'][layer['name']] = layer
self._build_param['table'] = {}
self._build_param['model']['train_program'] = paddle.sataic.Program()
self._build_param['model']['startup_program'] = paddle.static.Program()
with paddle.static.program_guard(
self._build_param['model']['train_program'],
self._build_param['model']['startup_program']):
with fluid.unique_name.guard():
for phase in self._build_phase:
if self._build_nodes[phase] is None:
continue
for node in self._build_nodes[phase]:
exec ("""layer=layer.{}(node)""".format(node['class']))
layer_output, extend_output = layer.generate(
self._config['mode'], self._build_param)
self._build_param['layer'][node['name']] = layer_output
self._build_param['layer_extend'][node[
'name']] = extend_output
if extend_output is None:
continue
if 'loss' in extend_output:
if self._cost is None:
self._cost = extend_output['loss']
else:
self._cost += extend_output['loss']
if 'data_var' in extend_output:
self._data_var += extend_output['data_var']
if 'metric_label' in extend_output and extend_output[
'metric_label'] is not None:
self._metrics[extend_output[
'metric_label']] = extend_output['metric_dict']
if 'inference_param' in extend_output:
inference_param = extend_output['inference_param']
param_name = inference_param['name']
if param_name not in self._build_param['table']:
self._build_param['table'][param_name] = {
'params': []
}
table_meta = table.TableMeta.alloc_new_table(
inference_param['table_id'])
self._build_param['table'][param_name][
'_meta'] = table_meta
self._build_param['table'][param_name][
'params'] += inference_param['params']
pass
@classmethod
def build_optimizer(self, params):
"""R
"""
optimizer_conf = params['optimizer_conf']
strategy = None
if 'strategy' in optimizer_conf:
strategy = optimizer_conf['strategy']
stat_var_names = []
metrics = params['metrics']
for name in metrics:
model_metrics = metrics[name]
stat_var_names += [
model_metrics[metric]['var'].name
for metric in model_metrics
]
strategy['stat_var_names'] = list(set(stat_var_names))
optimizer_generator = 'optimizer = fluid.optimizer.' + \
optimizer_conf['class'] + '(learning_rate=' + \
str(optimizer_conf['learning_rate']) + ')'
exec (optimizer_generator)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
return optimizer
def dump_model_program(self, path):
"""R
"""
with open(path + '/' + self._name + '_main_program.pbtxt',
"w") as fout:
print >> fout, self._build_param['model']['train_program']
with open(path + '/' + self._name + '_startup_program.pbtxt',
"w") as fout:
print >> fout, self._build_param['model']['startup_program']
pass
def shrink(self, params):
"""R
"""
scope = params['scope']
decay = params['decay']
for param_table in self._build_param['table']:
table_id = self._build_param['table'][param_table][
'_meta']._table_id
fleet.shrink_dense_table(decay, scope=scope, table_id=table_id)
    def dump_inference_program(self, inference_layer, path):
        """Dump the inference program for ``inference_layer`` to ``path``.

        Not implemented yet; kept as a placeholder so callers have a
        stable interface.
        """
        pass
def dump_inference_param(self, params):
"""R
"""
scope = params['scope']
executor = params['executor']
program = self._build_param['model']['train_program']
for table_name, table in self._build_param['table'].items():
fleet._fleet_ptr.pull_dense(scope, table['_meta']._table_id,
table['params'])
for infernce_item in params['inference_list']:
params_name_list = self.inference_params(infernce_item[
'layer_name'])
params_var_list = [
program.global_block().var(i) for i in params_name_list
]
params_file_name = infernce_item['save_file_name']
with paddle.static.scope_guard(scope):
if params['save_combine']:
fluid.io.save_vars(
executor,
"./",
program,
vars=params_var_list,
filename=params_file_name)
else:
fluid.io.save_vars(
executor,
params_file_name,
program,
vars=params_var_list)
def inference_params(self, inference_layer):
"""
get params name for inference_layer
Args:
inference_layer(str): layer for inference
Return:
params(list): params name list that for inference layer
"""
layer = inference_layer
if layer in self._inference_meta['params']:
return self._inference_meta['params'][layer]
self._inference_meta['params'][layer] = []
self._inference_meta['dependency'][layer] = self.get_dependency(
self._build_param['inner_layer'], layer)
for node in self._build_nodes['layer']:
if node['name'] not in self._inference_meta['dependency'][layer]:
continue
if 'inference_param' in self._build_param['layer_extend'][node[
'name']]:
self._inference_meta['params'][layer] += \
self._build_param['layer_extend'][node['name']
]['inference_param']['params']
return self._inference_meta['params'][layer]
def get_dependency(self, layer_graph, dest_layer):
"""
get model of dest_layer depends on
Args:
layer_graph(dict) : all model in graph
Return:
depend_layers(list) : sub-graph model for calculate dest_layer
"""
dependency_list = []
if dest_layer in layer_graph:
dependencys = copy.deepcopy(layer_graph[dest_layer]['input'])
dependency_list = copy.deepcopy(dependencys)
for dependency in dependencys:
dependency_list = dependency_list + self.get_dependency(
layer_graph, dependency)
return list(set(dependency_list))
| 39.987903 | 84 | 0.531512 |
840a0e596946dae689c8f6a1812c608b5c398ecc | 7,412 | py | Python | ngx_tail.py | billowqiu/nginx_log_stat | e2a58cf432cbd4147a4a64697df71d35f518a79d | [
"MIT"
] | null | null | null | ngx_tail.py | billowqiu/nginx_log_stat | e2a58cf432cbd4147a4a64697df71d35f518a79d | [
"MIT"
] | null | null | null | ngx_tail.py | billowqiu/nginx_log_stat | e2a58cf432cbd4147a4a64697df71d35f518a79d | [
"MIT"
] | null | null | null | #!/bin/env python
# -*- coding: utf-8 -*-
"""nginx_log_stat - realtime parse nginx log, send stat metric to statsd
modify from ngx_top https://github.com/lebinh/ngxtop
Usage:
nginx_log_stat [options] config --access-log=<access-log> --vhost-prefix=<vhost-prefix> --statsd-port=<statsd-port>
nginx_log_stat info
Options:
-f <format>, --log-format <format> log format as specify in log_format directive. [default: combined]
--no-follow ngxtop default behavior is to ignore current lines in log
and only watch for new lines as they are written to the access log.
Use this flag to tell ngxtop to process the current content of the access log instead.
-v, --verbose more verbose output
-d, --debug print every line and parsed record
-h, --help print this help message.
--version print version information.
Advanced / experimental options:
-c <file>, --config <file> allow ngxtop to parse nginx config file for log format and location.
-i <filter-expression>, --filter <filter-expression> filter in, records satisfied given expression are processed.
-p <filter-expression>, --pre-filter <filter-expression> in-filter expression to check in pre-parsing phase.
"""
from __future__ import print_function
import atexit
from contextlib import closing
import curses
import logging
import os
import sqlite3
import time
import sys
import signal
import socket
import statsd_cli
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import sys
sys.path.append('.')
from config_parser import detect_log_config, detect_config_path, extract_variables, build_pattern
from utils import error_exit
# ======================
# generator utilities
# ======================
def follow(the_file):
    """Yield new lines appended to ``the_file``, like ``tail -f``.

    Seeks straight to the end of the file, then polls for fresh lines
    forever; existing content is never yielded.
    """
    with open(the_file) as handle:
        handle.seek(0, 2)  # jump to EOF: only new content matters
        while True:
            line = handle.readline()
            if line:
                yield line
            else:
                time.sleep(0.1)  # nothing new yet; poll again shortly
def map_field(field, func, dict_sequence):
    """Apply ``func`` to ``item[field]`` for every dict in the sequence.

    Each record is mutated in place and yielded; records whose value makes
    ``func`` raise ValueError are silently dropped.
    """
    for record in dict_sequence:
        try:
            converted = func(record.get(field, None))
        except ValueError:
            continue  # unparseable value: drop this record
        record[field] = converted
        yield record
def add_field(field, func, dict_sequence):
    """Yield each record with ``field`` filled in by ``func(record)``.

    Records that already carry ``field`` pass through untouched.
    """
    for record in dict_sequence:
        if field not in record:
            record[field] = func(record)
        yield record
def trace(sequence, phase=''):
    """Pass items through unchanged while debug-logging each one."""
    for element in sequence:
        logging.debug('%s:\n%s', phase, element)
        yield element
# ======================
# Access log parsing
# ======================
def parse_request_path(record):
    """Extract the URL path of a log record, or None if unavailable.

    Prefers an explicit 'request_uri'; otherwise strips the method and
    protocol from a full 'request' line ("GET /x HTTP/1.1" -> "/x").
    """
    if 'request_uri' in record:
        uri = record['request_uri']
    elif 'request' in record:
        request_parts = record['request'].split(' ')
        uri = ' '.join(request_parts[1:-1])
    else:
        uri = None
    if not uri:
        return None
    return urlparse.urlparse(uri).path
def parse_status_type(record):
    """Map an HTTP status to its class (404 -> 4), or None if absent."""
    if 'status' in record:
        return record['status'] // 100
    return None
def to_int(value):
    """Parse ``value`` as int; empty, None, or '-' become 0."""
    if not value or value == '-':
        return 0
    return int(value)
def to_float(value):
    """Parse ``value`` as float; empty, None, or '-' become 0.0."""
    if not value or value == '-':
        return 0.0
    return float(value)
def parse_log(lines, pattern):
    """Turn raw log lines into typed record dicts, lazily.

    Lines that do not match ``pattern`` are discarded; matching records get
    numeric 'status', 'bytes_sent' and 'request_time' fields plus derived
    'status_type' and 'request_path'.
    """
    records = (match.groupdict()
               for match in (pattern.match(line) for line in lines)
               if match is not None)
    # Transformation stages, applied in order to the lazy record stream.
    pipeline = (
        (map_field, 'status', to_int),
        (add_field, 'status_type', parse_status_type),
        (add_field, 'bytes_sent', lambda r: r['body_bytes_sent']),
        (map_field, 'bytes_sent', to_int),
        (map_field, 'request_time', to_float),
        (add_field, 'request_path', parse_request_path),
    )
    for stage, field, func in pipeline:
        records = stage(field, func, records)
    return records
def process_log(lines, pattern, processor, arguments):
    """Consume parsed log records and push QPS/status metrics to statsd.

    Args:
        lines: iterable of raw access-log lines (usually a `follow` tail).
        pattern: compiled regex with named groups matching the log format.
        processor: unused; kept for interface compatibility.
        arguments: docopt-style dict with '--pre-filter', '--filter',
            '--vhost-prefix' and '--statsd-port'.
    """
    print(lines)
    pre_filer_exp = arguments['--pre-filter']
    # Iterating the generators below is what drives `lines` to yield data.
    # SECURITY NOTE: filter expressions are eval()'d verbatim — only pass
    # trusted command-line filters, never external input.
    if pre_filer_exp:
        lines = (line for line in lines if eval(pre_filer_exp, {}, dict(line=line)))
    records = parse_log(lines, pattern)
    print(records)
    filter_exp = arguments['--filter']
    if filter_exp:
        records = (r for r in records if eval(filter_exp, {}, r))
    vhost_prefix = arguments['--vhost-prefix']
    statsd_port = arguments['--statsd-port']
    statsd = statsd_cli.StatsdClient(port=int(statsd_port))
    # Hostname is stable for the process lifetime; look it up once.
    hostname = socket.gethostname()
    # Send one QPS tick and one status-code count per record.
    for record in records:
        # 'wechat' vhosts historically used an unprefixed metric namespace.
        if vhost_prefix == 'wechat':
            metric_qps = hostname + '.nginx.qps'
        else:
            metric_qps = hostname + '.' + vhost_prefix + '_nginx.qps'
        print(metric_qps)
        statsd.increment(metric_qps)
        # Fix: dict.has_key() was removed in Python 3; use `in` instead.
        if 'status' in record:
            if vhost_prefix == 'wechat':
                metric_status = hostname + '.' + 'nginx.' + 'status_code.' + str(record['status'])
            else:
                metric_status = hostname + '.' + vhost_prefix + '_nginx.status_code.' + str(record['status'])
            statsd.increment(metric_status)
def build_source(access_log, arguments):
    """Pick the line source: stdin, a one-shot file read, or a live tail."""
    if access_log == 'stdin':
        return sys.stdin
    if arguments['--no-follow']:
        return open(access_log)
    return follow(access_log)
def process(arguments):
    """Resolve the access log and its format, then run the metric pipeline.

    For the 'info' command, print the detected configuration instead of
    processing anything.
    """
    access_log = arguments['--access-log']
    log_format = arguments['--log-format']
    if access_log is None and not sys.stdin.isatty():
        # logs are being piped in, so read them from stdin
        access_log = 'stdin'
    if access_log is None:
        access_log, log_format = detect_log_config(arguments)

    logging.info('access_log: %s', access_log)
    logging.info('log_format: %s', log_format)
    if access_log != 'stdin' and not os.path.exists(access_log):
        error_exit('access log file "%s" does not exist' % access_log)

    if arguments['info']:
        print('nginx configuration file:\n ', detect_config_path())
        print('access log file:\n ', access_log)
        print('access log format:\n ', log_format)
        print('available variables:\n ',
              ', '.join(sorted(extract_variables(log_format))))
        return

    source = build_source(access_log, arguments)
    pattern = build_pattern(log_format)
    # no dedicated processor object is needed yet
    process_log(source, pattern, None, arguments)
def main():
    """Entry point: parse CLI args, configure logging, run the pipeline."""
    from docopt import docopt
    args = docopt(__doc__, version='nginx_log_stat 0.02')
    print(args)

    if args['--debug']:
        log_level = logging.DEBUG
    elif args['--verbose']:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(level=log_level, format='%(levelname)s: %(message)s')
    logging.debug('arguments:\n%s', args)

    try:
        process(args)
    except KeyboardInterrupt:
        sys.exit(0)
if __name__ == '__main__':
    # Root logger is configured here first; the basicConfig call inside
    # main() is then a no-op (basicConfig only applies once).
    logging.basicConfig(filename='nginx_log_stat.log', level=logging.DEBUG)
    main()
| 32.508772 | 121 | 0.65286 |
b6bd01e6ac5c67a0e5ebddf3cba54896b7cd4b88 | 646 | py | Python | signing/bin/dump_image_config.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | signing/bin/dump_image_config.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | 2 | 2021-03-26T00:29:32.000Z | 2021-04-30T21:29:33.000Z | signing/bin/dump_image_config.py | khromiumos/chromiumos-chromite | a42a85481cdd9d635dc40a04585e427f89f3bb3f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome OS Image kernel cmdline dump."""
from __future__ import print_function
from chromite.lib import commandline
from chromite.signing.image_signing import imagefile
def main(argv):
    """Parse the command line and dump the image's kernel config.

    Args:
        argv: command-line arguments (without the program name).
    """
    arg_parser = commandline.ArgumentParser(description=__doc__)
    arg_parser.add_argument('input_image', type='path',
                            help='Path to input image file')
    opts = arg_parser.parse_args(argv)
    opts.Freeze()
    imagefile.DumpConfig(opts.input_image)
c549f614abf555c3a3f28c6f3142c66cd2470ee1 | 14,744 | py | Python | linda/linda_app/migrations/0001_initial.py | cbotsikas/LindaWorkbench | b2bfa091fb4ec80ac35b3f68edf46780c1e9ffea | [
"MIT"
] | null | null | null | linda/linda_app/migrations/0001_initial.py | cbotsikas/LindaWorkbench | b2bfa091fb4ec80ac35b3f68edf46780c1e9ffea | [
"MIT"
] | null | null | null | linda/linda_app/migrations/0001_initial.py | cbotsikas/LindaWorkbench | b2bfa091fb4ec80ac35b3f68edf46780c1e9ffea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for linda_app: user profiles, vocabularies with their
    classes/properties/comments/rankings, photos and datasource metadata.

    Auto-generated by Django ``makemigrations``; prefer regenerating over
    hand-editing the choice tuples below.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Metadata describing an externally registered data source.
        migrations.CreateModel(
            name='DatasourceDescription',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=512)),
                ('name', models.CharField(max_length=512)),
                ('uri', models.CharField(max_length=2048)),
                ('createdOn', models.DateField()),
                ('lastUpdateOn', models.DateField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Placeholder photo record; linked to UserProfile further below.
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Per-user profile data (one-to-one with the auth user model).
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nameAppearance', models.CharField(max_length=256, null=True, blank=True)),
                ('country', models.CharField(default=b'--', max_length=3, choices=[(b'--', 'Please choose a country..'), (b'AD', 'Andorra'), (b'AE', 'United Arab Emirates'), (b'AF', 'Afghanistan'), (b'AG', 'Antigua & Barbuda'), (b'AI', 'Anguilla'), (b'AL', 'Albania'), (b'AM', 'Armenia'), (b'AN', 'Netherlands Antilles'), (b'AO', 'Angola'), (b'AQ', 'Antarctica'), (b'AR', 'Argentina'), (b'AS', 'American Samoa'), (b'AT', 'Austria'), (b'AU', 'Australia'), (b'AW', 'Aruba'), (b'AZ', 'Azerbaijan'), (b'BA', 'Bosnia and Herzegovina'), (b'BB', 'Barbados'), (b'BD', 'Bangladesh'), (b'BE', 'Belgium'), (b'BF', 'Burkina Faso'), (b'BG', 'Bulgaria'), (b'BH', 'Bahrain'), (b'BI', 'Burundi'), (b'BJ', 'Benin'), (b'BM', 'Bermuda'), (b'BN', 'Brunei Darussalam'), (b'BO', 'Bolivia'), (b'BR', 'Brazil'), (b'BS', 'Bahama'), (b'BT', 'Bhutan'), (b'BV', 'Bouvet Island'), (b'BW', 'Botswana'), (b'BY', 'Belarus'), (b'BZ', 'Belize'), (b'CA', 'Canada'), (b'CC', 'Cocos (Keeling) Islands'), (b'CF', 'Central African Republic'), (b'CG', 'Congo'), (b'CH', 'Switzerland'), (b'CI', 'Ivory Coast'), (b'CK', 'Cook Iislands'), (b'CL', 'Chile'), (b'CM', 'Cameroon'), (b'CN', 'China'), (b'CO', 'Colombia'), (b'CR', 'Costa Rica'), (b'CU', 'Cuba'), (b'CV', 'Cape Verde'), (b'CX', 'Christmas Island'), (b'CY', 'Cyprus'), (b'CZ', 'Czech Republic'), (b'DE', 'Germany'), (b'DJ', 'Djibouti'), (b'DK', 'Denmark'), (b'DM', 'Dominica'), (b'DO', 'Dominican Republic'), (b'DZ', 'Algeria'), (b'EC', 'Ecuador'), (b'EE', 'Estonia'), (b'EG', 'Egypt'), (b'EH', 'Western Sahara'), (b'ER', 'Eritrea'), (b'ES', 'Spain'), (b'ET', 'Ethiopia'), (b'FI', 'Finland'), (b'FJ', 'Fiji'), (b'FK', 'Falkland Islands (Malvinas)'), (b'FM', 'Micronesia'), (b'FO', 'Faroe Islands'), (b'FR', 'France'), (b'FX', 'France, Metropolitan'), (b'GA', 'Gabon'), (b'GB', 'United Kingdom (Great Britain)'), (b'GD', 'Grenada'), (b'GE', 'Georgia'), (b'GF', 'French Guiana'), (b'GH', 'Ghana'), (b'GI', 'Gibraltar'), (b'GL', 'Greenland'), (b'GM', 'Gambia'), (b'GN', 'Guinea'), (b'GP', 
'Guadeloupe'), (b'GQ', 'Equatorial Guinea'), (b'GR', 'Greece'), (b'GS', 'South Georgia and the South Sandwich Islands'), (b'GT', 'Guatemala'), (b'GU', 'Guam'), (b'GW', 'Guinea-Bissau'), (b'GY', 'Guyana'), (b'HK', 'Hong Kong'), (b'HM', 'Heard & McDonald Islands'), (b'HN', 'Honduras'), (b'HR', 'Croatia'), (b'HT', 'Haiti'), (b'HU', 'Hungary'), (b'ID', 'Indonesia'), (b'IE', 'Ireland'), (b'IL', 'Israel'), (b'IN', 'India'), (b'IO', 'British Indian Ocean Territory'), (b'IQ', 'Iraq'), (b'IR', 'Islamic Republic of Iran'), (b'IS', 'Iceland'), (b'IT', 'Italy'), (b'JM', 'Jamaica'), (b'JO', 'Jordan'), (b'JP', 'Japan'), (b'KE', 'Kenya'), (b'KG', 'Kyrgyzstan'), (b'KH', 'Cambodia'), (b'KI', 'Kiribati'), (b'KM', 'Comoros'), (b'KN', 'St. Kitts and Nevis'), (b'KP', "Korea, Democratic People's Republic of"), (b'KR', 'Korea, Republic of'), (b'KW', 'Kuwait'), (b'KY', 'Cayman Islands'), (b'KZ', 'Kazakhstan'), (b'LA', "Lao People's Democratic Republic"), (b'LB', 'Lebanon'), (b'LC', 'Saint Lucia'), (b'LI', 'Liechtenstein'), (b'LK', 'Sri Lanka'), (b'LR', 'Liberia'), (b'LS', 'Lesotho'), (b'LT', 'Lithuania'), (b'LU', 'Luxembourg'), (b'LV', 'Latvia'), (b'LY', 'Libyan Arab Jamahiriya'), (b'MA', 'Morocco'), (b'MC', 'Monaco'), (b'MD', 'Moldova, Republic of'), (b'MG', 'Madagascar'), (b'MH', 'Marshall Islands'), (b'ML', 'Mali'), (b'MN', 'Mongolia'), (b'MM', 'Myanmar'), (b'MO', 'Macau'), (b'MP', 'Northern Mariana Islands'), (b'MQ', 'Martinique'), (b'MR', 'Mauritania'), (b'MS', 'Monserrat'), (b'MT', 'Malta'), (b'MU', 'Mauritius'), (b'MV', 'Maldives'), (b'MW', 'Malawi'), (b'MX', 'Mexico'), (b'MY', 'Malaysia'), (b'MZ', 'Mozambique'), (b'NA', 'Namibia'), (b'NC', 'New Caledonia'), (b'NE', 'Niger'), (b'NF', 'Norfolk Island'), (b'NG', 'Nigeria'), (b'NI', 'Nicaragua'), (b'NL', 'Netherlands'), (b'NO', 'Norway'), (b'NP', 'Nepal'), (b'NR', 'Nauru'), (b'NU', 'Niue'), (b'NZ', 'New Zealand'), (b'OM', 'Oman'), (b'PA', 'Panama'), (b'PE', 'Peru'), (b'PF', 'French Polynesia'), (b'PG', 'Papua New Guinea'), (b'PH', 
'Philippines'), (b'PK', 'Pakistan'), (b'PL', 'Poland'), (b'PM', 'St. Pierre & Miquelon'), (b'PN', 'Pitcairn'), (b'PR', 'Puerto Rico'), (b'PT', 'Portugal'), (b'PW', 'Palau'), (b'PY', 'Paraguay'), (b'QA', 'Qatar'), (b'RE', 'Reunion'), (b'RO', 'Romania'), (b'RU', 'Russian Federation'), (b'RW', 'Rwanda'), (b'SA', 'Saudi Arabia'), (b'SB', 'Solomon Islands'), (b'SC', 'Seychelles'), (b'SD', 'Sudan'), (b'SE', 'Sweden'), (b'SG', 'Singapore'), (b'SH', 'St. Helena'), (b'SI', 'Slovenia'), (b'SJ', 'Svalbard & Jan Mayen Islands'), (b'SK', 'Slovakia'), (b'SL', 'Sierra Leone'), (b'SM', 'San Marino'), (b'SN', 'Senegal'), (b'SO', 'Somalia'), (b'SR', 'Suriname'), (b'ST', 'Sao Tome & Principe'), (b'SV', 'El Salvador'), (b'SY', 'Syrian Arab Republic'), (b'SZ', 'Swaziland'), (b'TC', 'Turks & Caicos Islands'), (b'TD', 'Chad'), (b'TF', 'French Southern Territories'), (b'TG', 'Togo'), (b'TH', 'Thailand'), (b'TJ', 'Tajikistan'), (b'TK', 'Tokelau'), (b'TM', 'Turkmenistan'), (b'TN', 'Tunisia'), (b'TO', 'Tonga'), (b'TP', 'East Timor'), (b'TR', 'Turkey'), (b'TT', 'Trinidad & Tobago'), (b'TV', 'Tuvalu'), (b'TW', 'Taiwan, Province of China'), (b'TZ', 'Tanzania, United Republic of'), (b'UA', 'Ukraine'), (b'UG', 'Uganda'), (b'UM', 'United States Minor Outlying Islands'), (b'US', 'United States of America'), (b'UY', 'Uruguay'), (b'UZ', 'Uzbekistan'), (b'VA', 'Vatican City State (Holy See)'), (b'VC', 'St. Vincent & the Grenadines'), (b'VE', 'Venezuela'), (b'VG', 'British Virgin Islands'), (b'VI', 'United States Virgin Islands'), (b'VN', 'Viet Nam'), (b'VU', 'Vanuatu'), (b'WF', 'Wallis & Futuna Islands'), (b'WS', 'Samoa'), (b'YE', 'Yemen'), (b'YT', 'Mayotte'), (b'YU', 'Yugoslavia'), (b'ZA', 'South Africa'), (b'ZM', 'Zambia'), (b'ZR', 'Zaire'), (b'ZW', 'Zimbabwe')])),
                ('facebookUrl', models.URLField(max_length=256, null=True, blank=True)),
                ('twitterUrl', models.URLField(max_length=256, null=True, blank=True)),
                ('googleUrl', models.URLField(max_length=256, null=True, blank=True)),
                ('linkedInUrl', models.URLField(max_length=256, null=True, blank=True)),
                ('websiteUrl', models.URLField(max_length=256, null=True, blank=True)),
                ('scientific_background', models.CharField(default=b'--', max_length=10, choices=[(b'--', 'My background is...'), (b'MAT', 'Mathematics'), (b'CIS', 'Computer and information sciences'), (b'PHYS', 'Physical sciences'), (b'CHES', 'Chemical sciences'), (b'ERES', 'Earth and related environmental sciences'), (b'BIS', 'Biological sciences'), (b'ONS', 'Other natural sciences'), (b'CIE', 'Civil engineering'), (b'ELE', 'Electrical engineering, electronic'), (b'EIE', 'engineering, information engineering'), (b'MCE', 'Mechanical engineering'), (b'CHE', 'Chemical engineering'), (b'MAE', 'Materials engineering'), (b'MEE', 'Medical engineering'), (b'ENE', 'Environmental engineering'), (b'ENB', 'Environmental biotechnology'), (b'INB', 'Industrial Biotechnology'), (b'NTE', 'Nano-technology'), (b'OET', 'Other engineering and technologies'), (b'BAM', 'Basic medicine'), (b'CLM', 'Clinical medicine'), (b'HSC', 'Health sciences'), (b'HBI', 'Health biotechnology'), (b'OMS', 'Other medical sciences'), (b'AFF', 'Agriculture, forestry, and fisheries'), (b'ADS', 'Animal and dairy science'), (b'VES', 'Veterinary science'), (b'AGB', 'Agricultural biotechnology'), (b'OAS', 'Other agricultural sciences'), (b'PSY', 'Psychology'), (b'EB', 'Economics and business'), (b'EDS', 'Educational sciences'), (b'SOC', 'Sociology'), (b'LAW', 'Law'), (b'PS', 'Political Science'), (b'SEG', 'Social and economic geography'), (b'MC', 'Media and communications'), (b'OSC', 'Other social sciences'), (b'HA', 'History and archaeology'), (b'LANG', 'Languages and literature'), (b'PHIL', 'Philosophy, ethics and religion'), (b'ART', 'Art (arts, history of arts, performing arts, music)'), (b'OH', 'Other humanities')])),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A vocabulary (ontology) uploaded by a user, with voting/downloads.
        migrations.CreateModel(
            name='Vocabulary',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=2048)),
                ('category', models.CharField(blank=True, max_length=256, choices=[(b'AG', 'Agriculture'), (b'ART', 'Arts, Recreation and Travel'), (b'BFI', 'Banking, Finance and Insurance'), (b'BDMD', 'Births, Deaths, Marriages and Divorces'), (b'BE', 'Business / Enterprises'), (b'CE', 'Census'), (b'CH', 'Construction and Housing'), (b'CO', 'Contributors'), (b'DI', 'Diplomacy'), (b'EC', 'Economic'), (b'ED', 'Education'), (b'EL', 'Elections'), (b'EM', 'Employment'), (b'EU', 'Energy and Utilities'), (b'EN', 'Environment'), (b'FA', 'Farming'), (b'FI', 'Financials'), (b'FCA', 'Foreign Commerce and Aid'), (b'FT', 'Foreign Trade'), (b'GE', 'Geography and Environment'), (b'HN', 'Health and Nutrition'), (b'IEPW', 'Income, Expenditures, Poverty, and Wealth'), (b'IC', 'Information and Communications'), (b'IS', 'International Statistics'), (b'LFEE', 'Labor Force, Employment, and Earnings'), (b'LECP', 'Law Enforcement, Courts, and Prisons'), (b'MA', 'Manufactures'), (b'MI', 'Military'), (b'NSVA', 'National Security and Veterans Affairs'), (b'NR', 'Natural Resources'), (b'OT', 'Other'), (b'PO', 'Population'), (b'PR', 'Prices'), (b'PABF', 'Public Agencies Budget and Finances'), (b'PAE', 'Public Agencies Employment'), (b'ST', 'Science and Technology'), (b'SIHS', 'Social Insurance and Human Services'), (b'SP', 'Space'), (b'TA', 'Taxation'), (b'TR', 'Transportation'), (b'WE', 'Welfare'), (b'WRT', 'Wholesale and Retail Trade')])),
                ('originalUrl', models.URLField(max_length=256, null=True)),
                ('downloadUrl', models.URLField(max_length=256, null=True)),
                ('preferredNamespaceUri', models.URLField(max_length=1024, null=True)),
                ('preferredNamespacePrefix', models.CharField(max_length=256, null=True)),
                ('example', models.CharField(max_length=8196, blank=True)),
                ('datePublished', models.DateField(null=True, blank=True)),
                ('dateCreated', models.DateField(null=True, blank=True)),
                ('dateModified', models.DateField(null=True, blank=True)),
                ('score', models.IntegerField()),
                ('votes', models.IntegerField()),
                ('downloads', models.IntegerField()),
                ('uploader', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # RDF class belonging to a vocabulary, with a ranking score.
        migrations.CreateModel(
            name='VocabularyClass',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('uri', models.URLField(max_length=1024, null=True)),
                ('label', models.CharField(max_length=128)),
                ('ranking', models.FloatField()),
                ('vocabulary', models.ForeignKey(to='linda_app.Vocabulary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # User comment attached to a vocabulary.
        migrations.CreateModel(
            name='VocabularyComments',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('commentText', models.CharField(max_length=512)),
                ('timePosted', models.DateTimeField(null=True, blank=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
                ('vocabularyCommented', models.ForeignKey(to='linda_app.Vocabulary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # RDF property belonging to a vocabulary, with a ranking score.
        migrations.CreateModel(
            name='VocabularyProperty',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('uri', models.URLField(max_length=1024, null=True)),
                ('label', models.CharField(max_length=128)),
                ('ranking', models.FloatField()),
                ('vocabulary', models.ForeignKey(to='linda_app.Vocabulary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One user's up/down vote on a vocabulary.
        migrations.CreateModel(
            name='VocabularyRanking',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('vote', models.IntegerField()),
                ('vocabularyRanked', models.ForeignKey(to='linda_app.Vocabulary')),
                ('voter', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Link each photo back to the profile it belongs to.
        migrations.AddField(
            model_name='photo',
            name='user_profile',
            field=models.ForeignKey(to='linda_app.UserProfile'),
            preserve_default=True,
        ),
    ]
| 106.84058 | 5,758 | 0.569384 |
2915043b8e672314b6d9de6539bb9ca497a1983d | 3,436 | py | Python | jaxrl/networks/common.py | ethanluoyc/jax-rl | 93b41f8038675b5364f89688643f439a21f8e1b8 | [
"MIT"
] | null | null | null | jaxrl/networks/common.py | ethanluoyc/jax-rl | 93b41f8038675b5364f89688643f439a21f8e1b8 | [
"MIT"
] | null | null | null | jaxrl/networks/common.py | ethanluoyc/jax-rl | 93b41f8038675b5364f89688643f439a21f8e1b8 | [
"MIT"
] | null | null | null | import os
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
import optax
def default_init(scale: Optional[float] = jnp.sqrt(2)):
    """Orthogonal kernel initializer with the given gain (default sqrt(2))."""
    initializer = nn.initializers.orthogonal(scale)
    return initializer
# Shared type aliases for the training utilities below.
# Fix: PRNGKey was assigned twice in a row; the duplicate is removed.
PRNGKey = Any
Params = flax.core.FrozenDict[str, Any]
Shape = Sequence[int]
Dtype = Any  # this could be a real type?
InfoDict = Dict[str, float]  # scalar diagnostics keyed by name
class MLP(nn.Module):
    """Multi-layer perceptron with optional dropout.

    Attributes:
        hidden_dims: output width of each Dense layer, in order.
        activations: nonlinearity applied between layers (default relu).
        activate_final: if True, also apply the activation (and dropout)
            after the last layer.
        dropout_rate: if set, dropout applied after each activation;
            active only when ``training`` is True.
    """
    hidden_dims: Sequence[int]
    activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
    activate_final: int = False
    dropout_rate: Optional[float] = None

    @nn.compact
    def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:
        # Submodules are created inline (nn.compact), so the creation order
        # below fixes the parameter-tree layout — do not reorder.
        for i, size in enumerate(self.hidden_dims):
            x = nn.Dense(size, kernel_init=default_init())(x)
            # Skip activation/dropout after the final layer unless asked.
            if i + 1 < len(self.hidden_dims) or self.activate_final:
                x = self.activations(x)
                if self.dropout_rate is not None:
                    x = nn.Dropout(rate=self.dropout_rate)(
                        x, deterministic=not training)
        return x
# TODO: Replace with TrainState when it's ready
# https://github.com/google/flax/blob/master/docs/flip/1009-optimizer-api.md#train-state
@flax.struct.dataclass
class Model:
    """Immutable bundle tying a network's apply fn, params and optimizer.

    As a ``flax.struct.dataclass`` this is a jax pytree: instances can cross
    jit/grad boundaries, and every "mutation" returns a fresh Model via the
    generated ``replace``.
    """
    step: int
    # apply_fn and tx are marked pytree_node=False: they are static objects,
    # not leaves to be traced/differentiated.
    apply_fn: Callable[..., Any] = flax.struct.field(pytree_node=False)
    params: Params
    tx: Optional[optax.GradientTransformation] = flax.struct.field(
        pytree_node=False)
    opt_state: Optional[optax.OptState] = None

    @classmethod
    def create(cls,
               model_def: nn.Module,
               inputs: Sequence[jnp.ndarray],
               tx: Optional[optax.GradientTransformation] = None) -> 'Model':
        """Initialise parameters from ``model_def`` and wrap them in a Model.

        Args:
            model_def: flax module defining the network.
            inputs: arguments passed to ``model_def.init`` (rng key first).
            tx: optional optax optimizer; when given, its state is
                initialised here as well.
        """
        variables = model_def.init(*inputs)
        _, params = variables.pop('params')

        if tx is not None:
            opt_state = tx.init(params)
        else:
            opt_state = None

        return cls(step=1,
                   apply_fn=model_def.apply,
                   params=params,
                   tx=tx,
                   opt_state=opt_state)

    def __call__(self, *args, **kwargs):
        """Run the network with the stored parameters."""
        return self.apply_fn({'params': self.params}, *args, **kwargs)

    def apply_gradient(
            self,
            loss_fn: Callable[[Params], Any],
            has_aux: bool = True) -> Union[Tuple['Model', Any], 'Model']:
        """Take one optimizer step on ``loss_fn`` and return the new Model.

        Args:
            loss_fn: maps params to a scalar loss (optionally plus aux data).
            has_aux: whether ``loss_fn`` also returns auxiliary output.

        Returns:
            The updated Model, or ``(updated_model, aux)`` when ``has_aux``.
        """
        grad_fn = jax.grad(loss_fn, has_aux=has_aux)
        if has_aux:
            grads, aux = grad_fn(self.params)
        else:
            grads = grad_fn(self.params)

        updates, new_opt_state = self.tx.update(grads, self.opt_state,
                                                self.params)
        new_params = optax.apply_updates(self.params, updates)

        new_model = self.replace(step=self.step + 1,
                                 params=new_params,
                                 opt_state=new_opt_state)
        if has_aux:
            return new_model, aux
        else:
            return new_model

    def save(self, save_path: str):
        """Serialise params to ``save_path``, creating parent directories."""
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        with open(save_path, 'wb') as f:
            f.write(flax.serialization.to_bytes(self.params))

    def load(self, load_path: str) -> 'Model':
        """Return a copy of this Model with params read from ``load_path``."""
        with open(load_path, 'rb') as f:
            params = flax.serialization.from_bytes(self.params, f.read())
        return self.replace(params=params)
| 32.415094 | 88 | 0.597788 |
98d9e2740f28b3cc8cc643fbf90cf9b439a1fc53 | 4,331 | py | Python | strategy_and_environment/strategy_banditsMETC.py | ShenGroup/MPMAB_BEACON | 5045a4921cf178380d7ecb8d9aca7de2fa7c3641 | [
"MIT"
] | 1 | 2022-01-10T02:05:37.000Z | 2022-01-10T02:05:37.000Z | strategy_and_environment/strategy_banditsMETC.py | WeiXiongUST/MPMAB_BEACON | 5045a4921cf178380d7ecb8d9aca7de2fa7c3641 | [
"MIT"
] | null | null | null | strategy_and_environment/strategy_banditsMETC.py | WeiXiongUST/MPMAB_BEACON | 5045a4921cf178380d7ecb8d9aca7de2fa7c3641 | [
"MIT"
] | 1 | 2021-12-05T13:32:18.000Z | 2021-12-05T13:32:18.000Z | from .communication_helper import *
class METC_FullSensingMultiPlayerMAB(object):
"""
Structure of stochastic MPMAB in the full sensing model. The model can be either Homogeneous or Heterogeneous.
"""
def __init__(self,
means,
nplayers,
narms,
horizon,
strategy,
reward_func='linear',
**kwargs):
self.K = narms
self.M = nplayers
self.T = horizon
self.means = np.array(means)
self.players = [strategy(narms=self.K, **kwargs) for _ in range(nplayers)]
self.strategy = strategy
self.rewards = []
self.history = []
self.reward_func = reward_func
self.epsilon = 0.01
def reward_function(self, rews):
if self.reward_func == 'linear':
return np.sum(rews)
elif self.reward_func == 'max_min fairness':
return np.min(rews)
elif self.reward_func == 'proportional fairness':
return np.sum(np.log(self.epsilon + rews))
def simulate_single_step_rewards(self):
return np.random.binomial(1, self.means)
def simulate_single_step(self, plays):
unique, counts = np.unique(plays, return_counts=True) # compute the number of pulls per arm
# remove the collisions
collisions = unique[counts > 1] # arms where collisions happen
cols = np.array([p in collisions for p in plays]) # the value is 1 if there is collision (at the arm)
rewards = self.simulate_single_step_rewards() # generate the statistic X_k(t)
rews = rewards[list(range(self.M)), plays] * (1 - cols)
return list(zip(rewards[list(range(self.M)), plays], cols)), rews
def simulate(self):
last_flag = False
for t in range(self.T):
plays = [(int)(player.play()) for player in self.players] # plays of all players
obs, rews = self.simulate_single_step(plays) # observations of all players
for i in range(self.M):
self.players[i].update(plays[i], obs[i]) # update strategies of all player
reward_one_round = self.reward_function(rews)
self.rewards.append(reward_one_round) # list of rewards
for i in range(self.M):
if last_flag and self.players[i].is_leader and self.players[i].phase == self.players[i].EXPLORATION:
leader = 0
for k in range(self.M):
if self.players[k].is_leader:
leader = k
for j in range(self.players[i].record_arm_to_explore.shape[1]):
tmp = 0
for ii in range(self.M):
indx = self.players[ii].relative_position - 1
tmp += self.means[ii, self.players[leader].record_arm_to_explore[indx, j]]
for l in range(2**self.players[leader].p):
self.history.append(tmp)
last_flag = False
if self.players[i].is_leader and self.players[i].phase == self.players[i].COMMUNICATION:
last_flag = True
def get_results(self):
# Find optimal matching first
best_choice = Oracle(self.means, self.reward_func)
top_mean = 0
if self.reward_func == 'linear':
top_mean = np.sum(self.means[list(range(self.M)), best_choice])
elif self.reward_func == 'max_min fairness':
top_mean = np.prod(self.means[list(range(self.M)), best_choice])
elif self.reward_func == 'proportional fairness':
top_mean = np.sum(self.means[list(range(self.M)), best_choice]*np.log(1+self.epsilon)) + np.sum((1-self.means[list(range(self.M)), best_choice])*np.log(self.epsilon))
best_case_reward = top_mean * np.arange(1, self.T+1)
cumulated_reward = np.cumsum(self.rewards)
regret = best_case_reward - cumulated_reward[:self.T]
return regret
def reset(self):
    """Restore the simulator to a fresh pre-run state."""
    self.rewards = []
    self.history = []
    # Re-instantiate one strategy object per player.
    self.players = [self.strategy(narms=self.K) for _ in range(self.M)]
| 43.31 | 179 | 0.56592 |
33e69109c61a310441c6f32b622fe9bc8c44e090 | 2,291 | py | Python | make.py | lyan-pi/python-pawncc | 3eaf15d1959fb50343a48dca0e082ed064fb3658 | [
"MIT"
] | null | null | null | make.py | lyan-pi/python-pawncc | 3eaf15d1959fb50343a48dca0e082ed064fb3658 | [
"MIT"
] | null | null | null | make.py | lyan-pi/python-pawncc | 3eaf15d1959fb50343a48dca0e082ed064fb3658 | [
"MIT"
] | null | null | null |
#
# https://spdx.org/licenses/MIT.html
# https://opensource.org/licenses/MIT
#
# Full name: MIT License
# Short identifier: MIT
#
# Copyright (c) 2021 - 2022 Lyan.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from datetime import datetime
from datetime import date
import subprocess

# Path to folder with your server files, e.g: E:/samp-server.
path = ""

# Version include file written by this script and read by the gamemode.
VERSION_FILE = f'{path}/gamemodes/src/build/version.inc'

# Read the current contents of the version include, one stripped line each.
# (Context manager replaces the manual open()/close() pair.)
old_contents = []
with open(VERSION_FILE, 'r') as build:
    for line in build:
        old_contents.append(line.strip())

# The minor build counter lives on the 4th line ("#define BUILD_MINOR <n>");
# take the first integer token found on that line.
minor_build = 0
for word in old_contents[3].split():
    if word.isdigit():
        minor_build = int(word)

today = date.today()
now = datetime.now()
major_build = today.strftime("%Y")
df = today.strftime("%d.%m.%Y")
hf = now.strftime("%H:%M")
minor_build += 1  # bump the build counter for this compile

info = [
    f"#define BUILD_DATE \"{df}\"\n",
    f"#define BUILD_TIME \"{hf}\"\n",
    f"#define BUILD_MAJOR {major_build}\n",
    f"#define BUILD_MINOR {minor_build}\n",
    "#define AUTHOR \"Lyan\"\n"
]

with open(VERSION_FILE, 'w') as build:
    build.writelines(info)

# Invoke the Pawn compiler on the gamemode source.
# Path separators normalized to '/' (the original mixed '/' and '\').
subprocess.run(f"{path}/build/pawncc.exe {path}/gamemodes/main.pwn -Dgamemodes -;+ -(+ -d3 -Z+")
| 32.267606 | 97 | 0.693147 |
38ac132375ecb2ab0d618825b099ea062fdfcb1f | 2,140 | py | Python | run_model.py | qinglanDeku/SRN-Deblur | 5855e3fb7053753a78a7a128a9ccd9321c03ab0c | [
"MIT"
] | null | null | null | run_model.py | qinglanDeku/SRN-Deblur | 5855e3fb7053753a78a7a128a9ccd9321c03ab0c | [
"MIT"
] | null | null | null | run_model.py | qinglanDeku/SRN-Deblur | 5855e3fb7053753a78a7a128a9ccd9321c03ab0c | [
"MIT"
] | null | null | null | import os
import argparse
import tensorflow as tf
# import models.model_gray as model
# import models.model_color as model
import models.model as model
def parse_args():
    """Build and parse the command-line options for deblur train/test runs."""
    arg_parser = argparse.ArgumentParser(description='deblur arguments')
    # Local alias keeps each option on a single readable line; every call
    # mirrors the original flags, defaults and help strings exactly.
    add = arg_parser.add_argument
    add('--phase', type=str, default='test', help='determine whether train or test')
    add('--datalist', type=str, default='./datalist_gopro.txt', help='training datalist')
    add('--model', type=str, default='color', help='model type: [lstm | gray | color]')
    add('--batch_size', help='training batch size', type=int, default=16)
    add('--epoch', help='training epoch number', type=int, default=4000)
    add('--lr', type=float, default=1e-4, dest='learning_rate', help='initial learning rate')
    add('--gpu', dest='gpu_id', type=str, default='0', help='use gpu or cpu')
    add('--height', type=int, default=720,
        help='height for the tensorflow placeholder, should be multiples of 16')
    add('--width', type=int, default=1280,
        help='width for the tensorflow placeholder, should be multiple of 16 for 3 scales')
    add('--input_path', type=str, default='./testing_set',
        help='input path for testing images')
    add('--output_path', type=str, default='./testing_res',
        help='output path for testing images')
    return arg_parser.parse_args()
def main(_):
    """Entry point for tf.app.run: parse args, select device, train or test."""
    args = parse_args()

    # set gpu/cpu mode: an empty CUDA_VISIBLE_DEVICES forces CPU execution
    if int(args.gpu_id) >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''

    # set up deblur model and dispatch on the requested phase
    deblur = model.DEBLUR(args)
    if args.phase == 'test':
        deblur.test(args.height, args.width, args.input_path, args.output_path)
    elif args.phase == 'train':
        deblur.train()
    else:
        print('phase should be set to either test or train')
if __name__ == '__main__':
    # tf.app.run parses flags and invokes main(_) above.
    tf.compat.v1.app.run()
| 41.960784 | 109 | 0.664019 |
c15207c76b7da73cc1243d8abe9e8a479041157b | 1,116 | py | Python | grin-py/grinlib/pool.py | davidftv/grin-pool | 800ff06e541a06726957fb323a4a8b982d70d371 | [
"Apache-2.0"
] | null | null | null | grin-py/grinlib/pool.py | davidftv/grin-pool | 800ff06e541a06726957fb323a4a8b982d70d371 | [
"Apache-2.0"
] | null | null | null | grin-py/grinlib/pool.py | davidftv/grin-pool | 800ff06e541a06726957fb323a4a8b982d70d371 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Routines for getting Grin Pool data
#
import sys
import time
import requests
import json
from grinlib import lib
from grinbase.model.pool_blocks import Pool_blocks
# Share graph rate
def calculate_graph_rate(difficulty, ts1, ts2, n):
    """Estimate the graph rate (graphs per second) from shares found.

    Args:
        difficulty: share difficulty. Currently unused; kept for API
            compatibility (see the commented-out formula below).
        ts1, ts2: datetime bounds of the observation window.
        n: number of shares found between ts1 and ts2.

    Returns:
        float graphs/second, or 0 when n is 0 or the window is empty.
    """
    # Original intended formula: gps = 42 * (diff/scale) / 60
    # XXX TODO: Assumes cuckoo 30 for all blocks - Fixes for cuckatoos?
    # (the unused 'scale = 29.0' local from that formula has been removed)
    timedelta = (ts2 - ts1).total_seconds()
    if n == 0 or timedelta == 0:
        return 0
    return (42.0 * float(n)) / float(timedelta)
| 29.368421 | 74 | 0.721326 |
762f8ee5dc1c607e6a9fd41fa39d4869cc3803b5 | 7,345 | py | Python | tools_2/inferring/flask_infer.py | hukefei/chongqing_contest | c38ae3e6f25230282c65cdd568de93f28e88c6d6 | [
"Apache-2.0"
] | 1 | 2021-04-12T13:29:54.000Z | 2021-04-12T13:29:54.000Z | tools_2/inferring/flask_infer.py | hukefei/chongqing_contest | c38ae3e6f25230282c65cdd568de93f28e88c6d6 | [
"Apache-2.0"
] | 1 | 2021-04-12T13:31:27.000Z | 2021-04-12T13:33:10.000Z | tools_2/inferring/flask_infer.py | hukefei/chongqing_contest | c38ae3e6f25230282c65cdd568de93f28e88c6d6 | [
"Apache-2.0"
] | 1 | 2021-04-21T10:14:15.000Z | 2021-04-21T10:14:15.000Z | #!/usr/bin/env python
# encoding:utf-8
"""
author: liusili
@l@icense: (C) Copyright 2019, Union Big Data Co. Ltd. All rights reserved.
@contact: liusili@unionbigdata.com
@software:
@file: flask_infer
@time: 2019/12/2
@desc:
"""
import os
import cv2
import numpy as np
import time, datetime
from mmdet.apis import init_detector, inference_detector, show_result
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
basedir = '/home/Visionox/V3/OLED_deploy/'
model = None
labels = []
# NMS_THD = 0.55
NMS_THD = 0.3
def initModel():
    """Load class labels and the mmdetection model into module globals.

    Side effects: fills the module-level ``labels`` list from classes.txt
    (one label per line, blanks skipped) and binds the module-level
    ``model`` to a detector loaded on cuda:0.
    """
    global model
    global labels
    config_file = basedir + 'config_OLED.py'
    checkpoint_file = basedir + 'v3_oled_deploy.pth'
    for line in open(basedir + 'classes.txt', "r"):
        lineTemp = line.strip()
        if lineTemp:
            labels.append(lineTemp)
    model = init_detector(config_file, checkpoint_file, device='cuda:0')
def NMS(bboxes, score, thresh):
    """Greedy non-maximum suppression.

    Repeatedly keeps the highest-scoring remaining box and discards every
    other box whose IoU with it exceeds ``thresh``.

    Args:
        bboxes: sequence of [x1, y1, x2, y2] boxes.
        score: per-box confidence scores.
        thresh: IoU threshold above which a box is suppressed.

    Returns:
        Indices of surviving boxes, best score first.
    """
    boxes = np.array(bboxes)
    scores = np.array(score)
    left, top = boxes[:, 0], boxes[:, 1]
    right, bottom = boxes[:, 2], boxes[:, 3]
    # +1 keeps the original inclusive-pixel area convention.
    areas = (right - left + 1) * (bottom - top + 1)

    remaining = scores.argsort()[::-1]  # candidate indices, best first
    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        rest = remaining[1:]
        # Intersection of the kept box with every remaining candidate.
        ix1 = np.maximum(left[best], left[rest])
        iy1 = np.maximum(top[best], top[rest])
        ix2 = np.minimum(right[best], right[rest])
        iy2 = np.minimum(bottom[best], bottom[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (areas[best] + areas[rest] - inter)
        # Survivors: candidates overlapping the kept box no more than thresh.
        remaining = rest[np.where(iou <= thresh)[0]]
    return keep
def selectClsScoreBoxFromResult(result, cls_names):
    """Flatten an mmdetection result into parallel per-detection lists.

    Args:
        result: per-class list of (N, 5) arrays ([x1, y1, x2, y2, score]),
            or a (bbox_result, segm_result) tuple from a mask model.
        cls_names: tuple/list mapping class index -> class name.

    Returns:
        (classes, scores, boxes): class names, confidence scores and
        (x1, y1, x2, y2) tuples, one entry per detection.
    """
    assert isinstance(cls_names, (tuple, list))
    if isinstance(result, tuple):
        bbox_result, _segm_result = result  # segmentation masks unused here
    else:
        bbox_result = result
    bboxes = np.vstack(bbox_result)
    # One class id per detection row, matching the per-class grouping.
    # (Renamed from 'labels' to avoid shadowing the module-level global.)
    class_ids = np.concatenate([
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ])
    assert len(class_ids) == len(bboxes)
    selectedCls = []
    selectedScore = []
    selectedBox = []
    for i in range(len(class_ids)):
        selectedCls.append(cls_names[class_ids[i]])
        selectedScore.append(bboxes[i][-1])  # last column is the confidence
        # Dead 'tempBox = []' initializer removed; build the tuple directly.
        selectedBox.append((bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][3]))
    return selectedCls, selectedScore, selectedBox
def infer(imagepath, outpath):
    """Run defect detection on one image and write an annotated copy.

    Draws surviving (post-NMS, non-edge) detections onto the image, saves
    it under ``outpath``, and builds a ``result`` dict with the top class,
    score and timing. Errors are swallowed and reported via print.

    NOTE(review): indentation reconstructed from control flow; statements
    are unchanged.
    """
    global model
    img_name = imagepath.split('/')[-1]
    img = open(imagepath, 'rb').read()
    # NOTE(review): read() returns b'' (not None) on an empty file, so this
    # check never triggers; also np.fromstring is deprecated (frombuffer).
    if img == None:
        print('img is none')
    nparr = np.fromstring(img, np.uint8)
    img_np = cv2.imdecode(nparr, 1)
    # edge crop: keep only the left 1228 pixel columns
    img_np = img_np[:, :1228, :]
    # opzealot
    height = img_np.shape[0]
    width = img_np.shape[1]
    sys_time = int(int(round(time.time() * 1000)))  # start time in ms
    cur_dir = os.getcwd()
    localtime = time.localtime(time.time())
    result = {}
    result['defect'] = 0
    try:
        out = inference_detector(model, img_np)
        print(out)
        log_codes = []
        log_scores = []
        bboxs = []
        log_codes, log_scores, bboxs = selectClsScoreBoxFromResult(out, labels)
        if len(log_codes) != 0:
            result['defect'] = 1
            # Suppress overlapping detections when there is more than one.
            validResult = np.arange(0, len(bboxs))
            if len(bboxs) > 1:
                validResult = NMS(bboxs, log_scores, NMS_THD)
            for index in validResult:
                # ignore edge codes: zero the score of detections whose
                # center lies within 100 px of any image border
                xmin = bboxs[index][0]
                ymin = bboxs[index][1]
                xmax = bboxs[index][2]
                ymax = bboxs[index][3]
                center_x = (xmin + xmax) // 2
                center_y = (ymin + ymax) // 2
                if center_x < 100 or center_y < 100 or center_x > width - 100 \
                        or center_y > height - 100:
                    log_scores[index] = 0
                if log_scores[index] > 0:
                    # Annotate the surviving detection on the image.
                    cv2.rectangle(img_np, (bboxs[index][0], bboxs[index][1]),
                                  (bboxs[index][2], bboxs[index][3]), (0, 255, 255), thickness=2)
                    strText = str(log_codes[index]) + ': ' + str(log_scores[index])
                    cv2.putText(img_np, strText, (bboxs[index][0], bboxs[index][1]),
                                cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 0), 2)
            target_img_dir = outpath
            os.makedirs(target_img_dir, exist_ok=True)
            target_img_file_path = os.path.join(target_img_dir, img_name)
            cv2.imwrite(target_img_file_path, img_np)
            result['log_codes'] = log_codes
            result['log_score'] = str(log_scores)
            out_label = None
            out_score = None
            out_bbox = None
            if len(log_scores) == 0:
                out_label = 'Others'
                out_score = str(0.0)
                out_bbox = None
            else:
                # Report the single best-scoring detection.
                out_score = max(log_scores)
                out_label = log_codes[log_scores.index(out_score)]
                out_bbox = bboxs[log_scores.index(out_score)]
                if out_score < 0.4:
                    out_label = 'Others'
                # opzealot set the background threshold
                if out_score < 0.01:
                    out_label = 'OK'
                    out_score = 0.99
            result['img_cls'] = out_label
            result['img_score'] = str(out_score)
            result['detect_begin_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            result['detect_cost_time'] = '{:.2f}'.format(int(int(round(time.time() * 1000))) - sys_time)
            result['savepath'] = imagepath.replace('input', 'result')
            print(result)
        else:
            # No detections at all: save the unannotated image, report OK.
            target_img_dir = outpath
            os.makedirs(target_img_dir, exist_ok=True)
            target_img_file_path = os.path.join(target_img_dir, img_name)
            cv2.imwrite(target_img_file_path, img_np)
            print('save image to {}'.format(target_img_file_path))
            result['defect'] = 1
            result['img_cls'] = 'OK'
            result['img_score'] = str(0.99)
            result['detect_begin_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            result['detect_cost_time'] = '{:.2f}'.format(int(int(round(time.time() * 1000))) - sys_time)
            result['savepath'] = target_img_file_path
            print(result)
    except Exception as e:
        # Best-effort pipeline: any failure resets the result and is printed.
        result = {}
        result['defect'] = 0
        print(e)
if __name__ == '__main__':
    # Smoke test: load the model once and run inference on a sample image.
    initModel()
    imagepath = '/home/Visionox/V3/OLED_deploy/OLED_test/L2E9A23A7151AA_-690771_331328_MID_A_BF_BL_R_REV.jpg'
    outpath = '/home/Visionox/V3/OLED_deploy/output'
    infer(imagepath, outpath)
44d1297cdfe6b2143cdd41be7c0d59cf55fc4942 | 1,452 | py | Python | common/error.py | voloshanenko/smsgateway | a1509c7b98d844bc483a47173223062e6e1c2bc6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | common/error.py | voloshanenko/smsgateway | a1509c7b98d844bc483a47173223062e6e1c2bc6 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2021-03-26T00:43:30.000Z | 2022-03-29T22:03:58.000Z | common/error.py | voloshanenko/smsgateway | a1509c7b98d844bc483a47173223062e6e1c2bc6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2015 Neuhold Markus and Kleinsasser Mario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, "..")
class Error(Exception):
    """Base class for all exceptions raised by this package."""
    pass
class UserNotFoundError(Error):
    """Raised when a requested user cannot be found."""
    pass
class ConfigError(Error):
    """Exception raised for errors in configuration handling.

    Attributes:
        message -- explanation of the error
        baseexcepton -- the original exception being wrapped.
            NOTE(review): attribute keeps the existing misspelled
            'baseexcepton' name; renaming it would break callers.
    """

    def __init__(self, message, baseexception):
        self.message = message
        self.baseexcepton = baseexception
class DatabaseError(Error):
    """Exception raised for errors in database handling.

    Attributes:
        message -- explanation of the error
        baseexcepton -- the original exception being wrapped.
            NOTE(review): attribute keeps the existing misspelled
            'baseexcepton' name; renaming it would break callers.
    """

    def __init__(self, message, baseexception):
        self.message = message
        self.baseexcepton = baseexception
| 28.470588 | 74 | 0.714187 |
9f4e08b1c92ecbf7f11f38d3b61af74f28121e96 | 395 | py | Python | scripts/op_data_test.py | smilelight/lightLabel | 18aeaf49e1aba7c8857f6182e52f9418651d15dc | [
"Apache-2.0"
] | 3 | 2020-02-21T10:55:20.000Z | 2021-03-31T18:12:20.000Z | scripts/op_data_test.py | smilelight/lightLabel | 18aeaf49e1aba7c8857f6182e52f9418651d15dc | [
"Apache-2.0"
] | null | null | null | scripts/op_data_test.py | smilelight/lightLabel | 18aeaf49e1aba7c8857f6182e52f9418651d15dc | [
"Apache-2.0"
] | 4 | 2020-02-27T12:24:38.000Z | 2021-07-09T08:33:17.000Z | # -*- coding: utf-8 -*-
# @Time : 2020/2/17 15:23
# @Author : lightsmile
# @FileName: op_data_test.py
# @Software: PyCharm
from lightlabel.db.mongodb import MongoDB

# Connect to the 'ttt_demo' collection of the 'light_label_corpus' database.
db = MongoDB('light_label_corpus', 'ttt_demo')

# Attach a label to the document whose raw_data.word matches.
result = db.update({
    'raw_data.word': '李白'
}, {'labeled_data': '诗人'})
print(result)

# Read the document back to confirm the update.
print(db.query({
    'raw_data.word': '李白'
}))

# Dump the whole collection for inspection.
for item in db.get_all():
    print(item)
| 20.789474 | 46 | 0.64557 |
5818cb554a8babbbe4d9ded9f26da106e112acdb | 565 | py | Python | bitwise.py | kigane/opencv-tutorial | 90f21e0a82a6db00b1c3b3982891eeac477beb3c | [
"MIT"
] | null | null | null | bitwise.py | kigane/opencv-tutorial | 90f21e0a82a6db00b1c3b3982891eeac477beb3c | [
"MIT"
] | null | null | null | bitwise.py | kigane/opencv-tutorial | 90f21e0a82a6db00b1c3b3982891eeac477beb3c | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
img = cv.imread('images/cat.jpg')
resized = cv.resize(img, (400, 400), interpolation=cv.INTER_AREA)
blank = np.zeros((400, 400), dtype='uint8')
rect = cv.rectangle(blank.copy(), (30, 30), (370, 370), 255, thickness=-1)
circle = cv.circle(blank.copy(), (200, 200), 200, 255, thickness=-1)
cv.imshow('Rectangle', rect)
cv.imshow('Circle', circle)
weired = cv.bitwise_and(rect, circle)
cv.imshow('Bitwise and', weired)
weiredImg = cv.bitwise_and(resized, resized, mask=weired)
cv.imshow('Weired Img', weiredImg)
cv.waitKey(0) | 26.904762 | 74 | 0.700885 |
2fbebc00fe352812eb28e80ce0420acf7df63bba | 35,015 | py | Python | trankit/adapter_transformers/modeling_roberta.py | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 | [
"Apache-2.0"
] | 613 | 2021-01-12T14:21:13.000Z | 2022-03-29T19:51:47.000Z | trankit/adapter_transformers/modeling_roberta.py | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 | [
"Apache-2.0"
] | 38 | 2021-01-13T12:01:15.000Z | 2022-03-31T14:13:44.000Z | trankit/adapter_transformers/modeling_roberta.py | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 | [
"Apache-2.0"
] | 77 | 2021-01-13T07:33:26.000Z | 2022-03-29T19:51:50.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import logging
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from .adapter_bert import BertModelHeadsMixin
from .adapter_model_mixin import ModelWithHeadsAdaptersMixin
from .adapter_utils import parse_adapter_names
from .configuration_roberta import RobertaConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
from .modeling_utils import create_position_ids_from_input_ids
logger = logging.getLogger(__name__)

# Shortcut model identifiers resolvable by from_pretrained.
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "roberta-base",
    "roberta-large",
    "roberta-large-mnli",
    "distilroberta-base",
    "roberta-base-openai-detector",
    "roberta-large-openai-detector",
    # See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(BertEmbeddings):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings
    indexing: position ids start at ``padding_idx + 1`` and padded tokens
    keep ``padding_idx``.
    """

    def __init__(self, config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        # Re-create both embedding tables so the pad token has its own
        # (non-updated) row via padding_idx.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
        return super().forward(
            input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds
        )

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """We are provided embeddings directly. We cannot infer which are
        padded so just generate sequential position ids.

        :param torch.Tensor inputs_embeds: embedded inputs; position ids are
            derived from its first two dimensions (batch, seq_len)
        :return torch.Tensor: position ids of shape (batch, seq_len)
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        # Positions run from padding_idx + 1 upward, matching the
        # input_ids-based path above.
        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)
ROBERTA_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.RobertaTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    ROBERTA_START_DOCSTRING,
)
class RobertaModel(BertModel):
    """
    This class overrides :class:`~transformers.BertModel`. Please check the
    superclass for the appropriate documentation alongside usage examples.
    """

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        # Swap in RoBERTa-style embeddings (padding-aware position ids).
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
@add_start_docstrings(
    """Roberta Model transformer with the option to add multiple flexible heads on top.""", ROBERTA_START_DOCSTRING,
)
class RobertaModelWithHeads(BertModelHeadsMixin, BertPreTrainedModel):
    """RoBERTa encoder whose prediction heads are configured dynamically
    via the adapter-heads mixin rather than fixed at class definition."""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.roberta = RobertaModel(config)
        self._init_head_modules()

        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        adapter_names=None,
        head=None,
    ):
        # Flatten any extra leading dimensions down to (batch, seq_len).
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            adapter_names=adapter_names,
        )
        # Route the encoder output through the selected prediction head.
        outputs = self.forward_head(outputs, head_name=head, attention_mask=attention_mask, labels=labels,)

        return outputs
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(ModelWithHeadsAdaptersMixin, BertPreTrainedModel):
    """RoBERTa encoder plus an MLM head, with adapter support."""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.roberta = RobertaModel(config)
        self.lm_head = RobertaLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        # The LM head's decoder matrix serves as the output embedding table.
        return self.lm_head.decoder

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        masked_lm_labels=None,
        adapter_names=None,
    ):
        r"""
        masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for
            tokens with labels in ``[0, ..., config.vocab_size]``.

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
        masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import RobertaTokenizer, RobertaForMaskedLM
        import torch

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMaskedLM.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            adapter_names=adapter_names,
        )
        sequence_output = outputs[0]

        # TODO: Currently no fusion over invertible adapters, takes only very first language adapter position
        if adapter_names is not None and len(adapter_names) > 0:
            adapter_names = parse_adapter_names(adapter_names)
            language = adapter_names[0][0]
        else:
            language = None

        prediction_scores = self.lm_head(
            sequence_output, inv_lang_adapter=self.roberta.get_invertible_lang_adapter(language),
        )

        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here

        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class RobertaLMHead(nn.Module):
    """Roberta Head for masked language modeling: dense -> gelu ->
    layer-norm -> vocabulary projection (with tied bias)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, features, inv_lang_adapter=None, **kwargs):
        x = self.dense(features)
        x = gelu(x)
        x = self.layer_norm(x)

        # Reverse pass through the invertible language adapter, if any.
        if inv_lang_adapter:
            x = inv_lang_adapter(x, rev=True)

        # project back to size of vocabulary with bias
        x = self.decoder(x)

        return x
@add_start_docstrings(
    """RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(ModelWithHeadsAdaptersMixin, BertPreTrainedModel):
    """RoBERTa encoder plus a classification head over the first token.

    NOTE(review): unlike the sibling classes above, __init__ does not call
    self.init_weights() here -- confirm whether that is intentional.
    """

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config)
        # RobertaClassificationHead is defined elsewhere in this module.
        self.classifier = RobertaClassificationHead(config)

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        adapter_names=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import RobertaTokenizer, RobertaForSequenceClassification
        import torch

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForSequenceClassification.from_pretrained('roberta-base')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            adapter_names=adapter_names,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Roberta Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(ModelWithHeadsAdaptersMixin, BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # A single score per choice; the scores are later reshaped to
        # (batch_size, num_choices) and treated as class logits.
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        attention_mask=None,
        labels=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        adapter_names=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
            loss (:obj:`torch.FloatTensor`` of shape ``(1,)`, `optional`, returned when :obj:`labels` is provided):
                Classification loss.
            classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
                `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
                Classification scores (before SoftMax).
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            from transformers import RobertaTokenizer, RobertaForMultipleChoice
            import torch

            tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
            model = RobertaForMultipleChoice.from_pretrained('roberta-base')
            choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
            input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
            labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            loss, classification_scores = outputs[:2]
        """
        # Fix: ``inputs_embeds`` used to be accepted but silently ignored, and
        # ``num_choices`` was always read from ``input_ids`` (raising
        # AttributeError when only embeddings were supplied).  Support either
        # input form, mirroring the other Roberta heads and upstream transformers.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Fold the choice dimension into the batch dimension so the encoder
        # sees (batch_size * num_choices, sequence_length).
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.roberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            adapter_names=adapter_names,
        )
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Unfold back to one score per choice.
        reshaped_logits = logits.view(-1, num_choices)

        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs

        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Roberta Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(ModelWithHeadsAdaptersMixin, BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Per-token projection from hidden size to the label space.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        adapter_names=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
                Classification loss.
            scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
                Classification scores (before SoftMax).
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            from transformers import RobertaTokenizer, RobertaForTokenClassification
            import torch

            tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
            model = RobertaForTokenClassification.from_pretrained('roberta-base')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
            outputs = model(input_ids, labels=labels)
            loss, scores = outputs[:2]
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            adapter_names=adapter_names,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Positions where attention_mask == 1 are real tokens; padded
                # positions get their label replaced by the loss function's
                # ignore_index so they contribute nothing to the loss.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), scores, (hidden_states), (attentions)
class RobertaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks.

    Maps token features of shape ``(batch, seq_len, hidden)`` to class logits
    of shape ``(batch, num_labels)`` using only the first (<s>) token.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        # The first token is <s>, RoBERTa's equivalent of BERT's [CLS].
        cls_state = features[:, 0, :]
        # dropout -> dense -> tanh -> dropout -> projection to label space
        cls_state = self.dense(self.dropout(cls_state))
        cls_state = self.dropout(torch.tanh(cls_state))
        return self.out_proj(cls_state)
@add_start_docstrings(
    """Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(ModelWithHeadsAdaptersMixin, BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config)
        # Produces two logits per token: span-start and span-end
        # (num_labels is expected to be 2 for this head).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        adapter_names=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.

        Returns:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
            loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
                Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
            start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
                Span-start scores (before SoftMax).
            end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
                Span-end scores (before SoftMax).
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
                of shape :obj:`(batch_size, sequence_length, hidden_size)`.
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
                :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
                Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
                heads.

        Examples::

            # The checkpoint roberta-large is not fine-tuned for question answering. Please see the
            # examples/question-answering/run_squad.py example to see how to fine-tune a model to a question answering task.

            from transformers import RobertaTokenizer, RobertaForQuestionAnswering
            import torch

            tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
            model = RobertaForQuestionAnswering.from_pretrained('roberta-base')

            question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
            input_ids = tokenizer.encode(question, text)
            start_scores, end_scores = model(torch.tensor([input_ids]))

            all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
            answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
        """
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            adapter_names=adapter_names,
        )

        sequence_output = outputs[0]

        # Split the 2-channel output into per-token start and end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamping to sequence_length maps them onto the CrossEntropyLoss
            # ignore_index below, so they contribute nothing to the loss)
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Total loss is the mean of the start- and end-position losses.
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs

        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 45.65189 | 150 | 0.658089 |
ceadc210b1e5f8e05ce931c695ff14c2ca5babbe | 235 | py | Python | chapter16/example/example14.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | chapter16/example/example14.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
] | null | null | null | chapter16/example/example14.py | YordanIH/Intro_to_CS_w_Python | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | [
"MIT"
import tkinter

# Build a minimal window: a frame containing a "Name:" label and a text entry.
root = tkinter.Tk()
container = tkinter.Frame(root)
container.pack()

name_label = tkinter.Label(container, text='Name:')
name_label.grid(row=0, column=0)

name_entry = tkinter.Entry(container)
name_entry.grid(row=1, column=1)

# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
a1fd979bf19333a73509a82a93c32dfcce5beea3 | 8,350 | py | Python | pghoard/rohmu/delta/common.py | st3fan/sphinx-automation-experiment | c92c8400770c6c604e2451e4f1e71957fc4c5ef8 | [
"Apache-2.0"
] | 731 | 2018-06-01T21:48:43.000Z | 2022-03-29T08:21:42.000Z | pghoard/rohmu/delta/common.py | st3fan/sphinx-automation-experiment | c92c8400770c6c604e2451e4f1e71957fc4c5ef8 | [
"Apache-2.0"
] | 124 | 2018-06-19T05:59:50.000Z | 2022-03-31T18:17:59.000Z | pghoard/rohmu/delta/common.py | st3fan/sphinx-automation-experiment | c92c8400770c6c604e2451e4f1e71957fc4c5ef8 | [
"Apache-2.0"
] | 64 | 2018-06-26T14:12:53.000Z | 2022-03-20T07:33:33.000Z | # Copyright (c) 2021 Aiven, Helsinki, Finland. https://aiven.io/
import functools
import hashlib
import json as _json
import logging
import math
import os
from datetime import datetime
from multiprocessing.dummy import Pool
from pathlib import Path
from typing import List, Optional
from pydantic import BaseModel, Field
from pghoard.rohmu.dates import now
# Hash function used for all content digests in this module (blake2s, 32-byte digest).
_hash = hashlib.blake2s

# Precomputed log(1.1): progress reporting density decays roughly every 1.1**N steps.
_log_1_1 = math.log(1.1)

# Hexdigest is 32 bytes, so something orders of magnitude more at least
EMBEDDED_FILE_SIZE = 150

logger = logging.getLogger(__name__)
def hash_hexdigest_readable(f, *, read_buffer=1_000_000):
    """Return the blake2s hexdigest of everything readable from *f*.

    Reads in chunks of *read_buffer* bytes so arbitrarily large files can be
    hashed with constant memory.
    """
    digest = hashlib.blake2s()
    chunk = f.read(read_buffer)
    while chunk:
        digest.update(chunk)
        chunk = f.read(read_buffer)
    return digest.hexdigest()
def increase_worth_reporting(value, new_value=None, *, total=None):
    """Decide whether a counter increase is worth logging.

    Reporting becomes sparser as values grow:
    - with a known *total*, report on every whole-percent change (and always
      on completion or when total <= 100)
    - without a total, report roughly every 1.1**N step
    - small values (<= 10) are always reported

    When called with a single argument it is treated as the new value, with
    the old value being one less.
    """
    if new_value is None:
        value, new_value = value - 1, value
    if total is not None:
        if new_value == total or total <= 100:
            return True
        return (100 * value // total) != (100 * new_value // total)
    if value <= 10 or new_value <= 10:
        return True
    step = math.log(1.1)
    return int(math.log(value) / step) != int(math.log(new_value) / step)
class DeltaModel(BaseModel):
    """Base pydantic model shared by all delta-backup data structures."""

    class Config:
        # As we're keen to both export and decode json, just using
        # enum values for encode/decode is much saner than the default
        # enumname.value (it is also slightly less safe but oh well)
        use_enum_values = True

        # Extra values should be errors, as they are most likely typos
        # which lead to grief when not detected. However, if we ever
        # start deprecating some old fields and not wanting to parse
        # them, this might need to be revisited.
        extra = "forbid"

        # Validate field default values too
        validate_all = True

        # Validate also assignments
        # validate_assignment = True
        # TBD: Figure out why this doesn't work in some unit tests;
        # possibly the tests themselves are broken

    def jsondict(self, **kw):
        """Return this model as a JSON-serializable plain dict."""
        # By default,
        #
        # .json() returns json string.
        # .dict() returns Python dict, but it has things that are not
        # json serializable.
        #
        # We provide json seralizable dict (super inefficiently) here.
        #
        # This is mostly used for test code so that should be fine
        return _json.loads(self.json(**kw))
class SizeLimitedFile:
    """Read-only, binary file-like object exposing only the first
    *file_size* bytes of the file at *path*.

    Usable as a context manager; closing the context closes the file.
    """

    def __init__(self, *, path, file_size):
        self._f = open(path, "rb")
        self._file_size = file_size
        # tell() needs no clamping, so delegate it directly.
        self.tell = self._f.tell

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self._f.close()

    def read(self, n=None):
        """Read up to *n* bytes, never past the configured size limit."""
        remaining = max(0, self._file_size - self._f.tell())
        to_read = remaining if n is None else min(remaining, n)
        return self._f.read(to_read)

    def seek(self, ofs, whence=0):
        """Seek like a regular file, with SEEK_END relative to the size limit."""
        if whence == os.SEEK_END:
            ofs, whence = ofs + self._file_size, os.SEEK_SET
        return self._f.seek(ofs, whence)
class SnapshotHash(DeltaModel):
    """
    This class represents something that is to be stored in the object storage.

    size is provided mainly to allow for even loading of nodes in case
    same hexdigest is available from multiple nodes.
    """

    hexdigest: str
    size: int

    def __eq__(self, other):
        # Equality (like hashing below) considers only the hexdigest;
        # size is intentionally ignored.
        if isinstance(other, SnapshotHash):
            return self.hexdigest == other.hexdigest
        return False

    def __hash__(self):
        # hexdigests should be unique, regardless of size
        return hash(self.hexdigest)
@functools.total_ordering
class SnapshotFile(DeltaModel):
    """Metadata for a single file in a snapshot, keyed by relative_path."""

    relative_path: Path
    file_size: int
    stored_file_size: int
    mtime_ns: int
    # Digest of the file content; presumably empty when the content is
    # embedded in content_b64 instead (cf. EMBEDDED_FILE_SIZE) — TODO confirm.
    hexdigest: str = ""
    content_b64: Optional[str]

    def __lt__(self, o):
        # In our use case, paths uniquely identify files we care about
        return self.relative_path < o.relative_path

    def equals_excluding_mtime(self, o):
        """Compare two snapshot files while ignoring their mtime_ns."""
        return self.copy(update={"mtime_ns": 0}) == o.copy(update={"mtime_ns": 0})

    def open_for_reading(self, root_path):
        """Open the underlying file under *root_path*, capped at file_size bytes."""
        return SizeLimitedFile(path=root_path / self.relative_path, file_size=self.file_size)
class SnapshotState(DeltaModel):
    """Complete description of a filesystem snapshot: the glob patterns used,
    every file's metadata, and any empty directories."""

    root_globs: List[str]
    files: List[SnapshotFile]
    empty_dirs: List[Path]
class SnapshotResult(DeltaModel):
    """Outcome of a snapshot operation, including timing and summary data."""

    # when was the operation started ( / done )
    start: datetime = Field(default_factory=now)
    end: Optional[datetime]

    # should be passed opaquely to restore
    state: Optional[SnapshotState]

    # Summary data for manifest use
    files: int = 0
    total_size: int = 0

    # populated only if state is available
    hashes: Optional[List[SnapshotHash]]
class SnapshotUploadResult(DeltaModel):
    """Byte totals from uploading a snapshot (raw vs. as stored)."""

    total_size: int = 0
    total_stored_size: int = 0
class BackupManifest(DeltaModel):
    """Top-level record of one backup run."""

    start: datetime
    end: datetime = Field(default_factory=now)

    # Filesystem snapshot contents of the backup
    snapshot_result: SnapshotResult

    # What did the upload return (mostly for statistics)
    upload_result: SnapshotUploadResult
class Progress(DeltaModel):
    """ JSON-encodable progress meter of sorts """

    handled: int = 0
    failed: int = 0
    total: int = 0
    # Set once done() is called; no further mutation is allowed afterwards.
    final: bool = False

    def __repr__(self):
        finished = ", finished" if self.final else ""
        return f"{self.handled}/{self.total} handled, {self.failed} failures{finished}"

    def start(self, n):
        " Optional 'first' step, just for logic handling state (e.g. no progress object reuse desired) "
        assert not self.total
        logger.debug("start")
        self.add_total(n)

    def add_total(self, n):
        """Grow the expected total by *n*, logging sparsely."""
        if not n:
            return
        old_total = self.total
        self.total += n
        # increase_worth_reporting keeps the log volume roughly logarithmic.
        if increase_worth_reporting(old_total, self.total):
            logger.debug("add_total %r -> %r", n, self)
        assert not self.final

    def add_fail(self, n=1, *, info="add_fail"):
        """Record *n* failed items."""
        assert n > 0
        old_failed = self.failed
        self.failed += n
        if increase_worth_reporting(old_failed, self.failed):
            logger.debug("%s %r -> %r", info, n, self)
        assert not self.final

    def add_success(self, n=1, *, info="add_success"):
        """Record *n* successfully handled items (must not exceed total)."""
        assert n > 0
        old_handled = self.handled
        self.handled += n
        assert self.handled <= self.total
        if increase_worth_reporting(old_handled, self.handled, total=self.total):
            logger.debug("%s %r -> %r", info, n, self)
        assert not self.final

    def download_success(self, size):
        # Downloads track progress by byte count rather than item count.
        self.add_success(size, info="download_success")

    def upload_success(self, hexdigest):
        self.add_success(info=f"upload_success {hexdigest}")

    def upload_missing(self, hexdigest):
        self.add_fail(info=f"upload_missing {hexdigest}")

    def upload_failure(self, hexdigest):
        self.add_fail(info=f"upload_failure {hexdigest}")

    def done(self):
        """Mark the progress as final; no further updates are accepted."""
        assert self.total is not None and self.handled <= self.total
        assert not self.final
        self.final = True
        logger.debug("done %r", self)

    @property
    def finished_successfully(self):
        # Finished, nothing failed, and everything expected was handled.
        return self.final and not self.failed and self.handled == self.total

    @property
    def finished_failed(self):
        return self.final and not self.finished_successfully

    @classmethod
    def merge(cls, progresses):
        """Combine several progress meters into one aggregate meter."""
        p = cls()
        for progress in progresses:
            p.handled += progress.handled
            p.failed += progress.failed
            p.total += progress.total
        # The merged meter is final only when every constituent is final.
        p.final = all(progress.final for progress in progresses)
        return p
def parallel_map_to(*, fun, iterable, result_callback, n=None) -> bool:
    """Apply *fun* over *iterable* in a thread pool of size *n*.

    Results are delivered in input order to *result_callback* as
    ``result_callback(map_in=..., map_out=...)``.  Stops early and returns
    False as soon as the callback returns a falsy value; returns True when
    every item was processed.
    """
    inputs = list(iterable)
    with Pool(n) as pool:
        for item, result in zip(inputs, pool.imap(fun, inputs)):
            if not result_callback(map_in=item, map_out=result):
                return False
    return True
| 29.5053 | 104 | 0.64491 |
e8c118c89929ea78169e5f4b0c728f4b402f226d | 5,639 | py | Python | crazyflie-lib-python/examples/basiclog.py | manikamakam/swarm | 3d3f4692f1969e0973fa8929660a8d0da53cafa7 | [
"MIT"
] | null | null | null | crazyflie-lib-python/examples/basiclog.py | manikamakam/swarm | 3d3f4692f1969e0973fa8929660a8d0da53cafa7 | [
"MIT"
] | null | null | null | crazyflie-lib-python/examples/basiclog.py | manikamakam/swarm | 3d3f4692f1969e0973fa8929660a8d0da53cafa7 | [
"MIT"
] | 1 | 2019-12-02T01:00:18.000Z | 2019-12-02T01:00:18.000Z | # -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Simple example that connects to the first Crazyflie found, logs the Stabilizer
and prints it to the console. After 10s the application disconnects and exits.
"""
import logging
import time
from threading import Timer
import cflib.crtp # noqa
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)
class LoggingExample:
    """
    Simple logging example class that logs the Stabilizer from a supplied
    link uri and disconnects after 5s.
    """

    def __init__(self, link_uri):
        """ Initialize and run the example with the specified link_uri """

        self._cf = Crazyflie(rw_cache='./cache')

        # Connect some callbacks from the Crazyflie API
        self._cf.connected.add_callback(self._connected)
        self._cf.disconnected.add_callback(self._disconnected)
        self._cf.connection_failed.add_callback(self._connection_failed)
        self._cf.connection_lost.add_callback(self._connection_lost)

        print('Connecting to %s' % link_uri)

        # Try to connect to the Crazyflie
        self._cf.open_link(link_uri)

        # Variable used to keep main loop occupied until disconnect
        self.is_connected = True

    def _connected(self, link_uri):
        """ This callback is called from the Crazyflie API when a Crazyflie
        has been connected and the TOCs have been downloaded."""
        print('Connected to %s' % link_uri)

        # The definition of the logconfig can be made before connecting
        self._lg_stab = LogConfig(name='Stabilizer', period_in_ms=10)
        self._lg_stab.add_variable('stabilizer.roll', 'float')
        self._lg_stab.add_variable('stabilizer.pitch', 'float')
        self._lg_stab.add_variable('stabilizer.yaw', 'float')

        # Adding the configuration cannot be done until a Crazyflie is
        # connected, since we need to check that the variables we
        # would like to log are in the TOC.
        try:
            self._cf.log.add_config(self._lg_stab)
            # This callback will receive the data
            self._lg_stab.data_received_cb.add_callback(self._stab_log_data)
            # This callback will be called on errors
            self._lg_stab.error_cb.add_callback(self._stab_log_error)
            # Start the logging
            self._lg_stab.start()
        except KeyError as e:
            print('Could not start log configuration,'
                  '{} not found in TOC'.format(str(e)))
        except AttributeError:
            print('Could not add Stabilizer log config, bad configuration.')

        # Start a timer to disconnect in 5s
        t = Timer(5, self._cf.close_link)
        t.start()

    def _stab_log_error(self, logconf, msg):
        """Callback from the log API when an error occurs"""
        print('Error when logging %s: %s' % (logconf.name, msg))

    def _stab_log_data(self, timestamp, data, logconf):
        """Callback from the log API when data arrives"""
        print('[%d][%s]: %s' % (timestamp, logconf.name, data))

    def _connection_failed(self, link_uri, msg):
        """Callback when connection initial connection fails (i.e no Crazyflie
        at the specified address)"""
        print('Connection to %s failed: %s' % (link_uri, msg))
        self.is_connected = False

    def _connection_lost(self, link_uri, msg):
        """Callback when disconnected after a connection has been made (i.e
        Crazyflie moves out of range)"""
        print('Connection to %s lost: %s' % (link_uri, msg))

    def _disconnected(self, link_uri):
        """Callback when the Crazyflie is disconnected (called in all cases)"""
        print('Disconnected from %s' % link_uri)
        self.is_connected = False
if __name__ == '__main__':
    # Initialize the low-level drivers (don't list the debug drivers)
    cflib.crtp.init_drivers(enable_debug_driver=False)
    # Scan for Crazyflies and use the first one found
    print('Scanning interfaces for Crazyflies...')
    available = cflib.crtp.scan_interfaces()
    print('Crazyflies found:')
    for i in available:
        print(i[0])
    if len(available) > 0:
        le = LoggingExample(available[0][0])

        # The Crazyflie lib doesn't contain anything to keep the application
        # alive, so this is where your application should do something. In our
        # case we are just waiting until we are disconnected.
        #
        # Fix: the wait loop is now inside the "found" branch — previously it
        # ran unconditionally and raised NameError on `le` when no Crazyflie
        # was found.
        while le.is_connected:
            time.sleep(1)
    else:
        print('No Crazyflies found, cannot run example')
| 39.159722 | 79 | 0.665721 |
308dc35faf80171297ec21cc02646b4db9909458 | 1,623 | py | Python | setup.py | JojoDevel/depthai-lightning | 8146c477a62b9b6ffe4cd4b222988441381bdb62 | [
"MIT"
] | null | null | null | setup.py | JojoDevel/depthai-lightning | 8146c477a62b9b6ffe4cd4b222988441381bdb62 | [
"MIT"
] | null | null | null | setup.py | JojoDevel/depthai-lightning | 8146c477a62b9b6ffe4cd4b222988441381bdb62 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open("README.md", encoding="utf-8") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst", encoding="utf-8") as history_file:
history = history_file.read()
with open("requirements.txt", encoding="utf-8") as requirements_file:
requirements = requirements_file.readlines()
test_requirements = []
setup(
author="JojoDevel",
author_email="14841215+JojoDevel@users.noreply.github.com",
python_requires=">=3.6",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
entry_points={
"console_scripts": [
"depthai_lightning=depthai_lightning.cli:main",
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="depthai_lightning",
name="depthai_lightning",
packages=find_packages(include=["depthai_lightning", "depthai_lightning.*"]),
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/JojoDevel/depthai_lightning",
version="0.0.1",
zip_safe=False,
)
| 31.823529 | 103 | 0.666051 |
76e60cc7cb9b157fb1d801cd980cdd4b381878db | 1,415 | py | Python | schoolport/app_core/migrations/0004_auto_20210505_1932.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0004_auto_20210505_1932.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0004_auto_20210505_1932.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-05 11:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates TB_Param_Course, removes
    tb_course.school_no and relaxes two tb_student alt-parent fields
    (both become nullable CharFields)."""

    dependencies = [
        ('app_core', '0003_auto_20210505_1923'),
    ]

    operations = [
        migrations.CreateModel(
            name='TB_Param_Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_name', models.CharField(max_length=255, verbose_name='Name of Course')),
                ('course_type', models.CharField(max_length=50, null=True, verbose_name='Course Type')),
                ('price', models.FloatField(null=True)),
                ('price_unit', models.CharField(max_length=50, null=True)),
                ('price_currency', models.CharField(max_length=20, null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='tb_course',
            name='school_no',
        ),
        migrations.AlterField(
            model_name='tb_student',
            name='alt_parent_phone_no',
            field=models.CharField(max_length=20, null=True, verbose_name='Alt Parent Phone No'),
        ),
        migrations.AlterField(
            model_name='tb_student',
            name='alt_parent_type',
            field=models.CharField(max_length=20, null=True, verbose_name='Alt Parent Type'),
        ),
    ]
12a485bdd58c047248e781c48c1e18281a531810 | 953 | py | Python | openstack_dashboard/dashboards/project/volumes/snapshots/urls.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/volumes/snapshots/urls.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/volumes/snapshots/urls.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.volumes.snapshots import views
# URL routing for the volume-snapshot panel: detail and update pages,
# both keyed by the snapshot's id captured from the URL.
_snapshot_urls = [
    url(r'^(?P<snapshot_id>[^/]+)$',
        views.DetailView.as_view(),
        name='detail'),
    url(r'^(?P<snapshot_id>[^/]+)/update/$',
        views.UpdateView.as_view(),
        name='update'),
]
urlpatterns = patterns('', *_snapshot_urls)
| 35.296296 | 78 | 0.695698 |
8505ec94b4c206aaa6a6593ccc9d3c1ea28d3a2e | 1,265 | py | Python | setup.py | mtanghu/URF | 4c541e9b20b0e3faabdae84b956faf61a8ef31d7 | [
"Apache-2.0"
] | 7 | 2018-08-27T13:11:02.000Z | 2022-03-19T18:38:50.000Z | setup.py | mtanghu/URF | 4c541e9b20b0e3faabdae84b956faf61a8ef31d7 | [
"Apache-2.0"
] | null | null | null | setup.py | mtanghu/URF | 4c541e9b20b0e3faabdae84b956faf61a8ef31d7 | [
"Apache-2.0"
] | 2 | 2018-08-27T13:11:07.000Z | 2022-02-08T23:20:51.000Z | from setuptools import setup
from codecs import open
from os import path
import re
# Directory containing this setup.py; all packaged files are resolved from it.
here = path.abspath(path.dirname(__file__))

# The PyPI long description is taken verbatim from the shipped README.
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
    long_description = readme_file.read()
def read(*parts):
    """Return the text of the file at ``path.join(here, *parts)``.

    Decodes as UTF-8 explicitly, matching the README read above, so
    version extraction does not depend on the locale's default encoding.
    """
    with open(path.join(here, *parts), 'r', encoding='utf-8') as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given source file.

    Raises RuntimeError when no ``__version__`` assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Build/packaging configuration for the URF distribution.
setup(
    # Distribution identity
    name='URF',
    version=find_version("URF", "__init__.py"),
    description='Unsupervised Random Forest (Random Forest Clustering)',
    long_description=long_description,
    url='https://github.com/liyao001/URF',
    # Authorship and licensing
    author='Li Yao',
    author_email='yaol17@mails.tsinghua.edu.cn',
    license='Apache 2.0',
    # Contents and runtime dependencies
    packages=["URF", ],
    install_requires=['scikit-learn>=0.17', 'numpy>=1.9.0', 'scipy', 'pycluster'],
    # PyPI trove classifiers
    classifiers=[
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering'
    ],
)
| 28.75 | 82 | 0.640316 |
7b198b911f4442f8907c8728e9f23a5468a7fedf | 1,007 | py | Python | Chapter08/casestudy/casestudy2.py | MichaelRW/Python-for-Geeks | a111f61f1a0b077fc0524431e1ccefd9214d5c53 | [
"MIT"
] | 31 | 2020-08-10T22:37:41.000Z | 2022-03-09T21:35:56.000Z | Chapter08/casestudy/casestudy2.py | MichaelRW/Python-for-Geeks | a111f61f1a0b077fc0524431e1ccefd9214d5c53 | [
"MIT"
] | null | null | null | Chapter08/casestudy/casestudy2.py | MichaelRW/Python-for-Geeks | a111f61f1a0b077fc0524431e1ccefd9214d5c53 | [
"MIT"
] | 21 | 2020-08-10T22:37:44.000Z | 2022-03-07T07:26:28.000Z | #casestudy2.py: word count application
import matplotlib.pyplot as plt
from pyspark.sql import SparkSession
from wordcloud import WordCloud
# Word-cloud case study: count word frequencies in a text file with Spark,
# then render the frequencies as a word-cloud image via matplotlib.
spark = SparkSession.builder.master("local[*]") \
    .appName("word cloud app") \
    .getOrCreate()

min_occurrences = 1   # keep words seen at least this many times
min_word_length = 3   # keep words strictly longer than this many characters

# Tokenise the input into words and reduce to (word, count) pairs.
lines_rdd = spark.sparkContext.textFile('wordcloud.txt', 3)
words_rdd = lines_rdd.flatMap(lambda line: line.split(' '))
counts_rdd = words_rdd.map(lambda word: (word, 1)) \
    .reduceByKey(lambda left, right: left + right)

# Drop rare words first, then drop short words.
frequent_rdd = counts_rdd.filter(lambda pair: pair[1] >= min_occurrences)
long_words_rdd = frequent_rdd.filter(
    lambda pair: len(pair[0]) > min_word_length)
word_freq = dict(long_words_rdd.collect())

# Build the word-cloud image from the frequency table.
wordcloud = WordCloud(width=480, height=480, margin=0) \
    .generate_from_frequencies(word_freq)

# Display the generated cloud image without axes or margins.
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
| 30.515152 | 64 | 0.724926 |
f17e11c307eded55515afbd23062f63088f6d001 | 9,370 | py | Python | scripts/dvas-integration.py | actris-cloudnet/data-processing | 8ab6fccd5cf48e10e985addcf339b9698a9b09cd | [
"MIT"
] | null | null | null | scripts/dvas-integration.py | actris-cloudnet/data-processing | 8ab6fccd5cf48e10e985addcf339b9698a9b09cd | [
"MIT"
] | 5 | 2020-08-27T12:34:08.000Z | 2021-09-28T14:49:20.000Z | scripts/dvas-integration.py | actris-cloudnet/data-processing | 8ab6fccd5cf48e10e985addcf339b9698a9b09cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
from sys import argv
from urllib.error import HTTPError
from data_processing import utils
from datetime import datetime
import re
import requests
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def create_title(file):
    """Build the human-readable title for a data portal file record."""
    product_name = file['product']['humanReadableName']
    site_name = file['site']['humanReadableName']
    return f"{product_name} file from {site_name} on {file['measurementDate']}"
def parse_affiliation(site):
    """Map a site's type tags to its list of program affiliations.

    Every site is affiliated with CLOUDNET; 'arm' and 'cloudnet' type
    tags additionally contribute ARM and ACTRIS_NRT respectively.
    """
    tags = site['type']
    affiliations = ['CLOUDNET']
    if 'arm' in tags:
        affiliations.append('ARM')
    if 'cloudnet' in tags:
        affiliations.append('ACTRIS_NRT')
    return affiliations
def parse_instrument_type(product):
    """Derive the instrument type list for a product.

    Model products have no instrument; level-1b products are produced by
    a single instrument named after the product id; all other products
    combine the three core instruments.
    """
    if product['id'] == 'model':
        return ['UNKNOWN']
    if product['level'] == '1b':
        return [product['id']]
    return ['radar', 'lidar', 'mwr']
def main():
    """Sync Cloudnet file metadata to the DVAS data portal.

    Reads the timestamp of the last successful run from the file named by
    the first CLI argument, fetches all stable Cloudnet product files
    updated since then, posts one ACTRIS metadata record per file to the
    DVAS portal, and finally records the new timestamp. Exits with
    status 2 if any POST is rejected, leaving the timestamp untouched so
    the next run retries the same window.
    """
    lastsuccesspath = argv[1]
    # Upload window: from the last recorded success up to "now".
    with open(lastsuccesspath, 'r') as lastsuccessfile:
        lines = lastsuccessfile.readlines()
    startdate = lines[0].strip()
    enddate = datetime.now().isoformat()
    products = ['classification', 'lwc', 'iwc', 'drizzle']
    payload = dict(updatedAtFrom=startdate, updatedAtTo=enddate, volatile=False, product=products, showLegacy=True)
    logging.info(f'GET {payload}')
    variables = utils.get_from_data_portal_api('api/products/variables')
    if 'status' in variables and variables['status'] >= 300:
        raise requests.HTTPError(variables['errors'][0])
    files = utils.get_from_data_portal_api('api/files', payload)
    if 'status' in files and files['status'] >= 300:
        raise requests.HTTPError(files['errors'][0])
    logging.info(f'About to upload {len(files)} metadata entries.')
    s = requests.Session()
    s.headers.update({'X-Authorization': f"Bearer {os.environ['DVAS_PORTAL_TOKEN']}"})
    for file in files:
        site = file['site']
        site_dvas_id = site['dvasId']
        if not site_dvas_id:  # Skip sites that are not in dvas db
            continue
        product = file['product']
        # Variable ids for this product, with the "<product>-" prefix stripped.
        file_vars = list(filter(lambda var: var['id'] == product['id'], variables))
        var_ids = list(map(lambda var: re.sub(r'.*-', '', var['id']), file_vars[0]['variables']))
        pid = { 'id': file['pid'].replace('https://hdl.handle.net/', ''), 'type': 'Handle' } if file['pid'] else { 'id': None, 'type': 'N/A' }
        logging.info(f'POST {file["filename"]}')
        actris_json = {
            'md_metadata': { # mandatory
                'file_identifier': file['filename'],
                'language': 'en', # mandatory
                'hierarchy_level': 'dataset', # mandatory, fixed list ['attribute','attributeType','collectionHardware','collectionSession','dataset','series','nonGeographicDataset','dimensionGroup','feature','featureType','propertyType','fieldSession','software','service','model','tile']
                'datestamp': datetime.now().isoformat(), # mandatory
                'contact': [{ # mandatory
                    'first_name': 'Ewan', # mandatory
                    'last_name': 'O\'Connor', # mandatory
                    'organisation_name': 'Finnish Meteorological Institute (FMI)', # mandatory
                    'role_code': ['pointOfContact'], # mandatory, fixed list ['resourceProvider','custodian','owner,'user,'distributor,'originator,'pointOfContact,'principalInvestigator,'processor,'publisher,'author]
                    'country': 'Finland', # mandatory
                    'country_code': 'FI'
                }],
                'online_resource': { # mandatory
                    'linkage': 'https://cloudnet.fmi.fi/' # mandatory
                }
            },
            'md_identification': { # mandatory
                'abstract': create_title(file), # mandatory
                'title': create_title(file), # mandatory
                'identifier': pid, # optional
                'date': file['measurementDate'], # mandatory
                'date_type': 'creation', # mandatory, fixed list ['publication', 'revision', 'creation'
                'contact': [{ # mandatory
                    'first_name': 'Simo', # mandatory
                    'last_name': 'Tukiainen', # mandatory
                    'organisation_name': 'Finnish Meteorological Institute (FMI)', # mandatory
                    'role_code': ['processor'], # mandatory, see fixed list in example above
                    'country': 'Finland', # mandatory
                    'country_code': 'FI'
                }],
                'online_resource': { # mandatory
                    'linkage': f"https://cloudnet.fmi.fi/file/{file['uuid']}" # mandatory
                }
            },
            'md_constraints': { # mandatory
                'access_constraints': 'otherRestrictions', # mandatory
                'use_constraints': 'otherRestrictions', # mandatory
                'other_constraints': 'http://actris.nilu.no/Content/Documents/DataPolicy.pdf', # mandatory
            },
            'md_keywords': { # mandatory
                'keywords': ['FMI', 'ACTRIS', product['humanReadableName']] # mandatory, limit on 60 character keyword
            },
            'md_data_identification': { # mandatory
                'language': 'en', # mandatory
                'topic_category': 'climatologyMeteorologyAtmosphere', # mandatory
                'description': 'time series of point measurements', # mandatory
                'station_identifier': site_dvas_id # mandatory, fixed list will be provided
            },
            'ex_geographic_bounding_box': { # mandatory
                'west_bound_longitude': site['longitude'], # mandatory
                'east_bound_longitude': site['longitude'], # mandatory
                'south_bound_latitude': site['latitude'], # mandatory
                'north_bound_latitude': site['latitude'] # mandatory
            },
            'ex_temporal_extent': { # mandatory
                'time_period_begin': file['measurementDate'], # mandatory
                'time_period_end': file['measurementDate'] # mandatory
            },
            'ex_vertical_extent': { # optional, range of measurement, left out because we don't want to parse it
                'minimum_value': None, # optional
                'maximum_value': None, # optional
                'unit_of_measure': 'm above sea level' # optional
            },
            'md_content_information': { # mandatory
                'attribute_descriptions': list(map(lambda var: re.sub(r'[-_]', '.', var), var_ids)), # mandatory, list of parameters
                'content_type': 'physicalMeasurement' # mandatory, fixed list ['image','thematicClassification','physicalMeasurement']
            },
            'md_distribution_information': { # mandatory
                'data_format': file['format'], # mandatory
                'version_data_format': file['format'], # mandatory
                'transfersize': file['size'], # optional
                'dataset_url': file['downloadUrl'], # mandatory
                'protocol': 'http', # mandatory, fixed list ['http','opendap']
                'description': 'Direct download of data file', # optional
                'function': 'download', # mandatory
                'restriction': {
                    'set': False, # mandatory
                }
            },
            'md_actris_specific': { # mandatory
                'platform_type': 'surface_station', # mandatory ["surface_station", "simulation_chamber", "ballon"]
                'product_type': 'model' if product['id'] == 'model' else 'observation', # mandatory ["model", "observation", "fundamental_parameter"]
                'matrix': 'cloud', # mandatory ["cloud", "gas", "particle", "met"]
                'sub_matrix': 'Unknown', # mandatory
                'instrument_type': parse_instrument_type(product), # mandatory
                'program_affiliation': parse_affiliation(site), # mandatory, fixed list ['ACTRIS', 'AMAP', 'AMAP_public','EUSAAR','EMEP','ACTRIS_preliminary','GAW-WDCA','GAW-WDCRG','NOAA-ESRL']
                'legacy_data': file['legacy'], # mandatory
                'data_level': product['level'][0], # mandatory, fixed list [0, 1, 2, 3]
                'data_sublevel': product['level'][1] if 1 < len(product['level']) else None, # optional
                'data_product': 'near-realtime-data' if file['quality'] == 'nrt' else 'quality assured data' # mandatory, need fixed list e.g. ['higher level data','quality assured data', 'near-realtime-data']
            },
            'dq_data_quality_information': { # optional
                'level': 'dataset', # optional, fixed list ['attribute', 'attributeType', 'collectionHardware', 'collectionSession', 'dataset', 'series', 'nonGeographicDataset', 'dimensionGroup', 'feature', 'featureType', 'propertyType', 'fieldSession', 'software', 'service', 'model', 'tile']
            },
        }
        res = s.post(f"{os.environ['DVAS_PORTAL_URL']}/Metadata/add", json=actris_json)
        if not res.ok:
            logging.error(f'{res.status_code} {res.text}')
            exit(2)
    # All records uploaded: persist the new high-water mark for the next run.
    with open(lastsuccesspath, 'w') as filehandle:
        filehandle.write(enddate)
def add_arguments(subparser):
    """Register the 'dvas-upload' subcommand on an argparse subparsers object.

    Returns the same subparsers object so callers can chain further setup.
    """
    subparser.add_parser(
        'dvas-upload',
        help='Upload Cloudnet data to DVAS data portal',
    )
    return subparser
if __name__ == "__main__":
    # Fail fast when the required CLI argument or credentials are missing.
    if len(argv) <= 1:
        logging.error('Last success file not specified')
        exit(1)
    if any(key not in os.environ for key in ('DVAS_PORTAL_URL', 'DVAS_PORTAL_TOKEN')):
        logging.error('DVAS_PORTAL_URL and DVAS_PORTAL_TOKEN must be specified as environment variables.')
        exit(1)
    main()
| 48.298969 | 289 | 0.611846 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.