hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e03c5113f601a0e1e50ba9d228c167204cb5735 | 12,915 | py | Python | gridworld/envs/fourrooms_water.py | lzhyu/GridWorld | 43669fb1dd01df1a94bc8671d4ee6a466f6f49d0 | [
"MIT"
] | null | null | null | gridworld/envs/fourrooms_water.py | lzhyu/GridWorld | 43669fb1dd01df1a94bc8671d4ee6a466f6f49d0 | [
"MIT"
] | null | null | null | gridworld/envs/fourrooms_water.py | lzhyu/GridWorld | 43669fb1dd01df1a94bc8671d4ee6a466f6f49d0 | [
"MIT"
] | 2 | 2021-03-21T06:05:04.000Z | 2021-09-22T06:51:55.000Z | """
Fourrooms Game with water
class:
+ FourroomsWaterState
+ FourroomsWater
+ FourroomsWaterNorender
Properties:
Random water placement guarantees that the remaining space stays connected.
Resetting re-randomises the start position, goal position, coins and waters.
Each game has a fixed number of coins and waters.
Update:
Now the agent walks two steps within a time step and the model can be random.
A model is a dict containing:
'water': one of 'pass', 'block', 'left', 'right', 'forward'
'coin': one of 'pass', 'left', 'right', 'forward'
'action': one of 'normal', 'left', 'right', 'inverse'
'extra step': one of 'stay', 'left', 'right', 'forward'
Basic: {'water': 'block', 'coin': 'pass', 'action': normal', 'extra step': 'stay'}
One model itself is deterministic.
A description is seen to the agent about the model.
Method 'play' is to play the game by hand.
Update:
New settings:
mode: {'train', 'test'}, use train model or test model, models in file 'train_model' and 'test_model'
easy_env: whether to use easy env, 3-5 coins and waters in train mode, 6-8 coins and waters in test mode
fix_pos: whether to fix initial position and goal
Possible extensions:
Each game has random number of coins and waters.
Length-variable description
Variable action space
"""
from .fourrooms_coin import *
from ..utils.wrapper.wrappers import ImageInputWarpper
from copy import deepcopy
from ..utils.test_util import *
import os
import pickle
import bz2
dirpath = os.path.dirname(__file__)
train_file_path = os.path.join(dirpath, '../utils/env_utils/train_model')
test_file_path = os.path.join(dirpath, '../utils/env_utils/test_model')
train_file = bz2.BZ2File(train_file_path, 'r')
test_file = bz2.BZ2File(test_file_path, 'r')
train_list = pickle.load(train_file)
test_list = pickle.load(test_file)
fix_init = 11
fix_goal = 92
class FourroomsWaterState(FourroomsCoinState):
    """State of a FourroomsWater game: a FourroomsCoinState plus water layout.

    BUG FIX: the original class defined ``__init__`` twice; Python keeps only
    the last definition, so the first (field-by-field) constructor was dead
    code and has been removed.  The surviving constructor copies an existing
    FourroomsCoinState and attaches the water information.
    """

    def __init__(self, base: FourroomsCoinState, water_list, num_waters, description):
        """Wrap ``base`` and add water information.

        Args:
            base: fully-populated coin-game state whose fields are copied.
            water_list: state indices occupied by water cells.
            num_waters: number of water cells.
            description: description of the movement model shown to the agent
                (may be None; see FourroomsWater.reset).
        """
        self.position_n = base.position_n
        self.current_steps = base.current_steps
        self.goal_n = base.goal_n
        self.done = base.done
        self.num_pos = base.num_pos
        self.coin_dict = base.coin_dict
        self.num_coins = base.num_coins
        self.water_list = water_list
        self.num_waters = num_waters
        self.cum_reward = base.cum_reward
        self.description = description

    def watered_state(self):
        """Encode (remaining-coin bitmask, position) as a single integer.

        Each still-present coin contributes its value times a power of two;
        the resulting multiplier is combined with the agent position.
        """
        num_coins = len(self.coin_dict)
        value_list = [(v[0] if v[1] else 0) for v in self.coin_dict.values()]
        multiplier = np.dot(value_list, [2 ** i for i in range(num_coins)])
        return multiplier * self.num_pos + self.position_n

    def to_obs(self) -> np.ndarray:
        """Return the encoded state as a numpy scalar array."""
        return np.array(self.watered_state())
class FourroomsWater(FourroomsCoin):
    """Four-rooms game with coins plus randomly placed water cells.

    Water placement is re-sampled on every reset and constrained (via
    ``not_block``) so the remaining free space stays connected.  Movement is
    mediated by the ``Model`` dict (see the module docstring), which decides
    what happens at water/coin cells and whether an extra step is taken.
    """

    def __init__(self, Model=None, max_epilen=100, goal=None, num_coins=3, num_waters=3, seed=0, mode='train',
                 easy_env=True, fix_pos=True):
        # NOTE: super(FourroomsCoin, self) deliberately skips FourroomsCoin's
        # own __init__ and runs the grandparent initialiser; the coin and
        # water layout is (re)built below and in reset().
        super(FourroomsCoin, self).__init__(max_epilen, goal, seed)
        self.num_waters = num_waters
        assert self.num_pos > (self.num_waters + 10), "too many waters."
        self.num_coins = num_coins
        assert (self.num_pos - self.num_waters) > (self.num_coins + 5), "too many coins."
        self.init_states = list(range(self.observation_space.n))
        self.init_states_ori = deepcopy(self.init_states)  # pristine copy, restored on every reset()
        # One discrete observation per (position, coin-collected bitmask) pair.
        self.observation_space = spaces.Discrete((self.num_pos - self.num_waters) * (2 ** self.num_coins))
        self.occupancy_ori = deepcopy(self.occupancy)  # map without waters
        if Model is None:
            self.model_random = 1  # sample a fresh model from train/test lists on each reset
        else:
            self.model_random = 0
            self.Model = Model
        self.mode = mode  # train or test
        self.easy_env = easy_env  # if easy_env, #coins in train set is 3-5, in test set is 6-8
        self.fix_pos = fix_pos  # if fix_pos, the start position and the goal are fixed
        self.reset()

    def basic_step(self, cell, action):
        # One deterministic move; occupied target cells leave the agent in place.
        nextcell = tuple(cell + self.directions[action])
        if not self.occupancy[nextcell]:
            return nextcell
        return cell

    def not_block(self, water):
        # Check whether random water will block the rest space.
        # Tentatively marks the water cell as occupied, then flood-fills from
        # one empty neighbour; returns True (leaving the cell occupied) iff
        # every empty neighbour of the water cell remains reachable.
        water_cell = self.tocell[water]
        self.occupancy[self.tocell[water]] = 1
        around = self.empty_around(water_cell)
        if len(around) == 0:
            return True
        spread = []            # cells already expanded
        remain = [around[0]]   # search frontier
        while len(around) != 0 and len(remain) != 0:
            currentcell = remain[0]
            remain.remove(currentcell)
            spread.append(currentcell)
            if currentcell in around:
                around.remove(currentcell)
            for action in range(self.action_space.n):
                nextcell = self.basic_step(currentcell, action)
                if nextcell not in spread and nextcell not in remain:
                    remain.append(nextcell)
        if len(around) == 0:
            return True
        # Placement would disconnect the map: undo the tentative occupancy.
        self.occupancy[self.tocell[water]] = 0
        return False

    def reset(self):
        # reset water_list, init_states, occupancy
        if self.easy_env:
            if self.mode == 'train':
                self.num_coins = np.random.choice([3, 4, 5])
                self.num_waters = np.random.choice([3, 4, 5])
            else:
                self.num_coins = np.random.choice([6, 7, 8])
                self.num_waters = np.random.choice([6, 7, 8])
            self.observation_space = spaces.Discrete((self.num_pos - self.num_waters) * (2 ** self.num_coins))
        self.init_states = deepcopy(self.init_states_ori)
        self.occupancy = deepcopy(self.occupancy_ori)
        water_list = np.array([], dtype=int)
        state_list = deepcopy(self.init_states)
        for _ in range(self.num_waters):
            # Rejection-sample a water cell that keeps the map connected; a
            # successful not_block() leaves the cell marked as occupied.
            while True:
                if len(state_list) == 0:
                    water = None
                    break
                water = np.random.choice(state_list)
                state_list.remove(water)
                if self.not_block(water):
                    break
            if water is not None:
                water_list = np.append(water_list, water)
                self.init_states.remove(water)
                state_list = deepcopy(self.init_states)
            else:
                raise NotImplementedError("Building waters error.")
        # reset goal, position_n, coin_dict, state
        super(FourroomsCoin, self).reset()
        if self.fix_pos:
            self.state.position_n = fix_init
            self.state.goal_n = fix_goal
        self.state = FourroomsCoinState(self.state, {}, self.num_coins, [])
        init_states = deepcopy(self.init_states)
        if self.state.goal_n in init_states:
            init_states.remove(self.state.goal_n)
        if self.state.position_n in init_states:
            init_states.remove(self.state.position_n)
        # Coins are placed on free cells, all with value 1 and present=True.
        coin_list = np.random.choice(init_states, self.num_coins, replace=False)
        coin_dict = {coin: (1, True) for coin in coin_list}
        self.state.coin_dict = coin_dict
        # reset Model, description
        if self.model_random:
            if self.mode == 'train':
                self.Model = np.random.choice(train_list)
            else:
                self.Model = np.random.choice(test_list)
        self.state = FourroomsWaterState(self.state, water_list, self.num_waters, None)
        descr = self.todescr(self.Model)
        # NOTE(review): this sets a new attribute 'descr', while the state
        # class declares 'description' (left as None just above) -- confirm
        # which attribute name downstream consumers actually read.
        self.state.descr = descr
        return self.state.to_obs()

    @staticmethod
    def todescr(Model: dict):
        # Turn the Model dict into a human-readable description tuple.
        descr = []
        for s, t in Model.items():
            descr.append(s + ': ' + t)
        return tuple(descr)

    @staticmethod
    def turn(transfer, direction):
        # Help the agent to turn direction: rotates/inverts/zeroes the 2-D
        # step vector 'transfer'.  Rotations mutate 'transfer' in place;
        # 'stay' rebinds it to a fresh zero vector.
        if direction == 'right':
            if transfer[0]:
                transfer *= -1
            transfer[[0, 1]] = transfer[[1, 0]]
        elif direction == 'left':
            if transfer[1]:
                transfer *= -1
            transfer[[0, 1]] = transfer[[1, 0]]
        elif direction == 'inverse':
            transfer *= -1
        elif direction == 'stay':
            transfer = np.array((0, 0))
        elif direction == 'normal' or direction == 'forward':
            pass
        return transfer

    def model_step(self, cell, transfer, extra=1, push=0, coin_get=None, push_num=0):
        # Help for self.step and debugging; returns (next_cell, coin_get).
        # The recursion has two modes:
        #   push=1: the agent was redirected off a water/coin cell (bounded by
        #           push_num < 4 to avoid infinite bouncing);
        #   extra=1: the model's extra step has not been taken yet (each
        #            action results in up to two moves).
        if coin_get is None:
            coin_get = []
        if not push:
            if extra:
                # First move of the action: apply the model's action mapping.
                transfer = self.turn(transfer, self.Model['action'])
            else:
                # Second (extra) move: direction given by the model.
                if self.Model['extra step'] == 'stay':
                    return cell, coin_get
                transfer = self.turn(transfer, self.Model['extra step'])
        next_cell = tuple(cell + transfer)
        if self.occupancy[next_cell]:
            if self.tostate.get(next_cell, -1) in self.state.water_list:
                if self.Model['water'] == 'pass':
                    pass
                elif self.Model['water'] == 'block':
                    next_cell = cell
                    return next_cell, coin_get
                else:
                    # Water deflects the agent; recurse as a 'push' move.
                    if push_num < 4:
                        transfer = self.turn(transfer, self.Model['water'])
                        return self.model_step(next_cell, transfer, extra, 1, coin_get, push_num+1)
                    else:
                        pass
            else:
                # Plain wall: stop here.
                next_cell = cell
                return next_cell, coin_get
        elif self.state.coin_dict.get(self.tostate[next_cell], (0, False))[1] and\
                self.tostate[next_cell] not in coin_get:
            # Collect the coin once; the model may deflect the agent off it.
            coin_get.append(self.tostate[next_cell])
            if self.Model['coin'] == 'pass':
                pass
            else:
                if push_num < 4:
                    transfer = self.turn(transfer, self.Model['coin'])
                    return self.model_step(next_cell, transfer, extra, 1, coin_get, push_num+1)
        if extra:
            # Take the model's extra step from the new cell.
            return self.model_step(next_cell, transfer, 0, 0, coin_get)
        return next_cell, coin_get

    def step(self, action):
        transfer = deepcopy(self.directions[action])
        position_cell = self.tocell[self.state.position_n]
        next_cell, coin_get = self.model_step(position_cell, transfer, coin_get=[])
        reward = 0
        done = 0
        for coin in coin_get:
            # Coins passed through en route: mark collected and score them.
            self.state.coin_dict[coin] = (self.state.coin_dict[coin][0], False)
            reward += self.state.coin_dict[coin][0] * 10
        next_state = self.tostate[next_cell]
        if self.state.coin_dict.get(next_state, (0, False))[1]:
            # Landing cell holds an uncollected coin.
            reward += self.state.coin_dict.get(next_state, 0)[0] * 10
            self.state.coin_dict[next_state] = (self.state.coin_dict[next_state][0], False)
        elif next_state == self.state.goal_n:
            reward += 10
            done = 1
        if reward == 0:
            reward = -0.1  # small step penalty when nothing was gained
        self.state.cum_reward.append(reward)
        self.state.position_n = next_state
        self.state.current_steps += 1
        self.state.done = done or (self.state.current_steps >= self.max_epilen)
        info = {}
        if self.state.done:
            info = {'episode': {'r': np.sum(self.state.cum_reward), 'l': self.state.current_steps}}
        return self.state.to_obs(), reward, self.state.done, info

    def render(self, mode=0):
        blocks = []
        return self.render_water_blocks(blocks)

    def render_water_blocks(self, blocks=None):
        # Draw waters (green) and remaining coins (yellow) on top of the map.
        if blocks is None:
            blocks = []
        for water in self.state.water_list:
            x, y = self.tocell[water]
            blocks.append(self.make_block(x, y, (0, 1, 0)))
        for coin, count in self.state.coin_dict.items():
            x, y = self.tocell[coin]
            if count[1]:  # exist
                blocks.append(self.make_block(x, y, (1, 1, 0)))
        blocks.extend(self.make_basic_blocks())
        arr = self.render_with_blocks(self.origin_background, blocks)
        return arr
if __name__ == '__main__':
    # Smoke test: wrap the env in image observations and run the basic
    # render/run checks from test_util.
    env = ImageInputWarpper(FourroomsWater())
    check_render(env)
    check_run(env)
    print("Basic check finished.")
| 39.616564 | 115 | 0.600077 |
2290f37c2335ca265c5b926676d8be7c72deda1b | 5,386 | py | Python | py_utils/app.py | mineshpatel1/cerebral-cereal | f0e76bb912af7b63cc523e230ade78b18bf1e1fa | [
"MIT"
] | null | null | null | py_utils/app.py | mineshpatel1/cerebral-cereal | f0e76bb912af7b63cc523e230ade78b18bf1e1fa | [
"MIT"
] | null | null | null | py_utils/app.py | mineshpatel1/cerebral-cereal | f0e76bb912af7b63cc523e230ade78b18bf1e1fa | [
"MIT"
] | null | null | null | import re
import xml.etree.ElementTree as ET
from os import path
from .utils import log, load_json, save_json, prepend, ROOT_DIR
from typing import Any, Dict, Optional
INFO_PLIST_HEADER = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
""".strip()
class App:
    """Helper around a mobile-app project directory that reads and rewrites
    version metadata shared between package.json, the Android Gradle config
    and the iOS Xcode project / Info.plist.
    """

    def __init__(self, app_name: str):
        self.app_name = app_name

    @property
    def app_dir(self) -> str:
        """Absolute path of the app directory under the repository root."""
        return path.abspath(path.join(ROOT_DIR, self.app_name))

    @property
    def package_json_path(self) -> str:
        return path.join(self.app_dir, 'package.json')

    @property
    def android_path(self) -> str:
        return path.join(self.app_dir, 'android')

    @property
    def ios_path(self) -> str:
        return path.join(self.app_dir, 'ios')

    @property
    def app_build_config_path(self) -> str:
        return path.join(self.android_path, 'app', 'build.gradle')

    @property
    def info_plist_path(self) -> str:
        return path.join(self.ios_path, self.app_name, 'Info.plist')

    @property
    def ios_project_path(self) -> str:
        return path.join(self.ios_path, f'{self.app_name}.xcodeproj', 'project.pbxproj')

    @property
    def version(self) -> str:
        """Marketing version string taken from package.json."""
        return self.get_package_json()['version']

    @property
    def version_code(self) -> int:
        """Android versionCode (integer build number) from build.gradle."""
        return int(self.android_default_param('versionCode'))

    def get_package_json(self) -> Dict[Any, Any]:
        """Load and return the parsed package.json."""
        return load_json(self.package_json_path)

    def update_package_json(self, update_payload: Dict[Any, Any]):
        """Merge ``update_payload`` into package.json and write it back."""
        package_json = self.get_package_json()
        package_json.update(update_payload)
        save_json(package_json, self.package_json_path, indent=2)

    def update_version(
        self,
        version: str,
        increment_code: bool = False,
    ):
        """Set the marketing version in all three places; optionally bump the
        build number as well.

        Args:
            version: new semantic version string, e.g. "1.2.3".
            increment_code: when True, also point CFBundleVersion at the Xcode
                build setting and increment the shared build number by one.
        """
        self.update_package_json({"version": version})
        # Gradle expects the value quoted in the file, hence the embedded "".
        self.android_default_param("versionName", f'"{version}"')
        self.ios_project_param("MARKETING_VERSION", version)
        if increment_code:
            self.info_plist_param("CFBundleVersion", "$(CURRENT_PROJECT_VERSION)")
            self.set_version_code(self.version_code + 1)

    def set_version_code(self, version_code: int):
        """Write the numeric build number to both platforms."""
        self.android_default_param("versionCode", str(version_code))
        self.ios_project_param("CURRENT_PROJECT_VERSION", str(version_code))

    def android_default_param(
        self,
        param: str,
        set_value: Optional[str] = None,
    ) -> str:
        """Read (and optionally rewrite) a key inside android { defaultConfig { ... } }.

        Returns the value as found in the file; when ``set_value`` is given the
        file is rewritten but the *previous* value is still returned.

        Raises:
            ValueError: if ``param`` is not present in the defaultConfig block.
        """
        regex = r'android {.*defaultConfig {(.*?)}'
        out_value = None
        with open(self.app_build_config_path, 'r') as f:
            content = f.read()
        match = re.search(regex, content, re.DOTALL | re.MULTILINE)
        if match:
            # Scan "key value" lines inside the defaultConfig block.
            for line in match.group(1).split('\n'):
                config = line.strip()
                if config:
                    config = config.split(' ')
                    if param == config[0]:
                        out_value = config[1]
            if set_value:
                raw = match.group(0)
                raw_config = match.group(1)
                param_match = re.search(f"{param} (.*)", raw_config)
                current_value = param_match.group(1)
                new_config = raw_config.replace(f"{param} {current_value}", f"{param} {set_value}")
                new_full = raw.replace(raw_config, new_config)
                content = content.replace(raw, new_full)
                with open(self.app_build_config_path, 'w') as f:
                    f.write(content)
        if not out_value:
            raise ValueError(f"Parameter {param} not found in android.defaultConfig.")
        return out_value

    def info_plist_param(
        self,
        param: str,
        set_value: Optional[str] = None,
    ) -> str:
        """Read (and optionally rewrite) a <key>/<string> pair in Info.plist.

        Returns the string value; when ``set_value`` is given the plist is
        rewritten and the new value returned.  (Return annotation corrected
        from ``ET`` — the method returns strings, not trees.)

        Raises:
            NotImplementedError: if the value following the key is not <string>.
            ValueError: if ``param`` is not found.
        """
        tree = ET.parse(self.info_plist_path)
        root = tree.getroot()
        match = False
        for child in root[0]:
            if match:
                if child.tag == 'string':
                    if set_value:
                        child.text = set_value
                        with open(self.info_plist_path, 'wb') as f:
                            tree.write(f)
                        # ElementTree drops the plist doctype; re-add the header.
                        prepend(self.info_plist_path, INFO_PLIST_HEADER)
                        return set_value
                    else:
                        return child.text
                else:
                    # BUG FIX: the original constructed NotImplementedError
                    # without raising it, silently falling through to the
                    # "not found" ValueError below.
                    raise NotImplementedError(f"Info plist tag {child.tag} unsupported.")
            if child.tag == 'key':
                if child.text == param:
                    match = True
        raise ValueError(f"Cannot find {param} in Info.plist.")

    def ios_project_param(
        self,
        param: str,
        set_value: Optional[str] = None,
    ) -> str:
        """Read (and optionally rewrite) a "PARAM = value;" build setting in
        the Xcode project file.  Returns the (possibly updated) value.

        Raises:
            ValueError: if ``param`` is not found in the project file.
        """
        param = param.upper()
        with open(self.ios_project_path, 'r') as f:
            content = f.read()
        match = re.search(f'{param} = (.*?);', content)
        if not match:
            raise ValueError(f"Could not find {param} in {self.ios_project_path}.")
        current_value = match.group(1)
        if set_value:
            content = content.replace(match.group(0), f"{param} = {set_value};")
            with open(self.ios_project_path, 'w') as f:
                f.write(content)
            current_value = set_value
        return current_value
| 32.642424 | 102 | 0.578351 |
3fdb57674491c22587a106c04b4f9fa42acb59aa | 3,457 | py | Python | plugins_examples/ctf_plugin.py | cybergruppe/cyberbot | 59059b272dd8b5ffc585863a0fa92440159b1e06 | [
"MIT"
] | 10 | 2020-09-23T13:38:35.000Z | 2021-09-15T18:53:21.000Z | plugins_examples/ctf_plugin.py | cybergruppe/cyberbot | 59059b272dd8b5ffc585863a0fa92440159b1e06 | [
"MIT"
] | 17 | 2020-10-25T14:14:11.000Z | 2021-04-04T02:01:24.000Z | plugins_examples/ctf_plugin.py | cybergruppe/cyberbot | 59059b272dd8b5ffc585863a0fa92440159b1e06 | [
"MIT"
] | null | null | null | import json
HELP_DESC = ("""!doing <TASKNAME>\t\t\t-\tTell people that you are doing a specific task
!done <TASKNAME>\t\t\t-\tRemove yourself from task
!finished <TASKNAME>\t\t-\tMark task as solved/finished and remove from task list
!cleardoing\t\t-\tClear current task mapping
""")
async def register_to(plugin):
    """Register the CTF task-tracking commands on ``plugin``.

    Commands: !doing, !done, !finished, !cleardoing (see HELP_DESC).
    The task -> workers mapping is persisted in the plugin's local kv-store
    and reloaded here so state survives restarts.
    """
    # task name -> list of matrix user ids currently working on that task
    mapping = {}

    async def save_mapping():
        nonlocal mapping
        await plugin.kvstore_set_local_value("mapping", json.dumps(mapping))

    async def load_mapping():
        nonlocal mapping
        if "mapping" in (await plugin.kvstore_get_local_keys()):
            mapping = json.loads(await plugin.kvstore_get_local_value("mapping"))

    def format_block(text):
        # Render as preformatted HTML so column alignment survives.
        return f"<pre><code>{text}</pre></code>"

    def get_arg_as_single(event):
        # Drop the command word and re-join the rest as a single task name.
        args = plugin.extract_args(event)
        args.pop(0)
        return " ".join(args)

    async def get_displayname(user_id):
        try:
            # The server may answer with an error object that has no
            # displayname attribute, hence the try/except.
            response = await plugin.client.get_displayname(user_id)
            return response.displayname
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt and task cancellation.
            return None

    async def get_name_link(user_id):
        dn = await get_displayname(user_id)
        if dn is None:
            return "Name error"
        # link does not show up anymore
        return dn
        # return f"<a href='https://matrix.to/#/{user_id}'>{dn}</a>"

    async def print_mapping():
        nonlocal mapping
        s = "CURRENT TASKS\n===========================\n"
        for (task, idlist) in mapping.items():
            s += f"{task:30}:\t["
            s += ", ".join([await get_name_link(user_id) for user_id in idlist])
            s += "]\n"
        await plugin.send_html(format_block(s))

    async def doing_callback(room, event):
        nonlocal mapping
        arg = get_arg_as_single(event)
        if arg not in mapping:
            mapping[arg] = [event.source['sender']]
        elif event.source['sender'] not in mapping[arg]:
            mapping[arg].append(event.source['sender'])
        await save_mapping()
        await print_mapping()

    async def cleardoing_callback(room, event):
        nonlocal mapping
        mapping = {}
        await save_mapping()
        await print_mapping()

    async def done_callback(room, event):
        nonlocal mapping
        arg = get_arg_as_single(event)
        if arg in mapping:
            sender_id = event.source['sender']
            if sender_id in mapping[arg]:
                mapping[arg].remove(sender_id)
                if len(mapping[arg]) == 0:
                    # Nobody left on the task: drop it from the board.
                    mapping.pop(arg)
        await save_mapping()
        await print_mapping()

    async def finished_callback(room, event):
        nonlocal mapping
        arg = get_arg_as_single(event)
        if arg in mapping:
            mapping.pop(arg)
        await save_mapping()
        await print_mapping()

    await load_mapping()

    doing_handler = plugin.CommandHandler("doing", doing_callback)
    plugin.add_handler(doing_handler)
    cleardoing_handler = plugin.CommandHandler("cleardoing", cleardoing_callback)
    plugin.add_handler(cleardoing_handler)
    done_handler = plugin.CommandHandler("done", done_callback)
    plugin.add_handler(done_handler)
    finished_handler = plugin.CommandHandler("finished", finished_callback)
    plugin.add_handler(finished_handler)
f8a72413797b83df779f5c4978156c3880bbd294 | 1,404 | py | Python | rx/operators/observable/toasync.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | rx/operators/observable/toasync.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | rx/operators/observable/toasync.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-08T08:23:08.000Z | 2020-05-08T08:23:08.000Z | from typing import Callable
from rx.core import ObservableBase
from rx.concurrency import timeout_scheduler
from rx.subjects import AsyncSubject
def to_async(func: Callable, scheduler=None) -> Callable:
    """Convert ``func`` into an asynchronous function.

    Each call of the returned function schedules one invocation of the
    original synchronous ``func`` on the given scheduler and immediately
    hands back an observable that will emit the single result (or the
    raised exception) once the call has run.

    Example:
        res = Observable.to_async(lambda x, y: x + y)(4, 3)
        res = Observable.to_async(lambda x, y: x + y, Scheduler.timeout)(4, 3)
        res = Observable.to_async(lambda x: log.debug(x),
                                  Scheduler.timeout)('hello')

    Keyword arguments:
    func -- Function to convert to an asynchronous function.
    scheduler -- [Optional] Scheduler to run the function on. If not
        specified, defaults to Scheduler.timeout.

    Returns asynchronous function.
    """
    scheduler = scheduler or timeout_scheduler

    def wrapper(*args) -> ObservableBase:
        result_subject = AsyncSubject()

        def invoke(_scheduler, _state):
            # Run the synchronous function and route its outcome
            # (value or exception) into the subject.
            try:
                outcome = func(*args)
            except Exception as error:
                result_subject.on_error(error)
                return
            result_subject.on_next(outcome)
            result_subject.on_completed()

        scheduler.schedule(invoke)
        return result_subject.as_observable()

    return wrapper
| 30.521739 | 74 | 0.656695 |
982deb3e1f3b4f34d6438e88c30ee0b96f637c0f | 226 | py | Python | betfund_event_broker/flows/__init__.py | betfund/betfund-event-broker | 524aec73d9cf66cbeeb0fab67e6816b836c1d98e | [
"MIT"
] | 1 | 2020-09-23T02:36:35.000Z | 2020-09-23T02:36:35.000Z | betfund_event_broker/flows/__init__.py | betfund/betfund-event-broker | 524aec73d9cf66cbeeb0fab67e6816b836c1d98e | [
"MIT"
] | 5 | 2020-04-13T23:55:07.000Z | 2020-06-04T15:09:12.000Z | betfund_event_broker/flows/__init__.py | betfund/betfund-event-broker | 524aec73d9cf66cbeeb0fab67e6816b836c1d98e | [
"MIT"
] | null | null | null | """Flow namespace."""
from .base_flow import EventBrokerFlow
from .prematch_odds import PreMatchOddsFlow
from .upcoming_events import UpcomingEventsFlow
__all__ = ["EventBrokerFlow", "PreMatchOddsFlow", "UpcomingEventsFlow"]
| 32.285714 | 71 | 0.818584 |
c848858a357afa6b872a3b2d4eab2696b3dda851 | 6,885 | py | Python | cairis/test/test_LocationsAPI.py | anonymous-author21/cairis | feccb3ecc94ec864dbc87393e21de22bea704e19 | [
"Apache-2.0"
] | 62 | 2019-08-23T02:42:29.000Z | 2022-03-29T10:52:19.000Z | cairis/test/test_LocationsAPI.py | anonymous-author21/cairis | feccb3ecc94ec864dbc87393e21de22bea704e19 | [
"Apache-2.0"
] | 223 | 2019-07-29T09:49:54.000Z | 2022-03-29T09:48:21.000Z | cairis/test/test_LocationsAPI.py | anonymous-author21/cairis | feccb3ecc94ec864dbc87393e21de22bea704e19 | [
"Apache-2.0"
] | 32 | 2019-10-14T12:27:42.000Z | 2022-03-19T08:08:23.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
from io import StringIO
import os
import json
import jsonpickle
from cairis.core.Location import Location
from cairis.core.Locations import Locations
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.mio.ModelImport import importModelFile,importLocationsFile
from cairis.tools.JsonConverter import json_deserialize
import os
__author__ = 'Shamal Faily'
class LocationsAPITests(CairisDaemonTestCase):
    """Integration tests for the CAIRIS /api/locations endpoints.

    Uses the test client supplied by the base class (``self.app``).
    IMPROVEMENT: the Python 2/3 response-decoding boilerplate that was
    duplicated in every test is now factored into ``_response_text``.
    """

    @classmethod
    def setUpClass(cls):
        # Load the ACME Water exemplar and its Poole WWTW locations model once.
        importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/ACME_Water/ACME_Water.xml', 1, 'test')
        importLocationsFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/ACME_Water/PooleWWTW.xml', 'test')

    @staticmethod
    def _response_text(rv):
        """Return the response payload as text on both Python 2 and 3."""
        if sys.version_info > (3,):
            return rv.data.decode('utf-8')
        return rv.data

    def setUp(self):
        self.logger = logging.getLogger(__name__)
        # Build a two-location Locations object from the JSON fixture.
        with open(os.environ['CAIRIS_SRC'] + '/test/locations.json') as f:
            d = json.load(f)
        iLocations = d['locations']
        newLocsName = iLocations[1]['theName']
        newLocsDia = iLocations[1]['theDiagram']
        newLocations = []
        iLoc1 = iLocations[1]['theLocations'][0]
        iLoc1Name = iLoc1['theName']
        iLoc1AssetInstances = []
        iLoc1PersonaInstances = []
        iLoc1Links = iLoc1['theLinks']
        newLocations.append(Location(-1, iLoc1Name, iLoc1AssetInstances, iLoc1PersonaInstances, iLoc1Links))
        iLoc2 = iLocations[1]['theLocations'][1]
        iLoc2Name = iLoc2['theName']
        iLoc2AssetInstances = [{'theName': iLoc2['theAssetInstances'][0]['theName'], 'theAsset': iLoc2['theAssetInstances'][0]['theAsset']}]
        iLoc2PersonaInstances = [{'theName': iLoc2['thePersonaInstances'][0]['theName'], 'thePersona': iLoc2['thePersonaInstances'][0]['thePersona']}]
        iLoc2Links = iLoc2['theLinks']
        newLocations.append(Location(-1, iLoc2Name, iLoc2AssetInstances, iLoc2PersonaInstances, iLoc2Links))
        self.new_locs = Locations(
            locsId='-1',
            locsName=newLocsName,
            locsDiagram=newLocsDia,
            locs=newLocations)
        self.new_locs_dict = {
            'session_id': 'test',
            'object': self.new_locs
        }
        self.existing_locs_name = 'PooleWWTW'

    def test_get_all(self):
        method = 'test_get_locations'
        url = '/api/locations?session_id=test'
        self.logger.info('[%s] URL: %s', method, url)
        rv = self.app.get(url)
        locs = jsonpickle.decode(self._response_text(rv))
        self.assertIsNotNone(locs, 'No results after deserialization')
        self.assertIsInstance(locs, list, 'The result is not a dictionary as expected')
        self.assertGreater(len(locs), 0, 'No Locations in the dictionary')
        self.logger.info('[%s] Locations found: %d', method, len(locs))
        locs = locs[0]
        self.logger.info('[%s] First locations: %s\n', method, locs['theName'])

    def test_get_by_name(self):
        method = 'test_get_by_name'
        url = '/api/locations/name/%s?session_id=test' % quote(self.existing_locs_name)
        rv = self.app.get(url)
        self.assertIsNotNone(rv.data, 'No response')
        responseData = self._response_text(rv)
        self.logger.debug('[%s] Response data: %s', method, responseData)
        locs = jsonpickle.decode(responseData)
        self.assertIsNotNone(locs, 'No results after deserialization')
        self.logger.info('[%s] Locations: %s\n', method, locs['theName'])

    def test_post(self):
        method = 'test_post_new'
        rv = self.app.post('/api/locations', content_type='application/json', data=jsonpickle.encode(self.new_locs_dict))
        responseData = self._response_text(rv)
        self.logger.debug('[%s] Response data: %s', method, responseData)
        json_resp = json_deserialize(responseData)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        ackMsg = json_resp.get('message', None)
        self.assertEqual(ackMsg, 'SturminsterWTW created')

    def test_put(self):
        method = 'test_put'
        url = '/api/locations/name/%s?session_id=test' % quote(self.new_locs_dict['object'].theName)
        rv = self.app.put(url, content_type='application/json', data=jsonpickle.encode(self.new_locs_dict))
        responseData = self._response_text(rv)
        self.logger.debug('[%s] Response data: %s', method, responseData)
        json_resp = json_deserialize(responseData)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        ackMsg = json_resp.get('message', None)
        self.assertEqual(ackMsg, 'SturminsterWTW updated')

    def test_delete(self):
        method = 'test_delete'
        # Create the record first so the delete has something to remove.
        rv = self.app.post('/api/locations', content_type='application/json', data=jsonpickle.encode(self.new_locs_dict))
        responseData = self._response_text(rv)
        self.logger.debug('[%s] Response data: %s', method, responseData)
        json_resp = json_deserialize(responseData)
        url = '/api/locations/name/%s?session_id=test' % quote(self.new_locs.theName)
        rv = self.app.delete(url)
        responseData = self._response_text(rv)
        self.logger.debug('[%s] Response data: %s', method, responseData)
        json_resp = json_deserialize(responseData)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        ackMsg = json_resp.get('message', None)
        self.assertEqual(ackMsg, 'SturminsterWTW deleted')

    def test_locations_model(self):
        method = 'test_locations_model'
        url = '/api/locations/model/locations/' + quote(self.existing_locs_name) + '/environment/Day?session_id=test'
        rv = self.app.get(url)
        self.assertIsNotNone(rv.data, 'No response')
        responseData = self._response_text(rv)
        self.logger.debug('[%s] Response data: %s', method, responseData)
| 40.5 | 143 | 0.704285 |
02c91127c883d940328d7be05ab5db0a480df0e2 | 758 | py | Python | snakeai/agent/__init__.py | mxpoliakov/snake-ai-reinforcement | 770fc62eb5704a158f2bcb512b801b5c5db69075 | [
"MIT"
] | null | null | null | snakeai/agent/__init__.py | mxpoliakov/snake-ai-reinforcement | 770fc62eb5704a158f2bcb512b801b5c5db69075 | [
"MIT"
] | null | null | null | snakeai/agent/__init__.py | mxpoliakov/snake-ai-reinforcement | 770fc62eb5704a158f2bcb512b801b5c5db69075 | [
"MIT"
] | null | null | null | class AgentBase(object):
""" Represents an intelligent agent for the Snake environment. """
def begin_episode(self):
""" Reset the agent for a new episode. """
pass
def act(self, observation, reward):
"""
Choose the next action to take.
Args:
observation: observable state for the current timestep.
reward: reward received at the beginning of the current timestep.
Returns:
The index of the action to take next.
"""
return None
def end_episode(self):
""" Notify the agent that the episode has ended. """
pass
from .dqn import DeepQNetworkAgent
from .human import HumanAgent
from .random_action import RandomActionAgent
| 26.137931 | 77 | 0.629288 |
9a961acd52195abe8f398874e94b44097b1040eb | 344 | py | Python | tests/__init__.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | [
"MIT"
] | 3 | 2016-08-31T23:09:58.000Z | 2016-08-31T23:10:00.000Z | tests/__init__.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | [
"MIT"
] | null | null | null | tests/__init__.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | [
"MIT"
] | 1 | 2015-01-09T21:02:50.000Z | 2015-01-09T21:02:50.000Z | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
if sys.version_info < (2, 7):
from unittest2 import TestCase, main # NOQA
from unittest2.case import SkipTest # NOQA
else:
from unittest import TestCase, main # NOQA
from unittest.case import SkipTest # NOQA
| 28.666667 | 84 | 0.773256 |
57356e4bdae07c1718e0096b3fcc7fefcbd9a184 | 6,916 | py | Python | copysig.py | rec/copysig | 31ee6aeb954cc4f1ad0db5518bfd6be0ebc9de82 | [
"MIT"
] | 1 | 2019-05-26T15:10:42.000Z | 2019-05-26T15:10:42.000Z | copysig.py | rec/copysig | 31ee6aeb954cc4f1ad0db5518bfd6be0ebc9de82 | [
"MIT"
] | null | null | null | copysig.py | rec/copysig | 31ee6aeb954cc4f1ad0db5518bfd6be0ebc9de82 | [
"MIT"
] | null | null | null | import inspect
def copysig(from_func, *args_to_remove):
    """Decorator factory: give the decorated function the signature of
    *from_func* minus *args_to_remove*, merged with the decorated
    function's own extra parameters.

    The decorated function must accept (after an optional ``self``) two
    leading parameters that receive the collected positional and keyword
    arguments of the copied signature.
    """
    def wrap(func):
        # Build the public signature: drop the requested parameters from
        # the template, then merge in func's own extra parameters.
        oldsig= inspect.signature(from_func)
        oldsig= _remove_args(oldsig, args_to_remove)
        newsig= _add_args(oldsig, func)
        # Generate a stub with the desired signature that forwards all of
        # its arguments to the real implementation.  NOTE: the call result
        # must be returned -- the previous version dropped it, so every
        # wrapped function silently returned None.
        code= '''
def {name}{signature}:
    return {func}({args})
'''.format(name=func.__name__,
           signature=newsig,
           func='_'+func.__name__,
           args=_forward_args(oldsig, newsig))
        globs= {'_'+func.__name__: func}
        exec(code, globs)
        newfunc= globs[func.__name__]
        # Copy as many attributes as possible so the wrapper is transparent.
        newfunc.__doc__= func.__doc__
        newfunc.__module__= func.__module__
        return newfunc
    return wrap
def _collectargs(sig):
"""
Writes code that gathers all parameters into "self" (if present), "args" and "kwargs"
"""
arglist= list(sig.parameters.values())
#check if the first parameter is "self"
selfarg= ''
if arglist:
arg= arglist[0]
if arg.name=='self':
selfarg= 'self, '
del arglist[0]
#all named parameters will be passed as kwargs. args is only used for varargs.
args= 'tuple(), '
kwargs= ''
kwarg= ''
for arg in arglist:
if arg.kind in (arg.POSITIONAL_ONLY,arg.POSITIONAL_OR_KEYWORD,arg.KEYWORD_ONLY):
kwargs+= '("{0}",{0}), '.format(arg.name)
elif arg.kind==arg.VAR_POSITIONAL:
#~ assert not args
args= arg.name+', '
elif arg.kind==arg.VAR_KEYWORD:
assert not kwarg
kwarg= 'list({}.items())+'.format(arg.name)
else:
assert False, arg.kind
kwargs= 'dict({}[{}])'.format(kwarg, kwargs[:-2])
return '{}{}{}'.format(selfarg, args, kwargs)
def _forward_args(args_to_collect, sig):
    """Build the argument list used by the generated stub to call the real
    implementation: the collected triple first, then every parameter of
    *sig* that is not part of *args_to_collect*, forwarded by name.
    """
    gathered= _collectargs(args_to_collect)
    gathered_names= {p.name for p in args_to_collect.parameters.values()}
    extra= ''
    for param in sig.parameters.values():
        if param.name in gathered_names:
            continue
        if param.kind==param.VAR_POSITIONAL:
            extra+= '*{}, '.format(param.name)
        elif param.kind==param.VAR_KEYWORD:
            extra+= '**{}, '.format(param.name)
        else:
            # keyword form survives reordering of positional parameters
            extra+= '{0}={0}, '.format(param.name)
    extra= extra[:-2]
    return '{}, {}'.format(gathered, extra) if extra else gathered
def _remove_args(signature, args_to_remove):
"""
Removes named parameters from a signature.
"""
args_to_remove= set(args_to_remove)
varargs_removed= False
args= []
for arg in signature.parameters.values():
if arg.name in args_to_remove:
if arg.kind==arg.VAR_POSITIONAL:
varargs_removed= True
continue
if varargs_removed and arg.kind==arg.KEYWORD_ONLY:#if varargs have been removed, there are no more keyword-only parameters
arg= arg.replace(kind=arg.POSITIONAL_OR_KEYWORD)
args.append(arg)
return signature.replace(parameters=args)
def _add_args(sig, func):
    """
    Merges a signature and a function into a signature that accepts ALL the parameters.

    ``sig`` supplies the copied parameters; ``func``'s own parameters are
    appended, skipping the two leading collector parameters (and an
    optional ``self``).  Raises if merging would create duplicate varargs
    or kwargs.
    """
    funcsig= inspect.signature(func)
    #find out where we want to insert the new parameters
    #parameters with a default value will be inserted before *args (if any)
    #if parameters with a default value exist, parameters with no default value will be inserted as keyword-only AFTER *args
    vararg= None
    kwarg= None
    insert_index_default= None
    insert_index_nodefault= None
    default_found= False
    args= list(sig.parameters.values())
    for index,arg in enumerate(args):
        if arg.kind==arg.VAR_POSITIONAL:
            vararg= arg
            insert_index_default= index
            if default_found:
                insert_index_nodefault= index+1
            else:
                insert_index_nodefault= index
        elif arg.kind==arg.VAR_KEYWORD:
            kwarg= arg
            if insert_index_default is None:
                insert_index_default= insert_index_nodefault= index
        else:
            if arg.default!=arg.empty:
                default_found= True
    # no *args/**kwargs found: new parameters simply go at the end
    if insert_index_default is None:
        insert_index_default= insert_index_nodefault= len(args)
    #find the new parameters
    #skip the first two parameters (args and kwargs)
    newargs= list(funcsig.parameters.values())
    if not newargs:
        raise Exception('The decorated function must accept at least 2 parameters')
    #if the first parameter is called "self", ignore the first 3 parameters
    if newargs[0].name=='self':
        del newargs[0]
        if len(newargs)<2:
            raise Exception('The decorated function must accept at least 2 parameters')
    newargs= newargs[2:]
    #add the new parameters
    if newargs:
        new_vararg= None
        for arg in newargs:
            if arg.kind==arg.VAR_POSITIONAL:
                if vararg is None:
                    new_vararg= arg
                else:
                    raise Exception('Cannot add varargs to a function that already has varargs')
            elif arg.kind==arg.VAR_KEYWORD:
                if kwarg is None:
                    args.append(arg)
                else:
                    raise Exception('Cannot add kwargs to a function that already has kwargs')
            else:
                #we can insert it as a positional parameter if it has a default value OR no other parameter has a default value
                if arg.default!=arg.empty or not default_found:
                    #do NOT change the parameter kind here. Leave it as it was, so that the order of varargs and keyword-only parameters is preserved.
                    args.insert(insert_index_default, arg)
                    insert_index_nodefault+= 1
                    insert_index_default+= 1
                else:
                    arg= arg.replace(kind=arg.KEYWORD_ONLY)
                    args.insert(insert_index_nodefault, arg)
                    if insert_index_default==insert_index_nodefault:
                        insert_index_default+= 1
                    insert_index_nodefault+= 1
        #if varargs need to be added, insert them before keyword-only arguments
        if new_vararg is not None:
            # find the first parameter that is not plain positional; the
            # for/else falls through when every parameter is positional
            for i,arg in enumerate(args):
                if arg.kind not in (arg.POSITIONAL_ONLY,arg.POSITIONAL_OR_KEYWORD):
                    break
            else:
                i+= 1
            args.insert(i, new_vararg)
    return inspect.Signature(args, return_annotation=funcsig.return_annotation)
| 36.592593 | 150 | 0.612782 |
9b0f61def53ebad5ee0cac00df6acde1e2df5447 | 209 | py | Python | tests/test_unitest_module_invocation.py | svisser/aiounittest | 1d7c0e1deac720a3608d4941f56f73b74a206401 | [
"MIT"
] | 55 | 2017-08-18T10:24:05.000Z | 2022-03-21T08:29:19.000Z | tests/test_unitest_module_invocation.py | svisser/aiounittest | 1d7c0e1deac720a3608d4941f56f73b74a206401 | [
"MIT"
] | 15 | 2017-09-22T13:14:43.000Z | 2022-01-23T16:29:22.000Z | tests/test_unitest_module_invocation.py | svisser/aiounittest | 1d7c0e1deac720a3608d4941f56f73b74a206401 | [
"MIT"
] | 4 | 2019-11-26T18:08:43.000Z | 2021-06-01T22:12:00.000Z | import sys
from unittest.main import main
def test_specific_test():
    """Invoke unittest's CLI entry point for one specific async test case.

    ``sys.argv`` is saved and restored so the mutation does not leak into
    other tests running in the same process.
    """
    saved_argv = sys.argv
    sys.argv = ['TEST', 'test_asynctestcase.TestAsyncCase.test_await_async_add']
    try:
        main(module=None, exit=False)
    finally:
        sys.argv = saved_argv
| 23.222222 | 76 | 0.741627 |
6537f7cc23f247441169e14dc167f022b94573e7 | 115 | py | Python | unweaver/databases/geopackage/__init__.py | jsbeckwith/unweaver | a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0 | [
"Apache-2.0"
] | 4 | 2019-04-24T16:38:57.000Z | 2021-12-28T20:38:08.000Z | unweaver/databases/geopackage/__init__.py | jsbeckwith/unweaver | a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0 | [
"Apache-2.0"
] | 3 | 2021-06-02T04:06:33.000Z | 2021-11-02T01:47:20.000Z | unweaver/databases/geopackage/__init__.py | jsbeckwith/unweaver | a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0 | [
"Apache-2.0"
] | 1 | 2020-08-13T04:42:05.000Z | 2020-08-13T04:42:05.000Z | from .geopackage import GeoPackage
from .feature_table import FeatureTable
from .geom_types import GeoPackageGeoms
| 28.75 | 39 | 0.869565 |
97459fbf736ae3c42651c4e7132cbbd9122fb3b9 | 12,325 | py | Python | Lib/distutils/text_file.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | 1 | 2021-12-26T22:20:34.000Z | 2021-12-26T22:20:34.000Z | Lib/distutils/text_file.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | Lib/distutils/text_file.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | """text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
__revision__ = "$Id$"
import sys, os, io
class TextFile:
    """Provides a file-like object that takes care of all the things you
       commonly want to do when processing a text file that has some
       line-by-line syntax: strip comments (as long as "#" is your
       comment character), skip blank lines, join adjacent lines by
       escaping the newline (ie. backslash at end of line), strip
       leading and/or trailing whitespace.  All of these are optional
       and independently controllable.
       Provides a 'warn()' method so you can generate warning messages that
       report physical line number, even if the logical line in question
       spans multiple physical lines.  Also provides 'unreadline()' for
       implementing line-at-a-time lookahead.
       Constructor is called as:
           TextFile (filename=None, file=None, **options)
       It bombs (RuntimeError) if both 'filename' and 'file' are None;
       'filename' should be a string, and 'file' a file object (or
       something that provides 'readline()' and 'close()' methods).  It is
       recommended that you supply at least 'filename', so that TextFile
       can include it in warning messages.  If 'file' is not supplied,
       TextFile creates its own using 'io.open()'.
       The options are all boolean, and affect the value returned by
       'readline()':
         strip_comments [default: true]
           strip from "#" to end-of-line, as well as any whitespace
           leading up to the "#" -- unless it is escaped by a backslash
         lstrip_ws [default: false]
           strip leading whitespace from each line before returning it
         rstrip_ws [default: true]
           strip trailing whitespace (including line terminator!) from
           each line before returning it
         skip_blanks [default: true}
           skip lines that are empty *after* stripping comments and
           whitespace.  (If both lstrip_ws and rstrip_ws are false,
           then some lines may consist of solely whitespace: these will
           *not* be skipped, even if 'skip_blanks' is true.)
         join_lines [default: false]
           if a backslash is the last non-newline character on a line
           after stripping comments and whitespace, join the following line
           to it to form one "logical line"; if N consecutive lines end
           with a backslash, then N+1 physical lines will be joined to
           form one logical line.
         collapse_join [default: false]
           strip leading whitespace from lines that are joined to their
           predecessor; only matters if (join_lines and not lstrip_ws)
       Note that since 'rstrip_ws' can strip the trailing newline, the
       semantics of 'readline()' must differ from those of the builtin file
       object's 'readline()' method!  In particular, 'readline()' returns
       None for end-of-file: an empty string might just be a blank line (or
       an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
       not."""
    # Class-level defaults; each key becomes an instance attribute in
    # __init__, optionally overridden by keyword arguments.
    default_options = { 'strip_comments': 1,
                        'skip_blanks': 1,
                        'lstrip_ws': 0,
                        'rstrip_ws': 1,
                        'join_lines': 0,
                        'collapse_join': 0,
                      }
    def __init__(self, filename=None, file=None, **options):
        """Construct a new TextFile object.  At least one of 'filename'
           (a string) and 'file' (a file-like object) must be supplied.
           They keyword argument options are described above and affect
           the values returned by 'readline()'."""
        if filename is None and file is None:
            raise RuntimeError("you must supply either or both of 'filename' and 'file'")
        # set values for all options -- either from client option hash
        # or fallback to default_options
        for opt in self.default_options.keys():
            if opt in options:
                setattr(self, opt, options[opt])
            else:
                setattr(self, opt, self.default_options[opt])
        # sanity check client option hash
        for opt in options.keys():
            if opt not in self.default_options:
                raise KeyError("invalid TextFile option '%s'" % opt)
        if file is None:
            self.open(filename)
        else:
            self.filename = filename
            self.file = file
            self.current_line = 0 # assuming that file is at BOF!
        # 'linebuf' is a stack of lines that will be emptied before we
        # actually read from the file; it's only populated by an
        # 'unreadline()' operation
        self.linebuf = []
    def open(self, filename):
        """Open a new file named 'filename'.  This overrides both the
           'filename' and 'file' arguments to the constructor."""
        self.filename = filename
        self.file = io.open(self.filename, 'r')
        self.current_line = 0
    def close(self):
        """Close the current file and forget everything we know about it
           (filename, current line number)."""
        self.file.close()
        self.file = None
        self.filename = None
        self.current_line = None
    def gen_error(self, msg, line=None):
        # Format "<filename>, line N: <msg>"; 'line' may be an int or a
        # [start, end] pair when 'join_lines' merged physical lines.
        outmsg = []
        if line is None:
            line = self.current_line
        outmsg.append(self.filename + ", ")
        if isinstance(line, (list, tuple)):
            outmsg.append("lines %d-%d: " % tuple(line))
        else:
            outmsg.append("line %d: " % line)
        outmsg.append(str(msg))
        return "".join(outmsg)
    def error(self, msg, line=None):
        # Hard failure variant of warn(): raises instead of printing.
        raise ValueError("error: " + self.gen_error(msg, line))
    def warn(self, msg, line=None):
        """Print (to stderr) a warning message tied to the current logical
           line in the current file.  If the current logical line in the
           file spans multiple physical lines, the warning refers to the
           whole range, eg. "lines 3-5".  If 'line' supplied, it overrides
           the current line number; it may be a list or tuple to indicate a
           range of physical lines, or an integer for a single physical
           line."""
        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
    def readline(self):
        """Read and return a single logical line from the current file (or
           from an internal buffer if lines have previously been "unread"
           with 'unreadline()').  If the 'join_lines' option is true, this
           may involve reading multiple physical lines concatenated into a
           single string.  Updates the current line number, so calling
           'warn()' after 'readline()' emits a warning about the physical
           line(s) just read.  Returns None on end-of-file, since the empty
           string can occur if 'rstrip_ws' is true but 'strip_blanks' is
           not."""
        # If any "unread" lines waiting in 'linebuf', return the top
        # one.  (We don't actually buffer read-ahead data -- lines only
        # get put in 'linebuf' if the client explicitly does an
        # 'unreadline()'.
        if self.linebuf:
            line = self.linebuf[-1]
            del self.linebuf[-1]
            return line
        buildup_line = ''
        while True:
            # read the line, make it None if EOF
            line = self.file.readline()
            if line == '':
                line = None
            if self.strip_comments and line:
                # Look for the first "#" in the line.  If none, never
                # mind.  If we find one and it's the first character, or
                # is not preceded by "\", then it starts a comment --
                # strip the comment, strip whitespace before it, and
                # carry on.  Otherwise, it's just an escaped "#", so
                # unescape it (and any other escaped "#"'s that might be
                # lurking in there) and otherwise leave the line alone.
                pos = line.find("#")
                if pos == -1: # no "#" -- no comments
                    pass
                # It's definitely a comment -- either "#" is the first
                # character, or it's elsewhere and unescaped.
                elif pos == 0 or line[pos-1] != "\\":
                    # Have to preserve the trailing newline, because it's
                    # the job of a later step (rstrip_ws) to remove it --
                    # and if rstrip_ws is false, we'd better preserve it!
                    # (NB. this means that if the final line is all comment
                    # and has no trailing newline, we will think that it's
                    # EOF; I think that's OK.)
                    eol = (line[-1] == '\n') and '\n' or ''
                    line = line[0:pos] + eol
                    # If all that's left is whitespace, then skip line
                    # *now*, before we try to join it to 'buildup_line' --
                    # that way constructs like
                    #   hello \\
                    #   # comment that should be ignored
                    #   there
                    # result in "hello there".
                    if line.strip() == "":
                        continue
                else: # it's an escaped "#"
                    line = line.replace("\\#", "#")
            # did previous line end with a backslash? then accumulate
            if self.join_lines and buildup_line:
                # oops: end of file
                if line is None:
                    self.warn("continuation line immediately precedes "
                              "end-of-file")
                    return buildup_line
                if self.collapse_join:
                    line = line.lstrip()
                line = buildup_line + line
                # careful: pay attention to line number when incrementing it
                if isinstance(self.current_line, list):
                    self.current_line[1] = self.current_line[1] + 1
                else:
                    self.current_line = [self.current_line,
                                         self.current_line + 1]
            # just an ordinary line, read it as usual
            else:
                if line is None: # eof
                    return None
                # still have to be careful about incrementing the line number!
                if isinstance(self.current_line, list):
                    self.current_line = self.current_line[1] + 1
                else:
                    self.current_line = self.current_line + 1
            # strip whitespace however the client wants (leading and
            # trailing, or one or the other, or neither)
            if self.lstrip_ws and self.rstrip_ws:
                line = line.strip()
            elif self.lstrip_ws:
                line = line.lstrip()
            elif self.rstrip_ws:
                line = line.rstrip()
            # blank line (whether we rstrip'ed or not)? skip to next line
            # if appropriate
            if (line == '' or line == '\n') and self.skip_blanks:
                continue
            if self.join_lines:
                if line[-1] == '\\':
                    buildup_line = line[:-1]
                    continue
                if line[-2:] == '\\\n':
                    buildup_line = line[0:-2] + '\n'
                    continue
            # well, I guess there's some actual content there: return it
            return line
    def readlines(self):
        """Read and return the list of all logical lines remaining in the
           current file."""
        lines = []
        while True:
            line = self.readline()
            if line is None:
                return lines
            lines.append(line)
    def unreadline(self, line):
        """Push 'line' (a string) onto an internal buffer that will be
           checked by future 'readline()' calls.  Handy for implementing
           a parser with line-at-a-time lookahead."""
        self.linebuf.append(line)
| 43.245614 | 89 | 0.561623 |
66bf7d0ed520442c205e1cdfdd65aea5ffe69723 | 6,986 | py | Python | torchgeo/samplers/single.py | GIShkl/GAOFEN2021_CHANGEDETECTION | 5b7251cb1e951a04c7effacab6c1233232158472 | [
"MIT"
] | 3 | 2021-12-12T09:45:41.000Z | 2022-03-10T08:34:22.000Z | torchgeo/samplers/single.py | lyp19/GAOFEN2021_CHANGEDETECTION | 5b7251cb1e951a04c7effacab6c1233232158472 | [
"MIT"
] | null | null | null | torchgeo/samplers/single.py | lyp19/GAOFEN2021_CHANGEDETECTION | 5b7251cb1e951a04c7effacab6c1233232158472 | [
"MIT"
] | 1 | 2021-11-13T05:40:18.000Z | 2021-11-13T05:40:18.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""TorchGeo samplers."""
import abc
import random
from typing import Iterator, Optional, Tuple, Union
from rtree.index import Index
from torch.utils.data import Sampler
from torchgeo.datasets import BoundingBox
from .utils import _to_tuple, get_random_bounding_box
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
Sampler.__module__ = "torch.utils.data"
class GeoSampler(Sampler[BoundingBox], abc.ABC):
    """Abstract base class for sampling from :class:`~torchgeo.datasets.GeoDataset`.
    Unlike PyTorch's :class:`~torch.utils.data.Sampler`, :class:`GeoSampler`
    returns enough geospatial information to uniquely index any
    :class:`~torchgeo.datasets.GeoDataset`. This includes things like latitude,
    longitude, height, width, projection, coordinate system, and time.
    """
    # Subclasses yield BoundingBox objects instead of integer indices.
    @abc.abstractmethod
    def __iter__(self) -> Iterator[BoundingBox]:
        """Return the index of a dataset.
        Returns:
            (minx, maxx, miny, maxy, mint, maxt) coordinates to index a dataset
        """
class RandomGeoSampler(GeoSampler):
    """Draws randomly-positioned patches from the region of interest.
    Intended for training, where you want to maximize dataset size by
    returning as many random :term:`chips <chip>` as possible per epoch.
    Not recommended for tile-based datasets; use
    :class:`RandomBatchGeoSampler` for those instead.
    """
    def __init__(
        self,
        index: Index,
        size: Union[Tuple[float, float], float],
        length: int,
        roi: Optional[BoundingBox] = None,
    ) -> None:
        """Initialize the sampler.
        ``size`` is either a single float (square patches) or a
        ``(height, width)`` tuple, expressed in units of the index CRS.
        Args:
            index: index of a :class:`~torchgeo.datasets.GeoDataset`
            size: dimensions of each :term:`patch` in units of CRS
            length: number of random samples to draw per epoch
            roi: region of interest to sample from (minx, maxx, miny, maxy,
                mint, maxt); defaults to the bounds of ``index``
        """
        self.index = index
        self.size = _to_tuple(size)
        self.length = length
        self.roi = roi if roi is not None else BoundingBox(*index.bounds)
        # All tiles intersecting the region of interest.
        self.hits = list(self.index.intersection(self.roi, objects=True))
    def __iter__(self) -> Iterator[BoundingBox]:
        """Yield randomly-placed patch bounding boxes.
        Returns:
            (minx, maxx, miny, maxy, mint, maxt) coordinates to index a dataset
        """
        for _ in range(self.length):
            # Pick a random tile, then a random patch inside it.
            tile = random.choice(self.hits)
            tile_bounds = BoundingBox(*tile.bounds)
            yield get_random_bounding_box(tile_bounds, self.size)
    def __len__(self) -> int:
        """Return the number of samples drawn in a single epoch.
        Returns:
            length of the epoch
        """
        return self.length
class GridGeoSampler(GeoSampler):
    """Samples elements in a grid-like fashion.
    This is particularly useful during evaluation when you want to make predictions for
    an entire region of interest. You want to minimize the amount of redundant
    computation by minimizing overlap between :term:`chips <chip>`.
    Usually the stride should be slightly smaller than the chip size such that each chip
    has some small overlap with surrounding chips. This is used to prevent `stitching
    artifacts <https://arxiv.org/abs/1805.12219>`_ when combining each prediction patch.
    The overlap between each chip (``chip_size - stride``) should be approximately equal
    to the `receptive field <https://distill.pub/2019/computing-receptive-fields/>`_ of
    the CNN.
    When sampling from :class:`~torchgeo.datasets.ZipDataset`, the ``index`` should come
    from a non-tile-based dataset if possible.
    """
    def __init__(
        self,
        index: Index,
        size: Union[Tuple[float, float], float],
        stride: Union[Tuple[float, float], float],
        roi: Optional[BoundingBox] = None,
    ) -> None:
        """Initialize a new Sampler instance.
        The ``size`` and ``stride`` arguments can either be:
        * a single ``float`` - in which case the same value is used for the height and
          width dimension
        * a ``tuple`` of two floats - in which case, the first *float* is used for the
          height dimension, and the second *float* for the width dimension
        Args:
            index: index of a :class:`~torchgeo.datasets.GeoDataset`
            size: dimensions of each :term:`patch` in units of CRS
            stride: distance to skip between each patch
            roi: region of interest to sample from (minx, maxx, miny, maxy, mint, maxt)
        """
        self.index = index
        self.size = _to_tuple(size)
        self.stride = _to_tuple(stride)
        if roi is None:
            roi = BoundingBox(*index.bounds)
        self.roi = roi
        self.hits = list(index.intersection(roi, objects=True))
        # Precompute the total number of grid positions across all tiles so
        # that __len__ is O(1).
        self.length: int = 0
        for hit in self.hits:
            bounds = BoundingBox(*hit.bounds)
            # Patches per tile along each axis: floor((extent - size) / stride) + 1
            rows = int((bounds.maxy - bounds.miny - self.size[0]) // self.stride[0]) + 1
            cols = int((bounds.maxx - bounds.minx - self.size[1]) // self.stride[1]) + 1
            self.length += rows * cols
    def __iter__(self) -> Iterator[BoundingBox]:
        """Return the index of a dataset.
        Returns:
            (minx, maxx, miny, maxy, mint, maxt) coordinates to index a dataset
        """
        # For each tile...
        for hit in self.hits:
            bounds = BoundingBox(*hit.bounds)
            rows = int((bounds.maxy - bounds.miny - self.size[0]) // self.stride[0]) + 1
            cols = int((bounds.maxx - bounds.minx - self.size[1]) // self.stride[1]) + 1
            # The full temporal extent of the tile is kept for every patch.
            mint = bounds.mint
            maxt = bounds.maxt
            # For each row...
            for i in range(rows):
                miny = bounds.miny + i * self.stride[0]
                maxy = miny + self.size[0]
                # For each column...
                for j in range(cols):
                    minx = bounds.minx + j * self.stride[1]
                    maxx = minx + self.size[1]
                    yield BoundingBox(minx, maxx, miny, maxy, mint, maxt)
    def __len__(self) -> int:
        """Return the number of samples over the ROI.
        Returns:
            number of patches that will be sampled
        """
        return self.length
| 35.461929 | 88 | 0.618523 |
736884b369bc2ab92b90fb83eba6c641dd40e168 | 1,637 | py | Python | examples/ex3.py | whoopnip/pypi-sphinx-quickstart | 2f965a5ed0c6d45873542461f37cfd2eb1f62a7b | [
"MIT"
] | 1 | 2020-05-19T11:17:52.000Z | 2020-05-19T11:17:52.000Z | examples/ex3.py | whoopnip/pypi-sphinx-quickstart | 2f965a5ed0c6d45873542461f37cfd2eb1f62a7b | [
"MIT"
] | 15 | 2019-08-05T12:28:03.000Z | 2020-01-19T15:37:57.000Z | examples/ex3.py | whoopnip/pypi-sphinx-quickstart | 2f965a5ed0c6d45873542461f37cfd2eb1f62a7b | [
"MIT"
] | 2 | 2020-02-13T19:48:38.000Z | 2021-08-09T11:17:10.000Z | # -*- coding: utf-8 -*-
"""
Tertiary example - Plotting sin3
===================================
This is a general example demonstrating a Matplotlib plot output, embedded
rST, the use of math notation and cross-linking to other examples. It would be
useful to compare with the
output below.
.. math::
x \\rightarrow \\sin(x)
Here the function :math:`\\sin` is evaluated at each point the variable
:math:`x` is defined.
"""
import numpy as np
import matplotlib.pyplot as plt
# Evaluate sin over one full period and plot it.
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
plt.xlabel('$x$')
# Raw string so the LaTeX "\s" is not interpreted as a (deprecated/invalid)
# Python escape sequence.
plt.ylabel(r'$\sin(x)$')
# To avoid matplotlib text output
plt.show()
#%%
# To include embedded rST, use a line of >= 20 ``#``'s or ``#%%`` between your
# rST and your code. This separates your example
# into distinct text and code blocks. You can continue writing code below the
# embedded rST text block:
print('This example shows a sin plot!')
#%%
from py_qs_example.mymodule import ExampleClass, example_function, less_important_function
# Instantiate the documented example class; in a sphinx-gallery cell a bare
# trailing expression renders the object's repr in the built documentation.
ec = ExampleClass(5)
ec
#%%
output = example_function(ec, '_test')
output
#%%
# LaTeX syntax in the text blocks does not require backslashes to be escaped:
#
# .. math::
# \sin
#
# Cross referencing
# ^^^^^^^^^^^^^^^^^
#
# You can refer to an example from any part of the documentation,
# including from other examples. Sphinx-Gallery automatically creates reference
# labels for each example. The label consists of the ``.py`` file name,
# prefixed with ``sphx_glr_`` and the name of the
# folder(s) the example is in. In this case, the example we want to
# cross-reference is in ``auto_examples`` (the ``gallery_dirs``; see | 26.403226 | 90 | 0.691509 |
c8364d1977dbece0fd087a334c7d289f428a6a7c | 1,788 | py | Python | tensorstream/common/extremum_spec.py | clems4ever/tensorstream | 61bff14f65f71bdd4ab58aefbd6eda79ec5863cb | [
"Apache-2.0"
] | 5 | 2019-04-10T03:51:13.000Z | 2020-07-12T10:50:24.000Z | tensorstream/common/extremum_spec.py | clems4ever/tensorstream | 61bff14f65f71bdd4ab58aefbd6eda79ec5863cb | [
"Apache-2.0"
] | null | null | null | tensorstream/common/extremum_spec.py | clems4ever/tensorstream | 61bff14f65f71bdd4ab58aefbd6eda79ec5863cb | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorstream.common.extremum import GlobalMinimum, GlobalMaximum, LocalMinimum, LocalMaximum
from tensorstream.tests import TestCase
class ExtremumSpec(TestCase):
    """Checks the extremum streaming operators against spreadsheet fixtures.

    Each test runs one operator over the 'Value' column of the reference
    spreadsheet and compares the streamed output with the corresponding
    precomputed column.
    """
    def setUp(self):
        # Reference inputs/outputs are precomputed in extremum.ods.
        self.sheets = self.read_ods(
            self.from_test_res('extremum.ods', __file__))
        self.sheet = self.sheets['Sheet1']
    def _assert_operator_matches(self, operator, column):
        """Run *operator* over the Value column and compare to *column*."""
        values = tf.placeholder(tf.float32)
        outputs_ts, _, _ = operator(values)
        with tf.Session() as sess:
            output = sess.run(outputs_ts, { values: self.sheet['Value'] })
        np.testing.assert_almost_equal(output,
            self.sheet[column].values, decimal=3)
    def test_global_min(self):
        self._assert_operator_matches(GlobalMinimum(), 'GlobalMin')
    def test_global_max(self):
        self._assert_operator_matches(GlobalMaximum(), 'GlobalMax')
    def test_local_min(self):
        self._assert_operator_matches(LocalMinimum(5), 'LocalMin')
    def test_local_max(self):
        self._assert_operator_matches(LocalMaximum(5), 'LocalMax')
10f64569a8f0b84d879e42c3434c9197cb0a0e6b | 735 | py | Python | Django/first_app/blog/models.py | anonhyme/Sandbox | 49906a0bc78a59916f37448a9b36a46e5410c4ba | [
"Unlicense"
] | null | null | null | Django/first_app/blog/models.py | anonhyme/Sandbox | 49906a0bc78a59916f37448a9b36a46e5410c4ba | [
"Unlicense"
] | null | null | null | Django/first_app/blog/models.py | anonhyme/Sandbox | 49906a0bc78a59916f37448a9b36a46e5410c4ba | [
"Unlicense"
] | null | null | null | from django.db import models
class Categorie(models.Model):
    # Category label used to classify blog articles.
    nom = models.CharField(max_length=30)
    def __unicode__(self):
        # Human-readable representation (Python 2 style; __str__ on Python 3).
        return self.nom
class Article(models.Model):
    # Blog post: title, URL slug, author name, body text, publication
    # timestamp and a link to its category.
    titre = models.CharField(max_length=100)
    slug = models.SlugField(max_length = 100)
    auteur = models.CharField(max_length=42)
    contenu = models.TextField(null=True)
    # auto_now_add: set once on creation; never updated afterwards.
    date = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name="Date de sortie")
    categorie = models.ForeignKey('Categorie')
    def __unicode__(self):
        """
        This method, which we will define on every model, lets us easily
        recognise the different objects we handle later on and in the
        admin interface.
        """
        return self.titre
| 33.409091 | 94 | 0.75102 |
8b767ccdafe47bc57a8bad54ad4e97d2e417b443 | 4,967 | py | Python | python3/libs/colorama/win32.py | Jgoodz/python_koans | 5669fb398f18a7e645938c51c03f03147695da83 | [
"MIT"
] | null | null | null | python3/libs/colorama/win32.py | Jgoodz/python_koans | 5669fb398f18a7e645938c51c03f03147695da83 | [
"MIT"
] | null | null | null | python3/libs/colorama/win32.py | Jgoodz/python_koans | 5669fb398f18a7e645938c51c03f03147695da83 | [
"MIT"
] | null | null | null | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
from ctypes import wintypes
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", wintypes._COORD),
            ("dwCursorPosition", wintypes._COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", wintypes._COORD),
        ]
        def __str__(self):
            # Compact debug form: size, cursor, attributes, window rect, max size.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
wintypes._COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
wintypes._COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        # Apply foreground/background attribute bits to the given console stream.
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position):
    """Move the console cursor to *position*, a 1-based ANSI (row, column) pair."""
    position = wintypes._COORD(*position)
    # If the position is out of range, do nothing.
    if position.Y <= 0 or position.X <= 0:
        return
    # Adjust for Windows' SetConsoleCursorPosition:
    #   1. being 0-based, while ANSI is 1-based.
    #   2. expecting (x,y), while ANSI uses (y,x).
    adjusted_position = wintypes._COORD(position.Y - 1, position.X - 1)
    # Adjust for viewport's scroll position: the console cursor is addressed
    # in buffer coordinates, so the visible window's origin must be added.
    sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
    adjusted_position.Y += sr.Top
    adjusted_position.X += sr.Left
    # Resume normal processing
    handle = handles[stream_id]
    return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
    """Write *char* into *length* cells starting at *start*.

    Returns the number of cells actually written.
    """
    written = wintypes.DWORD(0)
    # Note that this is hard-coded for ANSI (vs wide) bytes.
    _FillConsoleOutputCharacterA(
        handles[stream_id],
        c_char(char),
        wintypes.DWORD(length),
        start,
        byref(written),
    )
    return written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
    """Apply attribute word *attr* to *length* cells starting at *start*.

    Mirrors the C idiom:
    FillConsoleOutputAttribute(hConsole, csbi.wAttributes, dwConSize,
                               coordScreen, &cCharsWritten).
    """
    num_written = wintypes.DWORD(0)
    # Note that this is hard-coded for ANSI (vs wide) bytes.
    return _FillConsoleOutputAttribute(
        handles[stream_id],
        wintypes.WORD(attr),
        wintypes.DWORD(length),
        start,
        byref(num_written),
    )
| 36.792593 | 112 | 0.644051 |
48b26f752cc36977613a78698c199d4822f67a10 | 3,562 | py | Python | third_party/tensorflow/building/CONV/builder.py | doslindos/ml_crapwrap | f9daa2904234492921c6c344bfcd24992e2ff421 | [
"MIT"
] | null | null | null | third_party/tensorflow/building/CONV/builder.py | doslindos/ml_crapwrap | f9daa2904234492921c6c344bfcd24992e2ff421 | [
"MIT"
] | null | null | null | third_party/tensorflow/building/CONV/builder.py | doslindos/ml_crapwrap | f9daa2904234492921c6c344bfcd24992e2ff421 | [
"MIT"
] | null | null | null | from ..util import create_weights, create_bias
def conv_transpose_weights_creation_loop(weights_list, reversed_bs):
    # Pair each shared (reversed) conv weight with its reversed bias.
    # In:
    #   weights_list: list of weight variables (already reversed by caller)
    #   reversed_bs: list of bias variables (reversed), entries may be None;
    #                the whole argument is None when the caller was built
    #                without biases (use_bias=False)
    # Out:
    #   (weights, bias): two parallel lists; bias entries are None where no
    #   bias exists
    #
    # BUGFIX: the original indexed ``reversed_bs[i]`` unconditionally, which
    # raised TypeError when initialize_conv_layer passed reversed_bs=None
    # (the use_bias=False path).
    weights = []
    bias = []
    for i, w in enumerate(weights_list):
        weights.append(w)
        if reversed_bs is not None and reversed_bs[i] is not None:
            bias.append(reversed_bs[i])
        else:
            bias.append(None)
    return (weights, bias)
def conv_weights_creation_loop(kernel_size, filters, use_bias, weight_dtype, transpose):
    # Loop to create convolutional weights
    # In:
    #   kernel_size: list, from model configurations 'Convo' 'kernel_size';
    #                one (k1, k2) pair per connection
    #   filters: list, from model configurations 'Convo' 'filters'
    #   use_bias: bool, create bias variables as well
    #   weight_dtype: str, datatype used with weights
    #   transpose: bool, swap in/out filter counts for transposed convolution
    # Out:
    #   (weights, bias): two parallel lists, one entry per layer connection;
    #   bias entries are None when use_bias is False
    #
    # 'filters' holds the channel count of every layer, so the number of
    # connections between layers is len(filters) - 1.
    # (The original guarded the body with ``if connection < connections``,
    # which is always true inside ``range(connections)``; the dead
    # ``else: break`` branch has been removed.)
    weights = []
    bias = []
    for connection in range(len(filters) - 1):
        # Kernel spatial size for this connection.
        k1 = kernel_size[connection][0]
        k2 = kernel_size[connection][1]
        # Filter (channel) counts: transposed convolutions swap in/out.
        if not transpose:
            f1 = filters[connection]
            f2 = filters[connection + 1]
        else:
            f1 = filters[connection + 1]
            f2 = filters[connection]
        # Create weights (and optionally bias) for this connection.
        w_var = create_weights([k1, k2, f1, f2], dtype=weight_dtype)
        b_var = create_bias([f2], dtype=weight_dtype) if use_bias else None
        weights.append(w_var)
        bias.append(b_var)
    return (weights, bias)
def initialize_conv_layer(
    layer_name,
    input_dtype,
    conf,
    weights,
    bias,
    transpose
    ):
    # Create weights for the CONV layer if not already present in `weights`.
    # In:
    #   layer_name: str, key under which this layer's variables are stored
    #   input_dtype: datatype for newly created variables
    #   conf: dict, layer configuration ('filters', 'kernel_sizes',
    #         'use_bias', optionally 'transpose' = name of the layer whose
    #         weights should be reused in reverse)
    #   weights, bias: dicts mapping layer_name -> (trainable_flag, var_list)
    #   transpose: bool or str; a str marks a transposed layer that may
    #              share (reversed) weights of another layer
    # Out:
    #   (weights, bias): (dict, dict) modified weights dicts
    if not layer_name in list(weights.keys()):
        # Weight-sharing path: reuse the reversed weights of the layer named
        # by conf['transpose'] (only when no own kernel sizes are given).
        if isinstance(transpose, str) and 'kernel_sizes' not in conf.keys():
            if conf['use_bias']:
                reversed_bs = list(reversed(bias[conf['transpose']][1]))
            else:
                # NOTE(review): reversed_bs stays None here; confirm the
                # helper below tolerates None for its bias argument.
                reversed_bs = None
            #print([i.shape for i in weights[conf['transpose']][1]])
            cws, cbs = conv_transpose_weights_creation_loop(
                list(reversed(weights[conf['transpose']][1])),
                reversed_bs,
            )
            # Shared variables are not trained through this layer.
            trainable_vars = False
        # Create new weights
        else:
            # Create new weights
            cws, cbs = conv_weights_creation_loop(
                conf['kernel_sizes'],
                conf['filters'],
                conf['use_bias'],
                input_dtype,
                transpose
            )
            trainable_vars = True
        weights[layer_name] = (trainable_vars, cws)
        bias[layer_name] = (trainable_vars, cbs)
    return (weights, bias)
| 32.981481 | 92 | 0.527793 |
9ab144085893f8da182bc796e081c5104cc971c8 | 5,546 | py | Python | USPTO-15K/rank-diff-wln/oracletest.py | wengong-jin/nips17-rexgen | fb7dea369b0721b88cd0133a7d66348d244f65d3 | [
"MIT"
] | 113 | 2017-09-22T19:42:50.000Z | 2022-02-05T03:11:27.000Z | USPTO-15K/rank-diff-wln/oracletest.py | wibrow/nips17-rexgen | fb7dea369b0721b88cd0133a7d66348d244f65d3 | [
"MIT"
] | 6 | 2017-11-18T05:54:49.000Z | 2021-03-04T08:28:46.000Z | USPTO-15K/rank-diff-wln/oracletest.py | wibrow/nips17-rexgen | fb7dea369b0721b88cd0133a7d66348d244f65d3 | [
"MIT"
] | 41 | 2017-12-13T02:32:10.000Z | 2022-01-09T06:39:40.000Z | import tensorflow as tf
from utils.nn import linearND, linear
from mol_graph import atom_fdim as adim, bond_fdim as bdim, max_nb, smiles2graph, smiles2graph_test, bond_types
from models import *
import math, sys, random
from optparse import OptionParser
import threading
from multiprocessing import Queue
import rdkit
from rdkit import Chem
TOPK = 5  # number of top-scoring candidate products reported per reaction

# Command-line options for the oracle evaluation run.
parser = OptionParser()
parser.add_option("-t", "--test", dest="test_path")
parser.add_option("-p", "--cand", dest="cand_path", default=None)
parser.add_option("-a", "--ncand", dest="cand_size", default=500)
parser.add_option("-c", "--ncore", dest="core_size", default=10)
parser.add_option("-m", "--model", dest="model_path")
parser.add_option("-w", "--hidden", dest="hidden_size", default=100)
parser.add_option("-d", "--depth", dest="depth", default=1)
opts,args = parser.parse_args()

hidden_size = int(opts.hidden_size)
depth = int(opts.depth)
core_size = int(opts.core_size)
MAX_NCAND = int(opts.cand_size)  # cap on enumerated candidate products
#gpu_options = tf.GPUOptions(allow_growth=True)
#session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
session = tf.Session()

# Placeholders for one batch of molecular graphs: atom/bond feature tensors
# plus neighbor-index tensors (max_nb bounds the neighbor count per atom).
_input_atom = tf.placeholder(tf.float32, [None, None, adim])
_input_bond = tf.placeholder(tf.float32, [None, None, bdim])
_atom_graph = tf.placeholder(tf.int32, [None, None, max_nb, 2])
_bond_graph = tf.placeholder(tf.int32, [None, None, max_nb, 2])
_num_nbs = tf.placeholder(tf.int32, [None, None])
_src_holder = [_input_atom, _input_bond, _atom_graph, _bond_graph, _num_nbs]

# A FIFO queue decouples graph featurization (producer thread, read_data)
# from scoring in the main loop.
q = tf.FIFOQueue(100, [tf.float32, tf.float32, tf.int32, tf.int32, tf.int32])
enqueue = q.enqueue(_src_holder)
input_atom, input_bond, atom_graph, bond_graph, num_nbs = q.dequeue()

# Static shape info is lost through the queue; restore it for the ops below.
input_atom.set_shape([None, None, adim])
input_bond.set_shape([None, None, bdim])
atom_graph.set_shape([None, None, max_nb, 2])
bond_graph.set_shape([None, None, max_nb, 2])
num_nbs.set_shape([None, None])
graph_inputs = (input_atom, input_bond, atom_graph, bond_graph, num_nbs)

with tf.variable_scope("mol_encoder"):
    fp_all_atoms = rcnn_wl_only(graph_inputs, hidden_size=hidden_size, depth=depth)

# Row 0 is the reactant fingerprint; remaining rows are candidate products,
# represented by their difference from the reactant.
reactant = fp_all_atoms[0:1,:]
candidates = fp_all_atoms[1:,:]
candidates = candidates - reactant
candidates = tf.concat(0, [reactant, candidates])

with tf.variable_scope("diff_encoder"):
    reaction_fp = wl_diff_net(graph_inputs, candidates, hidden_size=hidden_size, depth=depth)
reaction_fp = reaction_fp[1:]

# Score every candidate and keep the indices of the TOPK highest scores.
reaction_fp = tf.nn.relu(linear(reaction_fp, hidden_size, "rex_hidden"))
score = tf.squeeze(linear(reaction_fp, 1, "score"), [1])
tk = tf.minimum(TOPK, tf.shape(score)[0])
_, pred_topk = tf.nn.top_k(score, tk)

tf.global_variables_initializer().run(session=session)

queue = Queue()  # hands (reaction, candidate-list) pairs to the main loop
def read_data(coord):
    # Producer thread: parse the test file and its candidate-bond file,
    # featurize each reaction, and feed both the Python queue (metadata)
    # and the TF FIFO queue (tensors).
    data = []
    data_f = open(opts.test_path, 'r')
    cand_f = open(opts.cand_path, 'r')

    for line in data_f:
        items = line.split()
        cand = cand_f.readline()

        # items[0] is the reaction SMILES, items[2] the edit string; edit
        # fields 2 and 3 hold deleted and newly-formed bonds respectively.
        r = items[0]
        edits = items[2]
        gbonds = []
        delbond = edits.split(';')[2]
        newbond = edits.split(';')[3]
        if len(delbond) > 0:
            for s in delbond.split(','):
                # "x-y-type" with 1-based atom indices; normalize to
                # 0-based, ordered pairs. Deleted bonds get type 0.
                x,y,_ = s.split('-')
                x,y = int(x)-1,int(y)-1
                x,y = min(x,y),max(x,y)
                gbonds.append((x,y,0))
        if len(newbond) > 0:
            for s in newbond.split(','):
                x,y,t = s.split('-')
                # Aromatic bond order 1.5 is encoded as type 4.
                if float(t) == 1.5: t = 4
                else: t = int(float(t))
                x,y = int(x)-1,int(y)-1
                x,y = min(x,y),max(x,y)
                gbonds.append((x,y,t))

        # Candidate bonds: gold reaction-core bonds first, then the
        # predicted candidate bonds that are not already in the core.
        rex_core = set([(x,y) for x,y,_ in gbonds])
        cand_bonds = list(rex_core)
        for b in cand.strip("\r\n ").split():
            x,y = b.split('-')
            x,y = int(x)-1,int(y)-1
            if (x,y) not in rex_core:
                cand_bonds.append((x,y))

        data.append((r,cand_bonds,gbonds))

    data_len = len(data)
    for it in xrange(data_len):
        r,cand_bonds,gold_bonds = data[it]
        r = r.split('>')[0]  # keep only the reactant side of the SMILES
        # Shrink the candidate core until the enumerated candidate set
        # fits within MAX_NCAND.
        ncore = core_size
        while True:
            src_tuple,conf = smiles2graph(r, cand_bonds[:ncore], gold_bonds, cutoff=-1)
            if len(conf) <= MAX_NCAND:
                break
            ncore -= 1
        queue.put((r,conf))
        feed_map = {x:y for x,y in zip(_src_holder, src_tuple)}
        session.run(enqueue, feed_dict=feed_map)
coord = tf.train.Coordinator()
t = threading.Thread(target=read_data, args=(coord,))
t.start()
saver = tf.train.Saver()
saver.restore(session, tf.train.latest_checkpoint(opts.model_path))
total = 0
idxfunc = lambda x:x.GetIntProp('molAtomMapNumber')
try:
while not coord.should_stop():
total += 1
r,conf = queue.get()
cur_pred = session.run(pred_topk)
rmol = Chem.MolFromSmiles(r)
rbonds = {}
for bond in rmol.GetBonds():
a1 = idxfunc(bond.GetBeginAtom())
a2 = idxfunc(bond.GetEndAtom())
t = bond_types.index(bond.GetBondType()) + 1
a1,a2 = min(a1,a2),max(a1,a2)
rbonds[(a1,a2)] = t
for idx in cur_pred:
for x,y,t in conf[idx]:
x,y = x+1,y+1
if ((x,y) not in rbonds and t > 0) or ((x,y) in rbonds and rbonds[(x,y)] != t):
print '%d-%d-%d' % (x,y,t),
print '|',
print
if total % 1000 == 0:
sys.stdout.flush()
except Exception as e:
print e
coord.request_stop(e)
finally:
coord.request_stop()
coord.join([t])
| 33.409639 | 111 | 0.615218 |
3c48fbc5177c2ea80306f28505f7c009cbef1cd7 | 8,949 | py | Python | homeassistant/components/deconz/light.py | tafehe/ha-core | 2478ec887aba87842bf52969b7ab1137826f7b98 | [
"Apache-2.0"
] | 5 | 2020-10-08T12:59:44.000Z | 2021-12-28T06:46:25.000Z | homeassistant/components/deconz/light.py | tafehe/ha-core | 2478ec887aba87842bf52969b7ab1137826f7b98 | [
"Apache-2.0"
] | 75 | 2020-08-05T07:22:42.000Z | 2022-03-23T21:54:57.000Z | homeassistant/components/deconz/light.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | [
"Apache-2.0"
] | 1 | 2021-08-01T06:12:13.000Z | 2021-08-01T06:12:13.000Z | """Support for deCONZ lights."""
from __future__ import annotations
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_XY,
DOMAIN,
EFFECT_COLORLOOP,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.color import color_hs_to_xy
from .const import (
COVER_TYPES,
DOMAIN as DECONZ_DOMAIN,
LOCK_TYPES,
NEW_GROUP,
NEW_LIGHT,
POWER_PLUGS,
SIRENS,
)
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
# Resource types that are exposed by other platforms (covers, locks,
# switches, sirens) or are not lights at all; they are excluded from the
# light platform below.
CONTROLLER = ["Configuration tool"]
DECONZ_GROUP = "is_deconz_group"  # state-attribute flag: entity backed by a deCONZ group
OTHER_LIGHT_RESOURCE_TYPES = (
    CONTROLLER + COVER_TYPES + LOCK_TYPES + POWER_PLUGS + SIRENS
)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the deCONZ lights and groups from a config entry."""
    gateway = get_gateway_from_config_entry(hass, config_entry)
    gateway.entities[DOMAIN] = set()

    @callback
    def async_add_light(lights=gateway.api.lights.values()):
        """Add light from deCONZ."""
        entities = []

        for light in lights:
            # Skip resources handled by other platforms and lights that
            # were already added.
            if (
                light.type not in OTHER_LIGHT_RESOURCE_TYPES
                and light.unique_id not in gateway.entities[DOMAIN]
            ):
                entities.append(DeconzLight(light, gateway))

        if entities:
            async_add_entities(entities)

    # Re-run discovery whenever the gateway signals a new light.
    config_entry.async_on_unload(
        async_dispatcher_connect(
            hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_light
        )
    )

    @callback
    def async_add_group(groups=gateway.api.groups.values()):
        """Add group from deCONZ."""
        if not gateway.option_allow_deconz_groups:
            return

        entities = []

        for group in groups:
            # Empty groups are not exposed as entities.
            if not group.lights:
                continue

            known_groups = set(gateway.entities[DOMAIN])
            new_group = DeconzGroup(group, gateway)
            if new_group.unique_id not in known_groups:
                entities.append(new_group)

        if entities:
            async_add_entities(entities)

    config_entry.async_on_unload(
        async_dispatcher_connect(
            hass, gateway.async_signal_new_device(NEW_GROUP), async_add_group
        )
    )

    # Populate entities for devices already known to the gateway.
    async_add_light()
    async_add_group()
class DeconzBaseLight(DeconzDevice, LightEntity):
    """Representation of a deCONZ light."""

    TYPE = DOMAIN

    def __init__(self, device, gateway):
        """Set up light."""
        super().__init__(device, gateway)

        # Advertise only the color modes the device actually reports.
        self._attr_supported_color_modes = set()

        if device.color_temp is not None:
            self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)

        if device.hue is not None and device.saturation is not None:
            self._attr_supported_color_modes.add(COLOR_MODE_HS)

        if device.xy is not None:
            self._attr_supported_color_modes.add(COLOR_MODE_XY)

        # Fall back to brightness-only, then to plain on/off capability.
        if not self._attr_supported_color_modes and device.brightness is not None:
            self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)

        if not self._attr_supported_color_modes:
            self._attr_supported_color_modes.add(COLOR_MODE_ONOFF)

        if device.brightness is not None:
            self._attr_supported_features |= SUPPORT_FLASH
            self._attr_supported_features |= SUPPORT_TRANSITION

        if device.effect is not None:
            self._attr_supported_features |= SUPPORT_EFFECT

    @property
    def color_mode(self) -> str:
        """Return the color mode of the light."""
        if self._device.color_mode == "ct":
            color_mode = COLOR_MODE_COLOR_TEMP
        elif self._device.color_mode == "hs":
            color_mode = COLOR_MODE_HS
        elif self._device.color_mode == "xy":
            color_mode = COLOR_MODE_XY
        elif self._device.brightness is not None:
            color_mode = COLOR_MODE_BRIGHTNESS
        else:
            color_mode = COLOR_MODE_ONOFF
        return color_mode

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._device.brightness

    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return [EFFECT_COLORLOOP]

    @property
    def color_temp(self):
        """Return the CT color value."""
        return self._device.color_temp

    @property
    def hs_color(self) -> tuple:
        """Return the hs color value.

        deCONZ reports hue in 0..65535 and saturation in 0..255; convert to
        Home Assistant's 0..360 / 0..100 ranges.
        """
        return (self._device.hue / 65535 * 360, self._device.saturation / 255 * 100)

    @property
    def xy_color(self) -> tuple | None:
        """Return the XY color value."""
        return self._device.xy

    @property
    def is_on(self):
        """Return true if light is on."""
        return self._device.state

    async def async_turn_on(self, **kwargs):
        """Turn on light."""
        data = {"on": True}

        if ATTR_BRIGHTNESS in kwargs:
            data["brightness"] = kwargs[ATTR_BRIGHTNESS]

        if ATTR_COLOR_TEMP in kwargs:
            data["color_temperature"] = kwargs[ATTR_COLOR_TEMP]

        if ATTR_HS_COLOR in kwargs:
            # Prefer XY when supported; otherwise convert HA's hs ranges to
            # deCONZ's 0..65535 hue / 0..255 saturation.
            if COLOR_MODE_XY in self._attr_supported_color_modes:
                data["xy"] = color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
            else:
                data["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
                data["saturation"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)

        if ATTR_XY_COLOR in kwargs:
            data["xy"] = kwargs[ATTR_XY_COLOR]

        if ATTR_TRANSITION in kwargs:
            # HA transition is in seconds; deCONZ expects tenths of a second.
            data["transition_time"] = int(kwargs[ATTR_TRANSITION] * 10)
        elif "IKEA" in self._device.manufacturer:
            data["transition_time"] = 0

        if ATTR_FLASH in kwargs:
            # Flash (alert) replaces the plain "on" command.
            if kwargs[ATTR_FLASH] == FLASH_SHORT:
                data["alert"] = "select"
                del data["on"]
            elif kwargs[ATTR_FLASH] == FLASH_LONG:
                data["alert"] = "lselect"
                del data["on"]

        if ATTR_EFFECT in kwargs:
            if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
                data["effect"] = "colorloop"
            else:
                data["effect"] = "none"

        await self._device.set_state(**data)

    async def async_turn_off(self, **kwargs):
        """Turn off light."""
        if not self._device.state:
            return

        data = {"on": False}

        if ATTR_TRANSITION in kwargs:
            # Fade to zero brightness over the requested time.
            data["brightness"] = 0
            data["transition_time"] = int(kwargs[ATTR_TRANSITION] * 10)

        if ATTR_FLASH in kwargs:
            if kwargs[ATTR_FLASH] == FLASH_SHORT:
                data["alert"] = "select"
                del data["on"]
            elif kwargs[ATTR_FLASH] == FLASH_LONG:
                data["alert"] = "lselect"
                del data["on"]

        await self._device.set_state(**data)

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        return {DECONZ_GROUP: self._device.type == "LightGroup"}
class DeconzLight(DeconzBaseLight):
    """Representation of a deCONZ light."""

    @property
    def max_mireds(self):
        """Return the warmest color_temp that this light supports."""
        device_limit = self._device.max_color_temp
        if device_limit:
            return device_limit
        return super().max_mireds

    @property
    def min_mireds(self):
        """Return the coldest color_temp that this light supports."""
        device_limit = self._device.min_color_temp
        if device_limit:
            return device_limit
        return super().min_mireds
class DeconzGroup(DeconzBaseLight):
    """Representation of a deCONZ group."""

    def __init__(self, device, gateway):
        """Set up group and create an unique id."""
        # Groups have no device-level unique id, so derive one from the
        # bridge id and the group's deCONZ resource path.
        self._unique_id = f"{gateway.bridgeid}-{device.deconz_id}"
        super().__init__(device, gateway)

    @property
    def unique_id(self):
        """Return a unique identifier for this device."""
        return self._unique_id

    @property
    def device_info(self):
        """Return a device description for device registry."""
        return {
            "identifiers": {(DECONZ_DOMAIN, self.unique_id)},
            "manufacturer": "Dresden Elektronik",
            "model": "deCONZ group",
            "name": self._device.name,
            "via_device": (DECONZ_DOMAIN, self.gateway.api.config.bridge_id),
        }

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        attributes = dict(super().extra_state_attributes)
        # Whether every member light of the group is currently on.
        attributes["all_on"] = self._device.all_on
        return attributes
| 30.030201 | 84 | 0.627221 |
c81d0677508d66cb17944d34a5bdf7cc21ac8550 | 2,100 | py | Python | container-engine/bookshelf/model_datastore.py | stugit/cp100 | 837260c8572bba49a340f0736b06ae268d6807d8 | [
"Apache-2.0"
] | 65 | 2016-07-20T13:58:28.000Z | 2022-02-05T01:34:50.000Z | container-engine/bookshelf/model_datastore.py | stugit/cp100 | 837260c8572bba49a340f0736b06ae268d6807d8 | [
"Apache-2.0"
] | 1 | 2017-12-13T03:26:34.000Z | 2017-12-13T21:02:36.000Z | container-engine/bookshelf/model_datastore.py | stugit/cp100 | 837260c8572bba49a340f0736b06ae268d6807d8 | [
"Apache-2.0"
] | 101 | 2016-05-20T16:04:12.000Z | 2022-01-31T01:01:08.000Z | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app
from gcloud import datastore
# Keep a reference to the builtin list type because ``list`` is shadowed
# below by the module-level query function of the same name.
builtin_list = list
def init_app(app):
    """No per-app initialization is required for the Datastore backend."""
    return None
def get_client():
    """Create a Datastore client bound to the Flask app's configured project."""
    project_id = current_app.config['PROJECT_ID']
    return datastore.Client(project_id)
def from_datastore(entity):
    """Translates Datastore results into the format expected by the
    application.

    Datastore typically returns:
        [Entity{key: (kind, id), prop: val, ...}]

    This returns:
        {id: id, prop: val, ...}

    Returns None for empty results.
    """
    if not entity:
        return None
    record = entity.pop() if isinstance(entity, builtin_list) else entity
    record['id'] = record.key.id
    return record
def list(limit=10, cursor=None):
    """Return up to *limit* Book entities ordered by title.

    NOTE: intentionally shadows the builtin ``list`` (hence the
    ``builtin_list`` alias at module scope).

    Returns (entities, next_cursor); next_cursor is None when the page was
    not full, signalling the end of the result set.
    """
    ds = get_client()
    query = ds.query(kind='Book', order=['title'])
    it = query.fetch(limit=limit, start_cursor=cursor)
    entities, more_results, cursor = it.next_page()
    entities = builtin_list(map(from_datastore, entities))
    return entities, cursor.decode('utf-8') if len(entities) == limit else None
def read(id):
    """Fetch a single Book entity by its numeric id, in application format."""
    ds = get_client()
    entity = ds.get(ds.key('Book', int(id)))
    return from_datastore(entity)
def update(data, id=None):
    """Create or update a Book entity.

    When *id* is given the existing entity is overwritten; otherwise
    Datastore allocates a new key on put. Returns the stored entity in the
    application's dict format.
    """
    ds = get_client()
    if id:
        key = ds.key('Book', int(id))
    else:
        key = ds.key('Book')

    # 'description' is excluded from indexes (long text exceeds Datastore's
    # indexed-property size limit).
    entity = datastore.Entity(
        key=key,
        exclude_from_indexes=['description'])
    entity.update(data)
    ds.put(entity)
    return from_datastore(entity)


# Creating is the same operation as updating without an id.
create = update
def delete(id):
    """Remove the Book entity with the given numeric id."""
    ds = get_client()
    ds.delete(ds.key('Book', int(id)))
| 23.863636 | 79 | 0.667619 |
322cb767db3137fc1c757711f5b1543943d9a906 | 10,044 | py | Python | test/functional/p2p-acceptblock.py | DanielLuci/testcoin | c749274fe783e530ba9a5588c7f06e23a27cd7a7 | [
"MIT"
] | 59 | 2017-06-28T22:05:33.000Z | 2022-03-30T20:42:17.000Z | test/functional/p2p-acceptblock.py | DanielLuci/testcoin | c749274fe783e530ba9a5588c7f06e23a27cd7a7 | [
"MIT"
] | 18 | 2017-05-29T06:34:44.000Z | 2018-01-23T00:42:07.000Z | test/functional/p2p-acceptblock.py | DanielLuci/testcoin | c749274fe783e530ba9a5588c7f06e23a27cd7a7 | [
"MIT"
] | 60 | 2017-12-29T06:25:11.000Z | 2022-02-17T16:37:29.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
class AcceptBlockTest(BitcoinTestFramework):
    """Functional test for processing of unrequested blocks.

    node0 is not whitelisted; node1 whitelists localhost. Each node runs on
    its own chain and receives blocks over a direct p2p connection.
    """

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-whitelist=127.0.0.1"]]

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.setup_nodes()

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = NodeConnCB()   # connects to node0 (not whitelisted)
        white_node = NodeConnCB()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        self.log.info("First height 2 block accepted by both nodes")

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # node0 keeps only the header of the unrequested same-work block;
        # whitelisted node1 validates it fully.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        self.log.info("Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info("Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info("Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0.  Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        headers_message.headers.pop() # Ensure the last block is unrequested
        white_node.send_message(headers_message) # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
        # 2 (main) + 288 new blocks on the fork chain = height 290.
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

        [ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
    # Run the test directly when invoked as a script.
    AcceptBlockTest().main()
| 44.64 | 107 | 0.667164 |
7556de256bbca57e64a7b2c8dbdca009598f50a9 | 2,137 | py | Python | tests/tools/test_histogram2d.py | dgorelik/differential-privacy-library | 5a7a267c591320036615a52dfad1918dc3718e62 | [
"MIT"
] | 1 | 2020-05-03T06:06:44.000Z | 2020-05-03T06:06:44.000Z | tests/tools/test_histogram2d.py | dohmatob/differential-privacy-library | 1a17bf0e3bf7d18d5c19258abbf81c27fd9a5e16 | [
"MIT"
] | null | null | null | tests/tools/test_histogram2d.py | dohmatob/differential-privacy-library | 1a17bf0e3bf7d18d5c19258abbf81c27fd9a5e16 | [
"MIT"
] | 1 | 2022-02-23T13:56:19.000Z | 2022-02-23T13:56:19.000Z | import numpy as np
from unittest import TestCase
from diffprivlib.tools.histograms import histogram2d
from diffprivlib.utils import global_seed, PrivacyLeakWarning
class TestHistogram2d(TestCase):
    """Tests for the differentially private 2-D histogram."""

    def test_no_params(self):
        # No epsilon or range supplied: should warn about the privacy leak
        # but still return a result.
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([5, 7, 1, 5, 9])
        with self.assertWarns(PrivacyLeakWarning):
            res = histogram2d(x, y)
        self.assertIsNotNone(res)

    def test_no_range(self):
        # Epsilon given but no range: range is inferred from the data,
        # which leaks privacy and must warn.
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([5, 7, 1, 5, 9])
        with self.assertWarns(PrivacyLeakWarning):
            res = histogram2d(x, y, epsilon=1)
        self.assertIsNotNone(res)

    def test_missing_range(self):
        # A partially specified range (one axis None) also warns.
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([5, 7, 1, 5, 9])
        with self.assertWarns(PrivacyLeakWarning):
            res = histogram2d(x, y, epsilon=1, range=[(0, 10), None])
        self.assertIsNotNone(res)

    def test_same_edges(self):
        # Bin edges must match numpy's exactly; only counts are noised.
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([5, 7, 1, 5, 9])
        _, edges_x, edges_y = np.histogram2d(x, y, bins=3, range=[(0, 10), (0, 10)])
        _, dp_edges_x, dp_edges_y = histogram2d(x, y, epsilon=1, bins=3, range=[(0, 10), (0, 10)])

        self.assertTrue((edges_x == dp_edges_x).all())
        self.assertTrue((edges_y == dp_edges_y).all())

    def test_different_result(self):
        # With a fixed seed and small epsilon the noised counts should
        # differ from the exact numpy histogram.
        global_seed(3141592653)
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([5, 7, 1, 5, 9])
        hist, _, _ = np.histogram2d(x, y, bins=3, range=[(0, 10), (0, 10)])
        dp_hist, _, _ = histogram2d(x, y, epsilon=0.1, bins=3, range=[(0, 10), (0, 10)])

        # print("Non-private histogram: %s" % hist)
        # print("Private histogram: %s" % dp_hist)
        self.assertTrue((hist != dp_hist).any())

    def test_density(self):
        # density=True: the histogram integrates to 1 over the bin area
        # (bin width is 10/3 on each axis, hence the (3/10)**2 factor).
        global_seed(3141592653)
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([5, 7, 1, 5, 9])
        dp_hist, _, _ = histogram2d(x, y, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)

        # print(dp_hist.sum())
        self.assertAlmostEqual(dp_hist.sum(), 1.0 * (3 / 10) ** 2)
70c6ee8f6aea0e9e4c6d656c4b0bb2a23cf14adf | 1,011 | py | Python | virtual/lib/python3.6/site-packages/django_celery_beat/migrations/0006_periodictask_priority.py | lorrainekamanda/NewsRestApi | 3be82ca066f26f7fde4e7a265c2e46dd4e86bd85 | [
"MIT"
] | null | null | null | virtual/lib/python3.6/site-packages/django_celery_beat/migrations/0006_periodictask_priority.py | lorrainekamanda/NewsRestApi | 3be82ca066f26f7fde4e7a265c2e46dd4e86bd85 | [
"MIT"
] | 2 | 2021-05-12T06:25:57.000Z | 2022-03-01T04:16:03.000Z | env/lib/python3.9/site-packages/django_celery_beat/migrations/0006_periodictask_priority.py | simotwo/AbileneParadox-ddd | c85961efb37aba43c0d99ed1c36d083507e2b2d3 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-10-22 05:20
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the nullable 'priority' field (max 255) to PeriodicTask."""

    dependencies = [
        # depends on higher numbers due to a squashed migration
        # that was later removed due to migration issues it caused
        ('django_celery_beat', '0005_add_solarschedule_events_choices'),
        ('django_celery_beat', '0006_auto_20180210_1226'),
        ('django_celery_beat', '0006_auto_20180322_0932'),
        ('django_celery_beat', '0007_auto_20180521_0826'),
        ('django_celery_beat', '0008_auto_20180914_1922'),
    ]

    operations = [
        migrations.AddField(
            model_name='periodictask',
            name='priority',
            # Nullable on purpose: existing rows keep no priority (default=None)
            field=models.PositiveIntegerField(
                blank=True,
                default=None,
                null=True,
                validators=[django.core.validators.MaxValueValidator(255)],
                verbose_name='priority'),
        ),
    ]
| 34.862069 | 75 | 0.634026 |
15e63eec749b681321191232cde7ff71afe0dc70 | 1,209 | py | Python | base/site-packages/beecloud-python/sdk/setup.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/beecloud-python/sdk/setup.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/site-packages/beecloud-python/sdk/setup.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # -*- coding: utf-8 -*-
"""
setup script.
:created by xuanzhui on 2015/12/24.
:copyright (c) 2015 BeeCloud.
:license: MIT, see LICENSE for more details.
"""
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is not installed
    from distutils.core import setup

# Distribution metadata for the BeeCloud payment SDK
setup(
    name='beecloud',
    version='3.6.0',
    packages=['beecloud'],
    url='https://beecloud.cn/',
    license='MIT License',
    author='xuanzhui',
    author_email='david@beecloud.cn',
    description='beecloud, make payment simpler',
    install_requires=['requests'],
    zip_safe=False,
    platforms='2.7, 3.4, 3.5, 3.6',
    keywords=('beecloud', 'pay'),
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)
| 30.225 | 70 | 0.603805 |
4448ca63035062e9d543b04631dc8b4799599d80 | 11,349 | py | Python | parcels/kernels/EOSseawaterproperties.py | angus-g/parcels | 237d85b399228baa331dc7958247198965372def | [
"MIT"
] | null | null | null | parcels/kernels/EOSseawaterproperties.py | angus-g/parcels | 237d85b399228baa331dc7958247198965372def | [
"MIT"
] | null | null | null | parcels/kernels/EOSseawaterproperties.py | angus-g/parcels | 237d85b399228baa331dc7958247198965372def | [
"MIT"
] | 1 | 2019-10-21T15:08:43.000Z | 2019-10-21T15:08:43.000Z | """Collection of pre-built eos sea water property kernels"""
import math
__all__ = ['PressureFromLatDepth', 'AdiabticTemperatureGradient', 'PtempFromTemp', 'TempFromPtemp', 'UNESCODensity']
def PressureFromLatDepth(particle, fieldset, time):
    """
    Calculates pressure in dbars from depth in meters and latitude.

    The result is written to ``particle.pressure``.

    Returns
    -------
    p : array_like
        pressure [db]

    References
    ----------
    .. [1] Saunders, Peter M., 1981: Practical Conversion of Pressure to Depth.
       J. Phys. Oceanogr., 11, 573-574.
       doi: 10.1175/1520-0485(1981)011<0573:PCOPTD>2.0.CO;2
    """
    # Angle conversions.
    deg2rad = math.pi / 180.0
    # max(a, -a) == abs(a): sine of the absolute latitude in radians
    X = math.sin(max(particle.lat * deg2rad, -1*particle.lat * deg2rad))
    C1 = 5.92e-3 + math.pow(X, 2) * 5.25e-3
    # Saunders (1981) depth-to-pressure relation solved for pressure
    particle.pressure = ((1 - C1) - math.pow(((math.pow((1 - C1), 2)) - (8.84e-6 * particle.depth)), 0.5)) / 4.42e-6
def AdiabticTemperatureGradient(particle, fieldset, time):
    """
    Calculates adiabatic temperature gradient as per UNESCO 1983 routines.

    NOTE: the misspelled name ('Adiabtic') is kept as-is because it is part of
    the public API via ``__all__``. Reads ``particle.S``, ``particle.T`` and
    ``particle.pressure``; writes the result to ``particle.adtg``.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]

    Returns
    -------
    adtg : array_like
        adiabatic temperature gradient [℃ db :sup:`-1`]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
       temperature gradient and potential temperature of sea water. Deep-Sea
       Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
    """
    s, t, pres = particle.S, particle.T, particle.pressure
    # Convert ITS-90 temperature to the IPTS-68 scale used by the polynomial
    T68 = t * 1.00024

    # UNESCO 1983 polynomial coefficients (temperature, salinity and pressure terms)
    a = [3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10]
    b = [1.8932e-6, -4.2393e-8]
    c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14]
    d = [-1.1351e-10, 2.7759e-12]
    e = [-4.6206e-13, 1.8676e-14, -2.1687e-16]
    # Horner-style evaluation in T68, salinity anomaly (s - 35) and pressure
    particle.adtg = (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68
                     + (b[0] + b[1] * T68) * (s - 35)
                     + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68)
                     + (d[0] + d[1] * T68) * (s - 35)) * pres
                     + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres)
def PtempFromTemp(particle, fieldset, time):
    """
    Calculates potential temperature as per UNESCO 1983 report.

    Samples salinity and in-situ temperature from the fieldset at the particle
    position and writes the potential temperature (referenced to
    ``fieldset.refpressure``) to ``particle.potemp``.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].
    pr : array_like
        reference pressure [db], default = 0

    Returns
    -------
    pt : array_like
        potential temperature relative to PR [℃ (ITS-90)]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
       temperature gradient and potential temperature of sea water. Deep-Sea
       Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
    """
    s = fieldset.psu_salinity[time, particle.depth, particle.lat, particle.lon]
    t = fieldset.temperature[time, particle.depth, particle.lat, particle.lon]
    pres, pr = particle.pressure, fieldset.refpressure

    # First calculate the adiabatic temperature gradient adtg
    # Convert ITS-90 temperature to IPTS-68
    T68 = t * 1.00024

    a = [3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10]
    b = [1.8932e-6, -4.2393e-8]
    c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14]
    d = [-1.1351e-10, 2.7759e-12]
    e = [-4.6206e-13, 1.8676e-14, -2.1687e-16]
    adtg = (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68
            + (b[0] + b[1] * T68) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68)
            + (d[0] + d[1] * T68) * (s - 35)) * pres
            + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres)

    # Four-stage (Runge-Kutta style) integration of the adiabatic lapse rate
    # from the in-situ pressure to the reference pressure (Fofonoff & Millard).
    # Theta1.
    del_P = pr - pres
    del_th = del_P * adtg
    th = T68 + 0.5 * del_th
    q = del_th
    pprime = pres + 0.5 * del_P

    adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th
            + (b[0] + b[1] * th) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th)
            + (d[0] + d[1] * th) * (s - 35)) * pprime
            + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime)

    # Theta2.
    del_th = del_P * adtg
    th = th + (1 - 1 / 2 ** 0.5) * (del_th - q)
    q = (2 - 2 ** 0.5) * del_th + (-2 + 3 / 2 ** 0.5) * q

    # Theta3.
    adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th
            + (b[0] + b[1] * th) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th)
            + (d[0] + d[1] * th) * (s - 35)) * pprime
            + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime)

    del_th = del_P * adtg
    th = th + (1 + 1 / 2 ** 0.5) * (del_th - q)
    q = (2 + 2 ** 0.5) * del_th + (-2 - 3 / 2 ** 0.5) * q

    # Theta4.
    pprime = pres + del_P
    adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th
            + (b[0] + b[1] * th) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th)
            + (d[0] + d[1] * th) * (s - 35)) * pprime
            + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime)

    del_th = del_P * adtg
    # Final combination; divide by 1.00024 to convert IPTS-68 back to ITS-90
    particle.potemp = (th + (del_th - 2 * q) / 6) / 1.00024
def TempFromPtemp(particle, fieldset, time):
    """
    Calculates temperature from potential temperature at the reference
    pressure PR and in situ pressure P.

    Samples salinity and potential temperature from the fieldset at the
    particle position and writes the in-situ temperature to ``particle.temp``.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    pt(p) : array_like
        potential temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].
    pr : array_like
        reference pressure [db]

    Returns
    -------
    temp : array_like
        temperature [℃ (ITS-90)]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
       temperature gradient and potential temperature of sea water. Deep-Sea
       Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
    """
    s = fieldset.psu_salinity[time, particle.depth, particle.lat, particle.lon]
    t = fieldset.potemperature[time, particle.depth, particle.lat, particle.lon]
    # pres/pr are deliberately swapped relative to PtempFromTemp: the inverse
    # conversion integrates from the reference pressure back to the in-situ one.
    pres, pr = fieldset.refpressure, particle.pressure  # The order should be switched here

    # Convert ITS-90 temperature to IPTS-68
    T68 = t * 1.00024

    a = [3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10]
    b = [1.8932e-6, -4.2393e-8]
    c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14]
    d = [-1.1351e-10, 2.7759e-12]
    e = [-4.6206e-13, 1.8676e-14, -2.1687e-16]
    adtg = (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68
            + (b[0] + b[1] * T68) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68)
            + (d[0] + d[1] * T68) * (s - 35)) * pres
            + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres)

    # Four-stage (Runge-Kutta style) integration of the adiabatic lapse rate,
    # mirroring PtempFromTemp but with pressures swapped.
    # Theta1.
    del_P = pr - pres
    del_th = del_P * adtg
    th = T68 + 0.5 * del_th
    q = del_th
    pprime = pres + 0.5 * del_P

    adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th
            + (b[0] + b[1] * th) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th)
            + (d[0] + d[1] * th) * (s - 35)) * pprime
            + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime)

    # Theta2.
    del_th = del_P * adtg
    th = th + (1 - 1 / 2 ** 0.5) * (del_th - q)
    q = (2 - 2 ** 0.5) * del_th + (-2 + 3 / 2 ** 0.5) * q

    # Theta3.
    adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th
            + (b[0] + b[1] * th) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th)
            + (d[0] + d[1] * th) * (s - 35)) * pprime
            + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime)

    del_th = del_P * adtg
    th = th + (1 + 1 / 2 ** 0.5) * (del_th - q)
    q = (2 + 2 ** 0.5) * del_th + (-2 - 3 / 2 ** 0.5) * q

    # Theta4.
    pprime = pres + del_P
    adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th
            + (b[0] + b[1] * th) * (s - 35)
            + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th)
            + (d[0] + d[1] * th) * (s - 35)) * pprime
            + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime)

    del_th = del_P * adtg
    # Final combination; divide by 1.00024 to convert IPTS-68 back to ITS-90
    particle.temp = (th + (del_th - 2 * q) / 6) / 1.00024
def UNESCODensity(particle, fieldset, time):
    # This is a kernel which calculates the UNESCO (EOS-80) density
    # (https://link.springer.com/content/pdf/bbm%3A978-3-319-18908-6%2F1.pdf),
    # from pressure, temperature and salinity sampled at the particle position.
    # density in [kg/m3] if temperature in degrees C, salinity in PSU,
    # pressure in bar. The result is written to particle.density.

    # Density of Standard Mean Ocean Water (pure water) polynomial coefficients
    a0 = 999.842594
    a1 = 0.06793953
    a2 = -0.009095290
    a3 = 0.0001001685
    a4 = -0.000001120083
    a5 = 0.000000006536332

    S = fieldset.psu_salinity[time, particle.depth, particle.lat, particle.lon]  # salinity
    T = fieldset.cons_temperature[time, particle.depth, particle.lat, particle.lon]  # temperature
    P = fieldset.cons_pressure[time, particle.depth, particle.lat, particle.lon]  # pressure

    rsmow = a0 + a1*T + a2*math.pow(T, 2) + a3*math.pow(T, 3) + \
        a4*math.pow(T, 4) + a5*math.pow(T, 5)

    # Salinity correction terms for density at atmospheric pressure
    b0 = 0.82449
    b1 = -0.0040899
    b2 = 0.000076438
    b3 = -0.00000082467
    b_four = 0.0000000053875
    c0 = -0.0057246
    c1 = 0.00010227
    c2 = -0.0000016546
    d0 = 0.00048314

    B1 = b0 + b1*T + b2*math.pow(T, 2) + b3*math.pow(T, 3) + b_four*math.pow(T, 4)
    C1 = c0 + c1*T + c2*math.pow(T, 2)
    # Density at surface pressure, rho(S, T, 0)
    rho_st0 = rsmow + B1*S + C1*math.pow(S, 1.5) + d0*math.pow(S, 2)

    # Secant bulk modulus at surface pressure, K(S, T, 0)
    e0 = 19652.21
    e1 = 148.4206
    e2 = -2.327105
    e3 = 0.01360477
    e4 = -0.00005155288
    f0 = 54.6746
    f1 = -0.603459
    f2 = 0.01099870
    f3 = -0.00006167
    g0 = 0.07944
    g1 = 0.016483
    g2 = -0.00053009

    Kw = e0 + e1*T + e2*math.pow(T, 2) + e3*math.pow(T, 3) + e4*math.pow(T, 4)
    F1 = f0 + f1*T + f2*math.pow(T, 2) + f3*math.pow(T, 3)
    G1 = g0 + g1*T + g2*math.pow(T, 2)
    K_ST0 = Kw + F1*S + G1*math.pow(S, 1.5)

    # Pressure dependence: K(S, T, P) = K(S, T, 0) + A*P + B*P^2
    h0 = 3.2399
    h1 = 0.00143713
    h2 = 0.000116092
    h3 = -0.000000577905
    i0 = 0.0022838
    i1 = -0.000010981
    i2 = -0.0000016078
    j0 = 0.000191075
    k0 = 0.0000850935
    k1 = -0.00000612293
    k2 = 0.000000052787
    m0 = -0.00000099348
    m1 = 0.000000020816
    m2 = 0.00000000091697

    Aw = h0 + h1*T + h2*math.pow(T, 2) + h3*math.pow(T, 3)
    A1 = Aw + (i0 + i1*T + i2*math.pow(T, 2))*S + j0*math.pow(S, 1.5)
    Bw = k0 + k1*T + k2*math.pow(T, 2)
    B2 = Bw + (m0 + m1*T + m2*math.pow(T, 2))*S

    # Bug fix: the quadratic term of the secant bulk modulus is B*P^2
    # (it was previously computed with T^2 by mistake).
    K_STp = K_ST0 + A1*P + B2*math.pow(P, 2)

    particle.density = rho_st0/(1-(P/K_STp))
| 32.518625 | 116 | 0.512116 |
368211f89c27502be4cd8bf4594f4a0162f2a92b | 2,512 | py | Python | apps/account/tests/test_urls.py | convers39/IR-ecommerce | 57665aace80c14b52a40a9576de8b433a13f8f3f | [
"MIT"
] | 2 | 2021-10-12T11:04:57.000Z | 2022-01-04T08:17:11.000Z | apps/account/tests/test_urls.py | convers39/IR-ecommerce | 57665aace80c14b52a40a9576de8b433a13f8f3f | [
"MIT"
] | null | null | null | apps/account/tests/test_urls.py | convers39/IR-ecommerce | 57665aace80c14b52a40a9576de8b433a13f8f3f | [
"MIT"
] | 1 | 2021-04-30T16:43:29.000Z | 2021-04-30T16:43:29.000Z | from django.test import SimpleTestCase
from django.urls import reverse, resolve
from account.views import (LoginView, LogoutView, RegisterView, ActivateView, PasswordResetView,
AccountCenterView, OrderListView, AddressView, WishlistView)
class TestAccountUrls(SimpleTestCase):
    """
    Verify that every named route in the 'account' URL namespace maps to the
    expected path and resolves back to the expected class-based view.
    """

    def _assert_route(self, route_name, expected_path, view_class, kwargs=None):
        """Reverse a named route, then check its path and its resolved view."""
        url = reverse(route_name, kwargs=kwargs)
        self.assertEqual(url, expected_path)
        # Class-based views are wrapped by as_view(), so compare wrapper names.
        self.assertEqual(resolve(url).func.__name__,
                         view_class.as_view().__name__)

    def test_login_url(self):
        self._assert_route('account:login', '/account/login/', LoginView)

    def test_logout_url(self):
        self._assert_route('account:logout', '/account/logout/', LogoutView)

    def test_register_url(self):
        self._assert_route('account:register', '/account/register/', RegisterView)

    def test_activate_url(self):
        self._assert_route('account:activate', '/account/activate/eanovu83/',
                           ActivateView, kwargs={'token': 'eanovu83'})

    def test_account_center_url(self):
        self._assert_route('account:center', '/account/', AccountCenterView)

    def test_account_address_url(self):
        self._assert_route('account:address', '/account/address/', AddressView)

    def test_account_order_url(self):
        self._assert_route('account:order', '/account/order/', OrderListView)

    def test_account_wishlist_url(self):
        self._assert_route('account:wishlist', '/account/wishlist/', WishlistView)

    def test_password_reset_url(self):
        self._assert_route('account:password-reset', '/account/passwordreset/',
                           PasswordResetView)
| 39.873016 | 96 | 0.633758 |
416caa3e33d67759735d6ba752956d95348a72da | 5,026 | py | Python | analyser/analysis/pen_click_mean_deviations.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | 1 | 2022-03-30T09:45:25.000Z | 2022-03-30T09:45:25.000Z | analyser/analysis/pen_click_mean_deviations.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | null | null | null | analyser/analysis/pen_click_mean_deviations.py | michigg/web-simultaneous-recording-tool | 67db83f6e34d9cb726c69b4e448fed3604a43618 | [
"MIT"
] | null | null | null | """
Generates Plot of dba values per device per distance
"""
import sys
import pandas as pd
from utils import audio_calcs
from utils.data_loader import Loader
import logging
from utils.output import Output
INPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas.pkl'
OUTPUT_DIR = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Graphs/ClickMean/BoxPlots'
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s]:\n %(message)s",
handlers=[
logging.FileHandler(f"{OUTPUT_DIR}/analyse.log", mode='w'),
logging.StreamHandler(sys.stdout)
]
)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
def clean_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
    """Drop the index levels that are irrelevant for the aggregated plots."""
    logger.info('clean_dataframe: start')
    cleaned = dataframe
    for obsolete_level in ('TestID', 'WindowingFunction', 'TestIteration', 'DistanceKey'):
        cleaned = cleaned.droplevel(obsolete_level)
    logger.info('clean_dataframe: done')
    logger.info(cleaned)
    return cleaned
# Level 2 Aggregation
def plot_frog_size_by_distance(frog_size: str, clicks: int, dataframe: pd.DataFrame):
    """Box-plot the deviations per distance for one frog size / click count."""
    logger.info(f'plot_frog_size: frog_size: {frog_size}')
    logger.info(f'plot_frog_size: clicks: {clicks}')
    prepared = (
        dataframe
        .query('FrogSize==@frog_size and Clicks==@clicks')
        .droplevel('FrogSize')
        .unstack('DistanceKey')
        .droplevel(0, axis=1)
    )
    logger.info('Prepared Dataframe')
    logger.info(prepared)
    Output.box_plot(
        f'Deviations Between Frogs By Distance\nFrog Size {frog_size} Clicks {clicks}',
        prepared,
        file_path=f'{OUTPUT_DIR}',
        file_name=f'distance-frog_size_{frog_size}-clicks_{clicks}'
    )
def plot_frog_size_by_positions(frog_size: str, clicks: int, dataframe: pd.DataFrame):
    """Box-plot the deviations per frog position for one frog size / click count."""
    logger.info(f'plot_frog_size: frog_size: {frog_size}')
    logger.info(f'plot_frog_size: clicks: {clicks}')
    prepared = (
        dataframe
        .query('FrogSize==@frog_size and Clicks==@clicks')
        .droplevel('FrogSize')
        .unstack('FrogPosition')
        .droplevel(0, axis=1)
    )
    logger.info('Prepared Dataframe')
    logger.info(prepared)
    Output.box_plot(
        f'Deviations Between Frogs By Position\nFrog Size {frog_size} Clicks {clicks}',
        prepared,
        file_path=f'{OUTPUT_DIR}',
        file_name=f'position-frog_size_{frog_size}-clicks_{clicks}'
    )
def plot_positions_at_click_count(clicks: int, dataframe: pd.DataFrame):
    """Box-plot the deviations per frog position for one click count (all sizes)."""
    logger.info(f'plot_frog_size: clicks: {clicks}')
    prepared = (
        dataframe
        .query('Clicks==@clicks')
        .unstack('FrogPosition')
        .droplevel(0, axis=1)
    )
    logger.info('Prepared Dataframe')
    logger.info(prepared)
    Output.box_plot(
        f'Deviations Between Frogs By Position\nClicks {clicks}',
        prepared,
        file_path=f'{OUTPUT_DIR}',
        file_name=f'position-clicks_{clicks}'
    )
# Level 1 Aggregation
def plot_clicks(dataframe: pd.DataFrame):
    """Box-plot the pen deviations grouped by click count."""
    logger.info(f'plot_clicks')
    prepared = dataframe.unstack('Clicks').droplevel(0, axis=1)
    logger.info('plot_clicks: prepared dataframe')
    logger.info(prepared)
    Output.box_plot(
        f'Deviations In dB(A) Between Pens By Click Count',
        prepared,
        file_path=f'{OUTPUT_DIR}/level1',
        file_name=f'clicks'
    )
def plot_distances(dataframe: pd.DataFrame):
    """Box-plot the pen deviations grouped by distance."""
    logger.info(f'plot_distances')
    prepared = dataframe.unstack('DistanceKey').droplevel(0, axis=1)
    logger.info('plot_distances: prepared dataframe')
    logger.info(prepared)
    Output.box_plot(
        f'Deviations In dB(A) Between Pens By Distance',
        prepared,
        file_path=f'{OUTPUT_DIR}/level1',
        file_name=f'distances'
    )
def main():
    """
    Load the aggregated dB(A) measurements, reduce them to the standard
    deviation between pens per click mean, and emit the level-1 box plots.
    """
    devices = Loader.load_analysis_from_pickle(INPUT_DEVICES)
    devices = audio_calcs.calc_dataframe_click_mean(devices)
    logger.info('Prepared Dataframe')
    logger.info(devices)
    # One column per pen so the deviation between pens can be computed row-wise
    devices = devices.unstack('PenId')
    logger.info('Prepared Dataframe')
    logger.info(devices)
    devices = devices.std(axis=1).to_frame()
    # Drop all index levels that are irrelevant for the plots
    for level in ('TestIteration', 'WindowingFunction', 'SampleRate',
                  'BufferSize', 'TestID', 'PenBrand'):
        devices = devices.droplevel(level)
    logger.info('Prepared Dataframe')
    logger.info(devices)
    # (removed unused distance_keys/clicks locals that were never referenced)
    plot_clicks(devices)
    plot_distances(devices)


if __name__ == '__main__':
    main()
| 33.284768 | 128 | 0.709511 |
5dfe005dd95855c95c38eede6a7f7ff461086a03 | 129,479 | py | Python | dev/Tools/build/waf-1.7.13/lmbrwaflib/cryengine_modules.py | pawandayma/lumberyard | e178f173f9c21369efd8c60adda3914e502f006a | [
"AML"
] | null | null | null | dev/Tools/build/waf-1.7.13/lmbrwaflib/cryengine_modules.py | pawandayma/lumberyard | e178f173f9c21369efd8c60adda3914e502f006a | [
"AML"
] | null | null | null | dev/Tools/build/waf-1.7.13/lmbrwaflib/cryengine_modules.py | pawandayma/lumberyard | e178f173f9c21369efd8c60adda3914e502f006a | [
"AML"
] | null | null | null | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# Original file Copyright Crytek GMBH or its affiliates, used under license.
#
from waflib.Configure import conf, ConfigurationContext
from waflib.Build import BuildContext
from waflib.TaskGen import feature, before_method, after_method
from waflib.Options import OptionsContext
from waflib import Utils, Logs, Errors, Task, Node
from os import stat
from cry_utils import append_kw_entry, append_to_unique_list, sanitize_kw_input_lists, clean_duplicates_in_list, prepend_kw_entry, get_configuration
from copy_tasks import should_overwrite_file,fast_copy2
from collections import defaultdict
from third_party import is_third_party_uselib_configured, get_third_party_platform_name, get_third_party_configuration_name
from gems import Gem
from settings_manager import LUMBERYARD_SETTINGS
from build_configurations import ALIAS_TO_PLATFORMS_MAP
from utils import is_value_true
import os, stat, errno, json, re, threading, inspect, copy
from lumberyard import add_platform_root
# Keyword-argument names accepted by every build target; these kw entries are
# normalized into lists by SanitizeInput/sanitize_kw_input_lists.
# NOTE: 'mirror_artifacts_to_include'/'mirror_artifacts_to_exclude' appear
# twice in this list (harmless, but redundant).
COMMON_INPUTS = [
    'additional_settings',
    'export_definitions',
    'meta_includes',
    'file_list',
    'use', # module dependency
    'defines',
    'export_defines',
    'includes',
    'export_includes',
    'cxxflags',
    'cflags',
    'lib', # shared
    'libpath', # shared
    'stlib', # static
    'stlibpath', # static
    'linkflags',
    'framework',
    'frameworkpath',
    'rpath',
    'features',
    'enable_rtti',
    'remove_release_define',
    'uselib',
    'mirror_artifacts_to_include',
    'mirror_artifacts_to_exclude',
    'output_folder',
    'source_artifacts_include',
    'source_artifacts_exclude',
    'mirror_artifacts_to_include',
    'mirror_artifacts_to_exclude',
    'copy_external',
    'copy_dependent_files',
    'copyright_org', # optional keyword to tag the file's originator company
    'dx12_only', # Option to build only if dx12 was detected on the host machine
    'additional_manifests'
]

# Superset of COMMON_INPUTS that SanitizeInput actually normalizes
SANITIZE_INPUTS = COMMON_INPUTS + ['output_file_name', 'files', 'winres_includes', 'winres_defines']
class BuildTargetType(object):
    """
    Supported WAF target types for build_ commands
    """
    SHARED_LIB = "shlib"          # dynamically linked library
    STATIC_LIB = "stlib"          # statically linked library
    APPLICATION = "program"       # executable program
    FILE_CONTAINER = "container"  # file-only container (has no entry in BUILD_PROCESS_TABLE)
# Table that maps the build pattern strings and waf features based on the type of build (BuildTargetType)
BUILD_PROCESS_TABLE = {
    BuildTargetType.SHARED_LIB: (['cshlib_PATTERN', 'cstlib_PATTERN'],
                                 'shlib'
                                 ),
    BuildTargetType.STATIC_LIB: (['cstlib_PATTERN'],
                                 'stlib'
                                 ),
    BuildTargetType.APPLICATION: (['cprogram_PATTERN'],
                                  'program'
                                  )
}

# kw keys used by build_func to tag which build_* function and target type produced a task generator
BUILD_FUNC_KW = '__build_func__'
BUILD_TYPE_KW = '__build_type__'
def build_func(f, build_type):
    """
    Custom decorator, similar to @conf, that binds a build function to the
    options/configure/build contexts. The bound wrapper tags the keyword
    arguments with the originating function name and the WAF build target
    type before delegating to the wrapped function.

    :param f:          build method to bind
    :type f:           function
    :param build_type: The WAF build type (see BuildTargetType)
    :type build_type:  string
    """
    def bound_build(*args, **kw):
        kw[BUILD_FUNC_KW] = f.__name__
        kw[BUILD_TYPE_KW] = build_type
        return f(*args, **kw)

    for context_class in (OptionsContext, ConfigurationContext, BuildContext):
        setattr(context_class, f.__name__, bound_build)
    return f
def build_shlib(f):
    """Decorator: register f as a shared-library (shlib) build function."""
    return build_func(f, BuildTargetType.SHARED_LIB)
def build_stlib(f):
    """Decorator: register f as a static-library (stlib) build function."""
    return build_func(f, BuildTargetType.STATIC_LIB)
def build_program(f):
    """Decorator: register f as an application (program) build function."""
    return build_func(f, BuildTargetType.APPLICATION)
def build_file_container(f):
    """Decorator: register f as a file-container build function."""
    return build_func(f, BuildTargetType.FILE_CONTAINER)
def AddGlobalKeywords(ctx, kw):
    """
    Hook for keyword values that would apply to every single compile, invoked
    before any expansion of those keywords. Intentionally a no-op right now:
    keep this list as small as possible for maximum flexibility, but if
    something would otherwise be added to every single WSCRIPT, adding (and
    later removing) it here in one place is far better.
    """
    pass
# Recognized C/C++ translation unit extensions (including upper-case variants)
CXX_EXT_LIST = ('.c', '.C', '.cc', '.CC', '.cpp', '.CPP')


@conf
def is_cxx_file(self, file_name):
    """Return True if file_name ends with a known C/C++ source extension."""
    return any(file_name.endswith(cxx_ext) for cxx_ext in CXX_EXT_LIST)
###############################################################################
# DX12 detection and configuration. DX12_INCLUDES and DX12_LIBPATH are set in compile_rules_win_x64_host
@conf
def has_dx12(conf):
    """
    Report whether DX12 support was detected: does_support_dx12() must yield a
    non-empty result containing at least one truthy entry.
    """
    detection_result = conf.does_support_dx12()
    return bool(detection_result) and any(detection_result)
#############################################################################
def SanitizeInput(ctx, kw):
    """
    Normalize every list-style keyword argument in kw (and, recursively, in
    each nested 'additional_settings' dict) so that each one is a list.
    """
    sanitize_kw_input_lists(SANITIZE_INPUTS, kw)
    for nested_settings in kw.get('additional_settings', []):
        SanitizeInput(ctx, nested_settings)
def RegisterVisualStudioFilter(ctx, kw):
    """
    Register the provided Visual Studio filter in a central lookup map
    (ctx.vs_project_filters), keyed by the task generator's target name.

    :param ctx: Context carrying the per-build filter map
    :param kw:  Task generator keyword dict; must contain 'vs_filter' and 'target'
    """
    if 'vs_filter' not in kw:
        # Abort the build when the mandatory parameter is absent
        # (error message typo 'generater' fixed)
        ctx.fatal('Mandatory "vs_filter" task generator parameter missing in %s/wscript' % ctx.path.abspath())

    if not hasattr(ctx, 'vs_project_filters'):
        ctx.vs_project_filters = {}

    ctx.vs_project_filters[kw['target']] = kw['vs_filter']
def AssignTaskGeneratorIdx(ctx, kw):
    """
    Assign a unique 'idx' to the task generator so two generators cannot
    concurrently produce the same output file. The index is cached per
    (wscript path, target) pair on the context.
    """
    if not hasattr(ctx, 'index_counter'):
        ctx.index_counter = 0
    if not hasattr(ctx, 'index_map'):
        ctx.index_map = {}

    # The wscript path plus the task generator target form a unique key
    key = ctx.path.abspath() + '___' + kw['target']
    cached_idx = ctx.index_map.get(key)
    if cached_idx is not None:
        kw['idx'] = cached_idx
    else:
        ctx.index_counter += 1
        kw['idx'] = ctx.index_map[key] = ctx.index_counter

    append_kw_entry(kw, 'features', 'parse_vcxproj')
def SetupRunTimeLibraries(ctx, kw, overwrite_settings = None):
    """
    Configure compiler/linker flags and defines for the C runtime (CRT),
    keeping non-Windows defines in sync with the Windows ones. The default is
    the dynamic CRT (/MD); callers may override globally via
    overwrite_settings, or per task generator with force_static_crt /
    force_dynamic_crt in kw.
    """
    # Precedence: per-task-generator force flags > per-type overwrite > global default
    runtime_crt = overwrite_settings if overwrite_settings else 'dynamic'
    if kw.get('force_static_crt', False):
        runtime_crt = 'static'
    if kw.get('force_dynamic_crt', False):
        runtime_crt = 'dynamic'

    if runtime_crt not in ('static', 'dynamic'):
        ctx.fatal('Invalid Settings: "%s" for runtime_crt' % runtime_crt)

    is_debug = 'debug' in get_configuration(ctx, kw['target'])
    is_msvc = ctx.env['CC_NAME'] == 'msvc'
    is_clang_windows = ctx.env['PLATFORM'] == 'win_x64_clang'

    crt_flag = []
    link_flag = []
    if runtime_crt == 'static':
        append_kw_entry(kw, 'defines', ['_MT'])
        if is_msvc:
            crt_flag = ['/MTd'] if is_debug else ['/MT']
        elif is_clang_windows:
            link_flag = ['/DEFAULTLIB:libcmtd'] if is_debug else ['/DEFAULTLIB:libcmt']
    else:
        # dynamic CRT
        append_kw_entry(kw, 'defines', ['_MT', '_DLL'])
        if is_msvc:
            crt_flag = ['/MDd'] if is_debug else ['/MD']
        elif is_clang_windows:
            link_flag = ['/DEFAULTLIB:msvcrtd'] if is_debug else ['/DEFAULTLIB:msvcrt']

    append_kw_entry(kw, 'cflags', crt_flag)
    append_kw_entry(kw, 'cxxflags', crt_flag)
    append_kw_entry(kw, 'linkflags', link_flag)
def TrackFileListChanges(ctx, kw):
    """
    Collect every *.waf_files reference from kw (including nested
    'additional_settings' dicts) into kw['waf_file_entries'] so the file lists
    are tracked regardless of the current target platform.
    """
    def _as_list(value):
        """Wrap a scalar in a list; pass lists through unchanged."""
        return value if isinstance(value, list) else [value]

    tracked_files = []
    kw['waf_file_entries'] = []

    for key, value in kw.items():
        # Any kw entry whose name contains 'file_list' contributes files
        if 'file_list' in key:
            tracked_files += _as_list(value)
        # Nested settings containers may also carry file lists
        if 'additional_settings' in key:
            for settings_container in kw['additional_settings']:
                for inner_key, inner_value in settings_container.items():
                    if 'file_list' in inner_key:
                        tracked_files += _as_list(inner_value)

    # De-duplicate, then store the results as nodes on the global list
    for tracked_file in list(set(tracked_files)):
        append_kw_entry(kw, 'waf_file_entries', [ctx.path.make_node(tracked_file)])
def LoadFileLists(ctx, kw, file_lists):
    """
    Util function to extract a list of needed source files, based on uber files and current command
    It expects that kw['file_list'] points to a valid file, containing a JSON file with the following mapping:
    Dict[ <UberFile> -> Dict[ <Project Filter> -> List[Files] ] ]

    :param ctx:        Task generator context
    :param kw:         Keyword dictionary; populated in place with 'source', 'mac_plist',
                       'task_generator_files', 'file_list_content', 'project_filter',
                       'uber_file_lookup', 'file_list_to_source' and 'header_files'
    :param file_lists: List of *.waf_files paths to load and merge
    """
    def _MergeFileList(in_0, in_1):
        """ Merge two file lists (uber_file -> filter -> files) into a new dict; inputs are not mutated. """
        result = dict(in_0)

        for (uber_file,project_filter) in in_1.items():
            for (filter_name,file_list) in project_filter.items():
                for file in file_list:
                    if not uber_file in result:
                        result[uber_file] = {}
                    if not filter_name in result[uber_file]:
                        result[uber_file][filter_name] = []
                    result[uber_file][filter_name].append(file)
        return result

    def _DisableUberFile(ctx, project_filter_list, files_marked_for_exclusion):
        # An uber file is disabled as soon as ANY of its member files is on the exclusion list
        for (filter_name, file_list) in project_filter_list.items():
            if any(ctx.path.make_node(file).abspath().lower() in files_marked_for_exclusion for file in file_list): # if file in exclusion list
                return True
        return False

    task_generator_files = set() # set of all files in this task generator (store as abspath to be case insenstive)
    file_to_project_filter = {}
    uber_file_to_file_list = {}
    file_list_to_source = {}
    file_list_content = {}

    # Per-category buckets the files get sorted into below
    source_files = set()
    no_uber_file_files = set()
    header_files = set()
    objc_source_files = set()
    qt_source_files = set()
    resource_files = set()
    plist_files = set()
    uber_files = set()
    other_files = set()

    target = kw['target']
    found_pch = False
    pch_file = kw.get('pch', '')
    platform = ctx.env['PLATFORM']
    uber_file_folder = ctx.get_bintemp_folder_node().make_node('uber_files/{}'.format(target))

    pch_node = ctx.path.make_node(pch_file)

    # Keep track of files per waf_file spec and be ready to identify duplicate ones per file
    file_list_to_file_collection = dict()
    file_list_to_duplicate_file_collection = dict()
    has_duplicate_files = False

    # Apply project override
    disable_uber_files_for_project = ctx.get_project_overrides(target).get('exclude_from_uber_file', False)

    files_marked_for_uber_file_exclusion = []
    if not disable_uber_files_for_project:
        for key, value in ctx.get_file_overrides(target).iteritems():
            if value.get('exclude_from_uber_file', False):
                files_marked_for_uber_file_exclusion.append(key)

    # Load file lists and build all needed lookup lists
    for file_list_file in file_lists:
        # Prevent processing the same file twice
        if file_list_file in file_list_to_file_collection:
            continue
        # Prepare to collect the files per waf_file spec
        file_list_to_file_collection[file_list_file] = set()
        file_list_to_duplicate_file_collection[file_list_file] = set()

        # Read *.waf_file from disc
        file_list = ctx.read_file_list(file_list_file)

        # Make the file list relative to the .waf_files file
        file_list_relative_dir = os.path.dirname(file_list_file)
        if file_list_relative_dir != '':
            for (uber_file, project_filter_list) in file_list.items():
                for (filter_name, file_entries) in project_filter_list.items():
                    relative_file_entries = []
                    for file in file_entries:
                        if file.startswith('@ENGINE@'):
                            # @ENGINE@ prefix: resolve against the engine root instead of the module path
                            relative_file_entries.append(ctx.engine_node.make_node(file[len('@ENGINE@'):]).abspath())
                        elif os.path.isabs(file):
                            relative_file_entries.append(file)
                        else:
                            relative_file_entries.append(os.path.join(file_list_relative_dir, file))
                    project_filter_list[filter_name] = relative_file_entries

        # configure uber files
        if not disable_uber_files_for_project:
            # if there's a wart on this filename, use it as the token for generating uber file names
            # e.g. AzFramework_win.waf_files -> _win
            file_list_token = re.sub(target, '', file_list_file, flags=re.IGNORECASE).replace('.waf_files', '')
            file_list = ctx.map_uber_files(file_list, file_list_token, target)

        file_list_content = _MergeFileList(file_list_content, file_list)

        # Build various mappings/lists based in file just
        for (uber_file, project_filter_list) in file_list.items():
            # Disable uber file usage if defined by override parameter
            disable_uber_file = disable_uber_files_for_project or _DisableUberFile(ctx, project_filter_list, files_marked_for_uber_file_exclusion)

            if disable_uber_file:
                Logs.debug('[Option Override] - %s - Disabled uber file "%s"' %(target, uber_file))

            generate_uber_file = uber_file != 'none' and uber_file != 'NoUberFile' and not disable_uber_file # TODO: Deprecate 'NoUberfile'
            if generate_uber_file:
                if uber_file in uber_files:
                    ctx.cry_file_error('[%s] UberFile "%s" was specifed twice. Please choose a different name' % (kw['target'], uber_file), file_list_file)
                # Collect Uber file related information
                uber_file_node = uber_file_folder.make_node(uber_file)
                uber_file_node_abs = uber_file_node.abspath()
                task_generator_files.add(uber_file_node_abs)
                uber_files.add(uber_file_node)
                file_to_project_filter[uber_file_node_abs] = 'Uber Files'

            for (filter_name, file_entries) in project_filter_list.items():
                for file_entry in file_entries:
                    if file_entry.startswith('@ENGINE@'):
                        file_node = ctx.engine_node.make_node(file_entry[len('@ENGINE@'):])
                        file = file_node.abspath()
                    else:
                        if os.path.isabs(file_entry):
                            file_node = ctx.root.make_node(file_entry)
                        else:
                            file_node = ctx.path.make_node(file_entry)
                        file = file_entry

                    filenode_abs_path = file_node.abspath()

                    # Keep track of files for file_list file and track which ones are duplicate
                    if (filenode_abs_path in file_list_to_file_collection[file_list_file]):
                        file_list_to_duplicate_file_collection[file_list_file].add(filenode_abs_path)
                        has_duplicate_files = True
                    else:
                        file_list_to_file_collection[file_list_file].add(filenode_abs_path)

                    task_generator_files.add(filenode_abs_path)

                    # Collect per file information; classification is by file extension
                    if file_node.abspath() == pch_node.abspath():
                        # PCHs are not compiled with the normal compilation, hence don't collect them
                        found_pch = True

                    elif ctx.is_cxx_file(file):
                        source_files.add(file_node)
                        if not generate_uber_file:
                            no_uber_file_files.add(file_node)

                    elif file.endswith(('.mm', '.m')):
                        objc_source_files.add(file_node)

                    elif file.endswith(('.ui', '.qrc', '.ts')):
                        qt_source_files.add(file_node)

                    elif file.endswith(('.h', '.H', '.hpp', '.HPP', '.hxx', '.HXX')):
                        header_files.add(file_node)

                    elif file.endswith(('.rc', '.r')):
                        resource_files.add(file_node)

                    elif file.endswith('.plist'):
                        plist_files.add(file_node)

                    else:
                        other_files.add(file_node)

                    # Build file name -> Visual Studio Filter mapping
                    file_to_project_filter[filenode_abs_path] = filter_name

                    # Build list of uber files to files
                    if generate_uber_file:
                        uber_file_abspath = uber_file_node.abspath()

                        if not uber_file_abspath in uber_file_to_file_list:
                            uber_file_to_file_list[uber_file_abspath] = []
                        uber_file_to_file_list[uber_file_abspath] += [ file_node ]

        # Remember which sources come from which file list (for later lookup)
        # NOTE(review): this snapshots the buckets accumulated SO FAR, so a later
        # file list's entry also includes sources of earlier lists — confirm intended.
        file_list_to_source[file_list_file] = list(source_files | qt_source_files)

    # Report any files that were duplicated within a file_list spec
    if has_duplicate_files:
        for (error_file_list,error_file_set) in file_list_to_duplicate_file_collection.items():
            if len(error_file_set) > 0:
                for error_file in error_file_set:
                    Logs.error('[ERROR] file "%s" was specifed more than once in file spec %s' % (str(error_file), error_file_list))
        ctx.fatal('[ERROR] One or more files errors detected for target %s.' % (kw['target']))

    # Compute final source list based on platform
    if platform == 'project_generator' or ctx.options.file_filter != "":
        # Collect all files plus uber files for project generators and when doing a single file compilation
        if ctx.cmd == 'info':
            kw['source'] = source_files | qt_source_files | objc_source_files | header_files | resource_files | other_files
        else:
            kw['source'] = uber_files | source_files | qt_source_files | objc_source_files | header_files | resource_files | other_files
        kw['mac_plist'] = list(plist_files)

        if platform == 'project_generator' and pch_file != '':
            kw['source'].add(pch_node) # Also collect PCH for project generators
    else:
        # Regular compilation path
        # Note: Always sort the file list so that it is stable between recompilation. WAF uses the order of input files to generate the UID of the task,
        # and if the order changes, it will cause recompilation!
        kw['source'] = []
        if found_pch and not ctx.is_option_true('use_precompiled_header'):
            kw['source'].append(pch_node) # Also collect PCH for when not using PCH for intended task

        if ctx.is_option_true('use_uber_files'):
            # Only take uber files when uber files are enabled and files not using uber files
            kw['source'].extend(sorted(uber_files | no_uber_file_files | qt_source_files, key=lambda file: file.abspath()))
        else:
            # Fall back to pure list of source files
            kw['source'].extend(sorted(source_files | qt_source_files, key=lambda file: file.abspath()))

        # Append platform specific files
        if ctx.is_apple_platform(platform):
            kw['source'].extend(sorted(objc_source_files, key=lambda file: file.abspath()))
            kw['mac_plist'] = list(sorted(plist_files, key=lambda file:file.abspath()))
        elif ctx.is_windows_platform(platform):
            kw['source'].extend(sorted(resource_files, key=lambda file: file.abspath()))

    # Handle PCH files
    if pch_file != '' and found_pch == False:
        # PCH specified but not found
        ctx.cry_file_error('[%s] Could not find PCH file "%s" in provided file list (%s).\nPlease verify that the name of the pch is the same as provided in a WAF file and that the PCH is not stored in an UberFile.' % (kw['target'], pch_file, ', '.join(file_lists)), 'wscript' )

    # Try some heuristic when to use PCH files
    #if ctx.is_option_true('use_uber_files') and found_pch and len(uber_file_relative_list) > 0 and ctx.options.file_filter == "" and ctx.cmd != 'generate_uber_files':
        # Disable PCH files when having UberFiles as they bring the same benefit in this case
        #kw['pch_name'] = kw['pch']
        #del kw['pch']

    # Store global lists in context
    kw['task_generator_files'] = task_generator_files
    kw['file_list_content'] = file_list_content
    kw['project_filter'] = file_to_project_filter
    kw['uber_file_lookup'] = uber_file_to_file_list
    kw['file_list_to_source'] = file_list_to_source
    kw['header_files'] = sorted(header_files, key=lambda file: file.abspath())

    # In the uber files, paths are relative to the current module path, so make sure the root of the project is included in the include search path
    # include it first to avoid wrong files being picked from other modules (e.g. picking stdafx.h from another module)
    includeList = kw.setdefault('includes', [])
    if '.' in includeList:
        includeList.remove('.')
    includeList.insert(0, '.')
def VerifyInput(ctx, kw):
    """
    Helper function to verify passed input values

    Raises via ctx.cry_file_error when a mandatory keyword is missing or an
    unsupported one is present; emits warnings (does not fail) for include/lib
    paths that do not exist on disk.
    """
    # 'target' is required
    target_name = kw['target']
    is_launcher = kw.get('is_launcher', False)
    wscript_file = ctx.path.make_node('wscript').abspath()
    if kw['file_list'] == []:
        ctx.cry_file_error('TaskGenerator "%s" is missing mandatory parameter "file_list"' % target_name, wscript_file )

    if 'source' in kw:
        ctx.cry_file_error('TaskGenerator "%s" is using unsupported parameter "source", please use "file_list"' % target_name, wscript_file )

    # Loop through and check the follow type of keys that represent paths and validate that they exist on the system.
    # If they do not exist, this does not mean its an error, but we want to be able to display a warning instead as
    # having unnecessary paths in the include paths affects build performance
    path_check_key_values = ['includes','libpath']

    # Validate the paths only during the build command execution and exist in the spec
    if ctx.cmd.startswith('build'):
        if ctx.is_target_enabled(target_name, is_launcher):
            # Validate the include paths and show warnings for ones that dont exist (only for the currently building platform
            # and only during the build command execution)
            # NOTE(review): current_configuration is unpacked but never used below
            current_platform, current_configuration = ctx.get_platform_and_configuration()
            platform_details = ctx.get_target_platform_detail(current_platform)

            # Special case: If the platform's alias include BOTH 'win' and 'msvc', reduce it to 'win'
            if {'win', 'msvc'}.issubset(platform_details.aliases):
                current_platform = 'win'

            azcg_build_path = os.path.normcase(ctx.bldnode.make_node('azcg').abspath())

            # Search for the keywords in 'path_check_key_values'
            # Matches either the bare key ('includes') or a platform-prefixed
            # variant ('<current_platform>_..._includes')
            for kw_check in kw.keys():
                for path_check_key in path_check_key_values:
                    if kw_check == path_check_key or (kw_check.endswith('_' + path_check_key) and kw_check.startswith(current_platform)):
                        path_check_values = kw[kw_check]
                        if path_check_values is not None:
                            # Make sure we are working with a list of strings, not a string
                            if isinstance(path_check_values,str):
                                path_check_values = [path_check_values]
                            for path_check in path_check_values:
                                if isinstance(path_check,str):
                                    # If the path input is a string, derive the absolute path for the input path
                                    path_to_validate = os.path.normcase(os.path.join(ctx.path.abspath(),path_check))
                                else:
                                    # If the path is a Node object, get its absolute path
                                    path_to_validate = os.path.normcase(path_check.abspath())
                                # Path validation can be skipped for azcg because it may not have been generated yet
                                is_azcg_path = path_to_validate.startswith(azcg_build_path)
                                if not os.path.exists(path_to_validate) and not is_azcg_path:
                                    Logs.warn('[WARNING] \'{}\' value \'{}\' defined in TaskGenerator "{}" does not exist'.format(kw_check,path_to_validate,target_name))
def InitializeTaskGenerator(ctx, kw):
    """
    Helper function to call all initialization routines required for a task generator

    The call order matters: defaults and global keywords are injected before the
    input is sanitized/verified, and file lists are tracked last.

    :param ctx: Context
    :param kw:  Task generator keyword dictionary, mutated in place
    :return:    Always True
    """
    apply_default_keywords(ctx, kw)     # fill in 'platforms'/'configurations' defaults
    AddGlobalKeywords(ctx, kw)
    SanitizeInput(ctx, kw)
    VerifyInput(ctx, kw)                # raises on missing 'file_list' / unsupported 'source'
    AssignTaskGeneratorIdx(ctx, kw)
    RegisterVisualStudioFilter(ctx, kw)
    TrackFileListChanges(ctx, kw)       # records kw['waf_file_entries']
    return True
def apply_default_keywords(ctx, kw):
    """
    Fill in fallback values for keywords omitted from the dictionary.

    :param ctx: Context (unused; kept for call-signature consistency)
    :param kw:  Keyword dictionary, updated in place
    """
    kw.setdefault('platforms', ['all'])
    kw.setdefault('configurations', ['all'])
def apply_cryengine_module_defines(ctx, kw):
    """Append engine-wide preprocessor defines (AWS native SDK platform defines) to kw['defines']."""
    defines_to_add = []
    ctx.add_aws_native_sdk_platform_defines(defines_to_add)
    # Historical: an LY_BUILD define used to be injected here as well
    #defines_to_add.append('LY_BUILD={}'.format(ctx.get_lumberyard_build()))
    append_kw_entry(kw, 'defines', defines_to_add)
# Append any common static modules to the configuration
def AppendCommonModules(ctx, kw):
    """
    Attach the common static-module dependencies to the task keywords:
    bcrypt on windows/msvc build platforms, plus AzTest/GMOCK for test
    configurations, project generation and configure passes.
    """
    kw.setdefault('use', [])

    is_configure = isinstance(ctx, ConfigurationContext)
    platform = ctx.env['PLATFORM']

    # bcrypt is required when actually building for a windows/msvc platform
    shared_lib_deps = []
    if not is_configure and platform != 'project_generator':
        windows_platforms = ALIAS_TO_PLATFORMS_MAP.get('win', set()) | ALIAS_TO_PLATFORMS_MAP.get('msvc', set())
        if any(platform == candidate for candidate in windows_platforms): # ACCEPTED_USE
            shared_lib_deps = ['bcrypt']

    # Test configurations (and generator/configure passes) pull in the test frameworks
    if 'test' in ctx.env['CONFIGURATION'] or platform == 'project_generator' or is_configure:
        append_to_unique_list(kw['use'], 'AzTest')
        kw.setdefault('uselib', [])
        append_to_unique_list(kw['uselib'], 'GMOCK')

    append_kw_entry(kw, 'lib', shared_lib_deps)
def LoadAdditionalFileSettings(ctx, kw):
    """
    Load all settings from the additional_settings parameter, and store them in a lookup map

    Populates kw['file_specifc_settings'] (abspath -> settings dict) and enables
    the 'apply_additional_settings' feature. Fails via ctx.cry_file_error if any
    file covered by additional settings is an uber file or part of one.
    """
    append_kw_entry(kw,'features',[ 'apply_additional_settings' ])
    kw['file_specifc_settings'] = {}

    for setting in kw['additional_settings']:
        setting['target'] = kw['target'] # reuse target name

        file_list = []

        if 'file_list' in setting:
            # Option A: The files are specifed as a *.waf_files (which is loaded already)
            # NOTE(review): loop variable 'list' shadows the builtin; rename when touching this code
            for list in setting['file_list']:
                file_list += kw['file_list_to_source'][list]

        if 'files' in setting:
            # Option B: The files are already specified as an list
            file_list += setting['files']

        if 'regex' in setting:
            # Option C: A regex is specifed to match the files
            p = re.compile(setting['regex'])
            for file in kw['source']:
                if p.match(file):
                    file_list += [file]

        # insert files into lookup dictonary, but make sure no uber file and no file within an uber file is specified
        uber_file_folder = ctx.bldnode.make_node('..')
        uber_file_folder = uber_file_folder.make_node('uber_files')
        uber_file_folder = uber_file_folder.make_node(kw['target'])

        for file in file_list:
            # Normalize every entry (Node, absolute string or relative string) to an abspath key
            if isinstance(file, Node.Node):
                file_abspath = file.abspath()
            elif os.path.isabs(file):
                file_abspath = file
            else:
                file_abspath = ctx.path.make_node(file).abspath()

            if 'uber_file_lookup' in kw:
                for uber_file in kw['uber_file_lookup']:
                    # Uber files are not allowed for additional settings
                    if file_abspath == uber_file:
                        ctx.cry_file_error("Additional File Settings are not supported for UberFiles (%s) to ensure a consistent behavior without UberFiles, please adjust your setup" % file, ctx.path.make_node('wscript').abspath())

                    for entry in kw['uber_file_lookup'][uber_file]:
                        if file_abspath == entry.abspath():
                            ctx.cry_file_error("Additional File Settings are not supported for file using UberFiles (%s) to ensure a consistent behavior without UberFiles, please adjust your setup" % file, ctx.path.make_node('wscript').abspath())

            # All fine, add file name to dictonary
            kw['file_specifc_settings'][file_abspath] = setting

        setting['source'] = []
def ConfigureTaskGenerator(ctx, kw):
    """
    Helper function to apply default configurations and to set platform/configuration dependent settings

    :param ctx: Context the task generator runs in
    :param kw:  Keyword dictionary, heavily mutated in place (file lists loaded,
                platform-specific settings merged, output name resolved, etc.)
    """
    target = kw['target']

    # Ensure we have a name for lookup purposes
    if 'name' not in kw:
        kw['name'] = target

    # Process any platform roots
    platform_roots = kw.get('platform_roots', [])
    if platform_roots:
        if not isinstance(platform_roots, list):
            platform_roots = [platform_roots]
        for platform_root_param in platform_roots:
            if not isinstance(platform_root_param,dict):
                raise Errors.WafError("Invalid keyword value for 'platform_roots' for target '{}'. Expecting a list of "
                                      "dictionary entries for 'root' (path) and 'export_includes' (boolean)".format(target))
            platform_root = platform_root_param['root']
            export_platform_includes = platform_root_param['export_includes']
            ctx.add_platform_root(kw=kw,
                                  root=platform_root,
                                  export=export_platform_includes)

    # Deal with restricted platforms
    ctx.process_restricted_settings(kw)

    # Determine if this is a build command (vs a platform generator)
    is_configure_cmd = isinstance(ctx, ConfigurationContext)
    is_build_platform_cmd = getattr(ctx, 'is_build_cmd', False)

    # Lookup the PlatformConfiguration for the current platform/configuration (if this is a build command)
    current_platform_configuration = None
    target_platform = []
    target_configuration = []
    if is_build_platform_cmd:
        target_platform = ctx.target_platform
        target_configuration = ctx.target_configuration
        current_platform_configuration = ctx.get_platform_configuration(target_platform, target_configuration)

        # Provide the module name to the test framework.
        if current_platform_configuration.is_test and not ctx.is_mac_platform(target_platform) and not ctx.is_linux_platform(target_platform):
            module_define = 'AZ_MODULE_NAME="{}"'.format(target.upper())
            append_kw_entry(kw, 'defines', module_define)

        # If this is a linux target, then add 'linux_rpath' to the kw
        if ctx.is_linux_platform(target_platform):
            append_to_unique_list(kw.setdefault('linux_rpath', []), '$ORIGIN')

        # Special case: Only non-android launchers can use required gems
        apply_required_gems = kw.get('use_required_gems', False)
        if kw.get('is_launcher', False):
            if apply_required_gems and not ctx.is_android_platform(target_platform):
                ctx.apply_required_gems_to_context(target, kw)
        else:
            if apply_required_gems:
                ctx.apply_required_gems_to_context(target, kw)

    # Apply all settings, based on current platform and configuration
    ApplyConfigOverwrite(ctx, kw)
    ApplyPlatformSpecificSettings(ctx, kw, target)
    ApplyBuildOptionSettings(ctx, kw)

    platform = ctx.env['PLATFORM']
    is_engine_project = ctx.path.is_child_of(ctx.engine_node)

    # Load all file lists (including additional settings)
    file_list = kw['file_list']
    for setting in kw['additional_settings']:
        file_list += setting.get('file_list', [])
        file_list += ctx.GetPlatformSpecificSettings(setting, 'file_list', platform, get_configuration(ctx, kw['target']) )

    # Load all configuration specific files when generating projects
    if platform == 'project_generator':
        for configuration in ctx.get_all_configuration_names():
            file_list += ctx.GetPlatformSpecificSettings(kw, 'file_list', platform, configuration)
        for alias in LUMBERYARD_SETTINGS.get_configuration_aliases():
            file_list += ctx.GetPlatformSpecificSettings(kw, 'file_list', platform, alias)

    LoadFileLists(ctx, kw, file_list)

    LoadAdditionalFileSettings(ctx, kw)

    # If uselib is set, validate them
    uselib_names = kw.get('uselib', None)
    if uselib_names is not None:
        processed_dependencies = []
        for uselib_name in uselib_names:
            if not is_third_party_uselib_configured(ctx, uselib_name):
                Logs.warn("[WARN] Invalid uselib '{}' declared in project {}. This may cause compilation or linker errors".format(uselib_name,target))
            else:
                # Only inherit configuration settings once per uselib, and only if not opted out
                if not uselib_name in processed_dependencies and not uselib_name in kw.get('no_inherit_config', []):
                    ctx.append_dependency_configuration_settings(uselib_name, kw)
                    processed_dependencies.append(uselib_name)

    # Make sure we have a 'use' list
    if not kw.get('use', None):
        kw['use'] = []

    if platform != 'project_generator':
        # Check if we are applying external file copies
        if 'copy_external' in kw and len(kw['copy_external'])>0:
            for copy_external_key in kw['copy_external']:
                copy_external_env_key = 'COPY_EXTERNAL_FILES_{}'.format(copy_external_key)
                if 'COPY_EXTERNAL_FILES' not in ctx.env:
                    ctx.env['COPY_EXTERNAL_FILES'] = []
                append_kw_entry(kw,'features','copy_external_files')
                if copy_external_env_key in ctx.env:
                    for copy_external_value in ctx.env[copy_external_env_key]:
                        ctx.env['COPY_EXTERNAL_FILES'].append(copy_external_value)

        # Check if we are applying external file copies to specific files
        copy_dependent_files = kw.get('copy_dependent_files',[])
        if len(copy_dependent_files)>0:
            append_kw_entry(kw,'features','copy_module_dependent_files')
            copy_dependent_env_key = 'COPY_DEPENDENT_FILES_{}'.format(target.upper())
            if copy_dependent_env_key not in ctx.env:
                ctx.env[copy_dependent_env_key] = []
            for copy_dependent_file in copy_dependent_files:
                # Engine projects resolve relative to the engine path, others to the module path
                if is_engine_project:
                    copy_dependent_file_abs = os.path.normpath(os.path.join(ctx.engine_path,copy_dependent_file))
                else:
                    copy_dependent_file_abs = os.path.normpath(os.path.join(ctx.path.abspath(),copy_dependent_file))
                ctx.env[copy_dependent_env_key].append(copy_dependent_file_abs)

        if ctx.is_windows_platform(platform):
            # Handle meta includes for WinRT
            for meta_include in kw.get('meta_includes', []):
                append_kw_entry(kw,'cxxflags',[ '/AI' + meta_include ])

            # Handle export definitions file
            append_kw_entry(kw,'linkflags',[ '/DEF:' + ctx.path.make_node( export_file ).abspath() for export_file in kw['export_definitions']])

    # Handle Spec unique defines (if one is provided)
    if ctx.is_project_spec_specified():
        append_kw_entry(kw, 'defines', ctx.get_current_spec_defines())

    # Generate output file name (without file ending), use target as an default if nothing is specified
    if kw['output_file_name'] == []:
        kw['output_file_name'] = kw['target']
    elif isinstance(kw['output_file_name'],list):
        kw['output_file_name'] = kw['output_file_name'][0] # Change list into a single string

    # Handle force_disable_mfc (Small Hack for Perforce Plugin (no MFC, needs to be better defined))
    if kw.get('force_disable_mfc', False) and '_AFXDLL' in kw['defines']:
        kw['defines'].remove('_AFXDLL')

    # Clean out some duplicate kw values to reduce the size for the hash calculation
    kw['defines'] = clean_duplicates_in_list(kw['defines'],'{} : defines'.format(target))

    # Apply the default copyright_org if none is specified
    if len(kw['copyright_org'])==0:
        kw['copyright_org'] = ['Amazon']

    if ctx.is_build_monolithic():
        append_kw_entry(kw, 'defines', ['_LIB', 'AZ_MONOLITHIC_BUILD'])

    if kw.get('inject_copyright', True):
        append_kw_entry(kw,'features',[ 'generate_rc_file' ])
def RunTaskGenerator(ctx, *k, **kw ):
    """
    Dispatch the actual task generator creation based on the build type recorded
    by the 'build_*' decorator (BUILD_TYPE_KW), after stripping the decorator
    bookkeeping keywords from kw.

    :param ctx: Context to create the task generator on
    :param k:   Positional arguments forwarded to the context build function
    :param kw:  Keyword dictionary; must contain BUILD_TYPE_KW and BUILD_FUNC_KW
    :return:    The task generator created by the looked-up context method
    :raises Errors.WafError: if not invoked through a 'build_*' decorated function
    """
    # BUILD_PROCESS_TABLE values are tuples of (copy targets, ctx method name);
    # only the ctx method name (index 1) is used here.
    CTX_TYPE_INDEX = 1

    if BUILD_TYPE_KW not in kw or BUILD_FUNC_KW not in kw:
        raise Errors.WafError("'RunTaskGenerator is not called from a 'build_*' decorated function: {}.".format(inspect.stack()[1][3]))
    build_type = kw[BUILD_TYPE_KW]

    # At this point we dont need the special build function keywords, delete them so then dont get attached to the task
    del kw[BUILD_FUNC_KW]
    del kw[BUILD_TYPE_KW]

    if build_type not in BUILD_PROCESS_TABLE:
        ctx.fatal('[ERROR] Unsupported build target type: {}'.format(build_type))
        return None

    if build_type in ('program', 'shlib'):
        # Make it so that this kind of module can link even if the program is currently running.
        append_kw_entry(kw, 'features', ['link_running_program'])
        target = kw['target']
        if ctx.env['STUB_ST'] and build_type == 'shlib':
            kw['output_stub_name'] = kw.get('output_file_name', target)
        if ctx.env['ALT_STUB_ST'] and build_type == 'shlib':
            kw['alt_output_stub_name'] = kw.get('output_file_name', target)

    # If we are a linux platform, add set rpath to [$ORIGIN] automatically so it applies the
    # current program/shared lib path to the library search path
    host = Utils.unversioned_sys_platform()
    if host == 'linux':
        append_to_unique_list(kw.setdefault('rpath', []), ['$ORIGIN'])

    return getattr(ctx, BUILD_PROCESS_TABLE[build_type][CTX_TYPE_INDEX])(*k, **kw)
@feature('fake_extern_engine_lib')
@after_method('copy_module_dependent_files')
def process_extern_engine_lib(self):
    """
    Feature implementation that will replaces a normal shlib or stlib task definition that normally takes in
    source files but instead will create a 'fake' library definition from a prebuilt binary. It also does
    an extra step by copying the prebuilt binary to its target destination

    :param self: Context
    """
    def _copy_file(src, dst):
        # Copy a single file, clearing the read-only flag on the destination first;
        # failures are reported as warnings, never fatal.
        if should_overwrite_file(src, dst):
            try:
                # In case the file is readonly, we'll remove the existing file first
                if os.path.exists(dst):
                    os.chmod(dst, stat.S_IWRITE)
                fast_copy2(src, dst)
            except:
                Logs.warn('[WARN] Unable to copy {} to destination {}. '
                          'Check the file permissions or any process that may be locking it.'
                          .format(src, dst))
                pass

    def _copy_dir(src, dst_path_base):
        # Recursively mirror 'src' underneath 'dst_path_base' using _copy_file per file
        basename = os.path.basename(src)
        dst_path_abs = os.path.join(dst_path_base,basename)
        if not os.path.exists(dst_path_abs):
            os.mkdir(dst_path_abs)
        items = os.listdir(src)
        for item in items:
            src_item_abs = os.path.join(src, item)
            if os.path.isdir(src_item_abs):
                _copy_dir(src_item_abs, dst_path_abs)
            else:
                dst_item_abs = os.path.join(dst_path_abs, item)
                _copy_file(src_item_abs, dst_item_abs)

    # Perform the copy of files here if needed, without using WAF's task dependency
    base_target_path = self.target_folder
    target_paths = [base_target_path]
    output_subfolder_copies = getattr(self,'output_sub_folder_copy',[]) or []
    for output_subfolder_copy in output_subfolder_copies:
        output_subfolder_path = os.path.realpath(os.path.join(base_target_path,output_subfolder_copy))
        # NOTE(review): subfolders that already exist are never appended to
        # target_paths, so they are skipped as copy destinations — confirm intended.
        if not os.path.exists(output_subfolder_path):
            try:
                os.makedirs(output_subfolder_path)
                target_paths.append(output_subfolder_path)
            except:
                Logs.warn('[WARN] Target folder "{}" for engine output cannot be created.'.format(output_subfolder_path))

    for source_path in getattr(self,'source_paths',[]):
        if os.path.isfile(source_path):
            for target_path in target_paths:
                dest_path = os.path.join(target_path,os.path.basename(source_path))
                _copy_file(source_path, dest_path)
        elif os.path.isdir(source_path):
            for target_path in target_paths:
                _copy_dir(source_path, target_path)

    # Calculate the module target
    # Create a link task only for stlib or shlib
    if self.lib_type in ('stlib','shlib'):
        output_filename = self.env['c{}_PATTERN'.format(self.lib_type)] % self.output_name
        output_file = os.path.join(self.target_folder, output_filename)
        if not os.path.exists(output_file):
            raise Errors.WafError('could not find library %r' % self.output_name)
        output_file_node = self.bld.root.find_node(output_file)
        # Seed the signature from the prebuilt binary so waf treats it as up to date
        output_file_node.cache_sig = Utils.h_file(output_file_node.abspath())
        self.link_task = self.create_task('fake_%s' % self.lib_type, [], [output_file_node])

    # Fall back to the generator name / target for the naming attributes if unset
    if not getattr(self, 'target', None):
        self.target = self.name
    if not getattr(self, 'output_file_name', None):
        self.output_file_name = self.target
def MonolithicBuildModule(ctx, *k, **kw):
    """
    Util function to collect all libs and linker settings for monolithic builds
    (Which apply all of those only to the final link as no DLLs or libs are produced)

    The collected settings are accumulated on ctx.monolithic_build_settings
    (keyed with an optional '<game_project>_' prefix) and the module itself is
    built as plain object files via ctx.objects().
    """
    # Set up member for monolithic build settings
    if not hasattr(ctx, 'monolithic_build_settings'):
        ctx.monolithic_build_settings = defaultdict(lambda: [])

    # For game specific modules, store with a game unique prefix
    prefix = ''
    if kw.get('game_project', False):
        prefix = kw['game_project'] + '_'

    # Collect libs for later linking
    def _append(key, values):
        if not ctx.monolithic_build_settings.get(key):
            ctx.monolithic_build_settings[key] = []
        ctx.monolithic_build_settings[key] += values

    def _append_linker_options():
        # Both the generic and the platform/configuration-specific variants are collected
        for setting in ['stlib', 'stlibpath', 'lib', 'libpath', 'linkflags', 'framework']:
            _append(prefix + setting, kw[setting] )
            _append(prefix + setting, ctx.GetPlatformSpecificSettings(kw, setting, ctx.env['PLATFORM'], ctx.env['CONFIGURATION']))

    # If this is a cryengine module, then it is marked to be included in all monolithic applications implicitly
    is_cryengine_module = kw.get('is_cryengine_module', False)
    if is_cryengine_module:
        _append(prefix + 'use', [ kw['target'] ] )
        _append_linker_options()
    # If this is a gem we need to be sure the linker options apply to the monolithic application, but do not want to reapply use
    # because it can duplicate symbols
    elif kw.get('is_gem', False):
        _append_linker_options()

    if 'uselib' in kw:
        _append(prefix + 'uselib', kw['uselib'] )

    # Remove rc files from the sources for monolithic builds (only the rc of
    # the launcher will be used) and remove any duplicate files that may have
    # sneaked in as well (using the python idiom: list(set(...)) to do so
    kw['source'] = [file for file in kw['source'] if not file.abspath().endswith('.rc')]

    return ctx.objects(*k, **kw)
###############################################################################
def BuildTaskGenerator(ctx, kw):
    """
    Check if this task generator should be build at all in the current configuration

    Handles the special commands (configure, generate_uber_files,
    generate_module_def_files, generate_module_dependency_files) by spawning the
    corresponding helper task generator and returning False; otherwise returns
    True only when the target is enabled for the current platform/configuration.

    :raises Errors.WafError: if not invoked through a 'build_*' decorated function
    """
    target = kw['target']

    if BUILD_FUNC_KW not in kw or BUILD_TYPE_KW not in kw:
        raise Errors.WafError("'BuildTaskGenerator is not called from a 'build_*' decorated function: {}.".format(inspect.stack()[1][3]))
    module_type = kw[BUILD_FUNC_KW]
    build_type = kw[BUILD_TYPE_KW]
    if isinstance(ctx, ConfigurationContext):
        # Configuration pass: only record the module definition, never build
        ctx.update_module_definition(module_type, build_type, kw)
        return False

    is_launcher = kw.get('is_launcher', False)

    # Assign a deterministic UID to this target
    ctx.assign_target_uid(kw)

    current_platform = ctx.env['PLATFORM']
    # NOTE(review): current_configuration is read but not used below
    current_configuration = ctx.env['CONFIGURATION']

    if ctx.cmd == 'configure':
        # During the configure process, do an extra check on the module's declared 'use' or 'uselib' to see if it matches
        # any tagged invalid uselibs that was detected during the 3rd party initialization.
        if hasattr(ctx,'InvalidUselibs'):
            declared_uselibs = kw.get('uselib',[]) + kw.get('use',[])
            illegal_uselib = []
            for declared_uselib in declared_uselibs:
                if declared_uselib in ctx.InvalidUselibs:
                    illegal_uselib.append(declared_uselib)
            if len(illegal_uselib)>0:
                Logs.warn("[WARN] Module '{}' may fail to build due to invalid use or uselib dependencies ({})".format(target,','.join(illegal_uselib)))
        return False # Dont build during configure

    if ctx.cmd == 'generate_uber_files':
        ctx(features='generate_uber_file', uber_file_list=kw['file_list_content'], target=target, pch=os.path.basename( kw.get('pch', '') ))
        return False # Dont do the normal build when generating uber files

    if ctx.cmd == 'generate_module_def_files':
        ctx(features='generate_module_def_files',
            use_module_list=kw['use'],
            platform_list=kw.get('platforms', []),
            configuration_list=kw.get('configurations', []),
            export_internal_3p_libs=kw.get('export_internal_3rd_party_libs', False),
            target=target)
        return False

    if ctx.cmd == 'generate_module_dependency_files': # Command used by Integration Toolkit (ly_integration_toolkit.py)
        ctx(features='generate_module_dependency_files',
            use_module_list=kw['use'],
            platform_list=kw.get('platforms', []),
            configuration_list=kw.get('configurations', []),
            target=target,
            waf_files=kw.get('waf_file_entries', []))
        return False

    if current_platform == 'project_generator':
        return True # Always include all projects when generating project for IDEs

    if kw and kw.get('dx12_only',False):
        if not ctx.has_dx12():
            return False

    if not ctx.is_valid_platform_request(**kw):
        return False

    if not ctx.is_valid_configuration_request(**kw):
        return False

    # If this is a unit test module for a static library (see CryEngineStaticLibrary), then enable it if its unit test target
    # is enabled as well
    unit_test_target = kw.get('unit_test_target')
    if unit_test_target and ctx.is_target_enabled(unit_test_target, is_launcher):
        Logs.debug('lumberyard: module {} enabled for platform {}.'.format(target, current_platform))
        return True

    if ctx.is_target_enabled(target, is_launcher):
        Logs.debug('lumberyard: module {} enabled for platform {}.'.format(target, current_platform))
        return True # Skip project is it is not part of the current spec

    Logs.debug('lumberyard: disabled module %s because it is not in the current list of spec modules' % target)
    return False
@feature('apply_additional_settings')
@before_method('extract_vcxproj_overrides')
def tg_apply_additional_settings(self):
    """
    Apply all settings found in the additional_settings parameter after all compile tasks are generated.

    For every compiled task whose input file has an entry in
    self.file_specifc_settings, extend that task's CFLAGS/CXXFLAGS/DEFINES
    and INCPATHS with the per-file values.
    """
    if len(self.file_specifc_settings) == 0:
        return # no file specific settings found
    for t in getattr(self, 'compiled_tasks', []):
        input_file = t.inputs[0].abspath()
        file_specific_settings = self.file_specifc_settings.get(input_file, None)
        if not file_specific_settings:
            continue
        t.env['CFLAGS'] += file_specific_settings.get('cflags', [])
        t.env['CXXFLAGS'] += file_specific_settings.get('cxxflags', [])
        t.env['DEFINES'] += file_specific_settings.get('defines', [])
        # BUG FIX: was iterating 'defines' here, so per-file include paths were
        # never honored (and defines leaked into INCPATHS). Use 'includes'.
        for inc in file_specific_settings.get('includes', []):
            if os.path.isabs(inc):
                t.env['INCPATHS'] += [ inc ]
            else:
                # Relative include paths are resolved against the task generator's path
                t.env['INCPATHS'] += [ self.path.make_node(inc).abspath() ]
###############################################################################
def find_file_in_content_dict(content_dict, file_name):
    """
    Return True if *file_name* appears anywhere in the uber-file content dictionary.

    The dictionary maps uber file names -> VS filter names -> lists of source
    file paths. Matching is case-insensitive, and also succeeds when a stored
    path ends with the file name preceded by a '/' (posix) or '\\' (dos)
    separator, i.e. the file lives in a folder below the context root.
    """
    needle = file_name.upper()
    posix_suffix = '/' + needle
    dos_suffix = '\\' + needle
    for vs_filter_dict in content_dict.values():
        for source_files in vs_filter_dict.values():
            for source_file in source_files:
                candidate = source_file.upper()
                if candidate == needle:
                    return True
                if candidate.endswith(posix_suffix) or candidate.endswith(dos_suffix):
                    return True
    return False
###############################################################################
@conf
def ConfigureTarget(ctx, *k, **kw):
    """Forward the module definition update during configure; False for any other context."""
    return ctx.update_module_definition(kw) if isinstance(ctx, ConfigurationContext) else False
@build_shlib
def CryEngineModule(ctx, *k, **kw):
    """
    CryEngine Modules are mostly compiled as dynamic libraries
    Except the build configuration requires a monolithic build

    Standard wrapper flow: initialize the task generator kw dict, append the
    common modules, apply engine defines and runtime-library settings, run
    shared-settings/configure steps, then either fold the module into the
    monolithic build or emit a real shlib task generator.
    """
    # Initialize the Task Generator; a falsy result means this module is
    # skipped for the current command/platform.
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx, kw)
    if hasattr(ctx, 'game_project'):
        # Android builds require the game project to carry android settings.
        if ctx.is_android_platform(ctx.env['PLATFORM']) and ctx.game_project is not None:
            if ctx.get_android_settings(ctx.game_project) == None:
                Logs.warn('[WARN] Game project - %s - not configured for Android. Skipping...' % ctx.game_project)
                return
        kw['game_project'] = ctx.game_project
        if kw.get('use_gems', False):
            # if this is defined it means we need to add all the defines, includes and such that the gem provides
            # to this project.
            ctx.apply_gems_to_context(ctx.game_project, k, kw)
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.is_build_monolithic():
        # For monolithic builds, simply collect all build settings
        kw['is_cryengine_module'] = True
        return MonolithicBuildModule(ctx, getattr(ctx, 'game_project', None), *k, **kw)
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib{}.dylib'.format(kw['output_file_name'])])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryEngineSharedLibrary(ctx, *k, **kw):
    """
    Definition for shared libraries. This is not considered a module, so it will not be implicitly included
    in project dependencies.

    Same wrapper flow as CryEngineModule, but marks the result as a
    non-module for monolithic builds (is_cryengine_module = False).
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.is_build_monolithic():
        # For monolithic builds, simply collect all build settings
        kw['is_cryengine_module'] = False
        return MonolithicBuildModule(ctx, getattr(ctx, 'game_project', None), *k, **kw)
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_stlib
def CryEngineStaticLibraryLegacy(ctx, *k, **kw):
    """
    CryEngine Static Libraries are static libraries
    Except the build configuration requires a monolithic build

    Builds a static library (stlib) task generator; in monolithic builds
    the AZ_MONOLITHIC_BUILD define is added as well.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx, kw)
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    # Mark the kw dict as a static library build
    kw['stlib'] = True
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.cmd == 'generate_uber_files':
        # Uber-file generation replaces the normal build
        return ctx(features='generate_uber_file', uber_file_list=kw['file_list_content'], target=kw['target'], pch=os.path.basename( kw.get('pch', '') ))
    if ctx.is_build_monolithic():
        append_kw_entry(kw, 'defines',[ 'AZ_MONOLITHIC_BUILD' ])
    append_kw_entry(kw,'features',['c', 'cxx', 'cstlib', 'cxxstlib', 'use'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_stlib
def CryEngine3rdPartyStaticLibrary(ctx, *k, **kw):
    """
    CryEngine Static Libraries are static libraries
    Except the build configuration requires a monolithic build

    Variant for 3rd party static libraries: only (re)built under the
    '3rd_party' spec, and the output folder is derived from the library's
    mandatory 'base_path' plus platform/configuration.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx, kw)
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    # We will only consider (re)building the 3rd party lib if the spec is set to '3rd_party'
    if '3rd_party' != getattr(ctx.options, 'project_spec', ''):
        return
    if 'base_path' not in kw:
        ctx.fatal('Mandatory "base_path" task generater parameter for 3rd Party Static Libraries missing in %s/wscript' % ctx.path.abspath())
    base_path = kw['base_path']
    # For 3rd party libraries, the output folder is calculated based on the base path, fixed name 'build', platform and configuration
    platform_key = ctx.env['PLATFORM']
    if platform_key != 'project_generator' and ctx.cmd != 'configure':
        platform_shortname = get_third_party_platform_name(ctx, platform_key)
        configuration_key = get_configuration(ctx, kw['target'])
        configuration_name = get_third_party_configuration_name(ctx, configuration_key)
        # e.g. <base_path>/build/<platform>/<configuration>
        target_path = os.path.normcase(os.path.join(base_path, 'build', platform_shortname, configuration_name))
        kw['output_folder'] = target_path
    kw['stlib'] = True
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.cmd == 'generate_uber_files':
        return ctx(features='generate_uber_file', uber_file_list=kw['file_list_content'], target=kw['target'], pch=os.path.basename( kw.get('pch', '') ))
    append_kw_entry(kw,'features',['c', 'cxx', 'cstlib', 'cxxstlib', 'use','generate_3p_static_lib_config'])
    return RunTaskGenerator(ctx, *k, **kw)
@build_program
def CryLauncher(ctx, *k, **kw):
    """
    Wrapper for CryEngine Executables

    Fans out one launcher per enabled game project: each project gets a
    deep copy of kw (so per-project mutation does not leak between
    launchers) with the project name prepended to the target, then
    CryLauncher_Impl builds it. Regular launchers are skipped entirely in
    dedicated-server builds.
    """
    # Copy kw dict and some internal values to prevent overwriting settings in one launcher from another
    apply_cryengine_module_defines(ctx, kw)
    if ctx.env['PLATFORM'] != 'project_generator': # if we're making project files for an IDE, then don't quit early
        if ctx.is_building_dedicated_server():
            return # regular launchers do not build in dedicated server mode.
    active_projects = ctx.get_enabled_game_project_list()
    for project in active_projects:
        # Deep copy so each launcher's settings are independent
        kw_per_launcher = copy.deepcopy(kw)
        kw_per_launcher['target'] = project + kw['target'] # rename the target!
        CryLauncher_Impl(ctx, project, *k, **kw_per_launcher)
def codegen_static_modules_cpp(ctx, static_modules, kw):
    """
    Wire up az_code_gen so StaticModules.cpp is generated for a monolithic build.

    Writes a <target>StaticModules.json file listing *static_modules*, then
    registers a codegen pass (Code/LauncherUnified/CodeGen/StaticModules.py)
    over that file in kw. No-op for non-monolithic builds.
    """
    if not ctx.is_build_monolithic():
        return
    # Write out json file listing modules. This will be fed into codegen.
    static_modules_json = {'modules': static_modules}
    static_modules_json_node = ctx.path.find_or_declare(kw['target'] + 'StaticModules.json')
    static_modules_json_node.write(json.dumps(static_modules_json))
    # get the full path of static_modules_json to extract out its path, and then set that path
    # as the input dir
    static_modules_json_node_abs_path = static_modules_json_node.abspath()
    static_modules_json_node_dir = os.path.dirname(static_modules_json_node_abs_path)
    kw['az_code_gen_input_dir'] = static_modules_json_node_dir
    # LMBR-30070: We should be generating this file with a waf task. Until then,
    # we need to manually set the cached signature.
    static_modules_json_node.cache_sig = Utils.h_file(static_modules_json_node.abspath())
    # Set up codegen for launcher.
    kw['features'] += ['az_code_gen']
    if 'az_code_gen' not in kw:
        kw['az_code_gen'] = []
    static_modules_py = ctx.engine_node.make_node('Code/LauncherUnified/CodeGen/StaticModules.py')
    # Codegen scripts are referenced relative to the current wscript path
    static_modules_node_rel = static_modules_py.path_from(ctx.path)
    kw['az_code_gen'] += [
        {
            'files': [static_modules_json_node],
            'scripts': [static_modules_node_rel],
            'arguments': ['-JSON']
        }
    ]
def codegen_static_modules_cpp_for_launcher(ctx, project, k, kw):
    """
    Use codegen to create StaticModules.cpp and compile it into the launcher.

    StaticModules.cpp contains code that bootstraps each module used in a
    monolithic build; for non-monolithic builds this is a no-op. The module
    list is the project's 'Game' flavor modules plus every game module of
    each non-legacy gem enabled for the project.
    """
    if not ctx.is_build_monolithic():
        return
    static_modules = ctx.project_flavor_modules(project, 'Game')
    for gem in ctx.get_game_gems(project):
        if gem.is_legacy_igem:
            continue
        for module in gem.modules:
            # Only game modules participate in static module bootstrapping
            if module.type != Gem.Module.Type.GameModule:
                continue
            if module.name:
                static_modules.append('{}_{}_{}'.format(gem.name, module.name, gem.id.hex))
            else:
                static_modules.append('{}_{}'.format(gem.name, gem.id.hex))
    codegen_static_modules_cpp(ctx, static_modules, kw)
def codegen_static_modules_cpp_for_application(ctx, modules_to_scan, gems_spec, k, kw):
    """
    Use codegen to create StaticModules.cpp and compile it into the application.

    StaticModules.cpp contains code that bootstraps each module used in a
    monolithic build; for non-monolithic builds this is a no-op. The module
    list is *modules_to_scan* plus every game module of each non-legacy gem
    found in *gems_spec*.
    """
    if not ctx.is_build_monolithic():
        return
    # Work on a copy so the caller's list is never mutated.
    static_modules = modules_to_scan[:]
    for gem in ctx.load_gems_from_gem_spec(gems_spec):
        if gem.is_legacy_igem:
            continue
        for module in gem.modules:
            # Only game modules participate in static module bootstrapping
            if module.type != Gem.Module.Type.GameModule:
                continue
            if module.name:
                static_modules.append('{}_{}_{}'.format(gem.name, module.name, gem.id.hex))
            else:
                static_modules.append('{}_{}'.format(gem.name, gem.id.hex))
    codegen_static_modules_cpp(ctx, static_modules, kw)
# Memo cache: project name -> (game folder, game dll)
PROJECT_GAME_AND_DLL_DICT = {}
def read_project_game_folder_and_dll(ctx, project_name):
    """
    Look up the (game folder, game dll) pair for a project from its project.json.

    Both values default to *project_name* when project.json is missing or the
    'sys_game_folder' / 'sys_dll_game' keys are absent. Successful reads are
    memoized in PROJECT_GAME_AND_DLL_DICT.
    """
    global PROJECT_GAME_AND_DLL_DICT
    cached = PROJECT_GAME_AND_DLL_DICT.get(project_name)
    if cached is not None:
        return cached
    project_json_node = ctx.srcnode.make_node(project_name).make_node('project.json')
    if not os.path.exists(project_json_node.abspath()):
        # No project.json: fall back to the project name for both values
        # (deliberately not cached, matching the original behavior).
        return project_name, project_name
    project_json = ctx.parse_json_file(project_json_node)
    result = (project_json.get('sys_game_folder', project_name),
              project_json.get('sys_dll_game', project_name))
    PROJECT_GAME_AND_DLL_DICT[project_name] = result
    return result
def CryLauncher_Impl(ctx, project, *k, **kw_per_launcher):
    """
    Build the launcher task generator for a single enabled game project.

    Invoked once per project by CryLauncher with a deep-copied kw dict
    (*kw_per_launcher*) so per-project mutations stay isolated. Adds the
    LY_GAMEFOLDER / LY_GAMEDLL defines from project.json, offsets the task
    generator index per project, and applies gem, codegen and monolithic /
    android specific settings before emitting the task generator.
    """
    kw_per_launcher['vs_filter'] = 'Launchers'
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw_per_launcher):
        return
    # Append common modules
    AppendCommonModules(ctx, kw_per_launcher)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx, kw_per_launcher)
    LoadSharedSettings(ctx,k,kw_per_launcher)
    kw_per_launcher['is_launcher'] = True
    ConfigureTaskGenerator(ctx, kw_per_launcher)
    game_folder, game_dll = read_project_game_folder_and_dll(ctx, project)
    append_kw_entry(kw_per_launcher, 'defines', 'LY_GAMEFOLDER="{}"'.format(game_folder))
    append_kw_entry(kw_per_launcher, 'defines', 'LY_GAMEDLL="{}"'.format(game_dll))
    if not BuildTaskGenerator(ctx, kw_per_launcher):
        return None
    # include the android studio command in the check for android otherwise the tgen is
    # incorrectly tagged as a program
    is_android = ctx.is_android_platform(ctx.env['PLATFORM']) or ctx.cmd == 'android_studio'
    is_monolithic = ctx.is_build_monolithic()
    if is_android and not ctx.get_android_settings(project):
        # BUG FIX: report the project actually being skipped ('project'); the old
        # code formatted ctx.game_project, which can be a different project here.
        Logs.warn('[WARN] Game project - %s - not configured for Android. Skipping...' % project)
        return None
    # Offset the task generator index per project so launchers cloned from the
    # same wscript entry do not collide.
    kw_per_launcher['idx'] = kw_per_launcher['idx'] + (1000 * (ctx.project_idx(project) + 1))
    # Setup values for Launcher Projects
    kw_per_launcher['resource_path'] = ctx.launch_node().make_node(ctx.game_code_folder(project) + '/Resources')
    kw_per_launcher['project_name'] = project
    kw_per_launcher['output_file_name'] = ctx.get_executable_name( project )
    Logs.debug("lumberyard: Generating launcher %s from %s" % (kw_per_launcher['output_file_name'], kw_per_launcher['target']))
    ctx.apply_gems_to_context(project, k, kw_per_launcher)
    codegen_static_modules_cpp_for_launcher(ctx, project, k, kw_per_launcher)
    # Use the is_monolithic flag computed above (it was previously computed but unused)
    if is_monolithic:
        append_kw_entry(kw_per_launcher,'defines',[ '_LIB', 'AZ_MONOLITHIC_BUILD' ])
        append_kw_entry(kw_per_launcher,'features',[ 'apply_monolithic_build_settings'])
    if not is_monolithic and ctx.env['PLATFORM'] == 'darwin_x64':
        append_kw_entry(kw_per_launcher, 'features', ['apply_non_monolithic_launcher_settings'])
    if is_android:
        # android doesn't have the concept of native executables so we need to build it as a shlib
        kw_per_launcher['__build_type__'] = 'shlib'
        return RunTaskGenerator(ctx, *k, **kw_per_launcher)
    else:
        append_kw_entry(kw_per_launcher,'features', ['copy_3rd_party_binaries'])
        return RunTaskGenerator(ctx, *k, **kw_per_launcher)
###############################################################################
@build_program
def CryDedicatedServer(ctx, *k, **kw):
    """
    Wrapper for CryEngine Dedicated Servers

    Fans out one dedicated server executable per enabled game project,
    each with its own deep copy of kw and the project name prepended to
    the target. ASAN/ASLR are enabled by default (callers may override
    via use_asan / use_aslr).
    """
    apply_cryengine_module_defines(ctx, kw)
    active_projects = ctx.get_enabled_game_project_list()
    # enable ASAN and ASLR by default on dedicated server
    kw.setdefault('use_asan', True)
    kw.setdefault('use_aslr', True)
    for project in active_projects:
        # Deep copy so each server's settings are independent
        kw_per_launcher = copy.deepcopy(kw)
        kw_per_launcher['target'] = project + kw['target'] # rename the target!
        CryDedicatedserver_Impl(ctx, project, *k, **kw_per_launcher)
def CryDedicatedserver_Impl(ctx, project, *k, **kw_per_launcher):
    """
    Build the dedicated server task generator for a single game project.

    Mirrors CryLauncher_Impl but marks the result as a dedicated server,
    uses the dedicated-server executable name, and only emits the task
    generator when the current platform/configuration is a server
    configuration.
    """
    kw_per_launcher['vs_filter'] = 'Launchers'
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw_per_launcher):
        return
    # Append common modules
    AppendCommonModules(ctx,kw_per_launcher)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx,kw_per_launcher)
    append_kw_entry(kw_per_launcher,'win_linkflags',[ '/SUBSYSTEM:WINDOWS' ])
    LoadSharedSettings(ctx,k,kw_per_launcher)
    ConfigureTaskGenerator(ctx, kw_per_launcher)
    game_folder, game_dll = read_project_game_folder_and_dll(ctx, project)
    append_kw_entry(kw_per_launcher, 'defines', 'LY_GAMEFOLDER="{}"'.format(game_folder))
    append_kw_entry(kw_per_launcher, 'defines', 'LY_GAMEDLL="{}"'.format(game_dll))
    if not BuildTaskGenerator(ctx, kw_per_launcher):
        return None
    # Offset the task generator index per project so servers cloned from the
    # same wscript entry do not collide.
    kw_per_launcher['idx'] = kw_per_launcher['idx'] + (1000 * (ctx.project_idx(project) + 1));
    kw_per_launcher['is_dedicated_server'] = True
    kw_per_launcher['resource_path'] = ctx.launch_node().make_node(ctx.game_code_folder(project) + '/Resources')
    kw_per_launcher['project_name'] = project
    kw_per_launcher['output_file_name'] = ctx.get_dedicated_server_executable_name(project)
    Logs.debug("lumberyard: Generating dedicated server %s from %s" % (kw_per_launcher['output_file_name'], kw_per_launcher['target']))
    ctx.apply_gems_to_context(project, k, kw_per_launcher)
    codegen_static_modules_cpp_for_launcher(ctx, project, k, kw_per_launcher)
    if ctx.is_build_monolithic():
        Logs.debug("lumberyard: Dedicated server monolithic build %s ... " % kw_per_launcher['target'])
        append_kw_entry(kw_per_launcher,'defines',[ '_LIB', 'AZ_MONOLITHIC_BUILD' ])
        append_kw_entry(kw_per_launcher,'features',[ 'apply_monolithic_build_settings' ])
    append_kw_entry(kw_per_launcher, 'features', ['copy_3rd_party_binaries'])
    # Make sure that we are actually building a server configuration
    if not isinstance(ctx, ConfigurationContext) and ctx.env['PLATFORM'] != 'project_generator':
        try:
            if not ctx.get_platform_configuration(ctx.env['PLATFORM'], ctx.env['CONFIGURATION']).is_server:
                return
        except Errors.WafError as e:
            # Unknown platform/configuration combination: skip rather than fail
            Logs.debug("Skipping target '{}' due to error: {}".format(kw_per_launcher['target'], e))
            return
    return RunTaskGenerator(ctx, *k, **kw_per_launcher)
###############################################################################
@build_program
def CryConsoleApplication(ctx, *k, **kw):
    """
    Wrapper for CryEngine Executables

    Builds a console-subsystem executable with C++ exceptions enabled on
    darwin (clang disables them by default).
    """
    if ctx.is_module_exempt(kw.get('target','')):
        return
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx, kw)
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:CONSOLE' ])
    # Default clang behavior is to disable exceptions. For console apps we want to enable them
    if 'CXXFLAGS' in ctx.env.keys() and 'darwin' in ctx.get_current_platform_list(ctx.env['PLATFORM']):
        if '-fno-exceptions' in ctx.env['CXXFLAGS']:
            ctx.env['CXXFLAGS'].remove("-fno-exceptions")
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
@build_program
def LyLauncherApplication(ctx, *k, **kw):
    """
    Module to build a custom launcher that will build monolithically when needed

    For monolithic builds, modules are selected from (in priority order):
    an explicit 'gem_spec' in kw, an explicit 'project' in kw, or — when
    neither is given — one executable per enabled game project, each with
    the project name prepended to the target.
    """
    if ctx.is_module_exempt(kw.get('target', '')):
        return
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx, kw)
    # Setup TaskGenerator specific settings
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx, kw)
    append_kw_entry(kw, 'win_linkflags', ['/SUBSYSTEM:CONSOLE'])
    # Default clang behavior is to disable exceptions. For console apps we want to enable them
    if 'CXXFLAGS' in ctx.env.keys() and 'darwin' in ctx.get_current_platform_list(ctx.env['PLATFORM']):
        if '-fno-exceptions' in ctx.env['CXXFLAGS']:
            ctx.env['CXXFLAGS'].remove("-fno-exceptions")
    LoadSharedSettings(ctx, k, kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    if ctx.is_build_monolithic():
        # Apply the monolithic logic to the application in the same way launchers are done
        append_kw_entry(kw, 'defines', [ '_LIB', 'AZ_MONOLITHIC_BUILD' ])
        append_kw_entry(kw, 'features', [ 'apply_monolithic_build_settings'])
        kw['is_launcher'] = True
        if 'gem_spec' in kw:
            # Specified both a project and an override gem spec
            gem_spec = kw['gem_spec']
            static_modules = kw.get('static_modules', [])
            codegen_static_modules_cpp_for_application(ctx, static_modules, gem_spec, k, kw)
            ctx.apply_gem_spec_to_context(gem_spec, kw)
        elif 'project' in kw:
            # Specified a game project
            project = kw['project']
            codegen_static_modules_cpp_for_launcher(ctx, project, k, kw)
            ctx.apply_gems_to_context(project, k, kw)
            ctx.apply_required_gems_to_context(kw['target'], kw)
        else:
            # If no gem spec is specified, then this console app needs to be treated the same way as a launcher: Append the game name in front
            # of the target, in order to handle multiple enabled games
            active_projects = ctx.get_enabled_game_project_list()
            for project in active_projects:
                # Deep copy so each per-project variant mutates its own kw
                kw_per_console_app = copy.deepcopy(kw)
                kw_per_console_app['target'] = project + kw['target'] # rename the target!
                ctx.apply_gems_to_context(project, k, kw_per_console_app)
                RunTaskGenerator(ctx, *k, **kw_per_console_app)
            # Per-project task generators were already emitted in the loop above
            return None
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_program
def CryBuildUtility(ctx, *k, **kw):
    """
    Wrapper for Build Utilities

    Minimal console-executable wrapper: no common engine modules and no
    engine module defines are applied.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:CONSOLE' ])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_file_container
def CryFileContainer(ctx, *k, **kw):
    """
    Function to create a header only library

    Emits a task generator that contributes exported includes/defines via
    the 'use' system but compiles nothing; files are kept only so IDE
    project generation lists them.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    LoadSharedSettings(ctx,k,kw)
    # Setup TaskGenerator specific settings
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return
    # A file container is not supposed to actually generate any build tasks at all, but it still needs
    # to be a task gen so that it can export includes and export defines (in the 'use' system)
    # We want the files to show up in the generated projects (xcode and other IDEs) as if they are compile tasks
    # but when actually building, we want them only to affect use / defines / includes.
    if ctx.env['PLATFORM'] != 'project_generator':
        # clear out the 'source' and features so that as little as possible executes.
        kw['source'] = []
        kw['features'] = ['use']
    return ctx(*k, **kw)
###############################################################################
@build_program
def CryEditor(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Executables

    Windows-subsystem executable with editor flags and SANDBOX_EXPORTS.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx, kw)
    # Additional Editor-specific settings
    append_kw_entry(kw,'defines',[ 'SANDBOX_EXPORTS' ])
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:WINDOWS' ])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_program
def LumberyardApp(ctx, *k, **kw):
    """
    Wrapper to make lmbr_waf happy. We shouldn't tack on any settings here,
    so we can make the waf transition easier later on.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryEditorLib(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Library component

    Shared library with editor flags, an RC file, SANDBOX_EXPORTS and
    MSVC exception handling (/EHsc) enabled.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Additional Editor-specific settings
    append_kw_entry(kw,'features',[ 'generate_rc_file' ])
    append_kw_entry(kw,'defines',[ 'SANDBOX_EXPORTS' ])
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags', ['/EHsc'])
    append_kw_entry(kw,'defines',['USE_MEM_ALLOCATOR', 'EDITOR', 'DONT_BAN_STD_STRING', 'FBXSDK_NEW_API=1' ])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryEditorCore(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Core component

    Same shape as CryEditorLib but defines EDITOR_CORE instead of
    SANDBOX_EXPORTS and generates no RC file.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags', ['/EHsc'])
    append_kw_entry(kw,'defines',['EDITOR_CORE', 'USE_MEM_ALLOCATOR', 'EDITOR', 'DONT_BAN_STD_STRING', 'FBXSDK_NEW_API=1' ])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryEditorUiQt(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Core component

    Qt UI shared library for the editor; exports the editor Qt UI API
    (EDITOR_QT_UI_EXPORTS) and imports editor common.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags',['/EHsc'])
    append_kw_entry(kw,'defines',[ 'NOMINMAX',
                                   'EDITOR_UI_UX_CHANGE',
                                   'EDITOR_QT_UI_EXPORTS',
                                   'IGNORE_CRY_COMMON_STATIC_VAR',
                                   'CRY_ENABLE_RC_HELPER',
                                   'PLUGIN_EXPORTS',
                                   'EDITOR_COMMON_IMPORTS'])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryPlugin(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Plugins

    Builds into the 'EditorPlugins' sub-folder with Qt5 enabled.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags',['/EHsc'])
    append_kw_entry(kw,'defines',[ 'SANDBOX_IMPORTS', 'PLUGIN_EXPORTS', 'EDITOR_COMMON_IMPORTS' ])
    kw['output_sub_folder'] = 'EditorPlugins'
    kw['features'] += ['qt5']#added QT to all plugins
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def BuilderPlugin(ctx, *k, **kw):
    """
    Wrapper for Asset Builder SDK Builders

    Shared library placed in the 'Builders' sub-folder, linked against
    AzToolsFramework/AssetBuilderSDK and Qt5 core/gui/widgets.
    """
    kw['output_sub_folder']='Builders'
    append_kw_entry(kw, 'msvc_cxxflags', ['/EHsc'])
    append_kw_entry(kw, 'msvc_cflags', ['/EHsc'])
    append_kw_entry(kw, 'win_defines', ['UNICODE'])
    append_kw_entry(kw, 'use', ['AzToolsFramework', 'AssetBuilderSDK'])
    append_kw_entry(kw, 'uselib', ['QT5CORE', 'QT5GUI', 'QT5WIDGETS'])
    defines = []
    append_kw_entry(kw, 'defines', defines)
    # Initialize the Task Generator.
    # CONSISTENCY FIX: every other wrapper bails out when initialization
    # reports the module should be skipped; previously the result was ignored.
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    apply_cryengine_module_defines(ctx, kw)
    SetupRunTimeLibraries(ctx,kw)
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryStandAlonePlugin(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Plugins

    Stand-alone variant: exports the plugin API without sandbox imports,
    enables RTTI and Qt5, and defaults the output to 'EditorPlugins'.
    """
    # Initialize the Task Generator.
    # CONSISTENCY FIX: every other wrapper bails out when initialization
    # reports the module should be skipped; previously the result was ignored.
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags',['/EHsc'])
    append_kw_entry(kw,'defines',[ 'PLUGIN_EXPORTS' ])
    append_kw_entry(kw,'win_debug_linkflags',['/NODEFAULTLIB:libcmtd.lib', '/NODEFAULTLIB:libcd.lib'])
    append_kw_entry(kw,'win_profile_linkflags',['/NODEFAULTLIB:libcmt.lib', '/NODEFAULTLIB:libc.lib'])
    append_kw_entry(kw,'win_release_linkflags',['/NODEFAULTLIB:libcmt.lib', '/NODEFAULTLIB:libc.lib'])
    if 'output_sub_folder' not in kw:
        kw['output_sub_folder'] = 'EditorPlugins'
    kw['features'] += ['qt5'] #added QT to all plugins
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    kw['enable_rtti'] = [ True ]
    kw['remove_release_define'] = [ True ]
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    kw['is_editor_plugin'] = True
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryPluginModule(ctx, *k, **kw):
    """
    Wrapper for CryEngine Editor Plugins Util dlls, those used by multiple plugins
    """
    # Initialize the Task Generator.
    # CONSISTENCY FIX: every other wrapper bails out when initialization
    # reports the module should be skipped; previously the result was ignored.
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags',['/EHsc'])
    append_kw_entry(kw,'defines',[ 'PLUGIN_EXPORTS', 'EDITOR_COMMON_EXPORTS' ])
    if 'output_sub_folder' not in kw:
        kw['output_sub_folder'] = 'EditorPlugins'
    kw['features'] += ['qt5']#added QT to all plugins
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    kw['remove_release_define'] = [ True ]
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryEditorCommon(ctx, *k, **kw):
    """
    Wrapper for CryEditorCommon
    """
    # Initialize the Task Generator.
    # CONSISTENCY FIX: every other wrapper bails out when initialization
    # reports the module should be skipped; previously the result was ignored.
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    ctx.set_editor_flags(kw)
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'msvc_cxxflags',['/EHsc'])
    append_kw_entry(kw,'msvc_cflags',['/EHsc'])
    append_kw_entry(kw,'defines',[ 'PLUGIN_EXPORTS', 'EDITOR_COMMON_EXPORTS'])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    kw['remove_release_define'] = [ True ]
    if ctx.env['PLATFORM'] == 'darwin_x64':
        # macOS dylibs resolve at runtime through @rpath
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_program
def CryResourceCompiler(ctx, *k, **kw):
    """
    Wrapper for RC

    Builds the resource compiler console executable into the 'rc'
    sub-folder, with dynamic runtime libraries.
    """
    # Initialize the Task Generator.
    # CONSISTENCY FIX: every other wrapper bails out when initialization
    # reports the module should be skipped; previously the result was ignored.
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx,kw, 'dynamic')
    ctx.set_rc_flags(kw, ctx)
    kw['output_file_name'] = 'rc'
    kw['output_sub_folder'] = 'rc'
    Logs.debug('lumberyard: creating RC, with mirror_artifacts')
    append_kw_entry(kw,'win_debug_linkflags',['/NODEFAULTLIB:libcmtd.lib', '/NODEFAULTLIB:libcd.lib'])
    append_kw_entry(kw,'win_ndebug_linkflags',['/NODEFAULTLIB:libcmt.lib', '/NODEFAULTLIB:libc.lib'])
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:CONSOLE' ])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryResourceCompilerModule(ctx, *k, **kw):
    """
    Task-generator wrapper for Resource Compiler plugin modules.

    Builds a shared library into the 'rc' output sub-folder, linked against
    the dynamic runtime. Returns the result of RunTaskGenerator, None if the
    target is filtered out, or bare return if initialization fails.
    """
    # Initialize the Task Generator
    if not InitializeTaskGenerator(ctx, kw):
        return
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx,kw, 'dynamic')
    ctx.set_rc_flags(kw, ctx)
    kw['output_sub_folder'] = 'rc'
    # Exclude the static CRT libs; RC modules link the dynamic runtime
    append_kw_entry(kw,'win_debug_linkflags',['/NODEFAULTLIB:libcmtd.lib', '/NODEFAULTLIB:libcd.lib'])
    append_kw_entry(kw,'win_ndebug_linkflags',['/NODEFAULTLIB:libcmt.lib', '/NODEFAULTLIB:libc.lib'])
    if ctx.is_mac_platform(ctx.env['PLATFORM']):
        append_kw_entry(kw,'linkflags',['-dynamiclib'])
    LoadSharedSettings(ctx,k,kw)
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    # NOTE(review): unlike CryResourceCompiler, win_linkflags is appended after
    # ConfigureTaskGenerator here — confirm this still takes effect as intended
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:CONSOLE' ])
    # macOS dylibs need an @rpath install name so dependents can locate them
    if ctx.env['PLATFORM'] == 'darwin_x64':
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_shlib
def CryPipelineModule(ctx, *k, **kw):
    """
    Task-generator wrapper for Pipeline modules (mostly DCC exporters).

    Builds a shared library with pipeline flags and the dynamic runtime.
    Returns the result of RunTaskGenerator, or None if the target is
    filtered out of the current build.
    """
    # Initialize the Task Generator
    InitializeTaskGenerator(ctx, kw)
    AppendCommonModules(ctx,kw)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx, kw, 'dynamic')
    apply_cryengine_module_defines(ctx, kw)
    ctx.set_pipeline_flags(kw, ctx)
    # LUMBERYARD
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:CONSOLE' ])
    # Exclude the static CRT libs; pipeline modules link the dynamic runtime
    append_kw_entry(kw,'win_debug_linkflags',['/NODEFAULTLIB:libcmtd.lib', '/NODEFAULTLIB:libcd.lib'])
    append_kw_entry(kw,'win_ndebug_linkflags',['/NODEFAULTLIB:libcmt.lib', '/NODEFAULTLIB:libc.lib'])
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    # macOS dylibs need an @rpath install name so dependents can locate them
    if ctx.env['PLATFORM'] == 'darwin_x64':
        append_kw_entry(kw,'linkflags',['-install_name', '@rpath/lib'+kw['output_file_name']+'.dylib'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_program
def CryQtApplication(ctx, *k, **kw):
    """
    Task-generator wrapper for Qt GUI programs launched by the editor.

    Builds a windowed (/SUBSYSTEM:WINDOWS) executable. Returns the result of
    RunTaskGenerator, or None if the target is filtered out of the build.
    """
    # Initialize the Task Generator
    InitializeTaskGenerator(ctx, kw)
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:WINDOWS' ])
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
@build_program
def CryQtConsoleApplication(ctx, *k, **kw):
    """
    Task-generator wrapper for Qt console programs launched by the editor.

    Identical to CryQtApplication except the executable is linked with
    /SUBSYSTEM:CONSOLE. Returns the result of RunTaskGenerator, or None if
    the target is filtered out of the build.
    """
    # Initialize the Task Generator
    InitializeTaskGenerator(ctx, kw)
    # Append common modules
    AppendCommonModules(ctx,kw)
    apply_cryengine_module_defines(ctx, kw)
    # Setup TaskGenerator specific settings
    SetupRunTimeLibraries(ctx,kw)
    append_kw_entry(kw,'win_linkflags',[ '/SUBSYSTEM:CONSOLE' ])
    ConfigureTaskGenerator(ctx, kw)
    if not BuildTaskGenerator(ctx, kw):
        return None
    append_kw_entry(kw, 'features', ['copy_3rd_party_binaries'])
    return RunTaskGenerator(ctx, *k, **kw)
###############################################################################
# Helper function to set Flags based on options
def ApplyBuildOptionSettings(self, kw):
    """
    Util function to apply flags based on waf options.

    Mutates kw's cflags/cxxflags/linkflags/defines in place, and may also
    mutate self.env (CC_TGT_F/CXX_TGT_F) for preprocess/disassembly output.
    """
    # Add debug flags if requested
    if self.is_option_true('generate_debug_info'):
        kw['cflags'].extend(self.env['COMPILER_FLAGS_DebugSymbols'])
        kw['cxxflags'].extend(self.env['COMPILER_FLAGS_DebugSymbols'])
        kw['linkflags'].extend(self.env['LINKFLAGS_DebugSymbols'])
    # Add show include flags if requested
    if self.is_option_true('show_includes'):
        kw['cflags'].extend(self.env['SHOWINCLUDES_cflags'])
        kw['cxxflags'].extend(self.env['SHOWINCLUDES_cxxflags'])
    # Add preprocess to file flags if requested
    if self.is_option_true('show_preprocessed_file'):
        kw['cflags'].extend(self.env['PREPROCESS_cflags'])
        kw['cxxflags'].extend(self.env['PREPROCESS_cxxflags'])
        # redirect the compiler's target-file flag so output goes to the preprocessed file
        self.env['CC_TGT_F'] = self.env['PREPROCESS_cc_tgt_f']
        self.env['CXX_TGT_F'] = self.env['PREPROCESS_cxx_tgt_f']
    # Add disassemble to file flags if requested
    if self.is_option_true('show_disassembly'):
        kw['cflags'].extend(self.env['DISASSEMBLY_cflags'])
        kw['cxxflags'].extend(self.env['DISASSEMBLY_cxxflags'])
        self.env['CC_TGT_F'] = self.env['DISASSEMBLY_cc_tgt_f']
        self.env['CXX_TGT_F'] = self.env['DISASSEMBLY_cxx_tgt_f']
    # Add ASLR and ASAN flags
    # ASAN is enabled by default for debug configurations unless kw overrides it
    is_debug = self.env['CONFIGURATION'] in ('debug', 'debug_test')
    if self.is_option_true('use_asan') or kw.get('use_asan', is_debug):
        kw['cflags'].extend(self.env['ASAN_cflags'])
        kw['cxxflags'].extend(self.env['ASAN_cxxflags'])
    if self.is_option_true('use_aslr') or kw.get('use_aslr', False):
        kw['linkflags'].extend(self.env['LINKFLAGS_ASLR'])
    # Crash reporter settings
    if self.options.external_crash_reporting:
        kw['defines'] += ['EXTERNAL_CRASH_REPORTING=' + self.options.external_crash_reporting]
    if self.options.crash_handler_token:
        kw['defines'] += ['CRASH_HANDLER_TOKEN=' + self.options.crash_handler_token]
    if self.options.crash_handler_url:
        kw['defines'] += ['CRASH_HANDLER_URL=' + self.options.crash_handler_url]
    # We always send in a packaged build time. It's only meaningful for packaged builds.
    packaged_build_time = 0
    if len(self.options.packaged_build_time) > 0:
        packaged_build_time = self.options.packaged_build_time
    kw['defines'] += ['LY_METRICS_BUILD_TIME={}'.format(packaged_build_time)]
###############################################################################
# Helper function to extract platform specific flags
@conf
def GetPlatformSpecificSettings(ctx, dict, entry, _platform, configuration):
    """
    Util function to apply flags based on current platform.

    Collects every value for 'entry' that is qualified by the current
    platform and/or configuration, in this order:
      1. <platform>_<entry>
      2. test_/dedicated_-tagged variants (per configuration details)
      3. <configuration>_<entry>
      4. <platform>_<configuration>_<entry>
    Scalar values are promoted to single-element lists. Returns the
    concatenated list (possibly empty). Note: the 'dict' parameter shadows
    the builtin; it is the keyword dictionary being inspected.
    """
    def _to_list( value ):
        if isinstance(value,list):
            return value
        return [ value ]
    returnValue = []
    platforms = ctx.get_current_platform_list(_platform)
    # Check for entry in <platform>_<entry> style
    for platform in platforms:
        platform_entry = platform + '_' + entry
        if not platform_entry in dict:
            continue # No platfrom specific entry found
        returnValue += _to_list( dict[platform_entry] )
    if configuration == []:
        return [] # Dont try to check for configurations if we dont have any
    # Check for 'test' or 'dedicated' tagged entries
    if _platform and _platform != 'project_generator':
        platform_details = ctx.get_target_platform_detail(_platform)
        configuration_details = platform_details.get_configuration(configuration)
        if configuration_details.is_test:
            test_entry = 'test_{}'.format(entry)
            if test_entry in dict:
                returnValue += _to_list(dict[test_entry])
            for platform in platforms:
                platform_test_entry = '{}_test_{}'.format(platform, entry)
                if platform_test_entry in dict:
                    returnValue += _to_list(dict[platform_test_entry])
        if configuration_details.is_server:
            test_entry = 'dedicated_{}'.format(entry)
            if test_entry in dict:
                returnValue += _to_list(dict[test_entry])
            for platform in platforms:
                platform_test_entry = '{}_dedicated_{}'.format(platform, entry)
                if platform_test_entry in dict:
                    returnValue += _to_list(dict[platform_test_entry])
    # Check for entry in <configuration>_<entry> style
    configuration_entry = configuration + '_' + entry
    if configuration_entry in dict:
        returnValue += _to_list( dict[configuration_entry] )
    # Check for entry in <platform>_<configuration>_<entry> style
    for platform in platforms:
        platform_configuration_entry = platform + '_' + configuration + '_' + entry
        if not platform_configuration_entry in dict:
            continue # No platfrom /configuration specific entry found
        returnValue += _to_list( dict[platform_configuration_entry] )
    return returnValue
# Maintain a map of legacy keyword shortcuts to process the kw macro expansions (for shortcut aliases)
# Each alias key stands for the list of concrete configurations it expands to; used by
# process_kw_macros_expansion() to rewrite keywords such as 'test_all_file_list' into
# one keyword per matching concrete configuration.
LEGACY_CONFIGURATION_SHORTCUT_ALIASES = {
    'debug_all': ['debug', 'debug_dedicated', 'debug_test', 'debug_test_dedicated'],
    'profile_all': ['profile', 'profile_dedicated', 'profile_test', 'profile_test_dedicated'],
    'performance_all': ['performance', 'performance_dedicated'],
    'release_all': ['release', 'release_dedicated'],
    'dedicated_all': ['debug_dedicated', 'profile_dedicated', 'performance_dedicated', 'release_dedicated'],
    'non_dedicated': ['debug', 'debug_test', 'profile', 'profile_test', 'performance', 'release'],
    'test_all': ['debug_test', 'debug_test_dedicated', 'profile_test', 'profile_test_dedicated'],
    'non_test': ['debug', 'debug_dedicated', 'profile', 'profile_dedicated', 'performance',
                 'performance_dedicated',
                 'release', 'release_dedicated']
}
def process_kw_macros_expansion(ctx, target, kw, platform, configuration):
    """
    Process the special kw expansion macros in the keyword dictionary based on the configuration
    Args:
        target: Target name to report any warnings
        kw: The keyword map to manipulate (mutated in place: expanded keys are
            added, the macro-form keys are removed)
        platform: The current build platform
        configuration: The current build configuration

    NOTE(review): the nested macro classes read the free variable 'kw_entry'
    (the loop variable of the kw iteration below) via closure rather than
    their 'keyword_name' parameter. Both always hold the same value at call
    time, but the classes are only safe to invoke from inside that loop.
    """
    class KeywordMacroNDebug:
        """
        Keyword Macro handler to handle 'ndebug' macros. These macros are a convenience macro to expand non-debug
        keywords to the appropriate non-debug configuration. This is to reduce the need to repeat all of the non-debug
        configuration values in the keyword list
        """
        def process(self,keyword_name, keyword_value, current_platform, current_configuration):
            # dedicated-tagged keywords only apply to dedicated configurations (and vice versa)
            is_config_dedicated = current_configuration.endswith('_dedicated')
            is_kw_dedicated = '_dedicated' in kw_entry
            if is_config_dedicated != is_kw_dedicated:
                return None, None
            # Only process this keyword non-debug mode, otherwise it will be ignored
            if current_configuration not in ('debug', 'debug_dedicated', 'debug_test', 'debug_test_dedicated'):
                if '_ndebug_dedicated_' in kw_entry and is_config_dedicated:
                    new_kw_entry_name = keyword_name.replace('_ndebug_dedicated_','_{}_'.format(configuration))
                    return (new_kw_entry_name,keyword_value), keyword_name
                elif '_ndebug_' in kw_entry:
                    new_kw_entry_name = keyword_name.replace('_ndebug_','_{}_'.format(configuration))
                    return (new_kw_entry_name,keyword_value), keyword_name
                elif kw_entry.startswith('ndebug_dedicated') and is_config_dedicated:
                    new_kw_entry_name = keyword_name.replace('ndebug_dedicated_','{}_'.format(configuration))
                    return (new_kw_entry_name,keyword_value), keyword_name
                elif kw_entry.startswith('ndebug'):
                    new_kw_entry_name = keyword_name.replace('ndebug_','{}_'.format(configuration))
                    return (new_kw_entry_name,keyword_value), keyword_name
            return None, None
    class KeywordMacroShortcutAlias:
        """
        Keyword macro handler to handle shortcut aliases. These aliases are defined in waf_branch_spec and are used to
        group multiple configurations under one using a dictionary. This will make it so that aliases can be used in
        keywords as they are already used as a value for the 'configuration' keyword. For example:
            debug_test_file_list = 'module_tests.waf_files',
            profile_test_file_list = 'module_tests.waf_files'
        becomes
            test_all_file_list = 'module_tests.waf_files'
        """
        def process(self, keyword_name, keyword_value, current_platform, current_configuration):
            for alias, configs in LEGACY_CONFIGURATION_SHORTCUT_ALIASES.iteritems():
                if alias == 'all':
                    continue # Do not use 'all' alias, it conflicts with other aliases
                if current_configuration not in configs:
                    continue
                if keyword_name.startswith(alias):
                    remove_entry_name = keyword_name
                    new_kw_entry_name = keyword_name.replace(alias, current_configuration)
                elif '_{}_'.format(alias) in keyword_name:
                    remove_entry_name = keyword_name
                    new_kw_entry_name = keyword_name.replace('_{}_'.format(alias), '_{}_'.format(current_configuration))
                else:
                    continue
                return (new_kw_entry_name, keyword_value), remove_entry_name
            return None, None
    class KeywordWinX64PlatformGeneralization:
        """
        Keyword macro handler support the generic win_x64 platform to expand to the current vs<version> platform
        """
        def process(self, keyword_name, keyword_value, current_platform, current_configuration):
            # This only processes for the win_ platform
            if not ctx.is_windows_platform(current_platform):
                return None, None
            # Skip any concrete win_x64 platforms
            for win_platform in LUMBERYARD_SETTINGS.get_platforms_for_alias('win'):
                if win_platform in kw_entry:
                    return None, None
            # ordered from most to least specific prefix so the longest match wins
            if kw_entry.startswith('win_x64_test') and current_platform.endswith('_test'):
                remove_entry_name = keyword_name
                new_kw_entry_name = keyword_name.replace('win_x64_test', current_platform)
            elif kw_entry.startswith('win_x64') and not current_platform.endswith('_test'):
                remove_entry_name = keyword_name
                new_kw_entry_name = keyword_name.replace('win_x64', current_platform)
            elif kw_entry.startswith('win_test') and current_platform.endswith('_test'):
                remove_entry_name = keyword_name
                new_kw_entry_name = keyword_name.replace('win_test', current_platform)
            elif kw_entry.startswith('win') and not current_platform.endswith('_test'):
                remove_entry_name = keyword_name
                new_kw_entry_name = keyword_name.replace('win', current_platform)
            else:
                return None, None
            return (new_kw_entry_name, keyword_value), remove_entry_name
    # Only valid and supported configurations
    if not configuration:
        return
    if not LUMBERYARD_SETTINGS.is_valid_configuration(configuration) and not LUMBERYARD_SETTINGS.is_valid_configuration_alias(configuration):
        return
    macros = [KeywordMacroNDebug(),KeywordWinX64PlatformGeneralization(),KeywordMacroShortcutAlias()]
    if configuration != 'project_generator':
        for macro in macros:
            kw_entries_to_add = []
            kw_entries_to_remove = []
            # collect additions/removals first; kw cannot be mutated while iterating it
            for kw_entry, kw_value in kw.iteritems():
                kw_entry_to_add, kw_entry_to_remove = macro.process(kw_entry, kw_value, platform, configuration)
                if kw_entry_to_add is not None or kw_entry_to_remove is not None:
                    if kw_entry_to_add is not None:
                        kw_entries_to_add.append(kw_entry_to_add)
                    if kw_entry_to_remove is not None:
                        kw_entries_to_remove.append(kw_entry_to_remove)
            if len(kw_entries_to_add)>0:
                for new_kw_key, new_kw_value in kw_entries_to_add:
                    if new_kw_key in kw:
                        append_to_unique_list(kw[new_kw_key], new_kw_value)
                    else:
                        kw[new_kw_key] = new_kw_value
            if len(kw_entries_to_remove)>0:
                for kw_to_delete in kw_entries_to_remove:
                    del kw[kw_to_delete]
###############################################################################
# Wrapper for ApplyPlatformSpecificFlags for all flags to apply
@conf
def ApplyPlatformSpecificSettings(ctx, kw, target):
    """
    Check each compiler/linker flag for platform specific additions.

    Expands keyword macros, then folds every platform/configuration-qualified
    value into the matching base keyword in kw. Recurses into each entry of
    kw['additional_settings'].
    """
    platform = ctx.env['PLATFORM']
    configuration = get_configuration( ctx, target )
    # Expand any special macros
    process_kw_macros_expansion(ctx, target, kw, platform, configuration)
    # handle list entries
    for entry in COMMON_INPUTS:
        append_kw_entry(kw,entry,GetPlatformSpecificSettings(ctx, kw, entry, platform, configuration))
    # Handle string entries
    for entry in 'output_file_name'.split():
        if not entry in kw or kw[entry] == []: # No general one set yet
            kw[entry] = GetPlatformSpecificSettings(ctx, kw, entry, platform, configuration)
    # Recurse for additional settings
    for setting in kw['additional_settings']:
        ApplyPlatformSpecificSettings(ctx, setting, target)
###############################################################################
# Set env in case a env overwrite is specified for this project
def ApplyConfigOverwrite(ctx, kw):
    """
    If a configuration overwrite is registered for this target, switch
    kw['env'] to a detached copy of the overwritten environment.
    """
    target_name = kw['target']
    overwrites = ctx.env['CONFIG_OVERWRITES']
    if target_name not in overwrites:
        return
    platform = ctx.env['PLATFORM']
    overwrite_config = overwrites[target_name]
    overwrite_env = ctx.all_envs[platform + '_' + overwrite_config]
    # Re-apply the engine-specific shortcut values when loading another environment
    overwrite_env['PLATFORM'] = platform
    overwrite_env['CONFIGURATION'] = overwrite_config
    # Deep copy so this overwritten task cannot mutate other task generators' envs
    kw['env'] = overwrite_env.derive()
    kw['env'].detach()
@conf
def get_current_spec_defines(ctx):
    """Return the defines for the current spec, or [] during project generation."""
    platform = ctx.env['PLATFORM']
    if platform == 'project_generator' or platform == []:
        # No real platform while generating projects; nothing to define
        return []
    return ctx.spec_defines()
@feature('apply_non_monolithic_launcher_settings')
@before_method('process_source')
def apply_non_monolithic_launcher_settings(self):
    """Strip the mac bundle link flags for non-monolithic launchers."""
    self.env['LINKFLAGS_MACBUNDLE'] = [] # disable the '-dynamiclib' flag
@feature('apply_monolithic_build_settings')
@before_method('process_source')
def apply_monolithic_build_settings(self):
    """
    Fold the collected monolithic-build settings (use/uselib/frameworks/
    linkflags/libs and their paths) into this task generator, first the
    global set, then the project-prefixed set when a project_name exists.
    """
    # Add collected settings to link task
    # Don't do 'list(set(...))' on these values as duplicates will be removed
    # by waf later and some arguments need to be next to each other (such as
    # -force_load <lib>). The set will rearrange the order in a
    # non-deterministic way
    def _apply_monolithic_build_settings(monolithic_dict, prefix=''):
        append_to_unique_list(self.use, list(monolithic_dict[prefix + 'use']))
        append_to_unique_list(self.uselib, list(monolithic_dict[prefix + 'uselib']))
        append_to_unique_list(self.framework, list(monolithic_dict[prefix + 'framework']))
        # plain += (not unique-append) to preserve order and repeats of link flags
        self.linkflags += list(monolithic_dict[prefix + 'linkflags'])
        # static libs
        self.stlib += list(monolithic_dict[prefix + 'stlib'])
        append_to_unique_list(self.stlibpath, list(monolithic_dict[prefix + 'stlibpath']))
        # shared libs
        self.lib += list(monolithic_dict[prefix + 'lib'])
        append_to_unique_list(self.libpath, list(monolithic_dict[prefix + 'libpath']))
    Logs.debug("lumberyard: Applying monolithic build settings for %s ... " % self.name)
    if not hasattr(self.bld, 'monolithic_build_settings'):
        self.bld.monolithic_build_settings = defaultdict(lambda: [])
    # All CryEngineModules use AzCore
    _apply_monolithic_build_settings(self.bld.monolithic_build_settings)
    # if we're compiling a tool that isn't part of a project, then project_name will not be set.
    if getattr(self, 'project_name', None):
        # Add game specific files
        prefix = self.project_name + '_'
        _apply_monolithic_build_settings(self.bld.monolithic_build_settings,prefix)
@feature('apply_monolithic_build_settings')
@after_method('apply_link')
def apply_monolithic_pch_objs(self):
    """
    Link the output obj files from an MS PCH compile into every module that
    uses that PCH.

    Raises:
        Errors.WafError: if a 'use' entry names neither a task generator nor
        a configured third-party uselib.
    """
    for tgen_name in self.use:
        try:
            other_tg = self.bld.get_tgen_by_name(tgen_name)
        except Exception:
            # If we cannot find the use name, check if its a uselib.
            # BUGFIX: the error was previously constructed but never raised,
            # and the loop fell through with 'other_tg' unbound/stale.
            if not is_third_party_uselib_configured(self.bld, tgen_name):
                raise Errors.WafError("Invalid 'use' reference ({}) defined in module {}'".format(tgen_name,self.name))
            # a valid uselib has no task generator and therefore no pch task
            continue
        other_pch_task = getattr(other_tg, 'pch_task', None)
        if other_pch_task:
            if other_pch_task.outputs[0] not in self.link_task.inputs:
                Logs.debug('Lumberyard: Monolithic build: Adding pch %s from %s to %s ' % (other_pch_task.outputs[0], tgen_name, self.target))
                self.link_task.inputs.append(other_pch_task.outputs[0])
@conf
def LoadSharedSettings(ctx, k, kw, file_path = None):
    """
    Merge shared JSON settings files into kw.

    kw['shared_settings'] lists file names, resolved against file_path or
    Code/Tools/SharedSettings by default. Each key/value pair of each
    existing file is appended into kw. Missing files are silently skipped.
    """
    shared_list = kw.get('shared_settings',None)
    if not shared_list:
        return
    source_folder = file_path
    if not source_folder:
        source_folder = ctx.CreateRootRelativePath('Code/Tools/SharedSettings')
    if not source_folder:
        return
    for settings_file in shared_list:
        this_file = os.path.join(source_folder, settings_file)
        if os.path.exists(this_file):
            source_node = ctx.root.make_node(this_file)
            parsed_json = ctx.parse_json_file(source_node)
            for key, value in parsed_json.iteritems():
                append_kw_entry(kw, key, value)
@conf
def is_building_dedicated_server(ctx):
    """Return True when the current build targets a *_dedicated configuration."""
    # Non-build commands never target a dedicated server
    non_build_commands = ('configure',
                          'generate_uber_files',
                          'generate_module_def_files',
                          'generate_module_dependency_files')
    if ctx.cmd in non_build_commands:
        return False
    config = ctx.env['CONFIGURATION']
    # No concrete configuration during project generation (or before configure)
    if config == '' or config == [] or config == 'project_generator':
        return False
    return config.endswith('_dedicated')
@feature('cxx')
@before_method('process_source')
@before_method('add_pch_msvc')
@before_method('add_pch_clang')
def apply_custom_flags(self):
    """
    Apply per-module RTTI and _RELEASE-define overrides to the environment.

    'enable_rtti' toggles /GR vs /GR- on MSVC (removing any prior /GR* flag),
    or strips -fno-rtti on other compilers. 'remove_release_define' drops the
    _RELEASE define on MSVC.
    """
    # NOTE: the option values arrive wrapped in a list ([True]) by the kw sanitization
    rtti_include_option = getattr(self,'enable_rtti',False)
    if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME):
        remove_release_define_option = getattr(self,'remove_release_define',False)
        rtti_flag = '/GR' if rtti_include_option == [True] else '/GR-'
        # strip any previous /GR or /GR- so exactly one RTTI flag remains
        self.env['CXXFLAGS'] = list(filter(lambda r: not r.startswith('/GR'), self.env['CXXFLAGS']))
        self.env['CXXFLAGS'] += [rtti_flag]
        if remove_release_define_option:
            self.env['DEFINES'] = list(filter(lambda r: r != '_RELEASE', self.env['DEFINES']))
    else:
        if rtti_include_option == [True]:
            # non-MSVC compilers enable RTTI by default; just remove the disabling flag
            self.env['CXXFLAGS'] = list(filter(lambda r:not r.startswith('-fno-rtti'), self.env['CXXFLAGS']))
@feature('cxxprogram', 'cxxshlib', 'cprogram', 'cshlib', 'cxx', 'c')
@after_method('apply_link')
@before_method('process_use')
def set_link_outputs(self):
    """
    Rebuild the link task's output list so executables/shared libraries are
    produced directly in the output folder(s) instead of BinTemp, and set up
    the MSVC side outputs (import lib, .map, .pdb, manifest) plus any
    secondary-copy tasks.
    """
    if self.env['PLATFORM'] == 'project_generator' or not getattr(self, 'link_task', None):
        return
    # If no type is defined, this is just a stub task that shouldnt handle any additional build/link tasks
    if not hasattr(self,'_type'):
        return
    # apply_link creates the link task, and uses the output_file_name to create the outputs for the task
    # unfortunately, it will only create outputs in the build folder (BinTemp/) only. The outputs are deleted here and
    # recreated to move them into the output folder without copying if appropriate (dll/exe). these outputs may be
    # large, > 1GB for a pdb, and copying these files during a build can be a serious performance issue.
    # I opted for this method instead of modifying larger portions of waf, and trying to create the outputs in
    # a different directory earlier
    self.link_task.outputs = []
    # If there is an attribute for 'output_folder', then this is an override of the output target folder
    # Note that subfolder if present will still be applied below
    # SanitizeInputs() forces output_folder to be a list.
    output_folders = getattr(self, 'output_folder', None)
    if output_folders:
        if not isinstance(output_folders, list):
            output_folders = [output_folders]
        # Process each output folder and build the list of output nodes
        output_nodes = []
        for output_folder in output_folders:
            if os.path.isabs(output_folder):
                target_path = self.bld.root.make_node(output_folder)
            else:
                target_path = self.bld.path.make_node(output_folder)
            target_path.mkdir()
            output_nodes.append(target_path)
    else:
        output_nodes = self.bld.get_output_folders(self.bld.env['PLATFORM'], self.bld.env['CONFIGURATION'])
    # append sub folder if it exists
    output_sub_folder = getattr(self, 'output_sub_folder', None)
    if output_sub_folder:
        new_output_nodes = []
        for output_node in output_nodes:
            output_node = output_node.make_node(output_sub_folder)
            output_node.mkdir()
            new_output_nodes.append(output_node)
        output_nodes = new_output_nodes
    # process only the first output, additional outputs will copy from the first. Its rare that additional copies are needed
    output_node = output_nodes[0]
    if self._type == 'stlib':
        # add_target() will create an output in the temp/intermediate directory. Since .libs are not directly executable, they
        # are left in the temp directory. If an additional lib copy is specified with copy_static_library, a copy will
        # be done below where additional copies are handled.
        self.link_task.add_target(self.output_file_name)
    else:
        # add_target() creates the intermediate nodes directories, but enforces that the output is always
        # under the parent task, which is a folder in the build directory.
        # Instead, use make_node()/set_outputs() to put the target will be in the output location
        # find the pattern to apply to the output target, similar to add_target()
        pattern = self.env[self.link_task.__class__.__name__ + '_PATTERN']
        if not pattern:
            pattern = '%s'
        # apply pattern to target
        target = pattern % self.output_file_name
        # create node (and intermediate folders)
        target = output_node.make_node(target)
        self.link_task.set_outputs(target)
    # remove extensions to get the name of the target, we will be using it as a base for additional outputs below
    target_node = self.link_task.outputs[0]
    name = os.path.splitext(target_node.name)[0]
    # msvc-specific handling. its easier to do here because we have access to the output_nodes
    is_msvc = 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME)
    is_secondary_copy_install = getattr(self, 'is_secondary_copy_install', False)
    if is_msvc:
        # add the import library to the output list. Only add if secondary_copy_install is set
        if self._type == 'shlib' and (self.bld.artifacts_cache or is_secondary_copy_install):
            import_lib_node = output_node.make_node(name + '.lib')
            import_lib_exp_node = output_node.make_node(name + '.exp')
            self.link_task.set_outputs(import_lib_node)
            self.link_task.set_outputs(import_lib_exp_node)
        # create map files
        if self.bld.is_option_true('generate_map_file'):
            if self._type != 'stlib':
                map_file_node = output_node.make_node(name + '.map')
                self.link_task.outputs.append(map_file_node)
                self.env.append_value('LINKFLAGS', '/MAP:' + map_file_node.abspath())
        # add pdb to outputs
        if self.bld.is_option_true('generate_debug_info'):
            if self._type != 'stlib':
                pdb_node = output_node.make_node(name + '.pdb')
                self.link_task.outputs.append(pdb_node)
        if self.env.MSVC_MANIFEST:
            """
            Special linker for MSVC with support for embedding manifests into DLL's
            and executables compiled by Visual Studio 2005 or probably later. Without
            the manifest file, the binaries are unusable.
            See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx
            """
            if self._type == 'shlib' or getattr(self, "additional_manifests", None):
                man_node = output_node.make_node(target_node.name + '.manifest')
                self.link_task.outputs.append(man_node)
                self.link_task.do_manifest = True
    # prep additional copies variables
    output_sub_folder_copy_attr = getattr(self, 'output_sub_folder_copy', [])
    if isinstance(output_sub_folder_copy_attr, str):
        output_sub_folder_copy_attr = [ output_sub_folder_copy_attr ]
    is_import_library = is_msvc and self._type == 'shlib' # only do import lib copy on windows, .dylibs/.so don't work like this
    skip_secondary_copy = (not output_sub_folder_copy_attr) and (is_import_library and not is_secondary_copy_install)
    # copy task creator lambda
    def _create_sub_folder_copy_task(output_sub_folder_copy, output_node):
        # skip copies into the primary sub-folder itself (would copy onto the original)
        if output_sub_folder_copy is not None and output_sub_folder_copy != output_sub_folder:
            output_node_parent = output_node.parent.make_node(output_sub_folder_copy)
            output_node_copy = output_node_parent.make_node(os.path.basename(output_node.abspath()))
            self.create_copy_task(output_node, output_node_copy, False, True)
    # handle additional copies
    copy_static_library = getattr(self, 'copy_static_library', False)
    for idx, output_node in enumerate(output_nodes):
        # for additional output copies, create copy tasks
        for output in self.link_task.outputs:
            if (idx > 0) or copy_static_library:
                output_node_copy = output_node.make_node(output.name)
                self.create_copy_task(output, output_node_copy, False, True)
            # Special case to handle additional copies
            if not skip_secondary_copy:
                for output_sub_folder_copy_attr_item in output_sub_folder_copy_attr:
                    if isinstance(output_sub_folder_copy_attr_item, str):
                        # only add the import library to the output list (allowing it to be copied)
                        # if we're trying to do an install
                        _, ext = os.path.splitext(output.name)
                        if is_secondary_copy_install or (ext in ['.dll', '.pdb', '.dylib', '.so']):
                            # For secondary copies, we only want to perform them if we are building specifically for a spec
                            # that contains the current target, otherwise skip because this task may have been pulled in
                            # by the dependency tree check
                            if (self.bld.is_project_spec_specified() and self.target in self.bld.spec_modules(self.bld.options.project_spec)) or not self.bld.is_project_spec_specified():
                                _create_sub_folder_copy_task(output_sub_folder_copy_attr_item, output)
                    else:
                        Logs.warn("[WARN] attribute items in 'output_sub_folder_copy' must be a string.")
# Use this feature if you want to write to an existing exe/dll that may be currently in use. The os prevents
# writing to files that are currently in use because it may not have loaded the whole file, so changing part of it
# could be catastrophic later as it pages in the file. We can get around this restriction by changing the file
# name: the same inode will still be used/referenced by the os to complete paging operations. We will then be
# able to write to a new file with that existing file name. The old process will continue to run as a .exe_tmp (or .dll_tmp) file.
# Another trick is used to delete the temporary renamed files: unlink. When the os loads a file, it increments
# a ref count to the file and decrements it when its done with the file. This is how temporary files work. These
# files will be hidden if you try to look at them through explorer or ls. Unlink allows us to remove the file
# system's reference to an existing file, but that file won't be deleted from the system until all programs that
# are using it also close their reference, whereas delete/erase requires that the file be removable at that time.
# Unfortunately, unlink() doesn't work well on all platforms, so its optional cleanup to help keep your temporaries low.
@feature('link_running_program')
@after_method('set_link_outputs')
def patch_run_method(self):
    """
    Wrap the link task's run() so that in-use output files (exe/dll/pdb) are
    renamed to <name>_tmp before linking, letting the linker write a fresh
    file while the old one keeps running. On failure the renames are rolled
    back; on success the temporaries are unlinked (best effort).
    """
    if self.env['PLATFORM'] == 'project_generator' or not getattr(self, 'link_task', None):
        return
    def temp_file_name(file):
        temp_file_name = file + '_tmp'
        return temp_file_name
    # lambda to callback with a saved 'self' variable
    old_run = self.link_task.run
    def patch_run():
        # verify that the dest files are writable.
        file_rename_pairs = []
        unexpected_lock_error = False
        Logs.debug('link_running_program: applying: %s' % (self.name))
        for output in self.link_task.outputs:
            tgt = output.abspath()
            temp_tgt = temp_file_name(tgt)
            if os.access(temp_tgt, os.F_OK):
                if not os.access(temp_tgt, os.W_OK):
                    Logs.warn('link_running_program: temp file %s marked read-only, will cause issues' % (temp_tgt))
                # delete existing file. On win, unlink() and delete() are the same implementation in python
                # unlink on posix allows us to remove a file if its currently in use
                try:
                    Logs.debug('link_running_program: removing temporary file %s' % (temp_tgt))
                    os.unlink(temp_tgt)
                except OSError as e:
                    if e.errno in [errno.EACCES]:
                        # something is currently locking the temp file, that is ok, assume the tgt file is available as the target
                        Logs.debug('link_running_program: unable to remove temporary file %s, assuming it in use' % (temp_tgt))
                        continue
            try:
                # check for file existence.
                file_exists = os.access(tgt, os.F_OK)
                if not file_exists:
                    Logs.debug('link_running_program: file %s doesnt exist - there should be no issue with locks' % (tgt))
                    continue
                # check for file write-ability. must do this after checking for existance because it will fail silently
                # if the file doesn't exist, and F_OK is defined as 0 so it can't be or'd with the other constants
                # actually tests for both permissions because an unreadable file is useless, and some linkers will read
                # the contents of the file for delta modification
                file_writable = os.access(tgt, os.W_OK)
                if not file_writable:
                    # if the file doesn't have write permissions, let the compile task fail, it will provide a good error message
                    # renaming write-protected files should not occur, the user has decided that the file should not change
                    # and the build should respect that
                    # NOTE(review): this message reads inverted — it fires when the file is NOT writable; confirm intent
                    Logs.warn('link_running_program: file has +W - %s' % (tgt))
                    continue
                # rename an existing file to temp file, may fail if tmp file is locked (in use)
                # rename them to a temporary file, which will allow a new
                # file of that name to be written to the actual dest
                Logs.debug('link_running_program: rename %s -> %s' % (tgt, temp_tgt))
                os.rename(tgt, temp_tgt)
                # save the rename, will try to unroll these changes on task error
                file_rename_pairs.append((tgt, temp_tgt))
            except OSError as e:
                # unable to move to temporary, which may fail if temp_tgt is in use. If temp_tgt is write protected, it will
                # return EEXISTS, which is ok as long as tgt is also not locked. There is a warning above for write
                # protected temp files.
                if e.errno in [errno.EACCES, errno.EEXIST]:
                    continue
                Logs.debug('link_running_program: unable to move %s -> %s due to errno %d' % (tgt, temp_tgt, e.errno))
                unexpected_lock_error = True
        # run the command, generating a new output files
        ret = 1 # assume failure in case an exception fires and fails to write ret
        try:
            ret = old_run()
        finally:
            if ret and unexpected_lock_error:
                # log a message if we detected an oddity before running the command
                Logs.error("link_running_program: Error detected trying to move files to temporary files before compiling, may cause compilation failure")
            if ret:
                # failure case, restore temporary file to original name, leaving the file still runnable
                for (tgt, temp_tgt) in file_rename_pairs:
                    try:
                        Logs.debug('link_running_program: bailing out - restoring %s -> %s' % (temp_tgt, tgt))
                        try:
                            # the linker may write out a partial file, which happens with pdb files that contain missing
                            # symbols. Remove the potentially partial file before restoring
                            os.unlink(tgt)
                        except OSError as e:
                            # ignore errors, it will be caught in the next rename operation, if its actually an error
                            pass
                        os.rename(temp_tgt, tgt)
                    except OSError as e:
                        Logs.warn('link_running_program: Could not restore existing file on failure: %s -> %s' % (temp_tgt, tgt))
            else:
                # success case, unlink/delete temp file
                for (tgt, temp_tgt) in file_rename_pairs:
                    try:
                        # unlink() will mark files that are currently open for deletion on close
                        # unlink() and delete() system calls are slightly different, delete() won't return until
                        # it successfully deletes the file. On Win python, these 2 function both map to delete()
                        # internally, which is wrong and confusing. There is another windows api that works
                        # like unlink on posix, DeleteFile(). In previous versions of windows, this would work
                        # on executables as well, but no longer does. Since this is optional cleanup, I didn't
                        # bother to attempt to use the windows specific api. The individual executable should
                        # also attempt to clean up temp copies
                        Logs.debug('link_running_program: unlinking temp %s' % (temp_tgt))
                        os.unlink(temp_tgt)
                    except OSError as e:
                        if os.name != 'nt':
                            # other oses that unlink does what actually should won't hit this code path as much
                            # as win, and if it does, its more serious
                            Logs.error('link_running_program: Could not unlink %s, temporary file may cause issues' % (temp_tgt))
                        # stop if any file fails to delete. The exe will come first in this list, we want to
                        # leave the .pdb if it fails to delete the exe for any future debugging
                        # this may leave extra files, but shouldn't be harmful otherwise, and hopefully
                        # less harmful than deleting too many of them
                        break
        return ret
    # replace the link tasks' run function with the lambda above
    if self._type in ['program', 'shlib']:
        # replace the run method with a lambda, save the old run method so we can wrap/call it
        self.link_task.run = patch_run
###############################################################################
@conf
def CryEngineStaticLibrary(ctx, *k, **kw):
	"""
	Declare a CryEngine static library target.

	Normally this is a plain static library. When the module sets the
	'create_test_driver' keyword, the keyword set is split so that a companion
	shared library can be emitted alongside the static one for test drivers.
	(Monolithic build configurations are handled by the legacy helper.)
	"""
	if not kw.get('create_test_driver', False):
		# common case: a single static library
		ctx.CryEngineStaticLibraryLegacy(**kw)
		return
	kw_static, kw_shared = ctx.split_keywords_from_static_lib_definition_for_test(kw)
	ctx.CryEngineStaticLibraryLegacy(**kw_static)
	if kw_shared:
		ctx.CryEngineSharedLibrary(**kw_shared)
| 41.393542 | 278 | 0.64707 |
a6875b1e2be1f79a96366c8e40b6f4e746ff5b50 | 2,299 | py | Python | ssseg/modules/models/samplers/ohempixelsampler.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | 411 | 2020-10-22T02:24:57.000Z | 2022-03-31T11:19:17.000Z | ssseg/modules/models/samplers/ohempixelsampler.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | 24 | 2020-12-21T03:53:54.000Z | 2022-03-17T06:50:00.000Z | ssseg/modules/models/samplers/ohempixelsampler.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | 59 | 2020-12-04T03:40:12.000Z | 2022-03-30T09:12:47.000Z | '''
Function:
OHEM pixel sampler
Author:
Zhenchao Jin
'''
import torch
import torch.nn.functional as F
from .base import BasePixelSampler
'''OHEM pixel sampler'''
class OHEMPixelSampler(BasePixelSampler):
    '''Online Hard Example Mining (OHEM) pixel sampler.

    Selects, per batch, the pixels that are currently hardest for the model
    (low prediction confidence when a threshold is given, otherwise highest
    loss) and returns a 0/1 weight map so the training loss focuses on them.

    Fix: the final ``return`` line was corrupted by fused metadata junk
    (``return seg_weight | 41.8 | 107 | 0.601131 |``), which is a syntax
    error; it is restored to ``return seg_weight``.
    '''
    def __init__(self, loss_func=None, thresh=None, min_kept_per_image=100000, ignore_index=255, **kwargs):
        '''
        Args:
            loss_func: per-pixel loss used to rank pixels when ``thresh`` is None.
            thresh: confidence threshold; pixels below it are selected.
            min_kept_per_image: minimum number of pixels kept per image.
            ignore_index: label value excluded from sampling.
        '''
        super(OHEMPixelSampler, self).__init__()
        assert min_kept_per_image > 1
        # set attrs
        self.loss_func = loss_func
        self.thresh = thresh
        self.min_kept_per_image = min_kept_per_image
        self.ignore_index = ignore_index
    def sample(self, seg_logit, seg_label):
        '''Sample pixels that have high loss or low prediction confidence.

        Args:
            seg_logit: logits of shape (N, C, H, W).
            seg_label: labels of shape (N, 1, H, W).
        Returns:
            seg_weight: float tensor of shape (N, H, W) with 1.0 on the
            selected (hard) pixels and 0.0 elsewhere.
        '''
        # seg_logit: (N, C, H, W), seg_label: (N, 1, H, W)
        assert seg_logit.shape[2:] == seg_label.shape[2:]
        assert seg_label.shape[1] == 1
        # sample pixels; no gradients are needed for the selection itself
        with torch.no_grad():
            seg_label = seg_label.squeeze(1).long()
            batch_kept = self.min_kept_per_image * seg_label.size(0)
            valid_mask = (seg_label != self.ignore_index)
            seg_weight = seg_logit.new_zeros(size=seg_label.size())
            valid_seg_weight = seg_weight[valid_mask]
            if self.thresh is not None:
                # confidence-based selection: keep pixels whose probability of
                # the true class falls below max(thresh, k-th lowest prob),
                # which guarantees at least batch_kept pixels are kept
                seg_prob = F.softmax(seg_logit, dim=1)
                tmp_seg_label = seg_label.clone().unsqueeze(1)
                # ignored pixels get a dummy class 0 so gather() stays in range
                tmp_seg_label[tmp_seg_label == self.ignore_index] = 0
                seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
                sort_prob, sort_indices = seg_prob[valid_mask].sort()
                if sort_prob.numel() > 0:
                    min_threshold = sort_prob[min(batch_kept, sort_prob.numel() - 1)]
                else:
                    min_threshold = 0.0
                threshold = max(min_threshold, self.thresh)
                valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
            else:
                # loss-based selection: keep the batch_kept highest-loss pixels
                losses = self.loss_func(
                    seg_logit,
                    seg_label,
                )
                _, sort_indices = losses[valid_mask].sort(descending=True)
                valid_seg_weight[sort_indices[:batch_kept]] = 1.
            seg_weight[valid_mask] = valid_seg_weight
        # seg_weight: (N, H, W)
        return seg_weight
a3282a4c0613163e7e29d71ce4e9837597288f7d | 510 | py | Python | Algorithms/Greedy/Activity-Selection.py | javokhirbek1999/AlgorithmsDS | f5f403fed959ac8cf3064c8c852c59f2e67496ab | [
"MIT"
] | 6 | 2021-03-21T02:24:05.000Z | 2021-04-05T01:32:13.000Z | Algorithms/Greedy/Activity-Selection.py | javokhirbek1999/AlgorithmsDS | f5f403fed959ac8cf3064c8c852c59f2e67496ab | [
"MIT"
] | null | null | null | Algorithms/Greedy/Activity-Selection.py | javokhirbek1999/AlgorithmsDS | f5f403fed959ac8cf3064c8c852c59f2e67496ab | [
"MIT"
] | null | null | null | # Activities:
# Columns:
#   1. Activity name
#   2. Activity start time
#   3. Activity end time
# Sample input for the greedy activity-selection algorithm below; the list
# is deliberately unsorted (maxActivities orders it by end time itself).
activities = [
    ['A1',0,6],
    ['A2',3,4],
    ['A3',1,2],
    ['A4',5,8],
    ['A5',5,7],
    ['A6',8,9]
]
def maxActivities(activities):
    """Greedy activity selection.

    Returns a maximum-size subset of mutually compatible activities, where
    each activity is a list ``[name, start, end]`` and two activities are
    compatible when one starts no earlier than the other ends.

    Sorting by earliest finish time and always taking the next compatible
    activity is the classic optimal greedy strategy for this problem.

    Improvements over the original:
      - returns [] for an empty input (previously raised IndexError),
      - leaves the caller's list unmodified (previously sorted it in place).
    """
    # earliest-finish-first order; sorted() copies instead of mutating input
    ordered = sorted(activities, key=lambda a: a[2])
    chosen = []
    for activity in ordered:
        # compatible if it starts no earlier than the last selection ends
        if not chosen or chosen[-1][2] <= activity[1]:
            chosen.append(activity)
    return chosen
| 19.615385 | 46 | 0.584314 |
3b59c96ab3016c708b418a94fa13ddd9e0e08208 | 25,417 | py | Python | backend/handler.py | ioggstream/httpsig-org | 1592099b63efd5e3a1ee81067fd171a51055a793 | [
"MIT"
] | null | null | null | backend/handler.py | ioggstream/httpsig-org | 1592099b63efd5e3a1ee81067fd171a51055a793 | [
"MIT"
] | null | null | null | backend/handler.py | ioggstream/httpsig-org | 1592099b63efd5e3a1ee81067fd171a51055a793 | [
"MIT"
] | null | null | null | import json
try:
from http_parser.parser import HttpParser
except ImportError:
from http_parser.pyparser import HttpParser
import http_sfv
from urllib.parse import parse_qs
from Cryptodome.Signature import pss
from Cryptodome.Signature import pkcs1_15
from Cryptodome.Signature import DSS
from Cryptodome.Hash import SHA512
from Cryptodome.Hash import SHA384
from Cryptodome.Hash import SHA256
from Cryptodome.Hash import HMAC
from Cryptodome.PublicKey import RSA
from Cryptodome.PublicKey import ECC
from Cryptodome import Random
from Cryptodome.IO import PEM
from Cryptodome.IO import PKCS8
from Cryptodome.Signature.pss import MGF1
import base64
# MGF1 mask-generation helpers for RSASSA-PSS. PEP 8 (E731) prefers def over
# assigning lambdas to names; behavior and call signatures are unchanged.
def mgf512(x, y):
    """MGF1 over SHA-512 — used with rsa-pss-sha512 and JOSE PS512."""
    return MGF1(x, y, SHA512)
def mgf384(x, y):
    """MGF1 over SHA-384 — used with JOSE PS384."""
    return MGF1(x, y, SHA384)
def mgf256(x, y):
    """MGF1 over SHA-256 — used with JOSE PS256."""
    return MGF1(x, y, SHA256)
def cors(event, context):
    """Lambda entry point for CORS preflight (OPTIONS) requests.

    Always succeeds; only the CORS headers matter to the browser.
    The second parameter was renamed from ``controller`` to ``context`` for
    consistency with the other handlers (AWS invokes handlers positionally).
    """
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "OPTIONS,POST,GET"
        }
    }
def parse(event, context):
    """Lambda entry point: parse a raw HTTP message (request or response)
    into its signature-relevant components and return them as JSON."""
    raw = event['body']
    if not raw:
        # nothing to parse -> bad request
        return {
            'statusCode': 400,
            'headers': {"Access-Control-Allow-Origin": "*"},
        }
    parsed = parse_components(raw.encode('utf-8'))
    return {
        'statusCode': 200,
        'headers': {"Access-Control-Allow-Origin": "*"},
        'body': json.dumps(parsed),
    }
def parse_components(msg):
    """Parse a raw HTTP message into its signature-relevant components.

    Returns a dict with:
      - 'fields': every header, once as its raw (normalized) value, plus —
        when the value parses as an RFC 8941 structured field — one entry per
        dictionary key and a re-serialized ('sv': True) entry;
      - 'inputSignatures': any Signature-Input / Signature pairs already on
        the message;
      - 'derived': derived components — just @status for a response, or the
        @method/@target-uri/@authority/... set for a request.

    NOTE(review): @scheme/@target-uri hard-code 'https' and the Host header
    is read directly, so HTTP/1.1-over-HTTPS is assumed (demo behavior).
    """
    p = HttpParser()
    p.execute(msg, len(msg))
    response = {}
    response['fields'] = []
    for h in p.get_headers():
        response['fields'].append(
            {
                'id': h.lower(),
                'val': p.get_headers()[h] # Note: this normalizes the header value for us
            }
        )
        # try to parse this as a dictionary, see if it works
        try:
            dic = http_sfv.Dictionary()
            dic.parse(p.get_headers()[h].encode('utf-8'))
            # one component entry per dictionary member, keyed by 'key'
            for k in dic:
                response['fields'].append(
                    {
                        'id': h.lower(),
                        'key': k,
                        'val': str(dic[k])
                    }
                )
            # plus the whole dictionary in strict re-serialized form
            response['fields'].append(
                {
                    'id': h.lower(),
                    'sv': True,
                    'val': str(dic)
                }
            )
        except ValueError:
            # not a dictionary, not a problem
            # try to parse this as a list, see if it works
            try:
                sv = http_sfv.List()
                sv.parse(p.get_headers()[h].encode('utf-8'))
                response['fields'].append(
                    {
                        'id': h.lower(),
                        'sv': True,
                        'val': str(sv)
                    }
                )
            except ValueError:
                # try to parse this as an item, see if it works
                try:
                    sv = http_sfv.Item()
                    sv.parse(p.get_headers()[h].encode('utf-8'))
                    response['fields'].append(
                        {
                            'id': h.lower(),
                            'sv': True,
                            'val': str(sv)
                        }
                    )
                except ValueError:
                    pass
    if 'signature-input' in p.get_headers():
        # existing signatures, parse the values
        siginputheader = http_sfv.Dictionary()
        siginputheader.parse(p.get_headers()['signature-input'].encode('utf-8'))
        sigheader = http_sfv.Dictionary()
        sigheader.parse(p.get_headers()['signature'].encode('utf-8'))
        siginputs = {}
        for (k,v) in siginputheader.items():
            siginput = {
                'coveredComponents': [c.value for c in v], # todo: handle parameters
                'params': {p:pv for (p,pv) in v.params.items()},
                'value': str(v),
                'signature': str(sigheader[k])
            }
            siginputs[k] = siginput
        response['inputSignatures'] = siginputs
    if p.get_status_code():
        # response
        response['derived'] = [
            {
                'id': '@status',
                'val': p.get_status_code()
            }
        ]
    else:
        # request
        response['derived'] = [
            {
                'id': '@method',
                'val': p.get_method()
            },
            {
                'id': '@target-uri',
                'val':
                    'https://' # TODO: this always assumes an HTTP connection for demo purposes
                    + p.get_headers()['host'] # TODO: this library assumes HTTP 1.1
                    + p.get_url()
            },
            {
                'id': '@authority',
                'val': p.get_headers()['host'] # TODO: this library assumes HTTP 1.1
            },
            {
                'id': '@scheme',
                'val': 'https' # TODO: this always assumes an HTTPS connection for demo purposes
            },
            {
                'id': '@request-target',
                'val': p.get_url()
            },
            {
                'id': '@path',
                'val': p.get_path()
            },
            {
                'id': '@query',
                'val': p.get_query_string()
            }
        ]
        # one @query-param component per query parameter; repeated names get
        # an 'idx' discriminator since the spec behavior is undefined there
        qs = parse_qs(p.get_query_string())
        for q in qs:
            v = qs[q]
            if len(v) == 1:
                response['derived'].append(
                    {
                        'id': '@query-param',
                        'name': q,
                        'val': v[0]
                    }
                )
            elif len(v) > 1:
                # Multiple values, undefined behavior?
                for i in range(len(v)):
                    response['derived'].append(
                        {
                            'id': '@query-param',
                            'name': q,
                            'val': v[i],
                            'idx': i
                        }
                    )
    return response
def input(event, context):
    """Lambda entry point: build the HTTP message signature base string.

    The request body carries the raw message ('msg'), the list of covered
    components ('coveredComponents': dicts with 'id' and optional 'key'/'sv'
    flags) and optional signature parameters (created/expires/keyid/nonce/alg).
    Returns the signature base ('signatureInput') and the serialized
    @signature-params value ('signatureParams').

    NOTE: the name shadows the builtin input(); kept as-is because it is the
    deployed handler name.
    NOTE(review): if a covered component is not present in the parsed message,
    next() yields None and comp['val'] raises TypeError — presumably the
    frontend only offers components that exist; confirm.
    """
    if not event['body']:
        return {
            'statusCode': 400,
            'headers': {
                "Access-Control-Allow-Origin": "*"
            }
        }
    data = json.loads(event['body'])
    msg = data['msg'].encode('utf-8')
    # re-parse the input message
    components = parse_components(msg)
    # sigparams collects the covered component identifiers (an inner list in
    # structured-field terms); base accumulates the signature base lines
    sigparams = http_sfv.InnerList()
    base = '';
    for cc in data['coveredComponents']:
        c = cc['id']
        if not c.startswith('@'):
            # it's a header
            i = http_sfv.Item(c.lower())
            if 'key' in cc:
                # try a dictionary header value
                i.params['key'] = cc['key']
                comp = next((x for x in components['fields'] if 'key' in x and x['id'] == c and x['key'] == cc['key']), None)
                sigparams.append(i)
                base += str(i)
                base += ': '
                base += comp['val']
                base += "\n"
            elif 'sv' in cc:
                # strict re-serialized structured-field value
                i.params['sv'] = True
                comp = next((x for x in components['fields'] if x['id'] == c), None)
                sigparams.append(i)
                base += str(i)
                base += ': '
                base += comp['val']
                base += "\n"
            else:
                # raw header value
                comp = next((x for x in components['fields'] if x['id'] == c), None)
                sigparams.append(i)
                base += str(i)
                base += ': '
                base += comp['val']
                base += "\n"
        else:
            # it's a derived value
            i = http_sfv.Item(c)
            comp = next((x for x in components['derived'] if x['id'] == c), None)
            sigparams.append(i)
            base += str(i)
            base += ': '
            base += comp['val']
            base += "\n"
    # optional signature parameters, copied through in a fixed order
    if 'created' in data:
        sigparams.params['created'] = data['created']
    if 'expires' in data:
        sigparams.params['expires'] = data['expires']
    if 'keyid' in data:
        sigparams.params['keyid'] = data['keyid']
    if 'nonce' in data:
        sigparams.params['nonce'] = data['nonce']
    if 'alg' in data:
        sigparams.params['alg'] = data['alg']
    # the final line of the base is the @signature-params pseudo-header
    sigparamstr = ''
    sigparamstr += str(http_sfv.Item("@signature-params"))
    sigparamstr += ": "
    sigparamstr += str(sigparams)
    base += sigparamstr
    response = {
        'signatureInput': base,
        'signatureParams': str(sigparams)
    }
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*"
        },
        'body': json.dumps(response)
    }
def sign(event, context):
    """Lambda entry point: sign a prepared signature base string.

    Expects a JSON body with:
      - signatureInput: the signature base string to sign
      - signatureParams: the serialized @signature-params value
      - signingKeyType: 'x509', 'shared' or 'jwk'
      - signingKeyX509 / signingKeyShared / signingKeyJwk: the key material
      - alg: an HTTP message signature algorithm name, or 'jose' to take the
        algorithm from the JWK's "alg" member (JWK keys only)
      - label: the signature label used in the response headers

    Returns the base64 signature plus ready-to-paste Signature-Input /
    Signature header lines; 400 on any bad input.

    Fix: the original if/elif ladder tested jwk['alg'] == 'HS256' twice, so
    the (second) branch meant for HS512 was unreachable and JOSE HS512
    signing always failed; the dispatch tables below restore it.
    """
    cors_headers = {"Access-Control-Allow-Origin": "*"}
    bad_request = {'statusCode': 400, 'headers': cors_headers}
    if not event['body']:
        return bad_request
    data = json.loads(event['body'])
    siginput = data['signatureInput']
    sigparams = data['signatureParams']
    signing_key_type = data['signingKeyType']
    alg = data['alg']
    label = data['label']
    key = None
    shared_key = None
    jwk = None
    if signing_key_type == 'x509':
        key = parseKeyX509(data['signingKeyX509'])
    elif signing_key_type == 'shared':
        if alg != 'hmac-sha256':
            # a raw shared secret is only usable for HMAC
            return bad_request
        shared_key = data['signingKeyShared'].encode('utf-8')
    elif signing_key_type == 'jwk':
        key, jwk, shared_key = parseKeyJwk(data['signingKeyJwk'])
    else:
        # unknown key type
        return bad_request
    if alg == 'jose':
        # JOSE-driven algorithm choice is only available for JWK keys
        if signing_key_type != 'jwk':
            return bad_request
        if not jwk or jwk.get('alg', 'none') == 'none':
            # missing/unsupported JWK algorithm
            return bad_request
        # resolve to an internal key; a tuple cannot be forged through the
        # JSON 'alg' field, so JOSE names stay unreachable from the outside
        alg = ('jose', jwk['alg'])
    message = siginput.encode('utf-8')
    # dispatch tables: algorithm name -> hash (plus PSS parameters).
    # RSASSA-PKCS1-v1_5:
    pkcs1_algs = {'rsa-v1_5-sha256': SHA256, ('jose', 'RS256'): SHA256,
                  ('jose', 'RS384'): SHA384, ('jose', 'RS512'): SHA512}
    # RSASSA-PSS: (hash, MGF1 function, salt length) track the hash size
    pss_algs = {'rsa-pss-sha512': (SHA512, mgf512, 64),
                ('jose', 'PS256'): (SHA256, mgf256, 32),
                ('jose', 'PS384'): (SHA384, mgf384, 48),
                ('jose', 'PS512'): (SHA512, mgf512, 64)}
    # ECDSA per FIPS 186-3:
    ecdsa_algs = {'ecdsa-p256-sha256': SHA256, ('jose', 'ES256'): SHA256,
                  ('jose', 'ES384'): SHA384, ('jose', 'ES512'): SHA512}
    # HMAC (HS512 restored -- see docstring):
    hmac_algs = {'hmac-sha256': SHA256, ('jose', 'HS256'): SHA256,
                 ('jose', 'HS384'): SHA384, ('jose', 'HS512'): SHA512}
    signed = None
    if alg in pkcs1_algs:
        h = pkcs1_algs[alg].new(message)
        signed = http_sfv.Item(pkcs1_15.new(key).sign(h))
    elif alg in pss_algs:
        hashmod, mgf, salt_len = pss_algs[alg]
        h = hashmod.new(message)
        signer = pss.new(key, mask_func=mgf, salt_bytes=salt_len)
        signed = http_sfv.Item(signer.sign(h))
    elif alg in ecdsa_algs:
        h = ecdsa_algs[alg].new(message)
        signed = http_sfv.Item(DSS.new(key, 'fips-186-3').sign(h))
    elif alg in hmac_algs:
        signer = HMAC.new(shared_key, digestmod=hmac_algs[alg])
        signer.update(message)
        signed = http_sfv.Item(signer.digest())
    else:
        # unknown algorithm
        return bad_request
    if not signed:
        return {'statusCode': 500, 'headers': cors_headers}
    # package the raw signature plus ready-made headers for the caller
    encoded = base64.b64encode(signed.value)
    sigparamheader = http_sfv.InnerList()
    sigparamheader.parse(sigparams.encode('utf-8'))
    siginputheader = http_sfv.Dictionary()
    siginputheader[label] = sigparamheader
    sigheader = http_sfv.Dictionary()
    sigheader[label] = signed
    headers = 'Signature-Input: ' + str(siginputheader) + '\n'
    headers += 'Signature: ' + str(sigheader)
    response = {
        'signatureOutput': encoded.decode('utf-8'),
        'headers': headers
    }
    return {
        'statusCode': 200,
        'headers': cors_headers,
        'body': json.dumps(response)
    }
def verify(event, context):
    """Lambda entry point: verify a signature over a signature base string.

    Expects a JSON body with the signature base ('signatureInput'), the
    base64 signature value ('signature'), the key material (same fields as
    sign()) and the algorithm ('alg', or 'jose' to take it from the JWK).
    Returns 200 with {'signatureVerified': bool}; 400 on bad input.

    Fix: HMAC verification previously compared digests with '==', which is
    not constant-time; it now uses hmac.compare_digest.
    """
    from hmac import compare_digest  # stdlib; constant-time MAC comparison
    cors_headers = {"Access-Control-Allow-Origin": "*"}
    bad_request = {'statusCode': 400, 'headers': cors_headers}
    if not event['body']:
        return bad_request
    data = json.loads(event['body'])
    msg = data['httpMsg']  # raw message; not needed for verification itself
    siginput = data['signatureInput']
    sigparams = data['signatureParams']
    signing_key_type = data['signingKeyType']
    alg = data['alg']
    signature = http_sfv.Item()
    # the parser must be called explicitly this way, otherwise the value
    # would be treated as a plain string rather than a byte sequence
    signature.parse(data['signature'].encode('utf-8'))
    key = None
    shared_key = None
    jwk = None
    if signing_key_type == 'x509':
        key = parseKeyX509(data['signingKeyX509'])
    elif signing_key_type == 'shared':
        if alg != 'hmac-sha256':
            # a raw shared secret is only usable for HMAC
            return bad_request
        shared_key = data['signingKeyShared'].encode('utf-8')
    elif signing_key_type == 'jwk':
        key, jwk, shared_key = parseKeyJwk(data['signingKeyJwk'])
    else:
        # unknown key type
        return bad_request
    if alg == 'jose':
        # JOSE-driven algorithm choice is only available for JWK keys
        if signing_key_type != 'jwk':
            return bad_request
        if not jwk or jwk.get('alg', 'none') == 'none':
            # unusable JWK algorithm (an unparsable JWK previously crashed
            # into the except below; reporting 400 is more informative)
            return bad_request
        # tuple key cannot be forged through the JSON 'alg' field
        alg = ('jose', jwk['alg'])
    message = siginput.encode('utf-8')
    # dispatch tables mirroring sign(); verify supports the same algorithms
    pkcs1_algs = {'rsa-v1_5-sha256': SHA256, ('jose', 'RS256'): SHA256,
                  ('jose', 'RS384'): SHA384, ('jose', 'RS512'): SHA512}
    pss_algs = {'rsa-pss-sha512': (SHA512, mgf512, 64),
                ('jose', 'PS256'): (SHA256, mgf256, 32),
                ('jose', 'PS384'): (SHA384, mgf384, 48),
                ('jose', 'PS512'): (SHA512, mgf512, 64)}
    ecdsa_algs = {'ecdsa-p256-sha256': SHA256, ('jose', 'ES256'): SHA256,
                  ('jose', 'ES384'): SHA384, ('jose', 'ES512'): SHA512}
    hmac_algs = {'hmac-sha256': SHA256, ('jose', 'HS256'): SHA256,
                 ('jose', 'HS384'): SHA384, ('jose', 'HS512'): SHA512}
    try:
        verified = False
        if alg in pkcs1_algs:
            h = pkcs1_algs[alg].new(message)
            pkcs1_15.new(key).verify(h, signature.value)
            verified = True
        elif alg in pss_algs:
            hashmod, mgf, salt_len = pss_algs[alg]
            h = hashmod.new(message)
            pss.new(key, mask_func=mgf, salt_bytes=salt_len).verify(h, signature.value)
            verified = True
        elif alg in ecdsa_algs:
            h = ecdsa_algs[alg].new(message)
            DSS.new(key, 'fips-186-3').verify(h, signature.value)
            verified = True
        elif alg in hmac_algs:
            mac = HMAC.new(shared_key, digestmod=hmac_algs[alg])
            mac.update(message)
            verified = compare_digest(mac.digest(), signature.value)
        else:
            # unknown algorithm
            return bad_request
    except (ValueError, TypeError):
        # signature mismatch or unusable key material
        verified = False
    response = {
        'signatureVerified': verified
    }
    return {
        'statusCode': 200,
        'headers': cors_headers,
        'body': json.dumps(response)
    }
def parseKeyJwk(signingKey):
    """Parse a JWK (JSON Web Key) string into usable key material.

    Returns a (key, jwk, sharedKey) triple: an asymmetric key object for
    RSA/EC keys, the parsed JWK dict, and the raw secret bytes for symmetric
    ('oct') keys.  Unsupported key types (including OKP) yield
    (None, None, None).
    """
    jwk = json.loads(signingKey)
    kty = jwk['kty']
    if kty == 'RSA':
        n = b64ToInt(jwk['n'])
        e = b64ToInt(jwk['e'])
        if 'd' not in jwk:
            # public key
            return RSA.construct((n, e)), jwk, None
        d = b64ToInt(jwk['d'])
        if 'q' in jwk and 'p' in jwk:
            # private key with CRT factors
            return RSA.construct((n, e, d, b64ToInt(jwk['p']), b64ToInt(jwk['q']))), jwk, None
        # private key without CRT factors
        return RSA.construct((n, e, d)), jwk, None
    if kty == 'oct':
        # symmetric secret; the extra '=' padding tolerates stripped
        # base64url padding in the JWK
        return None, jwk, base64.urlsafe_b64decode(jwk['k'] + '===')
    if kty == 'EC':
        x = b64ToInt(jwk['x'])
        y = b64ToInt(jwk['y'])
        if 'd' in jwk:
            # private key
            return ECC.construct(curve=jwk['crv'], d=b64ToInt(jwk['d']), point_x=x, point_y=y), jwk, None
        # public key
        return ECC.construct(curve=jwk['crv'], point_x=x, point_y=y), jwk, None
    # OKP and anything else is unsupported
    return None, None, None
def b64ToInt(s):
    """Decode a base64url-encoded big-endian integer (as used in JWKs).

    Returns None for an empty/missing value.  Extra '=' padding is appended
    so inputs with stripped base64url padding still decode.
    """
    if not s:
        return None
    raw = base64.urlsafe_b64decode(s + '===')
    return int.from_bytes(raw, byteorder='big')
def parseKeyX509(signingKey):
    """Parse PEM key material, trying several common encodings.

    First attempts a PKCS#8 wrapper around the actual key, then falls back
    to treating the PEM as a bare RSA or EC key.  Returns the parsed key
    object, or None if nothing could be parsed.
    """
    def try_import(material):
        # attempt RSA first, then EC; swallow parse failures
        try:
            return RSA.import_key(material)
        except (ValueError, IndexError, TypeError):
            pass
        try:
            return ECC.import_key(material)
        except (ValueError, IndexError, TypeError):
            return None
    # first attempt: PKCS#8-wrapped key
    try:
        decoded = PEM.decode(signingKey)[0]
        unwrapped = PKCS8.unwrap(decoded)[1]
        key = try_import(unwrapped)
    except (ValueError, IndexError, TypeError):
        key = None
    if key is not None:
        return key
    # second attempt: the PEM itself is a plain RSA or EC key
    return try_import(signingKey)
| 30.845874 | 145 | 0.46225 |
ffc83dd91b9ba2dbcfb15d1a72aab03516d59d5a | 3,541 | py | Python | gui/kivy/uix/dialogs/bump_fee_dialog.py | CommerciumBlockchain/CommerciumElectro | 5448d621b6ebf28f93064cdefcf5d86878044c17 | [
"MIT"
] | null | null | null | gui/kivy/uix/dialogs/bump_fee_dialog.py | CommerciumBlockchain/CommerciumElectro | 5448d621b6ebf28f93064cdefcf5d86878044c17 | [
"MIT"
] | null | null | null | gui/kivy/uix/dialogs/bump_fee_dialog.py | CommerciumBlockchain/CommerciumElectro | 5448d621b6ebf28f93064cdefcf5d86878044c17 | [
"MIT"
] | 5 | 2018-06-23T13:54:52.000Z | 2019-04-28T00:46:43.000Z | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from commerciumelectro.util import fee_levels
from commerciumelectro_gui.kivy.i18n import _
Builder.load_string('''
<BumpFeeDialog@Popup>
title: _('Bump fee')
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
id: old_fee
text: _('Current Fee')
value: ''
BoxLabel:
id: new_fee
text: _('New Fee')
value: ''
Label:
id: tooltip
text: ''
size_hint_y: None
Slider:
id: slider
range: 0, 4
step: 1
on_value: root.on_slider(self.value)
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('Final')
CheckBox:
id: final_cb
Widget:
size_hint: 1, 1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Button:
text: _('Cancel')
size_hint: 0.5, None
height: '48dp'
on_release: root.dismiss()
Button:
text: _('OK')
size_hint: 0.5, None
height: '48dp'
on_release:
root.dismiss()
root.on_ok()
''')
class BumpFeeDialog(Factory.Popup):
    """Popup letting the user raise the fee of an unconfirmed transaction.

    When dynamic fees are active the slider picks one of the network fee
    levels; otherwise it picks a multiple of the static fee step.
    """
    def __init__(self, app, fee, size, callback):
        # app: the kivy Electrum app; fee: current tx fee in satoshis;
        # size: tx size in bytes; callback(old_fee, new_fee, is_final)
        Factory.Popup.__init__(self)
        self.app = app
        self.init_fee = fee
        self.tx_size = size
        self.callback = callback
        self.config = app.electrum_config
        self.fee_step = self.config.max_fee_rate() / 10
        # dynamic fees require both the config flag and a live network
        self.dynfees = self.config.get('dynamic_fees', True) and self.app.network
        self.ids.old_fee.value = self.app.format_amount_and_units(self.init_fee)
        self.update_slider()
        self.update_text()
    def update_text(self):
        # refresh the displayed new fee and (for dynamic fees) the tooltip
        value = int(self.ids.slider.value)
        self.ids.new_fee.value = self.app.format_amount_and_units(self.get_fee())
        if self.dynfees:
            value = int(self.ids.slider.value)
            self.ids.tooltip.text = fee_levels[value]
    def update_slider(self):
        # dynamic: 5 discrete fee levels; static: multiples of fee_step,
        # pre-positioned at roughly double the current fee rate
        slider = self.ids.slider
        if self.dynfees:
            slider.range = (0, 4)
            slider.step = 1
            slider.value = 3
        else:
            slider.range = (1, 10)
            slider.step = 1
            rate = self.init_fee*1000//self.tx_size
            slider.value = min( rate * 2 // self.fee_step, 10)
    def get_fee(self):
        # NOTE(review): returns None when dynfees is set but no fee
        # estimates are available (implicit fall-through) -- the callback
        # then receives new_fee=None; confirm callers handle this.
        value = int(self.ids.slider.value)
        if self.dynfees:
            if self.config.has_fee_estimates():
                dynfee = self.config.dynfee(value)
                return int(dynfee * self.tx_size // 1000)
        else:
            return int(value*self.fee_step * self.tx_size // 1000)
    def on_ok(self):
        # hand the chosen fee back to the wallet code
        new_fee = self.get_fee()
        is_final = self.ids.final_cb.active
        self.callback(self.init_fee, new_fee, is_final)
    def on_slider(self, value):
        self.update_text()
    def on_checkbox(self, b):
        # toggle between dynamic and static fee selection
        self.dynfees = b
        self.update_text()
| 29.508333 | 81 | 0.530076 |
4815861e42e326967feb270f812894eb605c1b30 | 6,328 | py | Python | model_analyzer/analyzer.py | ahiroto/model_analyzer | 4dbd47c0e71d66d90526d5523570e2c6f717d5bc | [
"Apache-2.0"
] | null | null | null | model_analyzer/analyzer.py | ahiroto/model_analyzer | 4dbd47c0e71d66d90526d5523570e2c6f717d5bc | [
"Apache-2.0"
] | null | null | null | model_analyzer/analyzer.py | ahiroto/model_analyzer | 4dbd47c0e71d66d90526d5523570e2c6f717d5bc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_manager import ModelManager
from .result.result_manager import ResultManager
from .record.metrics_manager import MetricsManager
from .reports.report_manager import ReportManager
from .config.input.config_command_analyze \
import ConfigCommandAnalyze
from .config.input.config_command_report \
import ConfigCommandReport
from .config.input.config_command_profile \
import ConfigCommandProfile
from .model_analyzer_exceptions \
import TritonModelAnalyzerException
import logging
class Analyzer:
    """
    A class responsible for coordinating the various components of the
    model_analyzer. Configured with metrics to monitor, exposes profiling and
    result writing methods.
    """
    def __init__(self, config, server, state_manager):
        """
        Parameters
        ----------
        config : Config
            Model Analyzer config
        server : TritonServer
            Server handle
        state_manager: AnalyzerStateManager
            The object that maintains Model Analyzer State
        """
        self._config = config
        self._server = server
        self._state_manager = state_manager
        # restore any previously saved measurements so runs can resume
        state_manager.load_checkpoint()
        self._result_manager = ResultManager(config=config,
                                             state_manager=self._state_manager)
    def profile(self, client):
        """
        Subcommand: PROFILE
        Configures RunConfigGenerator, then
        profiles for each run_config
        Parameters
        ----------
        client : TritonClient
            Instance used to load/unload models
        Raises
        ------
        TritonModelAnalyzerException
        """
        if not isinstance(self._config, ConfigCommandProfile):
            # Fix: the second fragment was not an f-string, so the literal
            # text "{type(self._config)}" appeared in the error message.
            raise TritonModelAnalyzerException(
                f"Expected config of type {ConfigCommandProfile},"
                f" got {type(self._config)}.")
        logging.info('Profiling server only metrics...')
        self._metrics_manager = MetricsManager(
            config=self._config,
            client=client,
            server=self._server,
            result_manager=self._result_manager,
            state_manager=self._state_manager)
        self._model_manager = ModelManager(
            config=self._config,
            client=client,
            server=self._server,
            result_manager=self._result_manager,
            metrics_manager=self._metrics_manager,
            state_manager=self._state_manager)
        # Get metrics for server only
        self._server.start()
        client.wait_for_server_ready(self._config.max_retries)
        self._metrics_manager.profile_server()
        self._server.stop()
        # Profile each model, save state after each so an interrupted run
        # keeps the measurements gathered so far
        for model in self._config.profile_models:
            if self._state_manager.exiting():
                break
            try:
                self._model_manager.run_model(model=model)
            finally:
                self._state_manager.save_checkpoint()
        profiled_model_list = list(
            self._state_manager.get_state_variable(
                'ResultManager.results').keys())
        logging.info(
            f"Finished profiling. Obtained measurements for models: {profiled_model_list}."
        )
    def analyze(self, mode):
        """
        subcommand: ANALYZE
        Constructs results from measurements,
        sorts them, and dumps them to tables.
        Parameters
        ----------
        mode : str
            Global mode that the analyzer is running on
        """
        if not isinstance(self._config, ConfigCommandAnalyze):
            raise TritonModelAnalyzerException(
                f"Expected config of type {ConfigCommandAnalyze}, got {type(self._config)}."
            )
        gpu_info = self._state_manager.get_state_variable(
            'MetricsManager.gpus')
        if not gpu_info:
            gpu_info = {}
        self._report_manager = ReportManager(
            mode=mode,
            config=self._config,
            gpu_info=gpu_info,
            result_manager=self._result_manager)
        # Create result tables, put top results and get stats
        dcgm_metrics, perf_metrics, cpu_metrics = \
            MetricsManager.categorize_metrics()
        self._result_manager.create_tables(
            gpu_specific_metrics=dcgm_metrics,
            non_gpu_specific_metrics=perf_metrics + cpu_metrics)
        self._result_manager.compile_and_sort_results()
        if self._config.summarize:
            self._report_manager.create_summaries()
            self._report_manager.export_summaries()
        # Dump to tables and write to disk
        self._result_manager.tabulate_results()
        self._result_manager.write_and_export_results()
    def report(self, mode):
        """
        Subcommand: REPORT
        Generates detailed information on
        one or more model configs
        Parameters
        ----------
        mode : str
            Global mode that the analyzer is running on
        """
        if not isinstance(self._config, ConfigCommandReport):
            raise TritonModelAnalyzerException(
                f"Expected config of type {ConfigCommandReport}, got {type(self._config)}."
            )
        gpu_info = self._state_manager.get_state_variable(
            'MetricsManager.gpus')
        if not gpu_info:
            gpu_info = {}
        self._report_manager = ReportManager(
            mode=mode,
            config=self._config,
            result_manager=self._result_manager,
            gpu_info=gpu_info)
        self._report_manager.create_detailed_reports()
        self._report_manager.export_detailed_reports()
| 32.958333 | 92 | 0.638274 |
13e5682bb95ddfeb497966933c9e147164ac3110 | 391 | py | Python | Modulo_4/semana 3/practica sabado/reporte/reporte/wsgi.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_4/semana 3/practica sabado/reporte/reporte/wsgi.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_4/semana 3/practica sabado/reporte/reporte/wsgi.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | """
WSGI config for reporte project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment already
# chose a settings module (setdefault never overrides an existing value).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reporte.settings')
# Module-level WSGI callable that application servers (gunicorn, uWSGI,
# mod_wsgi) import by the name "application".
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
e11048f8c5d9320730f00a77e33a9c2abebdbade | 7,417 | py | Python | autoarray/structures/arrays/one_d/array_1d_util.py | caoxiaoyue/PyAutoArray | e10d3d6a5b8dd031f2ad277486bd539bd5858b2a | [
"MIT"
] | 5 | 2019-09-26T02:18:25.000Z | 2021-12-11T16:29:20.000Z | autoarray/structures/arrays/one_d/array_1d_util.py | caoxiaoyue/PyAutoArray | e10d3d6a5b8dd031f2ad277486bd539bd5858b2a | [
"MIT"
] | 3 | 2020-03-30T14:25:57.000Z | 2021-12-21T17:10:55.000Z | autoarray/structures/arrays/one_d/array_1d_util.py | caoxiaoyue/PyAutoArray | e10d3d6a5b8dd031f2ad277486bd539bd5858b2a | [
"MIT"
] | 4 | 2020-03-03T11:35:41.000Z | 2022-01-21T17:37:35.000Z | import os
import numpy as np
from astropy.io import fits
from autoarray import numba_util
from autoarray.mask import mask_1d_util
@numba_util.jit()
def array_1d_slim_from(
    array_1d_native: np.ndarray, mask_1d: np.ndarray, sub_size: int
) -> np.ndarray:
    """
    For a 1D array and mask, map the values of all unmasked pixels to its slimmed 1D array.
    The 1D array has native dimensions corresponding to the array pixels (without masking) multiplied by the
    sub_size. For example if a native array is shape [total_unmasked_pixels] and the ``sub_size=2``, the array is
    shape [total_unmasked_x_pixels*sub_size].
    The pixel coordinate origin is at the left of the 1D array and goes right, with pixels then going right in
    each pixel. For example, for an array of shape (3,) and a sub-grid size of 2 where all pixels are unmasked:
    - pixel[0] of the native 1D array will correspond to index 0 of the slim array (which is the first sub-pixel in
    the array).
    - pixel[1] of the native 1D array will correspond to index 1 of the slim array (which is the second sub-pixel in
    the array).
    If the native array is masked and the third pixel is masked (e.g. its mask_1d entry is `True`) then:
    - pixels [0], [1], [2] and [3] of the native 1D array will correspond to indexes 0, 1, 2, 3 of the slim array.
    - pixels [4] and [5] of the native 1D array do not map to the slim array (these sub-pixels are masked).
    - pixel [6], [7], etc. of the native 1D array will correspond to indexes 4, 5, etc. of the slim array.
    Parameters
    ----------
    array_1d_native
        A 1D array of values on the dimensions of the native array.
    mask_1d
        A 1D array of bools, where `False` values mean unmasked and are included in the mapping.
    sub_size
        The sub-grid size of the array.
    Returns
    -------
    ndarray
        The slimmed 1D array of values mapped from the native 1d array with
        dimensions [total_unmasked_pixels*sub_size].
    Examples
    --------
    array_1d_native = np.array([1.0, 2.0, 5.0, 6.0])
    mask = np.array([True, False, False, False])
    array_1d_slim = array_1d_slim_from(array_1d_native=array_1d_native, mask_1d=mask, sub_size=2)
    """
    # Count the slim entries up-front so the output can be preallocated
    # (required for the numba nopython compilation path; keep explicit loops).
    total_sub_pixels = mask_1d_util.total_sub_pixels_1d_from(
        mask_1d=mask_1d, sub_size=sub_size
    )
    line_1d_slim = np.zeros(shape=total_sub_pixels)
    index = 0
    # Walk the native array left-to-right, copying only unmasked pixels; each
    # unmasked pixel contributes sub_size consecutive sub-pixel values.
    for x in range(mask_1d.shape[0]):
        if not mask_1d[x]:
            for x1 in range(sub_size):
                line_1d_slim[index] = array_1d_native[x * sub_size + x1]
                index += 1
    return line_1d_slim
def array_1d_native_from(
    array_1d_slim: np.ndarray, mask_1d: np.ndarray, sub_size: int
) -> np.ndarray:
    """Map a slimmed 1D array back onto its native (masked, sub-gridded) layout.

    Builds the slim-to-native index mapping from the mask and delegates the
    actual scatter to ``array_1d_via_indexes_1d_from``.
    """
    # Index of the native sub-pixel that each slim entry belongs to.
    native_indexes = mask_1d_util.native_index_for_slim_index_1d_from(
        mask_1d=mask_1d, sub_size=sub_size
    ).astype("int")
    return array_1d_via_indexes_1d_from(
        array_1d_slim=array_1d_slim,
        sub_shape=mask_1d.shape[0] * sub_size,
        native_index_for_slim_index_1d=native_indexes,
    )
@numba_util.jit()
def array_1d_via_indexes_1d_from(
    array_1d_slim: np.ndarray,
    sub_shape: int,
    native_index_for_slim_index_1d: np.ndarray,
) -> np.ndarray:
    """
    For a slimmed 1D array with sub-indexes mapping the slimmed array values to their native array indexes,
    return the native 1D array.
    The 1D array has dimensions correspond to the size of the 1D array multiplied by the sub_size. For example
    if an array is shape [total_x_pixels] and the `sub_size=2`, the array is shape [total_x_pixels*sub_size].
    The pixel coordinate origin is at the left of the 1D array and goes right, with sub-pixels then going right in
    each pixel. For example, for an array of shape (3,) and a sub-grid size of 2 where all pixels are unmasked:
    - pixel[0] of the native 1D array will correspond to index 0 of the slim array (which is the first sub-pixel in
    the line).
    - pixel[1] of the native 1D array will correspond to index 1 of the slim array (which is the second sub-pixel in
    the line).
    If the native line is masked and the third pixel is masked (e.g. its mask_1d entry is `True`) then:
    - pixels [0], [1], [2] and [3] of the native 1D array will correspond to indexes 0, 1, 2, 3 of the slim array.
    - pixels [4] and [5] of the native 1D array do not map to the slim array (these sub-pixels are masked).
    - pixel [6], [7], etc. of the native 1D array will correspond to indexes 4, 5, etc. of the slim array.
    Parameters
    ----------
    array_1d_slim
        The slimmed array of shape [total_x_pixels*sub_size] which are mapped to the native array.
    sub_shape
        The 1D dimensions of the native 1D sub line.
    native_index_for_slim_index_1d : np.narray
        An array of shape [total_x_pixels*sub_size] that maps from the slimmed array to the native array.
    Returns
    -------
    ndarray
        The native 1D array of values mapped from the slimmed array with dimensions (total_x_pixels).
    """
    # Masked native entries are never written to and therefore stay 0.
    # Explicit loop kept deliberately: this function is numba-compiled.
    array_1d_native = np.zeros(sub_shape)
    for slim_index in range(len(native_index_for_slim_index_1d)):
        array_1d_native[native_index_for_slim_index_1d[slim_index]] = array_1d_slim[
            slim_index
        ]
    return array_1d_native
def numpy_array_1d_to_fits(
    array_1d: np.ndarray, file_path: str, overwrite: bool = False
):
    """
    Write a 1D NumPy array to a .fits file.
    Parameters
    ----------
    array_1d
        The 1D array that is written to fits.
    file_path : str
        The full path of the file that is output, including the file name and ``.fits`` extension.
    overwrite : bool
        If `True` and a file already exists with the input file_path the .fits file is overwritten. If False, an error
        will be raised.
    Returns
    -------
    None
    Examples
    --------
    array_1d = np.ones((5,))
    numpy_array_1d_to_fits(array_1d=array_1d, file_path='/path/to/file/filename.fits', overwrite=True)
    """
    file_dir = os.path.split(file_path)[0]
    # Guard against a bare filename (empty dirname), where makedirs('') would
    # raise; exist_ok=True also removes the check-then-create race the previous
    # os.path.exists() guard had.
    if file_dir:
        os.makedirs(file_dir, exist_ok=True)
    if overwrite and os.path.exists(file_path):
        os.remove(file_path)
    new_hdr = fits.Header()
    hdu = fits.PrimaryHDU(array_1d, new_hdr)
    hdu.writeto(file_path)
def numpy_array_1d_via_fits_from(file_path: str, hdu: int):
    """
    Read a 1D NumPy array from a .fits file.
    Parameters
    ----------
    file_path : str
        The full path of the file that is loaded, including the file name and ``.fits`` extension.
    hdu
        The HDU extension of the array that is loaded from the .fits file.
    Returns
    -------
    ndarray
        The NumPy array that is loaded from the .fits file.
    Examples
    --------
    array_1d = numpy_array_1d_via_fits_from(file_path='/path/to/file/filename.fits', hdu=0)
    """
    # Context manager closes the file handle (previously leaked, which can
    # exhaust file descriptors when loading many arrays). np.array copies the
    # data out of the memory-mapped HDU before the file is closed.
    with fits.open(file_path) as hdu_list:
        return np.array(hdu_list[hdu].data)
| 35.658654 | 119 | 0.66442 |
19ee91efbcf04a7147aa5834b07eea76e3bf8db0 | 5,380 | py | Python | python/ycm_simple_conf.py | chengxie/ycm_simple_conf | 38e5db4fb74b0b4bf2d73001c7ba5489893b53b1 | [
"Zlib"
] | null | null | null | python/ycm_simple_conf.py | chengxie/ycm_simple_conf | 38e5db4fb74b0b4bf2d73001c7ba5489893b53b1 | [
"Zlib"
] | null | null | null | python/ycm_simple_conf.py | chengxie/ycm_simple_conf | 38e5db4fb74b0b4bf2d73001c7ba5489893b53b1 | [
"Zlib"
] | null | null | null | # ycm_simple_conf - ycm_simple_conf.py
# Created by Thomas Da Costa <tdc.input@gmail.com>
# Copyright (C) 2014 Thomas Da Costa
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import os
import logging
import xml.etree.ElementTree as et
import re
import subprocess
class SimpleConf(object):
    """Per-project clang configuration for YouCompleteMe.

    Walks parent directories of the compiled file upwards until a ``.ycm.xml``
    is found (or the filesystem root is reached), parses it for the project
    type, extra cxxflags and include paths, then queries the system
    preprocessor for its default include search path.
    """

    def __init__(self, file_name):
        self.m_compiled_file = file_name
        self.m_root_dir = None
        self.m_config_file = None
        # Defaults used when no config file is found.
        self.m_project_type = 'c++'
        self.m_user_cxxflags = list()
        self.m_user_include_path = list()
        self.m_default_include_path = list()
        self.seek_config_file(os.path.dirname(self.m_compiled_file))
        self.parse_config_file()
        self.fetch_default_include_path()

    @property
    def compiled_file(self):
        return self.m_compiled_file

    @property
    def root_dir(self):
        return self.m_root_dir

    @property
    def config_file(self):
        return self.m_config_file

    @property
    def project_type(self):
        return self.m_project_type

    @property
    def user_cxxflags(self):
        return self.m_user_cxxflags

    @property
    def user_include_path(self):
        return self.m_user_include_path

    @property
    def default_include_path(self):
        return self.m_default_include_path

    @property
    def flags(self):
        """Full clang flag list: warnings, language selection, default
        (-isystem) include paths, user cxxflags and user (-I) include paths."""
        flags = ['-Wall']
        if self.m_project_type == 'c':
            flags.extend(['-x', 'c'])
        else:
            flags.extend(['-x', 'c++'])
        for include in self.m_default_include_path:
            flags.extend(['-isystem', include])
        for f in self.m_user_cxxflags:
            flags.extend([f])
        for include in self.m_user_include_path:
            flags.extend(['-I', include])
        return flags

    def seek_config_file(self, dir_name):
        """Recursively walk up from *dir_name* looking for ``.ycm.xml``."""
        if dir_name == '' or dir_name == '/':
            logging.warning('Config file not found')
            return
        files = [os.path.join(dir_name, f) for f in os.listdir(dir_name)]
        files = [f for f in files if os.path.isfile(f)]
        for f in files:
            if os.path.basename(f) == '.ycm.xml':
                self.m_root_dir = dir_name
                self.m_config_file = os.path.join(dir_name, f)
                logging.info('Config file found: %s' % self.m_config_file)
                return
        self.seek_config_file(os.path.dirname(dir_name))

    def parse_config_file(self):
        """Parse ``.ycm.xml`` (a <project type=...> root with <cxxflag name=...>
        and <include path=...> children); no-op when no config file was found."""
        if not self.m_config_file:
            return
        try:
            project = et.parse(self.m_config_file).getroot()
            if project.tag != 'project':
                raise Exception
            self.m_project_type = project.attrib['type']
            if self.m_project_type not in ['c', 'c++']:
                raise Exception
            for cxxflag in project.iter('cxxflag'):
                name = str.strip(cxxflag.attrib['name'])
                self.m_user_cxxflags.append(name)
                logging.info('Adding to user cxxflag: %s' % name)
            for include in project.iter('include'):
                # Include paths in the config are relative to the project root.
                inc = os.path.join(self.m_root_dir, include.attrib['path'])
                inc = str.strip(inc)
                self.m_user_include_path.append(inc)
                logging.info('Adding to user include path: %s' % inc)
            self.m_user_include_path.append(self.m_root_dir)
        except Exception as e:
            # BUG FIX: the previous '%s' % e.message raises AttributeError on
            # Python 3 (exceptions have no .message); formatting the exception
            # itself works on both 2 and 3.
            logging.error('Failed to parse config file: %s' % e)

    def fetch_default_include_path(self):
        """Ask the system preprocessor (cpp -v) for its default include path."""
        try:
            # FIX: the devnull handle was previously leaked; os.devnull is also
            # portable, unlike a hard-coded '/dev/null'.
            with open(os.devnull, 'r') as devnull:
                err = subprocess.check_output(
                    ['sh', '-c', 'LC_ALL=C cpp -x ' + self.m_project_type + ' -v'],
                    stdin=devnull,
                    stderr=subprocess.STDOUT
                )
            # The default search path is printed between '#include <...>:' and
            # 'End of search list.' on cpp's stderr.
            pattern = re.compile(
                '#include \<\.{3}\>.*\:(.+)End of search list\.',
                re.DOTALL
            )
            match = pattern.search(err.decode())
            if match:
                lines = str.splitlines(match.group(1))
                for inc in [str.strip(l) for l in lines if l]:
                    logging.info('Adding to default include path: %s' % inc)
                    self.m_default_include_path.append(inc)
        except Exception:
            logging.error('Failed to run: cpp -x %s -v' % self.m_project_type)
def FlagsForFile(file_name, **kwargs):
    """YouCompleteMe entry point: compute the clang flag list for *file_name*."""
    conf = SimpleConf(file_name)
    clang_flags = conf.flags
    logging.info('Flags used by clang: %s' % clang_flags)
    return {'flags': clang_flags, 'do_cache': True}
| 34.709677 | 79 | 0.607063 |
851536694eef2e5e42337b50d409b50e1f2ae1bc | 15,390 | py | Python | external-import/crowdstrike/src/crowdstrike/core.py | opencti-platform/connectors | a1ed484f2d275f99cf10630dc2d29ce355ee8ce3 | [
"Apache-2.0"
] | null | null | null | external-import/crowdstrike/src/crowdstrike/core.py | opencti-platform/connectors | a1ed484f2d275f99cf10630dc2d29ce355ee8ce3 | [
"Apache-2.0"
] | null | null | null | external-import/crowdstrike/src/crowdstrike/core.py | opencti-platform/connectors | a1ed484f2d275f99cf10630dc2d29ce355ee8ce3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike connector core module."""
import os
import time
from typing import Any, Dict, List, Mapping, Optional
import stix2
import yaml
from crowdstrike.actor.importer import ActorImporter
from crowdstrike.importer import BaseImporter
from crowdstrike.indicator.importer import IndicatorImporter, IndicatorImporterConfig
from crowdstrike.report.importer import ReportImporter
from crowdstrike.rule.yara_master_importer import YaraMasterImporter
from crowdstrike.utils import (
convert_comma_separated_str_to_list,
create_organization,
get_tlp_string_marking_definition,
is_timestamp_in_future,
timestamp_to_datetime,
)
from crowdstrike.utils.constants import DEFAULT_TLP_MARKING_DEFINITION
from crowdstrike_client.client import CrowdStrikeClient
from pycti import OpenCTIConnectorHelper # type: ignore
from pycti.connector.opencti_connector_helper import get_config_variable # type: ignore
class CrowdStrike:
    """CrowdStrike connector.

    Periodically pulls actors, reports, indicators and the YARA master file
    from the CrowdStrike Intel API (per configured scopes) and imports them
    into OpenCTI, persisting per-importer cursors in the connector state.
    """

    # --- configuration keys: "<namespace>.<key>" paths resolved either from
    # --- the YAML config file or the corresponding environment variable.
    _CONFIG_NAMESPACE = "crowdstrike"
    _CONFIG_BASE_URL = f"{_CONFIG_NAMESPACE}.base_url"
    _CONFIG_CLIENT_ID = f"{_CONFIG_NAMESPACE}.client_id"
    _CONFIG_CLIENT_SECRET = f"{_CONFIG_NAMESPACE}.client_secret"
    _CONFIG_INTERVAL_SEC = f"{_CONFIG_NAMESPACE}.interval_sec"
    _CONFIG_SCOPES = f"{_CONFIG_NAMESPACE}.scopes"
    _CONFIG_TLP = f"{_CONFIG_NAMESPACE}.tlp"
    _CONFIG_CREATE_OBSERVABLES = f"{_CONFIG_NAMESPACE}.create_observables"
    _CONFIG_CREATE_INDICATORS = f"{_CONFIG_NAMESPACE}.create_indicators"
    _CONFIG_ACTOR_START_TIMESTAMP = f"{_CONFIG_NAMESPACE}.actor_start_timestamp"
    _CONFIG_REPORT_START_TIMESTAMP = f"{_CONFIG_NAMESPACE}.report_start_timestamp"
    _CONFIG_REPORT_INCLUDE_TYPES = f"{_CONFIG_NAMESPACE}.report_include_types"
    _CONFIG_REPORT_STATUS = f"{_CONFIG_NAMESPACE}.report_status"
    _CONFIG_REPORT_TYPE = f"{_CONFIG_NAMESPACE}.report_type"
    _CONFIG_REPORT_GUESS_MALWARE = f"{_CONFIG_NAMESPACE}.report_guess_malware"
    _CONFIG_INDICATOR_START_TIMESTAMP = f"{_CONFIG_NAMESPACE}.indicator_start_timestamp"
    _CONFIG_INDICATOR_EXCLUDE_TYPES = f"{_CONFIG_NAMESPACE}.indicator_exclude_types"
    _CONFIG_INDICATOR_LOW_SCORE = f"{_CONFIG_NAMESPACE}.indicator_low_score"
    _CONFIG_INDICATOR_LOW_SCORE_LABELS = (
        f"{_CONFIG_NAMESPACE}.indicator_low_score_labels"
    )
    _CONFIG_UPDATE_EXISTING_DATA = "connector.update_existing_data"
    # Scope names enabling the individual importers.
    _CONFIG_SCOPE_ACTOR = "actor"
    _CONFIG_SCOPE_REPORT = "report"
    _CONFIG_SCOPE_INDICATOR = "indicator"
    _CONFIG_SCOPE_YARA_MASTER = "yara_master"
    # Mapping from the human-readable report status to the API's integer code.
    _CONFIG_REPORT_STATUS_MAPPING = {
        "new": 0,
        "in progress": 1,
        "analyzed": 2,
        "closed": 3,
    }
    _DEFAULT_CREATE_OBSERVABLES = True
    _DEFAULT_CREATE_INDICATORS = True
    _DEFAULT_REPORT_TYPE = "threat-report"
    _DEFAULT_INDICATOR_LOW_SCORE = 40
    # Polling granularity of the main loop (seconds).
    _CONNECTOR_RUN_INTERVAL_SEC = 60
    # State key holding the UNIX timestamp of the last completed run.
    _STATE_LAST_RUN = "last_run"

    def __init__(self) -> None:
        """Initialize CrowdStrike connector."""
        config = self._read_configuration()
        # CrowdStrike connector configuration
        base_url = self._get_configuration(config, self._CONFIG_BASE_URL)
        client_id = self._get_configuration(config, self._CONFIG_CLIENT_ID)
        client_secret = self._get_configuration(config, self._CONFIG_CLIENT_SECRET)
        self.interval_sec = self._get_configuration(
            config, self._CONFIG_INTERVAL_SEC, is_number=True
        )
        scopes_str = self._get_configuration(config, self._CONFIG_SCOPES)
        scopes = set()
        if scopes_str is not None:
            scopes = set(convert_comma_separated_str_to_list(scopes_str))
        tlp = self._get_configuration(config, self._CONFIG_TLP)
        tlp_marking = self._convert_tlp_to_marking_definition(tlp)
        create_observables = self._get_configuration(
            config, self._CONFIG_CREATE_OBSERVABLES
        )
        # None means "not configured" and falls back to the default; anything
        # else is coerced to bool.
        if create_observables is None:
            create_observables = self._DEFAULT_CREATE_OBSERVABLES
        else:
            create_observables = bool(create_observables)
        create_indicators = self._get_configuration(
            config, self._CONFIG_CREATE_INDICATORS
        )
        if create_indicators is None:
            create_indicators = self._DEFAULT_CREATE_INDICATORS
        else:
            create_indicators = bool(create_indicators)
        # Start timestamps in the future would make the importers skip
        # everything, so they are rejected up-front.
        actor_start_timestamp = self._get_configuration(
            config, self._CONFIG_ACTOR_START_TIMESTAMP, is_number=True
        )
        if is_timestamp_in_future(actor_start_timestamp):
            raise ValueError("Actor start timestamp is in the future")
        report_start_timestamp = self._get_configuration(
            config, self._CONFIG_REPORT_START_TIMESTAMP, is_number=True
        )
        if is_timestamp_in_future(report_start_timestamp):
            raise ValueError("Report start timestamp is in the future")
        report_status_str = self._get_configuration(config, self._CONFIG_REPORT_STATUS)
        report_status = self._convert_report_status_str_to_report_status_int(
            report_status_str
        )
        report_type = self._get_configuration(config, self._CONFIG_REPORT_TYPE)
        if not report_type:
            report_type = self._DEFAULT_REPORT_TYPE
        report_include_types_str = self._get_configuration(
            config, self._CONFIG_REPORT_INCLUDE_TYPES
        )
        report_include_types = []
        if report_include_types_str is not None:
            report_include_types = convert_comma_separated_str_to_list(
                report_include_types_str
            )
        report_guess_malware = bool(
            self._get_configuration(config, self._CONFIG_REPORT_GUESS_MALWARE)
        )
        indicator_start_timestamp = self._get_configuration(
            config, self._CONFIG_INDICATOR_START_TIMESTAMP, is_number=True
        )
        if is_timestamp_in_future(indicator_start_timestamp):
            raise ValueError("Indicator start timestamp is in the future")
        indicator_exclude_types_str = self._get_configuration(
            config, self._CONFIG_INDICATOR_EXCLUDE_TYPES
        )
        indicator_exclude_types = []
        if indicator_exclude_types_str is not None:
            indicator_exclude_types = convert_comma_separated_str_to_list(
                indicator_exclude_types_str
            )
        indicator_low_score = self._get_configuration(
            config, self._CONFIG_INDICATOR_LOW_SCORE, is_number=True
        )
        if indicator_low_score is None:
            indicator_low_score = self._DEFAULT_INDICATOR_LOW_SCORE
        indicator_low_score_labels_str = self._get_configuration(
            config, self._CONFIG_INDICATOR_LOW_SCORE_LABELS
        )
        indicator_low_score_labels = []
        if indicator_low_score_labels_str is not None:
            indicator_low_score_labels = convert_comma_separated_str_to_list(
                indicator_low_score_labels_str
            )
        update_existing_data = bool(
            self._get_configuration(config, self._CONFIG_UPDATE_EXISTING_DATA)
        )
        author = self._create_author()
        # Create OpenCTI connector helper.
        self.helper = OpenCTIConnectorHelper(config)
        # Create CrowdStrike client and importers.
        client = CrowdStrikeClient(base_url, client_id, client_secret)
        # Create importers (one per enabled scope, in a fixed order).
        importers: List[BaseImporter] = []
        if self._CONFIG_SCOPE_ACTOR in scopes:
            actor_importer = ActorImporter(
                self.helper,
                client.intel_api.actors,
                update_existing_data,
                author,
                actor_start_timestamp,
                tlp_marking,
            )
            importers.append(actor_importer)
        if self._CONFIG_SCOPE_REPORT in scopes:
            report_importer = ReportImporter(
                self.helper,
                client.intel_api.reports,
                update_existing_data,
                author,
                report_start_timestamp,
                tlp_marking,
                report_include_types,
                report_status,
                report_type,
                report_guess_malware,
            )
            importers.append(report_importer)
        if self._CONFIG_SCOPE_INDICATOR in scopes:
            indicator_importer_config = IndicatorImporterConfig(
                helper=self.helper,
                indicators_api=client.intel_api.indicators,
                reports_api=client.intel_api.reports,
                update_existing_data=update_existing_data,
                author=author,
                default_latest_timestamp=indicator_start_timestamp,
                tlp_marking=tlp_marking,
                create_observables=create_observables,
                create_indicators=create_indicators,
                exclude_types=indicator_exclude_types,
                report_status=report_status,
                report_type=report_type,
                indicator_low_score=indicator_low_score,
                indicator_low_score_labels=set(indicator_low_score_labels),
            )
            indicator_importer = IndicatorImporter(indicator_importer_config)
            importers.append(indicator_importer)
        if self._CONFIG_SCOPE_YARA_MASTER in scopes:
            yara_master_importer = YaraMasterImporter(
                self.helper,
                client.intel_api.rules,
                client.intel_api.reports,
                author,
                tlp_marking,
                update_existing_data,
                report_status,
                report_type,
            )
            importers.append(yara_master_importer)
        self.importers = importers

    @staticmethod
    def _read_configuration() -> Dict[str, str]:
        """Load config.yml from the package parent directory, or {} if absent."""
        config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/../config.yml"
        if not os.path.isfile(config_file_path):
            return {}
        return yaml.load(open(config_file_path), Loader=yaml.FullLoader)

    @staticmethod
    def _create_author() -> stix2.Identity:
        """Return the STIX identity used as author of all imported objects."""
        return create_organization("CrowdStrike")

    @staticmethod
    def _get_yaml_path(config_name: str) -> List[str]:
        """Split a dotted config key into its YAML path components."""
        return config_name.split(".")

    @staticmethod
    def _get_environment_variable_name(yaml_path: List[str]) -> str:
        """Derive the env-var name from a YAML path, e.g. crowdstrike.tlp -> CROWDSTRIKE_TLP."""
        return "_".join(yaml_path).upper()

    @classmethod
    def _get_configuration(
        cls, config: Dict[str, Any], config_name: str, is_number: bool = False
    ) -> Any:
        """Resolve a config value from the env var or the YAML config."""
        yaml_path = cls._get_yaml_path(config_name)
        env_var_name = cls._get_environment_variable_name(yaml_path)
        config_value = get_config_variable(
            env_var_name, yaml_path, config, isNumber=is_number
        )
        return config_value

    @classmethod
    def _convert_tlp_to_marking_definition(
        cls, tlp_value: Optional[str]
    ) -> stix2.MarkingDefinition:
        """Map a TLP string to a STIX marking; None falls back to the default."""
        if tlp_value is None:
            return DEFAULT_TLP_MARKING_DEFINITION
        return get_tlp_string_marking_definition(tlp_value)

    @classmethod
    def _convert_report_status_str_to_report_status_int(cls, report_status: str) -> int:
        """Map a report status name to the API integer (KeyError if unknown)."""
        return cls._CONFIG_REPORT_STATUS_MAPPING[report_status.lower()]

    def _load_state(self) -> Dict[str, Any]:
        """Return the persisted connector state, or {} on first run."""
        current_state = self.helper.get_state()
        if not current_state:
            return {}
        return current_state

    @staticmethod
    def _get_state_value(
        state: Optional[Mapping[str, Any]], key: str, default: Optional[Any] = None
    ) -> Any:
        """Null-safe state lookup with a default."""
        if state is not None:
            return state.get(key, default)
        return default

    @classmethod
    def _sleep(cls, delay_sec: Optional[int] = None) -> None:
        """Sleep for delay_sec, or the default run interval when None."""
        sleep_delay = (
            delay_sec if delay_sec is not None else cls._CONNECTOR_RUN_INTERVAL_SEC
        )
        time.sleep(sleep_delay)

    def _is_scheduled(self, last_run: Optional[int], current_time: int) -> bool:
        """Return True when enough time has elapsed since the last run."""
        if last_run is None:
            self._info("CrowdStrike connector clean run")
            return True
        time_diff = current_time - last_run
        return time_diff >= self._get_interval()

    @staticmethod
    def _current_unix_timestamp() -> int:
        return int(time.time())

    def run(self):
        """Run CrowdStrike connector."""
        self._info("Starting CrowdStrike connector...")
        if not self.importers:
            self._error("Scope(s) not configured.")
            return
        while True:
            self._info("Running CrowdStrike connector...")
            run_interval = self._CONNECTOR_RUN_INTERVAL_SEC
            try:
                timestamp = self._current_unix_timestamp()
                current_state = self._load_state()
                self.helper.log_info(f"Loaded state: {current_state}")
                last_run = self._get_state_value(current_state, self._STATE_LAST_RUN)
                if self._is_scheduled(last_run, timestamp):
                    work_id = self._initiate_work(timestamp)
                    new_state = current_state.copy()
                    for importer in self.importers:
                        importer_state = importer.start(work_id, new_state)
                        new_state.update(importer_state)
                        # Persist after each importer so its cursor survives a
                        # crash in a later importer.
                        self._info("Storing updated new state: {0}", new_state)
                        self.helper.set_state(new_state)
                    # Second persist records the completed run's timestamp.
                    new_state[self._STATE_LAST_RUN] = self._current_unix_timestamp()
                    self._info("Storing new state: {0}", new_state)
                    self.helper.set_state(new_state)
                    message = (
                        f"State stored, next run in: {self._get_interval()} seconds"
                    )
                    self._info(message)
                    self._complete_work(work_id, message)
                else:
                    # Sleep only as long as needed to hit the next schedule.
                    next_run = self._get_interval() - (timestamp - last_run)
                    run_interval = min(run_interval, next_run)
                    self._info(
                        "Connector will not run, next run in: {0} seconds", next_run
                    )
                self._sleep(delay_sec=run_interval)
            except (KeyboardInterrupt, SystemExit):
                self._info("CrowdStrike connector stopping...")
                exit(0)
            except Exception as e:  # noqa: B902
                # Broad catch keeps the loop alive across transient API errors.
                self._error("CrowdStrike connector internal error: {0}", str(e))
                self._sleep()

    def _initiate_work(self, timestamp: int) -> str:
        """Register a new OpenCTI work unit for this run and return its id."""
        datetime_str = timestamp_to_datetime(timestamp)
        friendly_name = f"{self.helper.connect_name} @ {datetime_str}"
        work_id = self.helper.api.work.initiate_work(
            self.helper.connect_id, friendly_name
        )
        self._info("New work '{0}' initiated", work_id)
        return work_id

    def _complete_work(self, work_id: str, message: str) -> None:
        """Mark the OpenCTI work unit as processed."""
        self.helper.api.work.to_processed(work_id, message)

    def _get_interval(self) -> int:
        return int(self.interval_sec)

    def _info(self, msg: str, *args: Any) -> None:
        """Log at info level using str.format-style arguments."""
        fmt_msg = msg.format(*args)
        self.helper.log_info(fmt_msg)

    def _error(self, msg: str, *args: Any) -> None:
        """Log at error level using str.format-style arguments."""
        fmt_msg = msg.format(*args)
        self.helper.log_error(fmt_msg)
| 36.995192 | 88 | 0.65666 |
2b19f475be5bed2988cfc5712787a6e3fa36c43c | 897 | py | Python | hypercollate/__init__.py | HuygensING/hyper-collate-python-proxy | bb71620cf8ac32835003ac11ec3b899464c0b223 | [
"Apache-2.0"
] | null | null | null | hypercollate/__init__.py | HuygensING/hyper-collate-python-proxy | bb71620cf8ac32835003ac11ec3b899464c0b223 | [
"Apache-2.0"
] | null | null | null | hypercollate/__init__.py | HuygensING/hyper-collate-python-proxy | bb71620cf8ac32835003ac11ec3b899464c0b223 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Copyright 2018 Huygens ING
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import glob
from os.path import dirname, basename, isfile
from hypercollate.collation_proxy import CollationProxy
from hypercollate.hypercollate_client import HyperCollateClient
# Enumerate the package's modules so ``from hypercollate import *`` re-exports them.
modules = glob.glob(dirname(__file__) + "/*.py")
# Exclude the package initializer itself: exporting "__init__" through __all__
# is never a meaningful name for star-import callers.
__all__ = [
    basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py")
]
| 32.035714 | 75 | 0.754738 |
da3698cf1ecfcb19aec85cf4efcdef1ecd07a0ab | 1,089 | py | Python | conkit/io/tests/test__cache.py | mesdaghi/conkit | 01468761352bd3ac5078e5e9fef6f73c8c49036e | [
"BSD-3-Clause"
] | 12 | 2017-06-12T17:20:32.000Z | 2021-12-10T09:35:26.000Z | conkit/io/tests/test__cache.py | mesdaghi/conkit | 01468761352bd3ac5078e5e9fef6f73c8c49036e | [
"BSD-3-Clause"
] | 60 | 2017-02-08T19:29:34.000Z | 2022-03-17T16:00:54.000Z | conkit/io/tests/test__cache.py | mesdaghi/conkit | 01468761352bd3ac5078e5e9fef6f73c8c49036e | [
"BSD-3-Clause"
] | 12 | 2017-09-25T07:25:35.000Z | 2022-02-27T18:59:13.000Z | """Testing facility for conkit.io._cache"""
__author__ = "Felix Simkovic"
__date__ = "19 Jun 2017"
import unittest
from conkit.io._cache import ParserCache
class TestParserCache(unittest.TestCase):
    """Membership checks on ParserCache and its per-kind parser registries.

    Uses assertIn/assertNotIn instead of assertTrue(x in y)/assertFalse so a
    failure reports both the key and the container, not just "False is not true".
    """

    def test_1(self):
        c = ParserCache()
        self.assertIn("casprr", c)

    def test_2(self):
        c = ParserCache()
        self.assertNotIn("foo", c)

    def test_3(self):
        c = ParserCache()
        self.assertIn("casprr", c.contact_file_parsers)

    def test_4(self):
        c = ParserCache()
        self.assertNotIn("bar", c.contact_file_parsers)

    def test_5(self):
        c = ParserCache()
        self.assertIn("fasta", c.sequence_file_parsers)

    def test_6(self):
        c = ParserCache()
        self.assertNotIn("baz", c.sequence_file_parsers)

    def test_7(self):
        # A sequence format must not appear among contact-file parsers.
        c = ParserCache()
        self.assertNotIn("fasta", c.contact_file_parsers)

    def test_8(self):
        # A contact format must not appear among sequence-file parsers.
        c = ParserCache()
        self.assertNotIn("casprr", c.sequence_file_parsers)


if __name__ == "__main__":
    unittest.main(verbosity=2)
f7ebd419c164252410a2a02d959583784ada17d7 | 5,697 | py | Python | report_crawler/report_crawler/parser/parser_001/_S/SWU001.py | HeadCow/ARPS | fa7257f2aaee70391b8def527dd09f59aa5d26fa | [
"MIT"
] | 1 | 2021-04-09T09:49:11.000Z | 2021-04-09T09:49:11.000Z | report_crawler/report_crawler/parser/parser_001/_S/SWU001.py | HeadCow/ARPS | fa7257f2aaee70391b8def527dd09f59aa5d26fa | [
"MIT"
] | null | null | null | report_crawler/report_crawler/parser/parser_001/_S/SWU001.py | HeadCow/ARPS | fa7257f2aaee70391b8def527dd09f59aa5d26fa | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import re
def Filter(text, ab_sign=0):
    """Strip a trailing labelled section (title/time/address/speaker/...) from *text*.

    Each pattern matches a Chinese/English section label (e.g. 报告题目/Title,
    时间/Time); when one is found, everything from that label to the end of the
    string is removed, so a field value extracted by Parser() does not bleed
    into the next section.
    NOTE(review): ``ab_sign`` is accepted but never read — presumably a leftover
    flag distinguishing abstract/biography handling; confirm before removing.
    """
    # title
    if re.search(u"(((报[ ]*告|讲[ ]*座)*(主[ ]*题|题[ ]*目))([ ((](Title|Topic))*|Title|Topic)[)) ]*[::.]+", text) is not None:
        text = re.sub(u"(((报[ ]*告|讲[ ]*座)*(主[ ]*题|题[ ]*目))([ ((](Title|Topic))*|Title|Topic)[)) ]*[::.]+[\s\S]*", '', text)
    # time
    if re.search(u"(((报[ ]*告|讲[ ]*座)*(日期及)*(时[ ]*间|日[ ]*期))([ ((]Time)*|Time)[)) ]*[::.]+", text) is not None:
        text = re.sub(u"(((报[ ]*告|讲[ ]*座)*(日期及)*(时[ ]*间|日[ ]*期))([ ((]Time)*|Time)[)) ]*[::.]+[\s\S]*", '', text)
    # address
    if re.search(u"(((报[ ]*告|讲[ ]*座){0,1}地[ ]*点)([ ((](Address|Venue|Location|Meeting Room|Place))*|Address|Venue|Location|Meeting Room|Place)[)) ]*[::.]+", text) is not None:
        text = re.sub(u"(((报[ ]*告|讲[ ]*座){0,1}地[ ]*点)([ ((](Address|Venue|Location|Meeting Room|Place))*|Address|Venue|Location|Meeting Room|Place)[)) ]*[::.]+[\s\S]*", '', text)
    # speaker
    if re.search(u"(((讲[ ]*授|演[ ]*讲|报[ ]*告|主[ ]*讲)[ ]*(人|专[ ]*家|嘉[ ]*宾)|讲[ ]*(师|者)|主[ ]*讲)([ ((]Speaker)*|Speaker)[)) ]*[::.]+", text) is not None:
        text = re.sub(u"(((讲[ ]*授|演[ ]*讲|报[ ]*告|主[ ]*讲)[ ]*(人|专[ ]*家|嘉[ ]*宾)|讲[ ]*(师|者)|主[ ]*讲)([ ((]Speaker)*|Speaker)[)) ]*[::.]+[\s\S]*", '', text)
    # abstract
    if re.search(u"((((报告|讲座|内容)*(摘要|内容|提要))|(报告|讲座|内容)简介)([ ((]Abstract)*|Abstract)[)) ]*[::.]+", text) is not None:
        text = re.sub(u"((((报告|讲座|内容)*(摘要|内容|提要))|(报告|讲座|内容)简介)([ ((]Abstract)*|Abstract)[)) ]*[::.]+[\s\S]*", '', text)
    # biography
    if re.search(u"(((讲座|主讲|报告|演讲|讲)(者|人|师|专家|嘉宾)|个人)(及其)*(简介|介绍|简历)([ ((](Biography|Bio|Short-Biography|Short bio))*|Biography|Bio|Short-Biography|Short bio)[)) ]*[::.]+", text) is not None:
        text = re.sub(u"(((讲座|主讲|报告|演讲|讲)(者|人|师|专家|嘉宾)|个人)(及其)*(简介|介绍|简历)([ ((](Biography|Bio|Short-Biography|Short bio))*|Biography|Bio|Short-Biography|Short bio)[)) ]*[::.]+[\s\S]*", '', text)
    # chairman
    if re.search(u"主[ ]*持[ ]*(人)*([ ((]Chair)*[)) ]*[::.]+", text) is not None:
        text = re.sub(u"主[ ]*持[ ]*(人)*([ ((]Chair)*[)) ]*[::.]+[\s\S]*", '', text)
    # invitee
    if re.search(u"邀[ ]*请[ ]*人([ ((]Invitee)*[)) ]*[::.]+", text) is not None:
        text = re.sub(u"邀[ ]*请[ ]*人([ ((]Invitee)*[)) ]*[::.]+[\s\S]*", '', text)
    # others: boilerplate footers ("welcome all...", numbered talk headers,
    # view counts, attachments, organizer lines, numbered list items).
    if re.search(u"欢迎(各位|广大)", text) is not None:
        text = re.sub(u"欢迎(各位|广大)[\s\S]*", '', text)
    if re.search(u"报[ ]*告[ ]*([一二三四五]|[\d])[ ]*[::.]*", text) is not None:
        text = re.sub(u"报[ ]*告[ ]*([一二三四五]|[\d])[ ]*[::.]*[\s\S]*", '', text)
    if re.search(u"查看次数[::.]", text) is not None:
        text = re.sub(u"查看次数[::.][\s\S]*", '', text)
    if re.search(u"附件下载[::.]", text) is not None:
        text = re.sub(u"附件下载[::.][\s\S]*", '', text)
    if re.search(u"(主办|讲座|报告|演讲)(人)*(单位|企业)[::.]", text) is not None:
        text = re.sub(u"(主办|讲座|报告|演讲)(人)*(单位|企业)[::.][\s\S]*", '', text)
    if re.search(u"[一二三四五六七八九][、.]", text) != None:
        text = re.sub(u"[一二三四五六七八九][、.][\s\S]*", '', text)
    return text
def Parser(text, sub_linefeed):
    """Extract talk-announcement fields from *text* into a dict.

    Returns a dict with keys title/time/address/speaker/abstract/biography;
    each value is the text following its section label, trimmed of any
    following section by Filter(). *sub_linefeed* is a callable that
    normalizes line breaks in multi-line fields (abstract, biography).
    NOTE(review): ``text.decode('utf-8')`` expects bytes — this is Python 2-era
    code and would raise AttributeError on a Python 3 ``str``; confirm callers.
    """
    text = text.decode('utf-8')
    messages = {}
    # title
    title_pattern = re.compile(u"(?:(?:(?:报[ ]*告|讲[ ]*座)*(?:主[ ]*题|题[ ]*目))|Title|Topic)[)) ]*[::.]+([\s\S]*)", re.S)
    messages['title'] = re.findall(title_pattern, text)
    if len(messages['title']) == 1:
        messages['title'] = messages['title'][0].strip()
    else:
        messages['title'] = ''
    messages['title'] = Filter(messages['title'], 0)
    # time
    time_pattern = re.compile(u"(?:(?:(?:报[ ]*告|讲[ ]*座)*(?:时[ ]*间|日[ ]*期))|Time)[)) ]*[::.]+([\s\S]*)", re.S)
    messages['time'] = re.findall(time_pattern, text)
    if len(messages['time']) == 1:
        messages['time'] = messages['time'][0].strip()
    else:
        messages['time'] = ''
    messages['time'] = Filter(messages['time'], 0)
    # address
    address_pattern = re.compile(u"(?:(?:(?:报[ ]*告|讲[ ]*座){0,1}地[ ]*点)|Address|Venue|Location|Meeting Room|Place)[)) ]*[::.]+([\s\S]*)", re.S)
    messages['address'] = re.findall(address_pattern, text)
    if len(messages['address']) == 1:
        messages['address'] = messages['address'][0].strip()
    else:
        messages['address'] = ''
    messages['address'] = Filter(messages['address'], 0)
    # speaker
    speaker_pattern = re.compile(u"(?:(?:讲[ ]*授|演[ ]*讲|报[ ]*告|主[ ]*讲)[ ]*(?:人|专[ ]*家|嘉[ ]*宾)|讲[ ]*(?:师|者)|主[ ]*讲|Speaker)[)) ]*[::.]+([\s\S]*)", re.S)
    messages['speaker'] = re.findall(speaker_pattern, text)
    if len(messages['speaker']) == 1:
        messages['speaker'] = messages['speaker'][0].strip()
    else:
        messages['speaker'] = ''
    messages['speaker'] = Filter(messages['speaker'], 0)
    # abstract (multi-line: run through sub_linefeed before filtering)
    abstract_pattern = re.compile(u"(?:(?:(?:报告|讲座|内容)*(?:摘要|内容|提要))|(?:报告|讲座|内容)简介|Abstract)[)) ]*[::.]+([\s\S]*)", re.S)
    messages['abstract'] = re.findall(abstract_pattern, text)
    if len(messages['abstract']) == 1:
        messages['abstract'] = sub_linefeed(messages['abstract'][0].strip())
    else:
        messages['abstract'] = ''
    messages['abstract'] = Filter(messages['abstract'], 1)
    # biography (multi-line: run through sub_linefeed before filtering)
    biography_pattern = re.compile(u"(?:(?:(?:讲座|主讲|报告|演讲|讲)(?:者|人|师|专家|嘉宾)|个人)(?:及其)*(?:简介|介绍|简历)|Biography|Bio|Short-Biography|Short bio)[)) ]*[::.]+([\s\S]*)", re.S)
    messages['biography'] = re.findall(biography_pattern, text)
    if len(messages['biography']) == 1:
        messages['biography'] = sub_linefeed(messages['biography'][0].strip())
    else:
        messages['biography'] = ''
    messages['biography'] = Filter(messages['biography'], 1)
    # If the speaker is missing, try to recover it from the biography:
    # first a Chinese "name + academic title" prefix, then a capitalized
    # English name prefix.
    if messages['speaker'] == '':
        speakerFromBioChina = re.match(u"(.*?)(教授|副教授|博士|讲师)", messages['biography'])
        messages['speaker'] = '' if speakerFromBioChina is None else speakerFromBioChina.group()
    if messages['speaker'] == '':
        speakerFromBioEng = re.match(u"([A-Z][a-zA-Z]*[ .]*)+", messages['biography'])
        messages['speaker'] = '' if speakerFromBioEng is None else speakerFromBioEng.group()
    return messages
| 46.696721 | 188 | 0.543971 |
65bd8e0ccfec8de1b923b7ccd5125f1dfdd55364 | 2,078 | py | Python | tf_pwa/tests/test_vis.py | ReynLieu/tf-pwa | f354b5036bc8c37ffba95849de5ec3367934eef8 | [
"MIT"
] | 4 | 2021-05-10T15:17:24.000Z | 2021-08-16T07:40:06.000Z | tf_pwa/tests/test_vis.py | ReynLieu/tf-pwa | f354b5036bc8c37ffba95849de5ec3367934eef8 | [
"MIT"
] | 45 | 2020-10-24T08:26:19.000Z | 2022-03-20T06:14:58.000Z | tf_pwa/tests/test_vis.py | ReynLieu/tf-pwa | f354b5036bc8c37ffba95849de5ec3367934eef8 | [
"MIT"
] | 8 | 2020-10-24T06:41:06.000Z | 2022-01-03T01:29:49.000Z | import matplotlib.pyplot as plt
from tf_pwa.particle import BaseDecay, BaseParticle, DecayChain
from tf_pwa.vis import *
ex_result = """
digraph {
rankdir=LR;
node [shape=point];
edge [arrowhead=none, labelfloat=true];
"A" [shape=none];
"B" [shape=none];
"C" [shape=none];
"D" [shape=none];
"E" [shape=none];
"F" [shape=none];
{ rank=same "A" };
{ rank=same "B","D","E","F","C" };
"A" -> "A->chain0_node_0+D";
"A->chain0_node_0+D" -> "chain0_node_0->chain0_node_2+chain0_node_3";
"A->chain0_node_0+D" -> "D";
"chain0_node_0->chain0_node_2+chain0_node_3" -> "chain0_node_2->B+F";
"chain0_node_0->chain0_node_2+chain0_node_3" -> "chain0_node_3->C+E";
"chain0_node_2->B+F" -> "B";
"chain0_node_2->B+F" -> "F";
"chain0_node_3->C+E" -> "C";
"chain0_node_3->C+E" -> "E";
}
"""
def test_dot():
    """DotGenerator's first chain source must equal dot_chain(..., True) and
    differ from dot_chain(..., False); 5 final particles give (2*5-3)!! chains."""
    # Same creation order as before: A, C, B, D, E, F (dicts preserve order).
    p = {label: BaseParticle(label) for label in "ACBDEF"}
    resonance = BaseParticle("R")
    BaseDecay(resonance, [p["B"], p["D"]])
    BaseDecay(p["A"], [resonance, p["C"]])
    generator = DotGenerator(p["A"])
    first_chain = p["A"].chain_decay()[0]
    first_source = generator.get_dot_source()[0]
    assert first_source == DotGenerator.dot_chain(first_chain, True)
    assert first_source != DotGenerator.dot_chain(first_chain, False)
    # Final-state order matches the original locals: B, C, D, then F, E.
    chains = DecayChain.from_particles(p["A"], [p[k] for k in "BCDFE"])
    assert len(chains) == 105  # (2*5-3)!!
def remove_same(decs):
    """Return decs with topology duplicates removed.

    The first element is always kept; each later element is kept only if
    its .topology_same() is False against every representative kept so far.
    """
    kept = [decs[0]]
    for candidate in decs[1:]:
        if not any(candidate.topology_same(seen) for seen in kept):
            kept.append(candidate)
    return kept
def test_plot():
    """Render each distinct 4-body decay topology into a 3x5 grid image."""
    finals = [BaseParticle(name) for name in ["C", "D", "E", "B"]]
    topologies = remove_same(DecayChain.from_particles("A", finals))
    plt.figure(figsize=(15, 9))
    for index, chain in enumerate(topologies):
        axis = plt.subplot(3, 5, index + 1)
        plot_decay_struct(chain, axis)
    # Strip all padding so the subplots tile the full canvas.
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.savefig("topo_4.png", dpi=300, pad_inches=0)
| 28.081081 | 77 | 0.586141 |
203126de6ae9ed45c912d78f50701dbed657eda5 | 1,813 | py | Python | src/svphase/scripts/plot_cov.py | l337x911/inphadel | 7d5ad58d90d745ed82226a8b0b983875cbe1d26e | [
"CC-BY-3.0"
] | 3 | 2015-11-20T02:27:23.000Z | 2020-04-26T07:19:18.000Z | src/svphase/scripts/plot_cov.py | l337x911/inphadel | 7d5ad58d90d745ed82226a8b0b983875cbe1d26e | [
"CC-BY-3.0"
] | null | null | null | src/svphase/scripts/plot_cov.py | l337x911/inphadel | 7d5ad58d90d745ed82226a8b0b983875cbe1d26e | [
"CC-BY-3.0"
] | null | null | null | import numpy as na
from matplotlib import pyplot as plt
# Input BED-like files with per-interval coverage: deletion calls ("sv") and
# random non-deletion intervals ("nonsv"), for HiC and WGS data respectively.
# NOTE(review): absolute, user-specific paths -- consider making these CLI args.
hic_sv_fpath = '/home/anand/Projects/assembly/data/gm12878/del1kb.chr20o.hic.bed'
wgs_sv_fpath = '/home/anand/Projects/assembly/data/gm12878/del1kb.chr20.wgs.bed'
hic_nonsv_fpath = '/home/anand/Projects/assembly/data/gm12878/nondel1kb.rand.chr20o.hic.bed'
wgs_nonsv_fpath = '/home/anand/Projects/assembly/data/gm12878/nondel1kb.rand.chr20.wgs.bed'
class GenomeDataset(object):
  """Plot settings for one dataset: horizontal jitter offset and color."""
  def __init__(self, x, c):
    # x: scale/sign of the horizontal jitter applied to scatter points
    # c: matplotlib color code for the dataset
    self.x, self.c = x, c
def get_cov_per_base(fpath):
  """Read a tab-separated coverage file and return per-line ratios.

  For every line, the ratio of the 4th to the 6th column (0-based indices
  3 and 5) is computed; lines must have at least six columns.
  """
  ratios = []
  # Open in text mode: the original 'rb' mode yields bytes, and
  # bytes.strip().split('\t') with a str separator raises TypeError on
  # Python 3. The files are plain tab-separated text.
  with open(fpath) as f:
    for line in f:
      tokens = line.strip().split('\t')
      ratios.append(float(tokens[3]) / float(tokens[5]))
  return ratios
# One figure/axes comparing coverage ratios: background (expected) vs SV calls.
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Jitter offsets and colors separate the two sequencing datasets visually.
hic = GenomeDataset(+0.25, 'r')
wgs = GenomeDataset(-0.25, 'b')
# Alpha levels: faint for the background cloud, opaque for the SV points.
bg = 0.2
fg = 0.8
markersize = 60
# background
hic_ratios = get_cov_per_base(hic_nonsv_fpath)
l = len(hic_ratios)
ax1.scatter(na.ones(l)+hic.x*na.random.random(l), hic_ratios, s=markersize, alpha=bg, linewidths=0, c=hic.c)
wgs_ratios = get_cov_per_base(wgs_nonsv_fpath)
l = len(wgs_ratios)
ax1.scatter(na.ones(l)+wgs.x*na.random.random(l), wgs_ratios, s=markersize, alpha=bg, linewidths=0, c=wgs.c)
# foreground: coverage over called deletions, shifted to x ~ 2 via the +1
hic_ratios = get_cov_per_base(hic_sv_fpath)
l = len(hic_ratios)
ax1.scatter(na.ones(l)+hic.x*na.random.random(l)+1, hic_ratios, s=markersize, alpha=fg, linewidths=0, c=hic.c, label='HiC')
wgs_ratios = get_cov_per_base(wgs_sv_fpath)
l = len(wgs_ratios)
ax1.scatter(na.ones(l)+wgs.x*na.random.random(l)+1, wgs_ratios, s=markersize, alpha=fg, linewidths=0, c=wgs.c, label='WGS')
ax1.set_ylabel('Reads per bp\n (Background gives expected distribution)', horizontalalignment='center')
plt.title('chr20 deletions called by Mills+Conrad')
plt.legend()
plt.xticks((1,2), ('Background', 'SV'))
plt.show()
| 33.574074 | 123 | 0.731384 |
4c5d75e3b7891bde0030e4024f15306f473d3556 | 2,561 | py | Python | nodegraphtree/tests.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | null | null | null | nodegraphtree/tests.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | null | null | null | nodegraphtree/tests.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | null | null | null | from graph_data import graph_data
import numpy as np
from scipy.linalg import block_diag
from typing import Final
from networkx.generators.random_graphs import watts_strogatz_graph, barabasi_albert_graph, fast_gnp_random_graph
from networkx.linalg.graphmatrix import adjacency_matrix
class Gnp_overfit:
    """G(n,p) task with random labels: feature column 1 equals the label,
    so the label is directly readable from the node feature."""

    name: Final = "Gnp_overfit"

    def __init__(self, p):
        # edge probability of the G(n, p) random graph
        self.p = p

    def get_graph(self, n):
        """Build one random instance; returns (graph_data, labels, train, test)."""
        graph = fast_gnp_random_graph(n, self.p)
        adjacency = adjacency_matrix(graph).todense()
        labels = np.random.choice([0, 1], size=(n))
        features = np.ones([n, 2])
        features[:, 1] = labels
        data = graph_data(adjacency, features)
        node_ids = list(range(n))
        labels = np.asmatrix(labels).reshape((n, -1))
        return (data, labels, node_ids, node_ids)
class Gnp_sign_neighbor:
    """G(n,p) task: a node's label is 1 when the sum of its neighbors'
    normal-distributed features is positive, otherwise 0."""

    def __init__(self, p):
        self.p = p
        self.name = self.__class__.__name__

    def get_graph(self, n):
        """Build one random instance; returns (graph_data, labels, train, test)."""
        graph = fast_gnp_random_graph(n, self.p)
        adjacency = adjacency_matrix(graph).todense()
        features = np.ones([n, 2])
        features[:, 1] = np.random.normal(size=(n))
        # sign(...) in {-1, 0, 1} -> 0.5*sign + 0.5 -> int yields 1 only for +.
        labels = (0.5 * np.sign(adjacency @ features[:, 1]) + 0.5).astype(int)
        labels = labels.reshape((n, -1))
        data = graph_data(adjacency, features)
        node_ids = list(range(n))
        return (data, labels, node_ids, node_ids)
class Gnp_sign_red_neighbor:
    """G(n,p) task: only "red" neighbors count. Column 2 is a random 0/1
    mask; the label is the sign of the masked neighbor-feature sum."""

    def __init__(self, p):
        self.p = p
        self.name = self.__class__.__name__

    def get_graph(self, n):
        """Build one random instance; returns (graph_data, labels, train, test)."""
        graph = fast_gnp_random_graph(n, self.p)
        adjacency = adjacency_matrix(graph).todense()
        features = np.ones([n, 3])
        features[:, 2] = np.random.choice([0, 1], size=(n))
        features[:, 1] = np.random.normal(size=(n))
        # zero-out features of nodes whose mask is 0 before summing over edges
        masked = np.multiply(features[:, 1], features[:, 2])
        labels = (0.5 * np.sign(adjacency @ masked) + 0.5).astype(int)
        labels = labels.reshape((n, -1))
        data = graph_data(adjacency, features)
        node_ids = list(range(n))
        return (data, labels, node_ids, node_ids)
class Gnp_sign_red_blue_neighbor:
    """G(n,p) task: neighbors contribute with a random sign. Column 2 is a
    random -1/+1 mask; the label is the sign of the signed neighbor sum."""

    def __init__(self, p):
        self.p = p
        self.name = self.__class__.__name__

    def get_graph(self, n):
        """Build one random instance; returns (graph_data, labels, train, test)."""
        graph = fast_gnp_random_graph(n, self.p)
        adjacency = adjacency_matrix(graph).todense()
        features = np.ones([n, 3])
        features[:, 2] = np.random.choice([-1, 1], size=(n))
        features[:, 1] = np.random.normal(size=(n))
        # flip the sign of each node's feature according to its +-1 mask
        signed = np.multiply(features[:, 1], features[:, 2])
        labels = (0.5 * np.sign(adjacency @ signed) + 0.5).astype(int)
        labels = labels.reshape((n, -1))
        data = graph_data(adjacency, features)
        node_ids = list(range(n))
        return (data, labels, node_ids, node_ids)
| 30.488095 | 112 | 0.575947 |
393979657d42611f974e59923f490bc4b9018002 | 2,437 | py | Python | examples/twisted/wamp/pubsub/complex/backend.py | RyanHope/AutobahnPython | 751b140cf2ea3e343cdcbab02d5cf51fb5295fa9 | [
"MIT"
] | null | null | null | examples/twisted/wamp/pubsub/complex/backend.py | RyanHope/AutobahnPython | 751b140cf2ea3e343cdcbab02d5cf51fb5295fa9 | [
"MIT"
] | null | null | null | examples/twisted/wamp/pubsub/complex/backend.py | RyanHope/AutobahnPython | 751b140cf2ea3e343cdcbab02d5cf51fb5295fa9 | [
"MIT"
] | null | null | null | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import print_function
import random
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
    """
    Publishes a payload-free heartbeat event and a complex-payload event
    once per second for as long as the session stays attached.
    """

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")
        tick = 0
        while True:
            # event with no payload
            print("publish: com.myapp.heartbeat")
            self.publish(u'com.myapp.heartbeat')
            # event with positional and keyword payload
            payload = {'counter': tick, 'foo': [1, 2, 3]}
            print("publish: com.myapp.topic2", payload)
            value = random.randint(0, 100)
            self.publish(u'com.myapp.topic2', value, 23, c="Hello", d=payload)
            tick += 1
            yield sleep(1)
if __name__ == '__main__':
    # The router URL can be overridden with the AUTOBAHN_DEMO_ROUTER
    # environment variable; it defaults to a local Crossbar instance.
    runner = ApplicationRunner(
        environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
        u"crossbardemo",  # realm to join on the router
        debug=False,  # optional; log even more details
    )
    # Blocks, running the Component session until the connection closes.
    runner.run(Component)
| 34.814286 | 79 | 0.655724 |
3183a88c730a7fdaff332fc2667b9c2416f2b98e | 1,098 | py | Python | redlist/del_todo.py | hy-kiera/RedList | d2fffa4c58a16ca9877fcccd0525ea4683e3f4dd | [
"MIT"
] | null | null | null | redlist/del_todo.py | hy-kiera/RedList | d2fffa4c58a16ca9877fcccd0525ea4683e3f4dd | [
"MIT"
] | null | null | null | redlist/del_todo.py | hy-kiera/RedList | d2fffa4c58a16ca9877fcccd0525ea4683e3f4dd | [
"MIT"
] | 1 | 2018-06-03T13:47:55.000Z | 2018-06-03T13:47:55.000Z | # -*- coding: utf-8 -*-
import sqlite3
# Prefer the package-relative imports; fall back to top-level imports when the
# module is executed as a plain script (no parent package).
try:
    from . import create_table as ct
    from . import ls as li
except ImportError:  # was a bare `except:`, which also swallowed unrelated errors
    import create_table as ct
    import ls as li
from pathlib import Path
def del_todo():
    """Interactively delete one todo row, matched by its 'what' column.

    Lists the unfinished todos, prompts until the user enters a 'what'
    value that exists in the table, then deletes that row and commits.
    """
    home_dir = str(Path.home())
    conn = sqlite3.connect(home_dir + "/task.db")
    try:
        cur = conn.cursor()
        # TODO: decide whether to list only unfinished rows here or all rows
        # regardless of the 'finished' flag (translated from original note).
        slct_data = "select * from todo where finished = 'n' order by what asc"
        cur.execute(slct_data)
        records = cur.fetchall()
        li.print_list(records)

        delete_data = str(input("What todo do you delete? Please enter the 'what' "))

        # Collect the existing 'what' values so user input can be validated.
        cur.execute("select distinct what from todo")
        cmp_list = [row[0] for row in cur.fetchall()]

        # Re-prompt until the entered value exists in the table.
        while delete_data not in cmp_list:
            print("There is not", delete_data, "Please enter the 'what' in table")
            delete_data = str(input())

        # Parameterized query: safe against SQL injection.
        cur.execute("delete from todo where what = ?", [delete_data])
        conn.commit()
        print("")
    finally:
        # Always release the connection -- the original leaked it on every call.
        conn.close()
| 24.954545 | 78 | 0.704007 |
da0e47d470f6781f28dff30cd75c0c2ec9f0fad2 | 8,842 | py | Python | test/runner/lib/cloud/cs.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | test/runner/lib/cloud/cs.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | test/runner/lib/cloud/cs.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | """CloudStack plugin for integration tests."""
from __future__ import absolute_import, print_function
import json
import os
import re
import time
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
ApplicationError,
display,
SubprocessError,
is_shippable,
)
from lib.http import (
HttpClient,
HttpError,
urlparse,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
docker_network_inspect,
get_docker_container_id,
)
try:
# noinspection PyPep8Naming
import ConfigParser as configparser
except ImportError:
# noinspection PyUnresolvedReferences
import configparser
class CsCloudProvider(CloudProvider):
    """CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
    DOCKER_SIMULATOR_NAME = 'cloudstack-sim'

    def __init__(self, args):
        """
        :type args: TestConfig
        """
        super(CsCloudProvider, self).__init__(args, config_extension='.ini')

        # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
        self.image = 'quay.io/ansible/cloudstack-test-container:1.0.0'
        self.container_name = ''
        self.endpoint = ''
        self.host = ''
        self.port = 0

    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # A static config means no docker simulator is needed.
        if os.path.isfile(self.config_static_path):
            return

        docker = find_executable('docker', required=False)

        if docker:
            return

        skip = 'cloud/%s/' % self.platform
        skipped = [target.name for target in targets if skip in target.aliases]

        if skipped:
            exclude.append(skip)
            display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
                            % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))

    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(CsCloudProvider, self).setup()

        if self._use_static_config():
            self._setup_static()
        else:
            self._setup_dynamic()

    def get_remote_ssh_options(self):
        """Get any additional options needed when delegating tests to a remote instance via SSH.
        :rtype: list[str]
        """
        if self.managed:
            # Reverse-forward the simulator port so the remote can reach it.
            return ['-R', '8888:localhost:8888']

        return []

    def get_docker_run_options(self):
        """Get any additional options needed when delegating tests to a docker container.
        :rtype: list[str]
        """
        if self.managed:
            return ['--link', self.DOCKER_SIMULATOR_NAME]

        return []

    def cleanup(self):
        """Clean up the cloud resource and any temporary configuration files after tests complete."""
        if self.container_name:
            if is_shippable():
                docker_rm(self.args, self.container_name)
            elif not self.args.explain:
                display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)

        super(CsCloudProvider, self).cleanup()

    def _setup_static(self):
        """Configure CloudStack tests for use with static configuration."""
        parser = configparser.RawConfigParser()
        parser.read(self.config_static_path)

        self.endpoint = parser.get('cloudstack', 'endpoint')

        parts = urlparse(self.endpoint)

        self.host = parts.hostname

        if not self.host:
            raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)

        if parts.port:
            self.port = parts.port
        elif parts.scheme == 'http':
            self.port = 80
        elif parts.scheme == 'https':
            self.port = 443
        else:
            raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)

        display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)

        self._wait_for_service()

    def _setup_dynamic(self):
        """Create a CloudStack simulator using docker."""
        config = self._read_config_template()

        self.container_name = self.DOCKER_SIMULATOR_NAME

        results = docker_inspect(self.args, self.container_name)

        # Remove a stopped leftover container so a fresh one can be started.
        if results and not results[0]['State']['Running']:
            docker_rm(self.args, self.container_name)
            results = []

        if results:
            display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
        else:
            display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
            docker_pull(self.args, self.image)
            docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])

            if not self.args.explain:
                display.notice('The CloudStack simulator will probably be ready in 5 - 10 minutes.')

        container_id = get_docker_container_id()

        if container_id:
            display.info('Running in docker container: %s' % container_id, verbosity=1)
            self.host = self._get_simulator_address()
            display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
        else:
            self.host = 'localhost'

        self.port = 8888
        self.endpoint = 'http://%s:%d' % (self.host, self.port)

        self._wait_for_service()

        if self.args.explain:
            values = dict(
                HOST=self.host,
                PORT=str(self.port),
            )
        else:
            credentials = self._get_credentials()

            if self.args.docker:
                host = self.DOCKER_SIMULATOR_NAME
            else:
                host = self.host

            values = dict(
                HOST=host,
                PORT=str(self.port),
                KEY=credentials['apikey'],
                SECRET=credentials['secretkey'],
            )

        config = self._populate_config_template(config, values)

        self._write_config(config)

    def _get_simulator_address(self):
        """Return the simulator container's IP on the docker bridge network."""
        networks = docker_network_inspect(self.args, 'bridge')

        try:
            bridge = [network for network in networks if network['Name'] == 'bridge'][0]
            containers = bridge['Containers']
            container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
            return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
        except Exception:
            # Was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
            # are not intercepted. The error is logged, then re-raised.
            display.error('Failed to process the following docker network inspect output:\n%s' %
                          json.dumps(networks, indent=4, sort_keys=True))
            raise

    def _wait_for_service(self):
        """Wait for the CloudStack service endpoint to accept connections."""
        if self.args.explain:
            return

        client = HttpClient(self.args, always=True)
        endpoint = self.endpoint

        # Up to 29 attempts, 30 seconds apart (~15 minutes total).
        for _ in range(1, 30):
            display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)

            try:
                client.get(endpoint)
                return
            except SubprocessError:
                pass

            time.sleep(30)

        raise ApplicationError('Timeout waiting for CloudStack service.')

    def _get_credentials(self):
        """Wait for the CloudStack simulator to return credentials.
        :rtype: dict[str, str]
        """
        client = HttpClient(self.args, always=True)
        endpoint = '%s/admin.json' % self.endpoint

        for _ in range(1, 30):
            display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)

            response = client.get(endpoint)

            if response.status_code == 200:
                try:
                    return response.json()
                except HttpError as ex:
                    # The body may not be valid JSON yet; log and retry.
                    display.error(ex)

            time.sleep(30)

        raise ApplicationError('Timeout waiting for CloudStack credentials.')
class CsCloudEnvironment(CloudEnvironment):
    """CloudStack cloud environment plugin. Updates integration test environment after delegation."""
    def configure_environment(self, env, cmd):
        """
        :type env: dict[str, str]
        :type cmd: list[str]
        """
        # Point the CloudStack modules at the generated configuration file.
        env['CLOUDSTACK_CONFIG'] = self.config_path

        # Pass the resource prefix through as an extra ansible variable.
        cmd.extend(['-e', 'cs_resource_prefix=%s' % self.resource_prefix])
| 31.805755 | 141 | 0.606198 |
f61a2e9f425d236c761e74852de182d965a87370 | 5,552 | py | Python | simtools/io_handler.py | RaulRPrado/ctamclib | ed18de64724deec403ed5ee06971c377562331ef | [
"BSD-3-Clause"
] | null | null | null | simtools/io_handler.py | RaulRPrado/ctamclib | ed18de64724deec403ed5ee06971c377562331ef | [
"BSD-3-Clause"
] | null | null | null | simtools/io_handler.py | RaulRPrado/ctamclib | ed18de64724deec403ed5ee06971c377562331ef | [
"BSD-3-Clause"
] | null | null | null | """ Module to handle input and output conventions. """
import datetime
from pathlib import Path
import simtools.config as cfg
# Public API of this module. The original list omitted several helpers that
# are defined (and documented) below; they are now exported as well.
__all__ = [
    "getModelOutputDirectory",
    "getLayoutOutputDirectory",
    "getRayTracingOutputDirectory",
    "getCorsikaOutputDirectory",
    "getCameraEfficiencyOutputDirectory",
    "getApplicationOutputDirectory",
    "getArraySimulatorOutputDirectory",
    "getDataFile",
    "getTestDataDirectory",
    "getTestDataFile",
    "getTestOutputDirectory",
    "getTestOutputFile",
    "getTestPlotFile",
]
def _getOutputDirectory(filesLocation, label, mode=None):
"""
Get main output directory for a generic mode
Parameters
----------
filesLocation: str, or Path
Main location of the output files.
label: str
Instance label.
mode: str
Name of the subdirectory (ray-tracing, model etc)
Returns
-------
Path
"""
today = datetime.date.today()
labelDir = label if label is not None else "d-" + str(today)
path = Path(filesLocation).joinpath("simtools-output").joinpath(labelDir)
if mode is not None:
path = path.joinpath(mode)
path.mkdir(parents=True, exist_ok=True)
return path.absolute()
def getModelOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for model files.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="model")
def getLayoutOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for layout files.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="layout")
def getRayTracingOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for ray-tracing files.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="ray-tracing")
def getCorsikaOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for CORSIKA files.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="corsika")
def getCameraEfficiencyOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for camera-efficiency files.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="camera-efficiency")
def getApplicationOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for application plots.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="application-plots")
def getArraySimulatorOutputDirectory(filesLocation, label):
    """
    Return (and create if needed) the output directory for array-simulator files.

    Parameters
    ----------
    filesLocation: str, or Path
        Main location of the output files.
    label: str
        Instance label.

    Returns
    -------
    Path
    """
    return _getOutputDirectory(filesLocation, label, mode="array-simulator")
def getDataFile(parentDir, fileName):
    """
    Get the path of a data file below the dataLocation config directory.

    Parameters
    ----------
    parentDir: str
        Parent directory of the file.
    fileName: str
        File name.

    Returns
    -------
    Path
    """
    dataDir = Path(cfg.get("dataLocation"))
    return dataDir.joinpath(parentDir).joinpath(fileName).absolute()
def getTestDataDirectory():
    """
    Get path of the test data directory: the dataLocation config entry plus
    "test-data". (Earlier docs mentioned "testDataLocation", but the code
    reads the dataLocation entry.)

    Returns
    -------
    Path
    """
    return Path(cfg.get("dataLocation")).joinpath("test-data")
def getTestDataFile(fileName):
    """
    Get the path of a test data file inside the test-data directory
    (derived from the dataLocation config entry).

    Parameters
    ----------
    fileName: str
        File name

    Returns
    -------
    Path
    """
    return getTestDataDirectory().joinpath(fileName)
def getTestOutputDirectory():
    """
    Get path of the test output directory: the dataLocation config entry plus
    "test-output". (Earlier docs mentioned "testDataLocation", but the code
    reads the dataLocation entry.)

    Returns
    -------
    Path
    """
    return Path(cfg.get("dataLocation")).joinpath("test-output")
def getTestOutputFile(fileName):
    """
    Get the path of a test output file inside the test-output directory
    (derived from the dataLocation config entry).

    Parameters
    ----------
    fileName: str
        File name

    Returns
    -------
    Path
    """
    return getTestOutputDirectory().joinpath(fileName)
def getTestPlotFile(fileName):
    """
    Get the path of a test plot file inside the test-plots directory
    (derived from the dataLocation config entry).

    Parameters
    ----------
    fileName: str
        File name

    Returns
    -------
    Path
    """
    plotDir = Path(cfg.get("dataLocation")).joinpath("test-plots")
    return plotDir.joinpath(fileName).absolute()
| 20.87218 | 94 | 0.62518 |
0902d626b7fd6cc5debcdecdf7e390a738d2b3ae | 2,036 | py | Python | venv/lib/python3.6/site-packages/channels/testing/live.py | AzamatGla/channelChokan | 0dbcc8b701d57dcd4c8fe9c0573738c573797c21 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/channels/testing/live.py | AzamatGla/channelChokan | 0dbcc8b701d57dcd4c8fe9c0573738c573797c21 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/channels/testing/live.py | AzamatGla/channelChokan | 0dbcc8b701d57dcd4c8fe9c0573738c573797c21 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import connections
from django.test.testcases import LiveServerTestCase, LiveServerThread
from daphne.endpoints import build_endpoint_description_strings
from daphne.server import Server
from ..routing import get_default_application
class DaphneServerThread(LiveServerThread):
    """
    LiveServerThread subclass that runs Daphne (an ASGI server) instead of
    Django's default WSGI-based live server.
    """
    def run(self):
        """
        Sets up the live server and databases, and then loops over handling
        http requests.
        """
        if self.connections_override:
            # Override this thread's database connections with the ones
            # provided by the main thread.
            for alias, conn in self.connections_override.items():
                connections[alias] = conn
        try:
            self.daphne = self._create_server()
            self.daphne.run()
        except Exception as e:
            # Record the failure and unblock the main thread, which is
            # waiting on is_ready before running tests.
            self.error = e
            self.is_ready.set()
        finally:
            connections.close_all()
    @property
    def port(self):
        # Dynamically fetch real listen port if we were given 0
        if self._port == 0:
            return self.daphne.listening_addresses[0][1]
        return self._port
    @port.setter
    def port(self, value):
        self._port = value
    def _create_server(self):
        # Build a Daphne Server serving the project's default ASGI application.
        endpoints = build_endpoint_description_strings(host=self.host, port=self._port)
        return Server(
            application=get_default_application(),
            endpoints=endpoints,
            signal_handlers=False,
            ws_protocols=getattr(settings, "CHANNELS_WS_PROTOCOLS", None),
            root_path=getattr(settings, "FORCE_SCRIPT_NAME", "") or "",
            # Signals readiness once Daphne has started listening.
            ready_callable=lambda: self.is_ready.set(),
        )
    def terminate(self):
        if hasattr(self, "daphne"):
            # Stop the ASGI (Daphne) server
            self.daphne.stop()
        self.join()
class ChannelsLiveServerTestCase(LiveServerTestCase):
    # Serve the live server with Daphne (ASGI) instead of Django's default
    # WSGI server thread, so Channels consumers are served as well.
    server_thread_class = DaphneServerThread
| 30.38806 | 87 | 0.641454 |
d017f4e1abbb67638d2da498a80525fc50fb0245 | 3,749 | py | Python | fosspay/objects.py | unascribed/fosspay | 7ca4ae5861332bd7311e0092c2c7e286325a0e10 | [
"MIT"
] | 1 | 2020-04-18T08:12:49.000Z | 2020-04-18T08:12:49.000Z | fosspay/objects.py | unascribed/fosspay | 7ca4ae5861332bd7311e0092c2c7e286325a0e10 | [
"MIT"
] | null | null | null | fosspay/objects.py | unascribed/fosspay | 7ca4ae5861332bd7311e0092c2c7e286325a0e10 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, String, Unicode, Boolean, DateTime
from sqlalchemy import ForeignKey, Table, UnicodeText, Text, text
from sqlalchemy.orm import relationship, backref
from sqlalchemy_utils import ChoiceType
from .database import Base
from datetime import datetime
from enum import Enum
import bcrypt
import binascii
import os
import hashlib
class DonationType(Enum):
    """Payment cadence of a donation; persisted via ChoiceType on Donation.type."""
    one_time = "one_time"
    monthly = "monthly"
class User(Base):
    """A registered account; also provides the Flask-Login user interface."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    email = Column(String(256), nullable=False, index=True)
    admin = Column(Boolean())
    password = Column(String)
    created = Column(DateTime)
    password_reset = Column(String(128))
    password_reset_expires = Column(DateTime)
    stripe_customer = Column(String(256))

    def set_password(self, password):
        # Store a salted bcrypt hash of the password, never the plaintext.
        hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt())
        self.password = hashed.decode("utf-8")

    def __init__(self, email, password):
        self.email = email
        self.admin = False
        self.created = datetime.now()
        self.set_password(password)

    def __repr__(self):
        return "<User {}>".format(self.email)

    # Flask-Login interface; most of these features are unused here.
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.email
class Donation(Base):
    """A donation: a one-off payment or a recurring monthly pledge."""
    __tablename__ = 'donations'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    user = relationship("User", backref=backref("donations"))
    project_id = Column(Integer, ForeignKey("projects.id"))
    project = relationship("Project", backref=backref("donations"))
    type = Column(ChoiceType(DonationType, impl=String()))
    amount = Column(Integer, nullable=False)  # in cents (see __repr__)
    created = Column(DateTime, nullable=False)
    updated = Column(DateTime, nullable=False)
    comment = Column(String(512))
    active = Column(Boolean)
    payments = Column(Integer)
    hidden = Column(Boolean, server_default='f', nullable=False)
    charge_on_first = Column(Boolean, server_default='f', nullable=False)

    def __init__(self, user, type, amount, project=None, comment=None, charge_on_first=False):
        self.user = user
        self.type = type
        self.amount = amount
        self.created = datetime.now()
        self.updated = datetime.now()
        # NOTE(review): emailed_about is not a mapped column, so it is never
        # persisted -- confirm whether that is intentional.
        self.emailed_about = False
        self.comment = comment
        self.active = True
        self.payments = 1
        self.charge_on_first = charge_on_first
        if project:
            self.project_id = project.id

    def __repr__(self):
        dollars = "{:.2f}".format(self.amount / 100)
        return "<Donation {} from {}: ${} ({})>".format(
            self.id, self.user.email, dollars, self.type)
class Project(Base):
    """A named project that donations can be earmarked for."""
    __tablename__ = 'projects'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    created = Column(DateTime, nullable=False)
    def __init__(self, name):
        self.name = name
        self.created = datetime.now()
    def __repr__(self):
        return "<Project {} {}>".format(self.id, self.name)
class Invoice(Base):
    """A one-off invoice, addressed publicly by a random hex token."""
    __tablename__ = 'invoices'
    id = Column(Integer, primary_key=True)
    created = Column(DateTime, nullable=False)
    external_id = Column(String(16), index=True)
    amount = Column(Integer, nullable=False)
    comment = Column(String(512), nullable=False)
    def __init__(self):
        # 8 random bytes -> 16 hex chars, used as the public invoice id.
        # NOTE(review): amount/comment are nullable=False but not set here --
        # presumably callers assign them before commit; confirm.
        self.external_id = binascii.hexlify(os.urandom(8)).decode()
        self.created = datetime.now()
| 32.042735 | 94 | 0.656175 |
8249af61f2f2b3d75908d3c88a2e7bd365da142b | 2,308 | py | Python | stock.py | OdooCommunityWidgets/product_image_list_view | e969fb0b05ef4bee0e5bce500a34b02f7864c123 | [
"MIT"
] | 2 | 2015-03-25T18:24:51.000Z | 2017-01-02T15:00:24.000Z | stock.py | OdooCommunityWidgets/product_image_list_view | e969fb0b05ef4bee0e5bce500a34b02f7864c123 | [
"MIT"
] | 3 | 2015-04-02T06:27:54.000Z | 2015-06-29T07:37:41.000Z | stock.py | OdooCommunityWidgets/product_image_list_view | e969fb0b05ef4bee0e5bce500a34b02f7864c123 | [
"MIT"
] | 7 | 2015-05-31T19:17:10.000Z | 2018-10-29T12:59:41.000Z | from openerp.osv import fields, osv
class stock_move(osv.Model):
    _name = 'stock.move'
    _inherit = 'stock.move'

    def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
                            loc_dest_id=False, partner_id=False):
        """Extend the stock.move product onchange so the move also carries
        the selected product's small image."""
        res_prod = super(stock_move, self).onchange_product_id(
            cr, uid, ids, prod_id, loc_id, loc_dest_id, partner_id)
        if prod_id:
            # Only browse when a real product id is set; the onchange also
            # fires with prod_id=False when the field is cleared, and browsing
            # a False id yields an empty record.
            obj = self.pool.get('product.product').browse(cr, uid, prod_id)
            res_prod['value'].update({'image_small': obj.image_small})
        else:
            # Clear the image when the product is removed.
            res_prod['value'].update({'image_small': False})
        return res_prod

    _columns = {
        'image_small': fields.binary('Product Image'),
    }

stock_move()
class sale_order_line(osv.Model):
    _name = 'sale.order.line'
    _inherit = 'sale.order.line'

    _columns = {
        'image_small': fields.binary('Product Image'),
    }

    def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False,image_small=False, context=None):
        """Extend the sale line product onchange so the line also carries
        the selected product's small image."""
        context = context or {}
        res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
            uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
            lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
        if product:
            # Only browse when a real product id is set; the onchange also
            # fires with product=False when the field is cleared.
            product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
            res['value'].update({'image_small': product_obj.image_small or False})
        else:
            res['value'].update({'image_small': False})
        return res

sale_order_line()
class sale_order(osv.Model):
    """Extends sale.order so stock moves created from order lines carry the
    line's product image."""
    _name = 'sale.order'
    _inherit = 'sale.order'

    def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):
        values = super(sale_order, self)._prepare_order_line_move(
            cr, uid, order=order, line=line, picking_id=picking_id,
            date_planned=date_planned, context=context)
        # Copy the image from the sale order line onto the stock move values.
        values['image_small'] = line.image_small
        return values

sale_order()
| 40.491228 | 199 | 0.642114 |
f18aaee79f97e884ed5d6e3eaf26e530468f2f22 | 1,788 | py | Python | resolwe/permissions/tests/test_tool.py | zagm/resolwe | da371a3ec0260a45ccab848704c6a339a0de79cc | [
"Apache-2.0"
] | null | null | null | resolwe/permissions/tests/test_tool.py | zagm/resolwe | da371a3ec0260a45ccab848704c6a339a0de79cc | [
"Apache-2.0"
] | null | null | null | resolwe/permissions/tests/test_tool.py | zagm/resolwe | da371a3ec0260a45ccab848704c6a339a0de79cc | [
"Apache-2.0"
] | null | null | null | # pylint: disable=missing-docstring
from rest_framework import status
from resolwe.flow.models import Process
from resolwe.flow.views import ProcessViewSet
from resolwe.test import ResolweAPITestCase
class ProcessTestCase(ResolweAPITestCase):
    """Permission tests for the Process API endpoint."""

    fixtures = ['users.yaml', 'processes.yaml', 'data.yaml', 'collections.yaml', 'permissions.yaml']

    def setUp(self):
        self.process1 = Process.objects.get(pk=1)
        # Payload used by the create (POST) test below.
        self.post_data = {
            'slug': 'new-process',
            'name': 'New process',
            'type': 'data:test:process:',
            'input_schema': [{'name': 'test_field'}],
            'run': {'bash': 'echo $PATH'},
        }
        self.resource_name = 'process'
        self.viewset = ProcessViewSet
        super().setUp()

    def test_get_list(self):
        # Regular user sees both fixture processes
        response = self._get_list(self.user1)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 2)

    def test_get_list_public_user(self):
        # Anonymous user sees only the public process
        response = self._get_list()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)

    def test_get_list_admin(self):
        response = self._get_list(self.admin)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 2)

    def test_post(self):
        response = self._post(self.post_data, self.admin)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_patch(self):
        # Processes are immutable via the API
        response = self._patch(1, {'name': 'Hacked process'}, self.admin)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_delete(self):
        # Processes cannot be deleted via the API
        response = self._delete(1, self.admin)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
6aaca373b8574b7a98be3344e1f44dbc81dc8256 | 7,705 | py | Python | tests/unit/bokeh/io/test_export.py | tcmetzger/bokeh | 5daff21bfb7e10b69ff9aa2f35eb506777a38264 | [
"BSD-3-Clause"
] | 1 | 2020-08-07T18:44:46.000Z | 2020-08-07T18:44:46.000Z | tests/unit/bokeh/io/test_export.py | tcmetzger/bokeh | 5daff21bfb7e10b69ff9aa2f35eb506777a38264 | [
"BSD-3-Clause"
] | 12 | 2020-08-26T20:19:29.000Z | 2020-08-26T20:19:52.000Z | tests/unit/bokeh/io/test_export.py | tcmetzger/bokeh | 5daff21bfb7e10b69ff9aa2f35eb506777a38264 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import re
from typing import Tuple
# External imports
from flaky import flaky
# Bokeh imports
from bokeh.core.validation import silenced
from bokeh.core.validation.warnings import MISSING_RENDERERS
from bokeh.io.webdriver import webdriver_control
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, Plot, Range1d, Rect
from bokeh.plotting import figure
from bokeh.resources import Resources
# Module under test
import bokeh.io.export as bie # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
@pytest.fixture(scope="module", params=["chromium", "firefox"])
def webdriver(request):
    # Module-scoped driver fixture, parametrized over both supported browsers.
    driver = webdriver_control.create(request.param)
    try:
        yield driver
    finally:
        # Ensure the browser process is terminated even if a test errors
        webdriver_control.terminate(driver)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
@flaky(max_runs=10)
@pytest.mark.selenium
@pytest.mark.parametrize("dimensions", [(14, 14), (44, 44), (144, 144), (444, 444), (1444, 1444)])
def test_get_screenshot_as_png(webdriver, dimensions: Tuple[int, int]) -> None:
    # An empty plot rendered entirely in the background/border color should
    # produce a screenshot of exactly the requested pixel dimensions.
    width, height = dimensions
    border = 5
    layout = Plot(x_range=Range1d(), y_range=Range1d(),
                  plot_height=width, plot_width=height,
                  min_border=border,
                  hidpi=False,
                  toolbar_location=None,
                  outline_line_color=None, background_fill_color="#00ff00", border_fill_color="#00ff00")
    with silenced(MISSING_RENDERERS):
        png = bie.get_screenshot_as_png(layout, driver=webdriver)
    # a WxH image of solid green (#00ff00) RGBA pixels
    assert png.size == (width, height)
    data = png.tobytes()
    assert len(data) == 4*width*height
    assert data == b"\x00\xff\x00\xff"*width*height
@flaky(max_runs=10)
@pytest.mark.selenium
@pytest.mark.parametrize("dimensions", [(14, 14), (44, 44), (144, 144), (444, 444), (1444, 1444)])
def test_get_screenshot_as_png_with_glyph(webdriver, dimensions: Tuple[int, int]) -> None:
    # A red rect covering the full data range should fill the inner plot
    # area (everything except the green border) with red pixels.
    width, height = dimensions
    border = 5
    plot = Plot(x_range=Range1d(-1, 1), y_range=Range1d(-1, 1),
                plot_height=width, plot_width=height,
                toolbar_location=None,
                min_border=border,
                hidpi=False,
                outline_line_color=None, background_fill_color="#00ff00", border_fill_color="#00ff00")
    glyph = Rect(x="x", y="y", width=2, height=2, fill_color="#ff0000", line_color="#ff0000")
    plot.add_glyph(ColumnDataSource(data=dict(x=[0], y=[0])), glyph)

    png = bie.get_screenshot_as_png(plot, driver=webdriver)
    assert png.size == (width, height)

    raw = png.tobytes()
    assert len(raw) == 4*width*height

    # count red pixels in the center area (4 bytes per RGBA pixel)
    red = b"\xff\x00\x00\xff"
    count = sum(1 for i in range(width*height) if raw[i*4:i*4+4] == red)

    # inner area = total minus the border frame
    w, h, b = width, height, border
    assert count == w*h - 2*b*(w + h) + 4*b**2
@flaky(max_runs=10)
@pytest.mark.selenium
def test_get_screenshot_as_png_with_unicode_minified(webdriver) -> None:
    # Korean plot title ("a small test for unicode support") must render
    # with minified inline resources without raising
    p = figure(title="유니 코드 지원을위한 작은 테스트")
    with silenced(MISSING_RENDERERS):
        png = bie.get_screenshot_as_png(p, driver=webdriver, resources=Resources(mode="inline", minified=True))
    assert len(png.tobytes()) > 0
@flaky(max_runs=10)
@pytest.mark.selenium
def test_get_screenshot_as_png_with_unicode_unminified(webdriver) -> None:
    # Same as above but with unminified inline resources
    p = figure(title="유니 코드 지원을위한 작은 테스트")
    with silenced(MISSING_RENDERERS):
        png = bie.get_screenshot_as_png(p, driver=webdriver, resources=Resources(mode="inline", minified=False))
    assert len(png.tobytes()) > 0
@flaky(max_runs=10)
@pytest.mark.selenium
def test_get_svgs_no_svg_present() -> None:
    # Default (canvas) output backend -> no SVG elements to collect;
    # note: no driver is passed, get_svgs creates/uses its own default
    layout = Plot(x_range=Range1d(), y_range=Range1d(),
              plot_height=20, plot_width=20, toolbar_location=None)
    with silenced(MISSING_RENDERERS):
        svgs = bie.get_svgs(layout)
    assert svgs == []
@flaky(max_runs=10)
@pytest.mark.selenium
def test_get_svgs_with_svg_present(webdriver) -> None:

    def fix_ids(svg):
        # Element ids are random 12-char tokens per render; normalise them
        # to "X" so two renders can be compared for equality
        svg = re.sub(r'id="\w{12}"', 'id="X"', svg)
        svg = re.sub(r'url\(#\w{12}\)', 'url(#X)', svg)
        return svg

    # SVG output backend -> get_svgs returns the serialised SVG document
    layout = Plot(x_range=Range1d(), y_range=Range1d(),
                  plot_height=20, plot_width=20, toolbar_location=None,
                  outline_line_color=None, border_fill_color=None,
                  background_fill_color="red", output_backend="svg")
    with silenced(MISSING_RENDERERS):
        svg0 = fix_ids(bie.get_svgs(layout, driver=webdriver)[0])
        svg1 = fix_ids(bie.get_svgs(layout, driver=webdriver)[0])
    # Expected document: a 20x20 SVG with a single red background rect
    svg2 = (
        '<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="20" height="20">'
        '<defs/>'
        '<g>'
        '<g transform="scale(1,1) translate(0.5,0.5) translate(0, 0)">'
        '<rect fill="rgb(255,0,0)" stroke="none" x="5" y="5" width="10" height="10" fill-opacity="1"/>'
        '<g/>'
        '</g>'
        '<g transform="scale(1,1) translate(0.5,0.5) translate(0, 0)"/>'
        '</g>'
        '</svg>'
    )
    # Rendering must be deterministic (modulo the normalised ids)
    assert svg0 == svg2
    assert svg1 == svg2
def test_get_layout_html_resets_plot_dims() -> None:
    # Passing explicit height/width must not permanently alter the plot
    initial_height, initial_width = 200, 250
    layout = Plot(x_range=Range1d(), y_range=Range1d(),
                  plot_height=initial_height, plot_width=initial_width)
    with silenced(MISSING_RENDERERS):
        bie.get_layout_html(layout, height=100, width=100)
    assert layout.plot_height == initial_height
    assert layout.plot_width == initial_width
def test_layout_html_on_child_first() -> None:
    # Rendering a plot standalone first, then inside a row, must work
    p = Plot(x_range=Range1d(), y_range=Range1d())
    with silenced(MISSING_RENDERERS):
        bie.get_layout_html(p, height=100, width=100)
    with silenced(MISSING_RENDERERS):
        layout = row(p)
        bie.get_layout_html(layout)
def test_layout_html_on_parent_first() -> None:
    # Reverse order of the previous test: row first, then standalone
    p = Plot(x_range=Range1d(), y_range=Range1d())
    with silenced(MISSING_RENDERERS):
        layout = row(p)
        bie.get_layout_html(layout)
    with silenced(MISSING_RENDERERS):
        bie.get_layout_html(p, height=100, width=100)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 35.182648 | 130 | 0.550422 |
a03489402643b7974de1cb24b27aa61d3dde23aa | 213,398 | py | Python | modules/s3cfg.py | krypt0x/eden | 63679c36d627b5d0be5858759217408e09aa4ef0 | [
"MIT"
] | 1 | 2018-06-06T12:11:25.000Z | 2018-06-06T12:11:25.000Z | modules/s3cfg.py | krypt0x/eden | 63679c36d627b5d0be5858759217408e09aa4ef0 | [
"MIT"
] | null | null | null | modules/s3cfg.py | krypt0x/eden | 63679c36d627b5d0be5858759217408e09aa4ef0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Deployment Settings
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Config",)
from collections import OrderedDict
from gluon import current, URL
from gluon.storage import Storage
from s3compat import basestring, INTEGER_TYPES
from s3theme import FORMSTYLES
class S3Config(Storage):
    """
        Deployment Settings Helper Class

        Attribute-style access is inherited from gluon Storage;
        templates set values on the sub-Storages created in __init__.
    """
    # Formats from static/scripts/ui/i18n converted to Python style
    # (jQuery UI datepicker locale code -> strftime/strptime pattern);
    # commented-out entries are locales without a format mapped here
    date_formats = {"af": "%d/%m/%Y",
                    "ar": "%d/%m/%Y",
                    "ar-dz": "%d/%m/%Y",
                    "az": "%d.%m.%Y",
                    "be": "%d.%m.%Y",
                    "bg": "%d.%m.%Y",
                    "bs": "%d.%m.%Y",
                    "ca": "%d/%m/%Y",
                    "cs": "%d.%m.%Y",
                    "cy-gb": "%d/%m/%Y",
                    "da": "%d-%m-%Y",
                    "de": "%d.%m.%Y",
                    #"dv": "",
                    "el": "%d/%m/%Y",
                    "eo": "%d/%m/%Y",
                    "es": "%d/%m/%Y",
                    "et": "%d.%m.%Y",
                    "eu": "%Y-%m-%d",
                    "fa": "%Y/%m/%d",
                    "fi": "%d.%m.%Y",
                    "fo": "%d-%m-%Y",
                    "fr": "%d/%m/%Y",
                    "fr-ca": "%Y-%m-%d",
                    "fr-ch": "%d.%m.%Y",
                    "gl": "%d/%m/%Y",
                    "he": "%d/%m/%Y",
                    "hi": "%d/%m/%Y",
                    "hr": "%d.%m.%Y",
                    "hu": "%Y.%m.%d.",
                    "hy": "%d.%m.%Y",
                    "id": "%d/%m/%Y",
                    "is": "%d.%m.%Y",
                    "it": "%d/%m/%Y",
                    "ja": "%Y/%m/%d",
                    "ka": "%d-%m-%Y",
                    "kk": "%d.%m.%Y",
                    "km": "%d-%m-%Y",
                    "ko": "%Y-%m-%d",
                    "ky": "%d.%m.%Y",
                    "lb": "%d.%m.%Y",
                    "lt": "%Y-%m-%d",
                    "lv": "%d.%m.%Y",
                    "mk": "%d.%m.%Y",
                    "ml": "%d/%m/%Y",
                    #"mn": "",
                    "ms": "%d/%m/%Y",
                    #"my": "",
                    "nb": "%d.%m.%Y",
                    "ne": "%d/%m/%Y",
                    "nl": "%d-%m-%Y",
                    "nl-be": "%d/%m/%Y",
                    "nn": "%d.%m.%Y",
                    "no": "%d.%m.%Y",
                    "pl": "%d.%m.%Y",
                    "prs": "%Y/%m/%d",
                    "ps": "%Y/%m/%d",
                    "pt": "%d/%m/%Y",
                    "pt-br": "%d/%m/%Y",
                    "rm": "%d/%m/%Y",
                    "ro": "%d.%m.%Y",
                    "ru": "%d.%m.%Y",
                    #"si": "",
                    "sk": "%d.%m.%Y",
                    "sl": "%d.%m.%Y",
                    "sq": "%d.%m.%Y",
                    "sr": "%d.%m.%Y",
                    "sr-sr": "%d.%m.%Y",
                    "sv": "%Y-%m-%d",
                    "ta": "%d/%m/%Y",
                    #"tet": "",
                    "th": "%d/%m/%Y",
                    "tj": "%d.%m.%Y",
                    #"tl": "",
                    "tr": "%d.%m.%Y",
                    "uk": "%d.%m.%Y",
                    #"ur": "",
                    "vi": "%d/%m/%Y",
                    "zh-cn": "%Y-%m-%d",
                    "zh-hk": "%Y-%m-%d",
                    "zh-tw": "%Y/%m/%d",
                    }
    # PDF fonts for each language
    # fontset format -> [normal-version, bold-version]
    # defaults to ["Helvetica", "Helvetica-Bold"] if not-specified here
    # Requires installation of appropriate font - e.g. using import_font in tasks.cfg
    # Unifont can be downloaded from http://unifoundry.com/unifont/index.html
    fonts = {"ar": ["unifont", "unifont"], # Note that this isn't an ideal font for Arabic as it doesn't support reshaping. We use arabic_reshaper to improve this.
             #"dv": ["unifont", "unifont"],
             #"dz": ["unifont", "unifont"],
             "km": ["unifont", "unifont"],
             "ko": ["unifont", "unifont"],
             "mn": ["unifont", "unifont"],
             "my": ["unifont", "unifont"],
             "ne": ["unifont", "unifont"],
             "pl": ["unifont", "unifont"],
             "prs": ["unifont", "unifont"],
             "ps": ["unifont", "unifont"],
             "th": ["unifont", "unifont"],
             "tr": ["unifont", "unifont"],
             "ur": ["unifont", "unifont"],
             "vi": ["unifont", "unifont"],
             "zh-cn": ["unifont", "unifont"],
             "zh-tw": ["unifont", "unifont"],
             }
    def __init__(self):
        """
            Initialise all per-module settings containers with empty
            Storages, plus the few list/dict defaults that templates
            are expected to append to rather than replace.
        """
        super(S3Config, self).__init__()
        self.asset = Storage()
        self.auth = Storage()
        self.auth.email_domains = []
        self.base = Storage()
        # Allow templates to append rather than replace
        self.base.prepopulate = ["default/base"]
        self.base.prepopulate_demo = ["default/users"]
        self.br = Storage()
        self.cap = Storage()
        self.cms = Storage()
        self.cr = Storage()
        self.custom = Storage()
        self.database = Storage()
        self.dc = Storage()
        self.deploy = Storage()
        self.disease = Storage()
        self.doc = Storage()
        self.dvr = Storage()
        self.edu = Storage()
        self.event = Storage()
        self.fin = Storage()
        # Allow templates to append rather than replace
        self.fin.currencies = {}
        self.fire = Storage()
        # @ToDo: Move to self.ui
        self.frontpage = Storage()
        self.frontpage.rss = []
        self.gis = Storage()
        # Allow templates to append rather than replace
        self.gis.countries = []
        self.hms = Storage()
        self.hrm = Storage()
        self.inv = Storage()
        self.irs = Storage()
        self.L10n = Storage()
        # Allow templates to append rather than replace
        self.L10n.languages = {"en": "English"}
        self.log = Storage()
        self.mail = Storage()
        self.member = Storage()
        self.mobile = Storage()
        self.msg = Storage()
        self.org = Storage()
        self.police = Storage()
        self.pr = Storage()
        self.proc = Storage()
        self.project = Storage()
        self.req = Storage()
        self.search = Storage()
        self.security = Storage()
        self.setup = Storage()
        # Allow templates to append rather than replace
        self.setup.wizard_questions = []
        self.supply = Storage()
        self.sync = Storage()
        self.tasks = Storage()
        self.transport = Storage()
        self.ui = Storage()
        self.vulnerability = Storage()
        self.xforms = Storage()
        # Lazy property caches (filled on first access)
        self._db_params = None
        self._debug = None
        self._lazy_unwrapped = []
        # Provide a minimal list of core modules
        self.modules = {"default": Storage(name_nice = "Home",
                                           ), # Default
                        "admin": Storage(name_nice = "Administration",
                                         ), # Admin
                        "gis": Storage(name_nice = "Map",
                                       ), # GIS
                        "pr": Storage(), # Person Registry
                        "org": Storage(name_nice = "Organizations",
                                       ), # Organization Registry
                        }
# -------------------------------------------------------------------------
@property
def db_params(self):
"""
Current database parameters, with defaults applied (lazy property)
returns: a dict with database parameters:
{type, host, port, database, username, password}
"""
parameters = self._db_params
if parameters is None:
db_type = self.get_database_type()
get_param = self.database.get
pool_size = get_param("pool_size", 30)
if db_type == "sqlite":
parameters = {}
else:
if db_type == "postgres":
default_port = "5432"
elif db_type == "mysql":
default_port = "3306"
else:
default_port = None
parameters = {"host": get_param("host", "localhost"),
"port": get_param("port", default_port),
"database": get_param("database", "sahana"),
"username": get_param("username", "sahana"),
"password": get_param("password", "password"),
"pool_size": pool_size,
}
parameters["type"] = db_type
self._db_params = parameters
return parameters
# -------------------------------------------------------------------------
# Debug
    def check_debug(self):
        """
            (Lazy) check debug mode and activate the respective settings

            Re-applies response.s3.debug and import change-tracking only
            when base.debug has changed since the last check; the
            ?debug URL variable can force debug for a single request.
        """
        debug = self._debug
        base_debug = bool(self.get_base_debug())
        # Modify settings only if self.base.debug has changed
        if debug is None or debug != base_debug:
            self._debug = base_debug
            # URL var can override the setting for this request only
            debug = base_debug or \
                    current.request.get_vars.get("debug", False)
            from gluon.custom_import import track_changes
            s3 = current.response.s3
            if debug:
                s3.debug = True
                track_changes(True)
            else:
                s3.debug = False
                track_changes(False)
# -------------------------------------------------------------------------
# Template
    def get_template(self):
        """
            Which deployment template to use for config.py, layouts.py, menus.py
            - can be a single name or a tuple/list of names

            http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates
        """
        return self.base.get("template", "default")
    def import_template(self, config="config"):
        """
            Import and invoke the template config (new module pattern). Allows
            to specify multiple templates like:

                settings.template = ("default", "locations.US")

            Configurations will be imported and executed in order of appearance

            @param config: name of the config-module
            @raises RuntimeError: if a named template cannot be imported
        """
        names = self.get_template()
        if not isinstance(names, (list, tuple)):
            names = [names]
        for name in names:
            package = "templates.%s" % name
            self.check_debug()
            template = None
            try:
                # Import the template
                template = getattr(__import__(package, fromlist=[config]), config)
            except ImportError:
                raise RuntimeError("Template not found: %s" % name)
            else:
                # Each template's config() applies its settings to self
                template.config(self)
        return self
# -------------------------------------------------------------------------
# Theme
#
def set_theme(self):
"""
Inspect base.theme_* settings and cache paths in response.s3
accordingly (this needs to be run only once, getters will then
use cached paths)
@returns: the theme name
"""
s3 = current.response.s3
path_to = "/".join
default = self.base.get("theme", "default")
theme = default.split(".")
theme_path = path_to(theme)
# The theme name
s3.theme = theme_name = theme[-1]
# Path under modules/templates/ for layouts (views, e.g. layout.html)
layouts = self.base.get("theme_layouts")
if layouts:
s3.theme_layouts = path_to(layouts.split("."))
else:
s3.theme_layouts = theme_path
# Path under static/themes/ for eden.min.css
styles = self.base.get("theme_styles")
if styles:
s3.theme_styles = path_to(styles.split("."))
else:
s3.theme_styles = theme_path
# Path under modules/templates/ for css.cfg
config = self.base.get("theme_config")
if config:
s3.theme_config = path_to(config.split("."))
else:
s3.theme_config = s3.theme_styles
# Path under static/themes/ for base styles (e.g. foundation/*.css)
base = self.base.get("theme_base")
if base:
s3.theme_base = path_to(base.split("."))
else:
s3.theme_base = s3.theme_styles
return theme_name
def get_theme(self):
"""
The location of the current theme, relative to modules/templates
and static/themes, respectively. Uses "." as path separator, e.g.:
settings.base.theme = "SAMBRO.AlertHub"
This is the default location of theme components, which can be
individually adjusted with theme_layouts, theme_styles and
theme_base settings if required.
"""
theme = current.response.s3.theme
if not theme:
theme = self.set_theme()
return theme
    # All theme-path getters below read the cache set by set_theme(),
    # triggering it lazily on first access
    def get_theme_layouts(self):
        """
            The location of the layouts for the current theme:
                - modules/templates/[theme_layouts]/layouts.py
                - modules/templates/[theme_layouts]/views
            => defaults to theme
        """
        layouts = current.response.s3.theme_layouts
        if not layouts:
            self.set_theme()
            layouts = current.response.s3.theme_layouts
        return layouts
    def get_theme_styles(self):
        """
            The location of the theme styles:
                - static/themes/[theme_styles]/eden.min.css
            => defaults to theme
        """
        styles = current.response.s3.theme_styles
        if not styles:
            self.set_theme()
            styles = current.response.s3.theme_styles
        return styles
    def get_theme_config(self):
        """
            The location of the theme CSS config:
                - modules/templates/[theme_config]/css.cfg
            => defaults to theme_styles
        """
        config = current.response.s3.theme_config
        if not config:
            self.set_theme()
            config = current.response.s3.theme_config
        return config
    def get_theme_base(self):
        """
            The location of the theme base styles (Foundation):
                - static/themes/[theme_base]/foundation
            => defaults to theme_styles
        """
        base = current.response.s3.theme_base
        if not base:
            self.set_theme()
            base = current.response.s3.theme_base
        return base
    def get_base_xtheme(self):
        """
            Whether there is a custom Ext theme or simply use the default xtheme-gray
            - specified as <themefolder>/xtheme-<filename>.css
        """
        return self.base.get("xtheme")
# -------------------------------------------------------------------------
# Customise Hooks
    def customise_controller(self, tablename, **attr):
        """
            Customise a Controller
            - runs before resource customisation
            - but prep runs after resource customisation

            Looks up a template-defined customise_<tablename>_controller
            callback; returns attr unchanged if none is configured.
        """
        customise = self.get("customise_%s_controller" % tablename)
        if customise:
            return customise(**attr)
        else:
            return attr
    def customise_home(self, module, alt_function):
        """
            Allow use of a Customised module Home page
            Fallback to cms_index if not configured
            Fallback to an alt_function if defined in the controller
        """
        customise = self.get("customise_%s_home" % module)
        if customise:
            return customise()
        else:
            return current.s3db.cms_index(module, alt_function=alt_function)
    def customise_resource(self, tablename):
        """
            Get customisation callback for a resource
            - runs after controller customisation
            - but runs before prep

            Returns the callback itself (or None), does not invoke it.
        """
        return self.get("customise_%s_resource" % tablename)
# -------------------------------------------------------------------------
    def has_module(self, module_name):
        """
            Whether a Module is enabled in the current template
        """
        return module_name in self.modules
    # -------------------------------------------------------------------------
    def get_facebook_pixel_id(self):
        """
            Facebook Pixel ID (None if not configured)
        """
        return self.base.get("facebook_pixel_id")
    # -------------------------------------------------------------------------
    def get_google_analytics_tracking_id(self):
        """
            Google Analytics Key (None if not configured)
        """
        return self.base.get("google_analytics_tracking_id")
    # -------------------------------------------------------------------------
    def get_youtube_id(self):
        """
            List of YouTube IDs for the /default/video page
        """
        return self.base.get("youtube_id", [])
    # -------------------------------------------------------------------------
    def is_cd_version(self):
        """
            Whether we're running from a non-writable CD
        """
        return self.base.get("cd_version", False)
    # -------------------------------------------------------------------------
    # Tasks
    # -------------------------------------------------------------------------
    def get_task(self, taskname):
        """
            Ability to define custom Tasks in the Template
            (returns the task callable, or None if undefined)
        """
        return self.tasks.get(taskname)
# -------------------------------------------------------------------------
# Authentication settings
    def get_auth_hmac_key(self):
        """
            Salt to encrypt passwords - normally randomised during 1st run
            (must be set per-deployment in 000_config.py)
        """
        return self.auth.get("hmac_key", "akeytochange")
    def get_auth_password_changes(self):
        """
            Are password changes allowed?
            - set to False if passwords are being managed externally (OpenID / SMTP / LDAP)
        """
        return self.auth.get("password_changes", True)
    def get_auth_password_retrieval(self):
        """
            Allow password retrieval?
        """
        return self.__lazy("auth", "password_retrieval", default=True)
    def get_auth_password_min_length(self):
        """
            To set the Minimum Password Length
        """
        return self.auth.get("password_min_length", int(4))
    def get_auth_gmail_domains(self):
        """ List of domains which can use GMail SMTP for Authentication """
        return self.auth.get("gmail_domains", [])
    def get_auth_office365_domains(self):
        """ List of domains which can use Office 365 SMTP for Authentication """
        return self.auth.get("office365_domains", [])
    def get_auth_google(self):
        """
            Read the Google OAuth settings
            - if configured, then it is assumed that Google Authentication
              is enabled

            @returns: {"id": ..., "secret": ...} or False
        """
        auth_get = self.auth.get
        client_id = auth_get("google_id", False)
        client_secret = auth_get("google_secret", False)
        if client_id and client_secret:
            return {"id": client_id, "secret": client_secret}
        else:
            return False
    def get_auth_humanitarian_id(self):
        """
            Read the Humanitarian.ID OAuth settings
            - if configured, then it is assumed that Humanitarian.ID
              Authentication is enabled

            @returns: {"id": ..., "secret": ...} or False
        """
        auth_get = self.auth.get
        client_id = auth_get("humanitarian_id_client_id", False)
        client_secret = auth_get("humanitarian_id_client_secret", False)
        if client_id and client_secret:
            return {"id": client_id, "secret": client_secret}
        else:
            return False
    def get_auth_openid(self):
        """ Use OpenID for Authentication """
        return self.auth.get("openid", False)
    def get_auth_openid_connect(self):
        """
            Use an OpenID Connect authentication service
            - must be configured with a dict like:
                {"auth_url": authURL,
                 "token_url": tokenURL,
                 "userinfo_url": userinfoURL,
                 "id": clientID,
                 "secret": clientSecret,
                 }

            @returns: the config dict only if all required keys are set,
                      otherwise None
        """
        required = ("auth_url", "token_url", "userinfo_url", "id", "secret")
        setting = self.auth.get("openid_connect")
        if setting and all(setting.get(k) for k in required):
            return setting
        else:
            return None
    def get_auth_add_role(self):
        """
            Custom Function to add a Role
            - called by S3RoleManager UI
            - useful for automatically adding subsidiary roles
        """
        return self.auth.get("add_role", None)
    def get_auth_remove_role(self):
        """
            Custom Function to remove a Role
            - called by S3RoleManager UI
            - useful for automatically removing subsidiary roles
        """
        return self.auth.get("remove_role", None)
    def get_auth_masterkey(self):
        """
            Allow authentication with master key (= a single key instead of
            username+password)
        """
        return self.auth.get("masterkey", False)
    def get_auth_masterkey_app_key(self):
        """
            App key for clients using master key authentication
            - a string (recommended length 32 chars, random pattern)
            - specific for the deployment (i.e. not template)
            - should be configured in 000_config.py (alongside hmac_key)
        """
        return self.auth.get("masterkey_app_key")
    def get_auth_masterkey_token_ttl(self):
        """
            The time-to-live for master key auth tokens in seconds
            - tokens must survive two request cycles incl. prep, so
              TTL shouldn't be too short with slow network/server
            - should be short enough to prevent unused tokens from
              lingering
        """
        return self.auth.get("masterkey_token_ttl", 600)
    def get_auth_masterkey_context(self):
        """
            Getter for master key context information
            - a JSON-serializable dict with context data, or
            - a function that takes a master key (Row) and returns such a dict

            NB the getter should not expose the master key itself
               in the context dict!
        """
        return self.auth.get("masterkey_context")
    def get_security_self_registration(self):
        """
            Whether Users can register themselves
            - False to disable self-registration
            - True to use the default registration page at default/user/register
            - "index" to use a custom registration page defined in private/templates/<template>/controllers.py
        """
        return self.security.get("self_registration", True)
    def get_security_registration_visible(self):
        """
            Whether the registration link is visible
            (requires self-registration to be enabled)
        """
        visible = self.get_security_self_registration() and \
                  self.security.get("registration_visible", True)
        return visible
    def get_security_version_info(self):
        """
            Whether to show version info on the about page
        """
        return self.security.get("version_info", True)
    def get_security_version_info_requires_login(self):
        """
            Whether the version info on the About page requires login
        """
        return self.security.get("version_info_requires_login", False)
    def get_auth_registration_requires_verification(self):
        """ Whether registrations must be verified via email link """
        return self.auth.get("registration_requires_verification", False)
    def get_auth_registration_requires_approval(self):
        """ Whether registrations require approval by an administrator """
        return self.auth.get("registration_requires_approval", False)
    def get_auth_registration_welcome_email(self):
        """
            Send a welcome-email to newly registered users
        """
        return self.auth.get("registration_welcome_email", True)
    def get_auth_always_notify_approver(self):
        """ Whether to notify the approver of every new registration """
        return self.auth.get("always_notify_approver", True)
    def get_auth_login_next(self):
        """
            Which page to go to after login
            - can be a callable
        """
        return self.auth.get("login_next", URL(c="default", f="index"))
    def get_auth_login_next_always(self):
        """
            Whether the login_next overrides the _next variable
        """
        return self.auth.get("login_next_always", False)
    def get_auth_show_link(self):
        """ Whether to show account-registration links """
        return self.auth.get("show_link", True)
    def get_auth_registration_link_user_to(self):
        """
            Link User accounts to none or more of:
                * staff
                * volunteer
                * member
        """
        return self.auth.get("registration_link_user_to")
    def get_auth_registration_link_user_to_default(self):
        """
            Link User accounts to none or more of:
                * staff
                * volunteer
                * member
            Should be an iterable.
        """
        return self.auth.get("registration_link_user_to_default")
    def get_auth_opt_in_team_list(self):
        """ Teams (Groups) offered for email opt-in during registration """
        return self.auth.get("opt_in_team_list", [])
    def get_auth_opt_in_to_email(self):
        """ Whether email opt-in is available (i.e. any teams configured) """
        return self.get_auth_opt_in_team_list() != []
    def get_auth_opt_in_default(self):
        """ Default state of the email opt-in checkbox """
        return self.auth.get("opt_in_default", False)
    def get_auth_registration_requests_home_phone(self):
        """ Have the registration form request a Home Phone number """
        return self.auth.get("registration_requests_home_phone", False)
    def get_auth_registration_requests_mobile_phone(self):
        """ Have the registration form request a Mobile Phone number """
        return self.auth.get("registration_requests_mobile_phone", False)
    def get_auth_registration_mobile_phone_mandatory(self):
        """ Make the selection of Mobile Phone Mandatory during registration """
        return self.auth.get("registration_mobile_phone_mandatory", False)
    def get_auth_registration_requests_organisation(self):
        """ Have the registration form request the Organisation """
        return self.auth.get("registration_requests_organisation", False)
    def get_auth_admin_sees_organisation(self):
        """ See Organisations in User Admin """
        return self.auth.get("admin_sees_organisation",
                             self.get_auth_registration_requests_organisation())
    def get_auth_registration_organisation_required(self):
        """ Make the selection of Organisation required during registration """
        return self.auth.get("registration_organisation_required", False)
    def get_auth_registration_organisation_link_create(self):
        """ Show a link to create new orgs in registration form """
        return self.auth.get("registration_organisation_link_create", True)
    def get_auth_registration_organisation_hidden(self):
        """ Hide the Organisation field in the registration form unless an email is entered which isn't whitelisted """
        return self.auth.get("registration_organisation_hidden", False)
    def get_auth_registration_organisation_default(self):
        """
            Default the Organisation during registration - will return the
            organisation_id

            The setting can be an id or an organisation name; a name is
            resolved to its record id - NB if no record with that name
            exists, one is created as a side effect.
        """
        organisation_id = self.__lazy("auth", "registration_organisation_default", default=None)
        if organisation_id:
            try:
                int(organisation_id)
            except (ValueError, TypeError):
                # Must be a Name
                table = current.s3db.org_organisation
                row = current.db(table.name == organisation_id).select(table.id,
                                                                      ).first()
                if row:
                    organisation_id = row.id
                else:
                    # Create the organisation record on-the-fly
                    organisation_id = table.insert(name = organisation_id)
        return organisation_id
    def get_auth_registration_requests_organisation_group(self):
        """ Have the registration form request the Organisation Group """
        return self.auth.get("registration_requests_organisation_group", False)
    def get_auth_registration_organisation_group_required(self):
        """ Make the selection of Organisation Group required during registration """
        return self.auth.get("registration_organisation_group_required", False)
    def get_auth_registration_requests_site(self):
        """ Have the registration form request the Site """
        return self.auth.get("registration_requests_site", False)
    def get_auth_registration_site_required(self):
        """ Make the selection of site required during registration """
        return self.auth.get("registration_site_required", False)
def get_auth_registration_requests_image(self):
""" Have the registration form request an Image """
return self.auth.get("registration_requests_image", False)
def get_auth_registration_pending(self):
""" Message someone gets when they register & they need approving """
T = current.T
message = self.auth.get("registration_pending")
if message:
return T(message)
approver = self.get_mail_approver()
if "@" in approver:
return T("Registration is still pending approval from Approver (%s) - please wait until confirmation received.") % \
approver
else:
return T("Registration is still pending approval from the system administrator - please wait until confirmation received.")
def get_auth_registration_pending_approval(self):
""" Message someone gets when they register & they need approving """
T = current.T
message = self.auth.get("registration_pending_approval")
if message:
return T(message)
approver = self.get_mail_approver()
if "@" in approver:
return T("Thank you for validating your email. Your user account is still pending for approval by the system administrator (%s). You will get a notification by email when your account is activated.") % \
approver
else:
return T("Thank you for validating your email. Your user account is still pending for approval by the system administrator. You will get a notification by email when your account is activated.")
def get_auth_registration_roles(self):
"""
A dictionary of realms, with lists of role UUIDs, to assign to
newly-registered users
Use key = 0 to have the roles not restricted to a realm
"""
return self.auth.get("registration_roles", [])
def get_auth_org_admin_to_first(self):
"""
Whether the first user to register for an Org should get the
ORG_ADMIN role for that Org
"""
return self.auth.get("org_admin_to_first", False)
def get_auth_terms_of_service(self):
"""
Force users to accept Terms of Service before Registering an account
- uses <template>/views/tos.html
"""
return self.auth.get("terms_of_service", False)
def get_auth_consent_tracking(self):
""" Expose options to track user consent """
return self.auth.get("consent_tracking", False)
def get_auth_consent_check(self):
"""
Ask for consent renewal upon login
- a function that returns a list of processing type codes for
which the user shall renew their consent after login
"""
return self.auth.get("consent_check", None)
def get_auth_registration_volunteer(self):
""" Redirect the newly-registered user to their volunteer details page """
return self.auth.get("registration_volunteer", False)
def get_auth_record_approval(self):
""" Use record approval (False by default) """
return self.auth.get("record_approval", False)
def get_auth_record_approval_required_for(self):
""" Which tables record approval is required for """
return self.auth.get("record_approval_required_for", [])
def get_auth_record_approval_manual(self):
""" Which tables record approval is not automatic for """
return self.auth.get("record_approval_manual", [])
def get_auth_realm_entity_types(self):
""" Which entity types to use as realm entities in role manager """
default = ("org_group",
"org_organisation",
"org_office",
"inv_warehouse",
"pr_group",
)
return self.__lazy("auth", "realm_entity_types", default=default)
def get_auth_privileged_roles(self):
"""
Roles a non-ADMIN user can only assign if they have
a certain required role themselves:
- a tuple|list of role UUIDs = user must have the roles
themselves in order to assign them
- a dict {assignable_role:required_role}
"""
return self.__lazy("auth", "privileged_roles", default=[])
def get_auth_realm_entity(self):
""" Hook to determine the owner entity of a record """
return self.auth.get("realm_entity")
def get_auth_person_realm_human_resource_site_then_org(self):
"""
Should we set pr_person.realm_entity to that of
hrm_human_resource.site_id$pe_id
or
hrm_human_resource.organisation_id$pe_id if 1st not set
"""
return self.auth.get("person_realm_human_resource_site_then_org", False)
def get_auth_person_realm_member_org(self):
"""
Sets pr_person.realm_entity to
organisation.pe_id of member_member
"""
return self.auth.get("person_realm_member_org", False)
def get_auth_entity_role_manager(self):
"""
Activate Entity Role Manager (=embedded Role Manager Tab for OrgAdmins)
"""
return self.auth.get("entity_role_manager", False)
def get_auth_role_modules(self):
"""
Which modules are included in the Role Manager
- to assign discrete permissions to via UI
"""
T = current.T
return self.auth.get("role_modules", OrderedDict([
("staff", T("Staff")),
("vol", T("Volunteers")),
("member", T("Members")),
("inv", T("Warehouses")),
("asset", T("Assets")),
("project", T("Projects")),
("survey", T("Assessments")),
("irs", T("Incidents"))
]))
def get_auth_access_levels(self):
"""
Access levels for the Role Manager UI
"""
T = current.T
return self.auth.get("access_levels", OrderedDict([
("reader", T("Reader")),
("data_entry", T("Data Entry")),
("editor", T("Editor")),
("super", T("Super Editor"))
]))
def get_auth_approve_user_message(self):
return self.auth.get("auth_approve_user_message", None)
def get_auth_set_presence_on_login(self):
return self.auth.get("set_presence_on_login", False)
def get_auth_ignore_levels_for_presence(self):
return self.auth.get("ignore_levels_for_presence", ("L0",))
def get_auth_create_unknown_locations(self):
return self.auth.get("create_unknown_locations", False)
def get_security_archive_not_delete(self):
return self.security.get("archive_not_delete", True)
def get_security_audit_read(self):
return self.security.get("audit_read", False)
def get_security_audit_write(self):
return self.security.get("audit_write", False)
def get_security_policy(self):
" Default is Simple Security Policy "
return self.security.get("policy", 1)
def get_security_strict_ownership(self):
"""
Ownership-rule for records without owner:
True = not owned by any user (strict ownership, default)
False = owned by any authenticated user
"""
return self.security.get("strict_ownership", True)
def get_security_map(self):
return self.security.get("map", False)
# -------------------------------------------------------------------------
# Base settings
def get_system_name(self):
"""
System Name - for the UI & Messaging
"""
return self.base.get("system_name", current.T("Sahana Eden Humanitarian Management Platform"))
def get_system_name_short(self):
"""
System Name (Short Version) - for the UI & Messaging
"""
return self.base.get("system_name_short", "Sahana")
def get_base_debug(self):
"""
Debug mode: Serve CSS/JS in separate uncompressed files
"""
return self.base.get("debug", False)
def get_base_allow_testing(self):
"""
Allow testing of Eden using EdenTest
"""
return self.base.get("allow_testing", True)
def get_base_migrate(self):
""" Whether to allow Web2Py to migrate the SQL database to the new structure """
return self.base.get("migrate", True)
def get_base_fake_migrate(self):
""" Whether to have Web2Py create the .table files to match the expected SQL database structure """
return self.base.get("fake_migrate", False)
def get_base_prepopulate(self):
""" Whether to prepopulate the database &, if so, which set of data to use for this """
return self.base.get("prepopulate", 1)
def get_base_prepopulate_demo(self):
"""For demo sites, which additional options to add to the list """
return self.base.get("prepopulate_demo", 0)
def get_base_guided_tour(self):
""" Whether the guided tours are enabled """
return self.base.get("guided_tour", self.has_module("tour"))
def get_base_public_url(self):
"""
The Public URL for the site - for use in email links, etc
"""
public_url = self.base.get("public_url")
if not public_url:
env = current.request.env
scheme = env.get("wsgi_url_scheme", "http").lower()
host = env.get("http_host") or "127.0.0.1:8000"
self.base.public_url = public_url = "%s://%s" % (scheme, host)
return public_url
def get_base_bigtable(self):
"""
Prefer scalability-optimized over small-table-optimized
strategies (where alternatives exist)
- resource/feature-specific overrides possible
"""
return self.base.get("bigtable", False)
def get_base_cdn(self):
"""
Should we use CDNs (Content Distribution Networks) to serve some common CSS/JS?
"""
return self.base.get("cdn", False)
def get_chat_server(self):
"""
Get the IP:port of the chat server if enabled or return False
"""
return self.base.get("chat_server", False)
def get_chatdb_string(self):
db_string = "%(type)s://%(user)s:%(pass)s@%(host)s:%(port)s/%(name)s"
chat_server = self.base.get("chat_server", False)
csget = chat_server.get
dbget = self.database.get
db_type = chat_server.get("server_db_type")
if db_type == "mysql":
default_port = 3306
elif db_type == "postgres":
default_port = 5432
else:
from gluon import HTTP
raise HTTP(501, body="Database type '%s' not recognised - please correct file models/000_config.py." % db_type)
db_params = {
"type": db_type,
"user": csget("server_db_username") or dbget("username", "sahana"),
"pass": csget("server_db_password") or dbget("password", "password"),
"host": csget("server_db_ip") or dbget("host", "localhost"),
"port": csget("server_db_port") or dbget("port", default_port),
"name": csget("server_db") or dbget("database", "openfiredb"),
}
return db_string % db_params
def get_base_session_db(self):
"""
Should we store sessions in the database to avoid locking sessions on long-running requests?
"""
# @ToDo: Set this as the default when running MySQL/PostgreSQL after more testing
result = self.base.get("session_db", False)
if result:
db_type = self.get_database_type()
if db_type == "sqlite":
# Never store the sessions in the DB if running SQLite
result = False
return result
def get_base_session_memcache(self):
"""
Should we store sessions in a Memcache service to allow sharing
between multiple instances?
"""
return self.base.get("session_memcache", False)
def get_base_solr_url(self):
"""
URL to connect to solr server
"""
return self.base.get("solr_url", False)
def get_xml_formats(self):
"""
Locations of custom export/import transformation stylesheets
- settings.base.xml_formats = {"<ext>": "<TMP>"}
=> modules/templates/<TMP>/formats/<ext>/<method>.xsl
"""
return self.base.get("xml_formats")
    def get_import_callback(self, tablename, callback):
        """
            Lookup callback to use for imports in the following order:
                - custom [create, update]_onxxxx
                - default [create, update]_onxxxx
                - custom onxxxx
                - default onxxxx
            NB: Currently only onaccept is actually used

            @param tablename: the name of the table being imported into
            @param callback: the callback name, e.g. "create_onaccept"
                             or the generic "onaccept"

            @return: the callback function to use, or None if none found
        """
        # Custom callbacks configured per-table in deployment settings
        callbacks = self.base.get("import_callbacks", [])
        if tablename in callbacks:
            callbacks = callbacks[tablename]
            if callback in callbacks:
                # 1) Custom callback for the specific event
                return callbacks[callback]
        # 2) Default callback for the specific event (table config)
        get_config = current.s3db.get_config
        default = get_config(tablename, callback)
        if default:
            return default
        if callback[:2] != "on":
            # Specific event name like "create_onaccept"/"update_onaccept":
            # strip the 7-character "create_"/"update_" prefix to retry
            # with the generic event name (e.g. "onaccept")
            callback = callback[7:]
            # NOTE(review): if tablename was not found above, "callbacks"
            # still refers to the top-level mapping here, so this
            # membership test compares against table names - confirm
            # whether that is intentional
            if callback in callbacks:
                # 3) Custom callback for the generic event
                return callbacks[callback]
            # 4) Default callback for the generic event (table config)
            default = get_config(tablename, callback)
            if default:
                return default
        # No callback configured
        return None
# -------------------------------------------------------------------------
# Logger settings
def get_log_level(self):
"""
Minimum severity level for logger: "DEBUG", "INFO", "WARNING",
"ERROR", "CRITICAL". None = turn off logging
"""
return "DEBUG" if self.base.get("debug") \
else self.log.get("level")
def get_log_console(self):
"""
True to enable console logging (sys.stderr)
"""
return self.log.get("console", True)
def get_log_logfile(self):
"""
Log file name, None to turn off log file output
"""
return self.log.get("logfile")
def get_log_caller_info(self):
"""
True to enable detailed caller info in log (filename,
line number, function name), useful for diagnostics
"""
return self.log.get("caller_info", False)
# -------------------------------------------------------------------------
# Database settings
#
def get_database_type(self):
"""
Get the database type
"""
return self.database.get("db_type", "sqlite").lower()
def get_database_string(self):
"""
Database string and pool-size for PyDAL (models/00_db.py)
@return: tuple (db_type, db_string, pool_size)
"""
parameters = self.db_params
db_type = parameters["type"]
if db_type == "sqlite":
db_string = "sqlite://storage.db"
elif db_type in ("mysql", "postgres"):
db_string = "%(type)s://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s" % \
parameters
else:
from gluon import HTTP
raise HTTP(501, body="Database type '%s' not recognised - please correct file models/000_config.py." % db_type)
return (db_type, db_string, self.database.get("pool_size", 30))
def get_database_airegex(self):
"""
Whether to instead of LIKE use REGEXP with groups of diacritic
alternatives of characters to enforce accent-insensitive matches
in text search (for SQLite and PostgreSQL, neither of which
applies collation rules in LIKE)
@note: MySQL's REGEXP implementation is not multibyte-safe,
so AIRegex is ignored for MySQL.
@note: However, MYSQL's LIKE applies collation rules, so
accent-insensitivity can be achieved by settings a
suitable default database collation with:
ALTER DATABASE <dbname> DEFAULT COLLATE <collname>
Caution: this will trigger a rebuilt of all indices, so
on a populated production database this could
take quite a long time (but is needed only once)!
@note: SQLite fails on Windows Python 2.7.10 with current PyDAL
(PR coming for PyDAL)
@note: AIRegex is much less scalable than normal LIKE or even
ILIKE, enable/disable on a case-by-case basis in case
of performance issues (which is also why this is a lazy
setting), or consider switching to MySQL altogether
"""
if self.get_database_type() != "mysql":
airegex = self.__lazy("database", "airegex", False)
else:
airegex = False
return airegex
# -------------------------------------------------------------------------
# Finance settings
def get_fin_currency_writable(self):
"""
Can the user select a Currency?
"""
return self.fin.get("currency_writable", True)
def get_fin_currencies(self):
"""
Which Currencies can the user select?
"""
currencies = self.__lazy("fin", "currencies", {})
if currencies == {}:
currencies = {
"EUR": "Euros",
"GBP": "Great British Pounds",
"USD": "United States Dollars",
}
return currencies
def get_fin_currency_default(self):
"""
What is the default Currency?
"""
return self.__lazy("fin", "currency_default", default="USD")
def get_fin_voucher_personalize(self):
"""
Bearer identification feature to use for vouchers
- dob => bearer date of birth
- pin => PIN code
"""
return self.fin.get("voucher_personalize")
def get_fin_voucher_eligibility_types(self):
"""
Enable UI to manage eligibility types in voucher programs
"""
return self.fin.get("voucher_eligibility_types")
def get_fin_voucher_invoice_status_labels(self):
"""
Customise labels for invoice statuses
- dict {status: label}
- NEW, PAID, REJECTED are mandatory, can only change labels
- VERIFIED and APPROVED are optional, can be set to None to
disable completely
"""
return self.fin.get("voucher_invoice_status_labels")
def get_fin_voucher_claim_paid_label(self):
"""
Custom label for claim PAID-Status
"""
return self.fin.get("voucher_claim_paid_label", "Paid")
# -------------------------------------------------------------------------
# GIS (Map) Settings
#
def get_gis_api_bing(self):
""" API key for Bing """
return self.gis.get("api_bing")
def get_gis_api_getaddress(self):
"""
API key for GetAddress.io
"""
return self.gis.get("api_getaddress")
def get_gis_api_google(self):
"""
API key for Google Maps
"""
return self.gis.get("api_google", "")
def get_gis_api_openweathermap(self):
"""
API key for Open Weather Map
"""
return self.gis.get("api_openweathermap", "")
def get_gis_bbox_min_size(self):
"""
Minimum size for BBOX around Features on Map
- so that there is always some Map around a Point
Value is in degrees
"""
return self.gis.get("bbox_min_size", 0.05)
def get_gis_bbox_inset(self):
"""
BBOX inset around Features on Map
- so that ones on the edge don't get cut-off
"""
return self.gis.get("bbox_inset", 0.007)
def get_gis_building_name(self):
"""
Display Building Name when selecting Locations
"""
return self.gis.get("building_name", True)
def get_gis_check_within_parent_boundaries(self):
"""
Whether location Lat/Lons should be within the boundaries of the parent
"""
return self.gis.get("check_within_parent_boundaries", True)
def get_gis_cluster_fill(self):
"""
Fill for Clustered points on Map, else default
"""
return self.gis.get("cluster_fill")
def get_gis_cluster_label(self):
"""
Label Clustered points on Map?
"""
return self.gis.get("cluster_label", True)
def get_gis_cluster_stroke(self):
"""
Stroke for Clustered points on Map, else default
"""
return self.gis.get("cluster_stroke")
def get_gis_select_fill(self):
"""
Fill for Selected points on Map, else default
"""
return self.gis.get("select_fill")
def get_gis_select_stroke(self):
"""
Stroke for Selected points on Map, else default
"""
return self.gis.get("select_stroke")
def get_gis_clear_layers(self):
"""
Display Clear Layers Tool
- defaults to being above Map's Layer Tree, but can also be set to "toolbar"
"""
return self.gis.get("clear_layers", False)
def get_gis_config_screenshot(self):
"""
Should GIS configs save a screenshot when saved?
- set the size if True: (width, height)
"""
return self.gis.get("config_screenshot")
def get_gis_countries(self):
"""
Which ISO2 country codes should be accessible to the location selector?
"""
return self.gis.get("countries", [])
def get_gis_display_l0(self):
return self.gis.get("display_L0", False)
def get_gis_display_l1(self):
return self.gis.get("display_L1", True)
def get_gis_duplicate_features(self):
"""
Display duplicate features either side of the International date line?
"""
return self.gis.get("duplicate_features", False)
def get_gis_edit_group(self):
"""
Edit Location Groups
"""
return self.gis.get("edit_GR", False)
def get_gis_geocode_service(self):
"""
Which Geocoder Service should be used?
Supported options:
"nominatim" (default)
"geonames"
"google"
"""
return self.gis.get("geocode_service", "nominatim")
def get_gis_geocode_imported_addresses(self):
"""
Should Addresses imported from CSV be passed to a
Geocoder to try and automate Lat/Lon?
"""
return self.gis.get("geocode_imported_addresses", False)
def get_gis_ignore_geocode_errors(self):
"""
Whether failure to geocode imported addresses shall
lead to a validation error
"""
return self.gis.get("ignore_geocode_errors", False)
def get_gis_geolocate_control(self):
"""
Whether the map should have a Geolocate control
- also requires the presence of a Toolbar
"""
return self.gis.get("geolocate_control", True)
def get_gis_geonames_username(self):
"""
Username for the GeoNames search box
"""
return self.gis.get("geonames_username")
def get_gis_geoserver_url(self):
return self.gis.get("geoserver_url", "")
def get_gis_geoserver_username(self):
return self.gis.get("geoserver_username", "admin")
def get_gis_geoserver_password(self):
return self.gis.get("geoserver_password", "")
def get_gis_getfeature_control(self):
"""
Whether the map should have a WMS GetFeatureInfo control
- also requires the presence of a Toolbar and queryable WMS layers
"""
return self.gis.get("getfeature_control", True)
def get_gis_latlon_selector(self):
"""
Display Lat/Lon form fields when selecting Locations
"""
return self.gis.get("latlon_selector", False)
def get_gis_layer_metadata(self):
"""
Use CMS to provide Metadata on Map Layers
"""
return self.has_module("cms") and self.gis.get("layer_metadata", False)
def get_gis_layer_properties(self):
"""
Display Layer Properties Tool above Map's Layer Tree
"""
return self.gis.get("layer_properties", True)
def get_gis_layer_tree_base(self):
" Display Base Layers folder in the Map's Layer Tree "
return self.gis.get("layer_tree_base", True)
def get_gis_layer_tree_overlays(self):
" Display Overlays folder in the Map's Layer Tree "
return self.gis.get("layer_tree_overlays", True)
def get_gis_layer_tree_expanded(self):
" Display folders in the Map's Layer Tree Open by default "
return self.gis.get("layer_tree_expanded", True)
def get_gis_layer_tree_radio(self):
" Use a radio button for custom folders in the Map's Layer Tree "
return self.gis.get("layer_tree_radio", False)
def get_gis_layers_label(self):
" Label for the Map's Layer Tree "
return self.gis.get("layers_label", "Layers")
def get_gis_location_filter_bigtable_lookups(self):
"""
Location filter to use scalability-optimized option lookups
- can be overridden by filter widget option (bigtable)
- defaults to base.bigtable
"""
setting = self.gis.get("location_filter_bigtable_lookups")
return setting if setting is not None else self.get_base_bigtable()
def get_gis_location_represent_address_only(self):
"""
Never use LatLon for Location Represents
"""
return self.gis.get("location_represent_address_only", False)
def get_gis_map_height(self):
"""
Height of the Embedded Map
Change this if-required for your theme
NB API can override this in specific modules
"""
return self.gis.get("map_height", 600)
def get_gis_map_width(self):
"""
Width of the Embedded Map
Change this if-required for your theme
NB API can override this in specific modules
"""
return self.gis.get("map_width", 1000)
def get_gis_map_selector(self):
" Display a Map-based tool to select Locations "
return self.gis.get("map_selector", True)
def get_gis_map_selector_height(self):
""" Height of the map selector map """
return self.gis.get("map_selector_height", 340)
def get_gis_map_selector_width(self):
""" Width of the map selector map """
return self.gis.get("map_selector_width", 480)
def get_gis_marker_max_height(self):
return self.gis.get("marker_max_height", 35)
def get_gis_marker_max_width(self):
return self.gis.get("marker_max_width", 30)
def get_gis_max_features(self):
"""
The maximum number of features to return in a Map Layer
- more than this will prompt the user to zoom in to load the layer
Lower this number to get extra performance from an overloaded server.
"""
return self.gis.get("max_features", 2000)
def get_gis_legend(self):
"""
Should we display a Legend on the Map?
- set to True to show a GeoExt Legend (default)
- set to False to not show a Legend
- set to "float" to use a floating DIV
"""
return self.gis.get("legend", True)
def get_gis_menu(self):
"""
Should we display a menu of GIS configurations?
- set to False to not show the menu (default)
- set to the label to use for the menu to enable it
e.g. T("Events") or T("Regions")
"""
return self.gis.get("menu", False)
def get_gis_mouse_position(self):
"""
What style of Coordinates for the current Mouse Position
should be shown on the Map?
'normal', 'mgrs' or False
"""
return self.gis.get("mouse_position", "normal")
def get_gis_nav_controls(self):
"""
Should the Map Toolbar display Navigation Controls?
"""
return self.gis.get("nav_controls", False)
def get_gis_label_overlays(self):
"""
Label for the Map Overlays in the Layer Tree
"""
return self.gis.get("label_overlays", "Overlays")
def get_gis_overview(self):
"""
Should the Map display an Overview Map?
"""
return self.gis.get("overview", True)
def get_gis_permalink(self):
"""
Should the Map display a Permalink control?
"""
return self.gis.get("permalink", True)
def get_gis_poi_create_resources(self):
"""
List of resources which can be directly added to the main map.
Includes the type (point, line or polygon) and where they are to be
accessed from (button, menu or popup)
Defaults to the generic 'gis_poi' resource as a point from a button
@ToDo: Complete the button vs menu vs popup
@ToDo: S3PoIWidget() to allow other resources to pickup the passed Lat/Lon/WKT
"""
T = current.T
return self.gis.get("poi_create_resources",
[{"c": "gis", # Controller
"f": "poi", # Function
"table": "gis_poi", # For permissions check
# Default:
#"type": "point", # Feature Type: point, line or polygon
"label": T("Add PoI"), # Label
#"tooltip": T("Add PoI"), # Tooltip
"layer": "PoIs", # Layer Name to refresh
"location": "button", # Location to access from
},
]
)
def get_gis_poi_export_resources(self):
"""
List of resources (tablenames) to import/export as PoIs from Admin Locations
- KML & OpenStreetMap formats
"""
return self.gis.get("poi_export_resources",
["cr_shelter", "hms_hospital", "org_office"])
def get_gis_postcode_selector(self):
"""
Display Postcode form field when selecting Locations
"""
return self.__lazy("gis", "postcode_selector", default=True)
def get_gis_postcode_to_address(self):
"""
Service to use for Postcode to Address lookups in LocationSelector
Supported Options:
* getaddress (GetAddress.io)
"""
return self.__lazy("gis", "postcode_to_address", default=None)
def get_gis_print(self):
"""
Should the Map display a Print control?
NB Requires installation of additional components:
http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
"""
return self.gis.get("print_button", False)
#def get_gis_print_service(self):
# """
# URL for an external Print Service (based on the MapFish plugin for GeoServer)
# http://eden.sahanafoundation.org/wiki/BluePrint/GIS/Printing
# """
# return self.gis.get("print_service", "")
def get_gis_save(self):
"""
Should the main Map display a Save control?
If there is a Toolbar, then this defaults to being inside the Toolbar, otherwise floating.
If you wish it to float even when there is a toolbar, then specify "float"
"""
return self.gis.get("save", True)
def get_gis_scaleline(self):
"""
Should the Map display a ScaleLine control?
"""
return self.gis.get("scaleline", True)
def get_gis_search_geonames(self):
"""
Whether the GeoNames search box should be visible on the map
"""
return self.gis.get("search_geonames", True)
def get_gis_simplify_tolerance(self):
"""
Default Tolerance for the Simplification of Polygons
- a lower value means less simplification, which is suitable for higher-resolution local activities
- a higher value is suitable for global views
- set to 0 to disable
"""
return self.gis.get("simplify_tolerance", 0.01)
def get_gis_precision(self):
"""
Number of Decimal places to put in output
Increase this to 5 for highly-zoomed maps showing buildings
"""
return self.gis.get("precision", 4)
def get_gis_spatialdb(self):
"""
Does the database have Spatial extensions?
"""
db_type = self.get_database_type()
if db_type != "postgres":
# Only Postgres supported currently
return False
else:
return self.gis.get("spatialdb", False)
def get_gis_widget_catalogue_layers(self):
"""
Should Map Widgets display Catalogue Layers?
- e.g. Profile & Summary pages
"""
return self.gis.get("widget_catalogue_layers", False)
def get_gis_widget_wms_browser(self):
"""
Should Map Widgets display a WMS Browser?
- e.g. Profile & Summary pages
NB This also requires the active gis_config to have one configured
"""
return self.gis.get("widget_wms_browser", False)
def get_gis_toolbar(self):
"""
Should the main Map display a Toolbar?
"""
return self.gis.get("toolbar", True)
def get_gis_zoomcontrol(self):
"""
Should the Map display a Zoom control?
"""
return self.gis.get("zoomcontrol", True)
def get_gis_lookup_code(self):
"""
Should the gis_location deduplication try codes as well as names?
- if-desired, set to the Key of a Key/Value pair (e.g. "PCode")
"""
return self.gis.get("lookup_code", False)
def get_gis_popup_location_link(self):
"""
Whether a Pop-up Window should open on clicking
Location represent links
- Default: Map opens in a div
"""
return self.gis.get("popup_location_link", False)
def get_gis_xml_wkt(self):
"""
Whether XML exports should include the bulky WKT
"""
return self.gis.get("xml_wkt", False)
# -------------------------------------------------------------------------
# L10N Settings
def get_L10n_default_language(self):
return self.L10n.get("default_language", "en")
def get_L10n_display_toolbar(self):
return self.L10n.get("display_toolbar", True)
def get_L10n_extra_codes(self):
"""
Extra codes for IS_ISO639_2_LANGUAGE_CODE
e.g. CAP needs to add "en-US"
"""
return self.L10n.get("extra_codes", None)
def get_L10n_languages(self):
return self.L10n.get("languages")
def get_L10n_languages_readonly(self):
return self.L10n.get("languages_readonly", True)
def get_L10n_religions(self):
"""
Religions used in Person Registry
@ToDo: find a better code
http://eden.sahanafoundation.org/ticket/594
"""
T = current.T
return self.L10n.get("religions", {"none": T("none"),
"christian": T("Christian"),
"muslim": T("Muslim"),
"jewish": T("Jewish"),
"buddhist": T("Buddhist"),
"hindu": T("Hindu"),
"bahai": T("Bahai"),
"other": T("other")
})
def get_L10n_ethnicity(self):
"""
Ethnicities used in Person Registry
- defaults to free-text
- dropdown options can be configured in Template (e.g. Locale)
"""
return self.L10n.get("ethnicity", None)
def get_L10n_date_format(self):
"""
Lookup the Date Format - either by locale or by global setting
"""
language = current.session.s3.language
if language in self.date_formats:
return self.date_formats.get(language)
else:
return self.L10n.get("date_format", "%Y-%m-%d")
def get_L10n_time_format(self):
return self.L10n.get("time_format", "%H:%M")
def get_L10n_datetime_separator(self):
return self.L10n.get("datetime_separator", " ")
def get_L10n_datetime_format(self):
return "%s%s%s" % (self.get_L10n_date_format(),
self.get_L10n_datetime_separator(),
self.get_L10n_time_format()
)
def get_L10n_timezone(self):
"""
The default timezone for datetime representation in the UI,
fallback if the client timezone or UTC offset can not be
determined (e.g. user not logged in, or not browser-based)
* A list of available timezones can be viewed at:
https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
or retrieved with:
import os, tarfile, dateutil.zoneinfo
path = os.path.abspath(os.path.dirname(dateutil.zoneinfo.__file__))
zonesfile = tarfile.TarFile.open(os.path.join(path, 'dateutil-zoneinfo.tar.gz'))
zonenames = zonesfile.getnames()
"""
return self.__lazy("L10n", "timezone")
def get_L10n_firstDOW(self):
"""
First day of the week (overrides calendar default)
0 = Sunday, 1 = Monday, ..., 6 = Saturday
None = use the calendar's default
"""
return self.L10n.get("firstDOW", None)
def get_L10n_calendar(self):
"""
Which calendar to use (lazy setting)
Currently supported calendars:
- "Gregorian"
"""
return self.__lazy("L10n", "calendar", None)
def get_L10n_lat_lon_format(self):
"""
This is used to format latitude and longitude fields when they are
displayed by eden. The format string may include the following
placeholders:
- %d -- Degress (integer)
- %m -- Minutes (integer)
- %s -- Seconds (double)
- %f -- Degrees in decimal (double)
"""
return self.L10n.get("lat_lon_display_format", "%f")
def get_L10n_default_country_code(self):
""" Default Telephone Country Code """
return self.L10n.get("default_country_code", 1)
def get_L10n_mandatory_lastname(self):
return self.__lazy("L10n", "mandatory_lastname", False)
def get_L10n_mandatory_middlename(self):
"""
e.g. Apellido Paterno in Hispanic names
Setting this means that auth_user.last_name matches with pr_person.middle_name
e.g. RMSAmericas
"""
return self.__lazy("L10n", "mandatory_middlename", False)
def get_L10n_decimal_separator(self):
"""
What should the decimal separator be in formatted numbers?
- falls back to ISO standard of a comma
"""
return self.L10n.get("decimal_separator", ",")
def get_L10n_thousands_separator(self):
"""
What should the thousands separator be in formatted numbers?
- falls back to ISO standard of a space
"""
return self.L10n.get("thousands_separator", " ")
def get_L10n_thousands_grouping(self):
return self.L10n.get("thousands_grouping", 3)
def get_L10n_translate_cms_series(self):
"""
Whether to translate CMS Series names
"""
return self.L10n.get("translate_cms_series", False)
def get_L10n_translate_gis_layer(self):
"""
Whether to translate Layer names
"""
return self.L10n.get("translate_gis_layer", False)
def get_L10n_translate_gis_location(self):
"""
Whether to translate Location names
"""
return self.L10n.get("translate_gis_location", False)
def get_L10n_name_alt_gis_location(self):
"""
Whether to use Alternate Location names
"""
return self.L10n.get("name_alt_gis_location", False)
def get_L10n_translate_org_organisation(self):
"""
Whether to translate Organisation names/acronyms
"""
return self.L10n.get("translate_org_organisation", False)
def get_L10n_translate_org_site(self):
"""
Whether to translate Site names
"""
return self.L10n.get("translate_org_site", False)
def get_L10n_translate_cap_area(self):
"""
Whether to translate CAP Area names
"""
return self.L10n.get("translate_cap_area", False)
def get_L10n_pootle_url(self):
""" URL for Pootle server """
return self.L10n.get("pootle_url", "http://pootle.sahanafoundation.org/")
def get_L10n_pootle_username(self):
""" Username for Pootle server """
return self.L10n.get("pootle_username", False)
def get_L10n_pootle_password(self):
""" Password for Pootle server """
return self.L10n.get("pootle_password", False)
# -------------------------------------------------------------------------
# PDF settings
#
def get_pdf_size(self):
"""
PDF default page size
* "A4"
* "Letter"
* or a tuple (width, height) in points (1 inch = 72 points)
=> pre-defined tuples in reportlab.lib.pagesizes
"""
return self.base.get("pdf_size", "A4")
def get_pdf_orientation(self):
"""
PDF default page orientation
* Auto (Portrait if possible, Landscape for wide tables)
* Portrait
* Landscape
"""
return self.base.get("pdf_orientation", "Auto")
def get_pdf_bidi(self):
"""
Enable BiDi support for PDF exports
- without this RTL text will be LTR
- default off to enhance performance
"""
return self.__lazy("L10n", "pdf_bidi", False)
def get_pdf_logo(self):
return self.ui.get("pdf_logo")
def get_pdf_export_font(self):
language = current.session.s3.language
return self.__lazy("L10n", "pdf_export_font", self.fonts.get(language))
    def get_pdf_excluded_fields(self, resourcename):
        """
            Components to exclude from PDF exports of a resource

            @param resourcename: the resource name (tablename)
            @return: list of component names to exclude

            NOTE(review): the previous docstring read "Optical Character
            Recognition (OCR)" — the built-in defaults below look like
            legacy OCR-form exclusions; confirm intent before extending
        """
        excluded_fields = self.pdf.get("excluded_fields")
        if excluded_fields is None:
            # Built-in defaults when not configured by the template
            excluded_fields = {"hms_hospital": ["hrm_human_resource",
                                                ],
                               "pr_group": ["pr_group_membership",
                                            ],
                               }
        return excluded_fields.get(resourcename, [])
def get_pdf_max_rows(self):
"""
Maximum number of records in a single PDF table/list export
- None for unlimited
"""
return self.base.get("pdf_max_rows", 1000)
# -------------------------------------------------------------------------
# XLS Export Settings
#
def get_xls_title_row(self):
"""
Include a title row in XLS Exports
- default=False to allow easy post-export column sorting
- uses the "title_list" CRUD string + export date/time
- standard title can be overridden in exporter call
"""
return self.base.get("xls_title_row", False)
# -------------------------------------------------------------------------
# UI Settings
#
@classmethod
def _get_formstyle(cls, setting):
""" Helper function to identify a formstyle """
if callable(setting):
# A custom formstyle defined in the template
formstyle = setting
if setting in FORMSTYLES:
# One of the standard supported formstyles
formstyle = FORMSTYLES[setting]
else:
# A default web2py formstyle
formstyle = setting
return formstyle
def get_ui_formstyle(self):
""" Get the current form style """
setting = self.ui.get("formstyle", "default")
return self._get_formstyle(setting)
def get_ui_formstyle_read(self):
""" Get the current form style for read views """
setting = self.ui.get("formstyle_read")
if setting is not None:
formstyle = self._get_formstyle(setting)
else:
# Fall back to default formstyle
formstyle = self.get_ui_formstyle()
return formstyle
def get_ui_filter_formstyle(self):
""" Get the current filter form style """
setting = self.ui.get("filter_formstyle", "default_inline")
return self._get_formstyle(setting)
def get_ui_report_formstyle(self):
""" Get the current report form style """
setting = self.ui.get("report_formstyle")
return self._get_formstyle(setting)
def get_ui_inline_formstyle(self):
""" Get the _inline formstyle for the current formstyle """
setting = self.ui.get("formstyle", "default")
if isinstance(setting, basestring):
# Try to find the corresponding _inline formstyle
inline_formstyle_name = "%s_inline" % setting
formstyle = FORMSTYLES.get(inline_formstyle_name)
else:
formstyle = None
if formstyle is None:
# Fall back to default formstyle
formstyle = self._get_formstyle(setting)
return formstyle
def get_ui_datatables_dom(self):
"""
DOM layout for dataTables:
https://datatables.net/reference/option/dom
"""
return self.ui.get("datatables_dom", "fril<'dataTable_table't>pi")
def get_ui_datatables_initComplete(self):
"""
Callback for dataTables
- allows moving objects such as data_exports
"""
return self.ui.get("datatables_initComplete")
def get_ui_datatables_pagingType(self):
"""
The style of Paging used by dataTables:
https://datatables.net/reference/option/pagingType
"""
return self.ui.get("datatables_pagingType", "full_numbers")
def get_ui_datatables_responsive(self):
""" Make data tables responsive (auto-collapsing columns when too wide) """
return self.ui.get("datatables_responsive", True)
def get_ui_datatables_double_scroll(self):
""" Render double scroll bars (top+bottom) for non-responsive data tables """
return self.ui.get("datatables_double_scroll", False)
def get_ui_auto_open_update(self):
"""
Render "Open" action buttons in datatables without explicit
CRUD-method => this allows automatic per-record decision
whether to open as update- or read-form based on permissions,
e.g. if the user doesn't have permission to update for all
records in the datatable due to oACL or realm-restriction
"""
return self.ui.get("auto_open_update", False)
def get_ui_open_read_first(self):
"""
Render "Open" action buttons with explicit "read" method
irrespective permissions (i.e. always, even if the user
were permitted to edit records)
"""
return self.ui.get("open_read_first", False)
def get_ui_default_cancel_button(self):
"""
Whether to show a default cancel button in standalone
create/update forms
"""
return self.ui.get("default_cancel_button", False)
def get_ui_filter_clear(self):
"""
Whether to show a clear button in default FilterForms
- and allows possibility to relabel &/or add a class
"""
return self.ui.get("filter_clear", True)
def get_ui_icons(self):
"""
Standard icon set, one of:
- "font-awesome"
- "foundation"
- "font-awesome3"
"""
return self.ui.get("icons", "font-awesome")
def get_ui_custom_icons(self):
"""
Custom icon CSS classes, a dict {abstract name: CSS class},
can be used to partially override standard icons
"""
return self.ui.get("custom_icons")
def get_ui_icon_layout(self):
"""
Callable to render icon HTML, which takes an ICON instance
as parameter and returns valid XML as string
"""
return self.ui.get("icon_layout")
def get_ui_calendar_clear_icon(self):
"""
Render clear-button for calendar inputs just as an icon
(S3CalendarWidget, requires Foundation + font-awesome)
"""
return self.ui.get("calendar_clear_icon", False)
# -------------------------------------------------------------------------
def get_ui_auto_keyvalue(self):
"""
Should crud_form & list_fields automatically display all Keys in KeyValue tables?
- can be set to False, True or a list of tablenames for which it is True
"""
return self.ui.get("auto_keyvalue", False)
def get_ui_auth_user_represent(self):
"""
Should the auth_user created_by/modified_by be represented by Name or Email?
- defaults to email
"""
return self.ui.get("auth_user_represent", "email")
def get_ui_confirm(self):
"""
For Delete actions
Workaround for this Bug in Selenium with FF4:
http://code.google.com/p/selenium/issues/detail?id=1604
"""
return self.ui.get("confirm", True)
def get_ui_export_formats(self):
"""
Which export formats should we display?
- specify a list of export formats to restrict/override
- each list item can be
* a string with the format extension
* a tuple (extension, css-class[, onhover-title])
"""
return self.ui.get("export_formats",
("cap", "have", "kml", "map", "pdf", "rss", "xls", "xml"))
def get_ui_hide_report_filter_options(self):
"""
Show report filter options form by default
"""
return self.ui.get("hide_report_filter_options", False)
def get_ui_hide_report_options(self):
"""
Hide report options form by default
"""
return self.ui.get("hide_report_options", True)
def get_ui_iframe_opens_full(self):
"""
Open links in IFrames should open a full page in a new tab
"""
return self.ui.get("iframe_opens_full", False)
def get_ui_interim_save(self):
"""
Render interim-save button in CRUD forms by default
"""
return self.ui.get("interim_save", False)
def get_ui_label_attachments(self):
"""
Label for attachments tab
"""
return current.T(self.ui.get("label_attachments", "Attachments"))
def get_ui_label_camp(self):
""" 'Camp' instead of 'Shelter'? """
return self.ui.get("camp", False)
def get_ui_label_cluster(self):
""" UN-style deployment? """
return self.ui.get("cluster", False)
def get_ui_label_locationselector_map_point_add(self):
"""
Label for the Location Selector button to add a Point to the Map
e.g. 'Place on Map'
"""
return current.T(self.ui.get("label_locationselector_map_point_add", "Place on Map"))
def get_ui_label_locationselector_map_point_view(self):
"""
Label for the Location Selector button to view a Point on the Map
e.g. 'View on Map'
"""
return current.T(self.ui.get("label_locationselector_map_point_view", "View on Map"))
def get_ui_label_locationselector_map_polygon_add(self):
"""
Label for the Location Selector button to draw a Polygon on the Map
e.g. 'Draw on Map'
"""
return current.T(self.ui.get("label_locationselector_map_polygon_add", "Draw on Map"))
def get_ui_label_locationselector_map_polygon_view(self):
"""
Label for the Location Selector button to view a Polygon on the Map
e.g. 'View on Map'
"""
return current.T(self.ui.get("label_locationselector_map_polygon_view", "View on Map"))
def get_ui_label_mobile_phone(self):
"""
Label for the Mobile Phone field
e.g. 'Cell Phone'
"""
return current.T(self.ui.get("label_mobile_phone", "Mobile Phone"))
def get_ui_label_permalink(self):
"""
Label for the Permalink on dataTables
- set to None to disable
"""
return self.ui.get("label_permalink", "Link to this result")
def get_ui_label_postcode(self):
"""
Label for the Postcode field
e.g. 'ZIP Code'
"""
return current.T(self.ui.get("label_postcode", "Postcode"))
def get_ui_label_read(self):
"""
Label for buttons in list views which lead to a Read-only 'Display' page
"""
return self.ui.get("read_label", "Open")
def get_ui_label_update(self):
"""
Label for buttons in list views which lead to an Editable 'Update' page
"""
return self.ui.get("update_label", "Open")
def get_ui_multiselect_widget(self):
"""
Whether all dropdowns should use the S3MultiSelectWidget
- currently respected by Auth Registration & S3LocationSelector
Options:
False (default): No widget
True: Widget, with no header
"search": Widget with the search header
"""
return self.ui.get("multiselect_widget", False)
def get_ui_navigate_away_confirm(self):
"""
Whether to enable a warning when users navigate away from a page with unsaved changes
"""
return self.ui.get("navigate_away_confirm", True)
def get_ui_search_submit_button(self):
"""
Class for submit buttons in search views
"""
return self.ui.get("search_submit_button", "search-button")
def get_ui_social_buttons(self):
"""
Display social media Buttons in the footer?
- requires support in the Theme
"""
return self.ui.get("social_buttons", False)
def get_ui_summary(self):
"""
Default Summary Page Configuration (can also be
configured per-resource using s3db.configure)
@example:
settings.ui.summary = [
{
"name": "table", # the section name
"label": "Table", # the section label, will
# automatically be translated
"common": False, # show this section on all tabs
"translate": True, # turn automatic label translation on/off
"widgets": [ # list of widgets for this section
{
"method": "datatable", # widget method, either a
# name that resolves into
# a S3Method, or a callable
# to render the widget
"filterable": True, # Whether the widget can
# be filtered by the summary
# filter form
}
]
}
]
"""
return self.ui.get("summary", ({"common": True,
"name": "add",
"widgets": [{"method": "create"}],
},
{"common": True,
"name": "cms",
"widgets": [{"method": "cms"}]
},
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "charts",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
))
def get_ui_autocomplete_delay(self):
"""
Time in milliseconds after the last keystroke in an AC field
to start the search
"""
return self.__lazy("ui", "autocomplete_delay", 800)
def get_ui_autocomplete_min_chars(self):
"""
Minimum charcters in an AC field to start the search
"""
return self.__lazy("ui", "autocomplete_min_chars", 2)
def get_ui_filter_auto_submit(self):
"""
Time in milliseconds after the last filter option change to
automatically update the filter target(s), set to 0 to disable
"""
return self.ui.get("filter_auto_submit", 800)
def get_ui_report_auto_submit(self):
"""
Time in milliseconds after the last filter option change to
automatically update the filter target(s), set to 0 to disable
"""
return self.ui.get("report_auto_submit", 800)
def get_ui_report_timeout(self):
"""
Time in milliseconds to wait for a Report's AJAX call to complete
"""
return self.ui.get("report_timeout", 10000)
def get_ui_use_button_icons(self):
"""
Use icons on action buttons (requires corresponding CSS)
"""
return self.ui.get("use_button_icons", False)
def get_ui_hierarchy_theme(self):
"""
Theme for the S3HierarchyWidget.
'css' is a folder relative to static/styles
- /jstree.css or /jstree.min.css is added as-required
"""
return self.ui.get("hierarchy_theme", dict(css = "plugins",
icons = False,
stripes = True,
))
def get_ui_hierarchy_cascade_option_in_tree(self):
"""
Whether hierarchy widgets show a "Select All" option in
the tree (True) or as context menu of the parent node.
"""
return self.ui.get("hierarchy_cascade_option_in_tree", True)
def get_ui_hierarchy_filter_bulk_select_option(self):
"""
Whether or not to show a bulk-select option in hierarchical
filter widgets (overrides per-widget setting)
"""
return self.ui.get("hierarchy_filter_bulk_select_option")
def get_ui_location_filter_bulk_select_option(self):
"""
Whether or not to show a bulk-select option in location
filter widgets (overrides per-widget setting)
"""
return self.__lazy("ui", "location_filter_bulk_select_option")
    def get_ui_inline_component_layout(self):
        """
            Layout for S3SQLInlineComponent
            - returns an S3SQLSubFormLayout instance; the setting may
              hold an instance, or a class which is instantiated lazily
              on first request (and then cached)
        """
        # Use this to also catch old-style classes (not recommended):
        #import types
        #elif isinstance(layout, (type, types.ClassType)):
        layout = self.ui.get("inline_component_layout")
        if not layout:
            # Unconfigured => use the standard sub-form layout
            from s3 import S3SQLSubFormLayout
            layout = S3SQLSubFormLayout()
        elif isinstance(layout, type):
            # Instantiate only now when it's actually requested
            # (because it may inject JS which is not needed if unused)
            layout = layout()
            # Replace so it doesn't get instantiated twice
            self.ui.inline_component_layout = layout
        return layout
def get_ui_inline_cancel_edit(self):
"""
Behavior of inline components when switching edit rows
without explicit submit/cancel: cancel|submit|ask|refuse
"""
return self.ui.get("inline_cancel_edit", "ask")
def get_ui_profile_header(self, r):
"""
What Header should be shown in the Profile page
"""
#profile_header = self.__lazy("ui", "profile_header", None)
profile_header = self.ui.get("profile_header", None)
if profile_header:
profile_header = profile_header(r)
else:
# Default
from gluon import DIV, H2, P
try:
title = r.record.name
except AttributeError:
title = r.record.id
try:
comments = r.record.comments or ""
except AttributeError:
comments = ""
profile_header = DIV(H2(title),
P(comments),
_class="profile-header",
)
return profile_header
def get_ui_menu_logo(self):
"""
The menu logo for the default menu, can be:
- a string representing an image URL (can use URL())
- a T()
- an HTML helper instance (e.g. DIV or SPAN)
- None (to show system short name instead of a logo)
NB to override the entire title area, use the template's
menus.py and specify a title_area attribute for the
main menu node
"""
return self.__lazy("ui", "menu_logo",
URL(c = "static",
f = "img",
args = ["S3menu_logo.png"],
)
)
def get_ui_organizer_business_hours(self):
"""
Business hours to indicate in organizer,
- a dict {dow:[0,1,2,3,4,5,6], start: "HH:MM", end: "HH:MM"},
- or a list of such dicts
- dow 0 being Sunday
- False to disable
"""
return self.__lazy("ui", "organizer_business_hours", False)
def get_ui_organizer_time_format(self):
"""
The time format for organizer (overrides locale default)
"""
return self.__lazy("ui", "organizer_time_format", None)
def get_ui_organizer_snap_duration(self):
"""
Snap raster width in organizer (hh:mm:ss), default 00:15:00
"""
return self.__lazy("ui", "organizer_snap_duration", None)
# =========================================================================
# Messaging
#
def get_msg_max_send_retries(self):
"""
Maximum number of retries to send a message before
it is regarded as permanently failing; set to None
to retry forever.
"""
return self.msg.get("max_send_retries", 9)
def get_msg_basestation_code_unique(self):
"""
Validate for Unique Basestations Codes
"""
return self.msg.get("basestation_code_unique", False)
def get_msg_send_postprocess(self):
"""
Custom function that processes messages after they have been sent, eg.
link alert_id in cap module to message_id in message module
The function can be of form msg_send_postprocess(message_id, **data),
where message_id is the msg_message_id and
**data is the additional arguments to pass to s3msg.send_by_pe_id
"""
return self.msg.get("send_postprocess")
# -------------------------------------------------------------------------
# Mail settings
def get_mail_server(self):
return self.mail.get("server", "127.0.0.1:25")
def get_mail_server_login(self):
return self.mail.get("login", False)
def get_mail_server_tls(self):
"""
Does the Mail Server use TLS?
- default Debian is False
- GMail is True
"""
return self.mail.get("tls", False)
def get_mail_sender(self):
"""
The From Address for all Outbound Emails
"""
return self.mail.get("sender")
def get_mail_approver(self):
"""
The default Address to send Requests for New Users to be Approved
OR
UUID of Role of users who should receive Requests for New Users to be Approved
- unless overridden by per-domain entries in auth_organsiation
"""
return self.mail.get("approver", "useradmin@example.org")
def get_mail_default_subject(self):
"""
Use system_name_short as default email subject (Appended).
"""
return self.mail.get("default_email_subject", False)
    def get_mail_auth_user_in_subject(self):
        """
            Append name and surname of logged in user to email subject

            NOTE(review): the lookup key "mail.auth_user_in_email_subject"
            carries a "mail." prefix although it is already read from
            self.mail — likely meant to be "auth_user_in_email_subject";
            kept as-is to preserve behavior for deployments that set the
            prefixed key — confirm against templates before changing
        """
        return self.mail.get("mail.auth_user_in_email_subject", False)
def get_mail_limit(self):
"""
A daily limit to the number of messages which can be sent
"""
return self.mail.get("limit")
# -------------------------------------------------------------------------
# Parser
def get_msg_parser(self):
"""
Which template folder to use to load parser.py
"""
return self.msg.get("parser", "default")
# -------------------------------------------------------------------------
# Notifications
def get_msg_notify_check_subscriptions(self):
"""
Whether to Check Subscriptions
"""
return self.msg.get("notify_check_subscriptions", False)
def get_msg_notify_subject(self):
"""
Template for the subject line in update notifications.
Available placeholders:
$S = System Name (long)
$s = System Name (short)
$r = Resource Name
Use {} to separate the placeholder from immediately following
identifier characters (like: ${placeholder}text).
"""
return self.msg.get("notify_subject",
"$s %s: $r" % current.T("Update Notification"))
def get_msg_notify_email_format(self):
"""
The preferred email format for update notifications,
"text" or "html".
"""
return self.msg.get("notify_email_format", "text")
def get_msg_notify_renderer(self):
"""
Custom content renderer function for update notifications,
function()
"""
return self.msg.get("notify_renderer")
def get_msg_notify_attachment(self):
"""
Custom function that returns the list of document_ids to be sent
as attachment in email
The function may be of the form:
custom_msg_notify_attachment(resource, data, meta_data), where
resource is the S3Resource, data: the data returned from
S3Resource.select and meta_data: the meta data for the notification
(see s3notify for the metadata)
"""
return self.msg.get("notify_attachment")
def get_msg_notify_send_data(self):
"""
Custom function that returns additional arguments to pass to
s3msg.send_by_pe_id
The function should be of the form:
custom_msg_notify_send_data(resource, data, meta_data), where
resource is the S3Resource, data: the data returned from
S3Resource.select and meta_data: the meta data for the notification
(see s3notify for the metadata)
"""
return self.msg.get("notify_send_data")
# -------------------------------------------------------------------------
# SMS
#
def get_msg_require_international_phone_numbers(self):
"""
Requires the E.123 international phone number
notation where needed (e.g. SMS)
"""
return self.msg.get("require_international_phone_numbers", True)
# =========================================================================
# Search
#
def get_search_max_results(self):
"""
The maximum number of results to return in an Autocomplete Search
- more than this will prompt the user to enter a more exact match
Lower this number to get extra performance from an overloaded server.
"""
return self.search.get("max_results", 200)
def get_search_dates_auto_range(self):
"""
Date filters to apply introspective range limits (by
looking up actual minimum/maximum dates from the records)
NB has scalability problems, so disabled by default =>
can be overridden per-widget using the "auto_range"
option (S3DateFilter)
"""
return self.search.get("dates_auto_range", False)
# Filter Manager Widget
def get_search_filter_manager(self):
""" Enable the filter manager widget """
return self.search.get("filter_manager", True)
def get_search_filter_manager_allow_delete(self):
""" Allow deletion of saved filters """
return self.search.get("filter_manager_allow_delete", True)
def get_search_filter_manager_save(self):
""" Text for saved filter save-button """
return self.search.get("filter_manager_save")
def get_search_filter_manager_update(self):
""" Text for saved filter update-button """
return self.search.get("filter_manager_update")
def get_search_filter_manager_delete(self):
""" Text for saved filter delete-button """
return self.search.get("filter_manager_delete")
def get_search_filter_manager_load(self):
""" Text for saved filter load-button """
return self.search.get("filter_manager_load")
# =========================================================================
# Setup
#
def get_setup_monitor_template(self):
"""
Which template folder to use to load monitor.py
"""
return self.setup.get("monitor_template", "default")
def get_setup_wizard_questions(self):
"""
Configuration options to see in the Setup Wizard
"""
return self.setup.get("wizard_questions", [])
# =========================================================================
# Sync
#
def get_sync_mcb_resource_identifiers(self):
"""
Resource (=data type) identifiers for synchronization with
Mariner CommandBridge, a dict {tablename:id}
"""
return self.sync.get("mcb_resource_identifiers", {})
def get_sync_mcb_domain_identifiers(self):
"""
Domain (of origin) identifiers for synchronization with
Mariner CommandBridge, a dict {domain: id} where
"domain" means the domain prefix of the record UUID
(e.g. uuid "wrike/IKY0192834" => domain "wrike"),
default domain is "sahana"
"""
return self.sync.get("mcb_domain_identifiers", {})
def get_sync_upload_filename(self):
"""
Filename for upload via FTP Sync
Available placeholders:
$S = System Name (long)
$s = System Name (short)
$r = Resource Name
Use {} to separate the placeholder from immediately following
identifier characters (like: ${placeholder}text).
"""
return self.sync.get("upload_filename", "$s $r")
def get_sync_data_repository(self):
""" This deployment is a public data repository """
return self.sync.get("data_repository", False)
# =========================================================================
# Modules
# -------------------------------------------------------------------------
# Asset: Asset Management
#
def get_asset_telephones(self):
"""
Whether Assets should include a specific type for Telephones
"""
return self.asset.get("telephones", False)
# -------------------------------------------------------------------------
# BR: Beneficiary Registry
#
def get_br_case_terminology(self):
"""
Terminology to use when referring to cases: Beneficiary|Client|Case
"""
return self.br.get("case_terminology", "Case")
def get_br_assistance_terminology(self):
"""
Terminology to use when referring to measures of assistance: Counseling|Assistance
"""
return self.br.get("assistance_terminology", "Assistance")
def get_br_needs_hierarchical(self):
"""
Need categories are hierarchical
"""
return self.br.get("needs_hierarchical", False)
def get_br_needs_org_specific(self):
"""
Need categories are specific per root organisation
"""
return self.br.get("needs_org_specific", True)
def get_br_id_card_layout(self):
"""
Layout class for beneficiary ID cards
"""
return self.br.get("id_card_layout")
def get_br_id_card_export_roles(self):
"""
User roles permitted to export beneficiary ID cards
"""
return self.br.get("id_card_export_roles")
def get_br_case_hide_default_org(self):
"""
Hide the organisation field in cases if only one allowed
"""
return self.br.get("case_hide_default_org", True)
def get_br_case_manager(self):
"""
Assign cases to individual case managers (staff members)
"""
return self.br.get("case_manager", True)
def get_br_case_address(self):
"""
Document the current address of beneficiaries
"""
return self.br.get("case_address", False)
def get_br_case_language_details(self):
"""
Document languages that can be used when communicating with
a beneficiary
"""
return self.br.get("case_language_details", True)
def get_br_household_size(self):
"""
Track the number of persons per household (family)
- False = off
- True = manual
- "auto" = count family members automatically
"""
return self.br.get("household_size", "auto")
def get_br_case_contacts_tab(self):
"""
Case file use tab to track beneficiary contact information
"""
return self.br.get("case_contacts_tab", True)
def get_br_case_id_tab(self):
"""
Case file use tab to track identity documents
"""
return self.br.get("case_id_tab", False)
def get_br_case_family_tab(self):
"""
Case file use tab to track family members
"""
return self.br.get("case_family_tab", True)
def get_br_service_contacts(self):
"""
Enable case file tab to track service contacts
"""
return self.br.get("service_contacts", False)
def get_br_case_notes_tab(self):
"""
Use a simple notes journal in case files
"""
return self.br.get("case_notes_tab", False)
def get_br_case_photos_tab(self):
"""
Case file use tab to upload photos
NB image-component can also be reached by clicking on the
profile photo (or the placeholder, respectively)
"""
return self.br.get("case_photos_tab", False)
def get_br_case_documents_tab(self):
"""
Case file use tab to upload documents
"""
return self.br.get("case_documents_tab", True)
def get_br_case_include_activity_docs(self):
"""
Documents-tab of case files includes activity attachments
"""
return self.get_br_case_activity_documents() and \
self.br.get("case_include_activity_docs", True)
def get_br_case_include_group_docs(self):
"""
Documents-tab of case files includes case group attachments
"""
return self.br.get("case_include_group_docs", True)
def get_br_case_activities(self):
"""
Track case activities
"""
return self.br.get("case_activities", True)
def get_br_case_activity_manager(self):
"""
Assign case activities to individual staff members
"""
return self.br.get("case_activity_manager", True)
def get_br_case_activity_urgent_option(self):
"""
Expose features for urgent case activities ("emergencies")
"""
return self.br.get("case_activity_urgent_option", False)
def get_br_case_activity_need(self):
"""
Use need categories for case activities
"""
return self.br.get("case_activity_need", True)
def get_br_case_activity_subject(self):
"""
Have a subject line (title) for case activities
"""
return self.br.get("case_activity_subject", False)
def get_br_case_activity_need_details(self):
"""
Have a text field to document need details in case activities
"""
return self.br.get("case_activity_need_details", False)
def get_br_case_activity_status(self):
"""
Case activities have a status (and possibly an end date)
"""
return self.br.get("case_activity_status", True)
def get_br_case_activity_end_date(self):
"""
Show case activity end date in form
- True to show, "writable" to allow manual edit
"""
return self.br.get("case_activity_end_date", False)
def get_br_case_activity_updates(self):
"""
Use case activity update journal (inline-component)
"""
return self.br.get("case_activity_updates", False)
def get_br_case_activity_outcome(self):
"""
Show field to track outcomes of case activities (free-text)
"""
return self.br.get("case_activity_outcome", True)
def get_br_case_activity_documents(self):
"""
Case activities have attachments
"""
return self.br.get("case_activity_documents", False)
def get_br_manage_assistance(self):
"""
Track individual measures of assistance
"""
return self.br.get("manage_assistance", True)
def get_br_assistance_inline(self):
"""
Document assistance measures inline in activities
"""
return self.br.get("assistance_inline", True)
def get_br_assistance_tab(self):
"""
Document assistance measures on separate case file tab
"""
setting = self.br.get("assistance_tab")
if setting is None:
# Show the tab if managing assistance without activities
setting = self.get_br_manage_assistance() and \
not self.get_br_case_activities()
return setting
def get_br_assistance_manager(self):
"""
Assign assistance measures to individual staff members
"""
return self.br.get("assistance_manager", True)
def get_br_assistance_types(self):
"""
Use assistance type categories
"""
return self.br.get("assistance_types", True)
def get_br_assistance_themes(self):
"""
Use assistance theme categories
"""
return self.br.get("assistance_themes", False)
def get_br_assistance_themes_org_specific(self):
"""
Assistance themes are specific per root organisation
"""
return self.br.get("assistance_themes_org_specific", True)
def get_br_assistance_themes_sectors(self):
"""
Assistance themes are organized by org sector
"""
return self.br.get("assistance_themes_sectors", False)
def get_br_assistance_themes_needs(self):
"""
Assistance themes are linked to needs
"""
return self.br.get("assistance_themes_needs", False)
def get_br_assistance_measures_use_time(self):
"""
Assistance measures use date+time (instead of just date)
"""
return self.br.get("assistance_measures_use_time", False)
def get_br_assistance_measure_default_closed(self):
"""
Set default status of assistance measures to closed
(useful if the primary use-case is post-action documentation)
"""
return self.br.get("assistance_measure_default_closed", False)
def get_br_assistance_details_per_theme(self):
"""
Document assistance measure details per theme
- requires assistance tab
"""
return self.get_br_assistance_tab() and \
self.br.get("assistance_details_per_theme", False)
def get_br_assistance_activity_autolink(self):
"""
Auto-link assistance details to case activities
- requires case_activity_need
- requires assistance_themes and assistance_themes_needs
- requires assistance_tab and assistance_details_per_theme
"""
return self.br.get("assistance_activity_autolink", False)
def get_br_assistance_track_effort(self):
"""
Track effort (=hours spent) for assistance measures
"""
return self.br.get("assistance_track_effort", True)
# -------------------------------------------------------------------------
# CAP: Common Alerting Protocol
#
def get_cap_identifier_oid(self):
    """
    OID for the CAP issuing authority

    Looks up an organisation-specific OID (org_organisation_tag
    with tag "cap_oid") for the current user's organisation first,
    then falls back to the "cap.identifier_oid" setting ("" if unset).
    """
    # See if the User has an Org-specific OID
    auth = current.auth
    if auth.user and auth.user.organisation_id:
        table = current.s3db.org_organisation_tag
        query = ((table.organisation_id == auth.user.organisation_id) & \
                 (table.tag == "cap_oid"))
        # limitby=(0, 1): only the first matching tag is relevant
        record = current.db(query).select(table.value,
                                          limitby=(0, 1)
                                          ).first()
        # Only use the tag if it actually has a non-empty value
        if record and record.value:
            return record.value
    # Else fallback to the default OID
    return self.cap.get("identifier_oid", "")
def get_cap_info_effective_period(self):
"""
The period (in days) after which alert info segments expire
"""
return self.cap.get("info_effective_period", 2)
def get_cap_codes(self):
"""
Default codes for CAP alerts
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("codes", [])
def get_cap_event_codes(self):
"""
Default alert codes for CAP info segments
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("event_codes", [])
def get_cap_parameters(self):
"""
Default parameters for CAP info segments
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("parameters", [])
def get_cap_geocodes(self):
"""
Default geocodes.
should return a list of dicts:
[ {"key": "<ValueName>, "value": "<Value>",
"comment": "<Help string>", "mutable": True|False},
...]
"""
return self.cap.get("geocodes", [])
def get_cap_base64(self):
"""
Should CAP resources be base64 encoded and embedded in the alert message?
"""
return self.cap.get("base64", False)
def get_cap_languages(self):
    """
    Languages for CAP info segments; fills the language
    selector drop-down

    Values should conform to RFC 3066; for a full list of
    languages and their codes, see:
    http://www.i18nguy.com/unicode/language-identifiers.html
    """
    defaults = OrderedDict([("ar", "Arabic"),
                            ("en-US", "English"),
                            ("es", "Spanish"),
                            ("fr", "French"),
                            ("pt", "Portuguese"),
                            ("ru", "Russian"),
                            ])
    return self.cap.get("languages", defaults)
def get_cap_authorisation(self):
"""
Authorisation setting whether to display "Submit for Approval" Button
"""
return self.cap.get("authorisation", True)
def get_cap_restrict_fields(self):
"""
Whether to restrict fields for update, cancel or error of alerts
"""
return self.cap.get("restrict_fields", False)
def get_cap_post_to_twitter(self):
"""
Whether to post the alerts in twitter
@ToDo: enhance this by as well as True,
being able to specify a specific Twitter channel to tweet on
"""
return self.cap.get("post_to_twitter", False)
def get_cap_same_code(self):
"""
Name of the tag that will be used to lookup in the gis_location_tag
to extract location_id for the alert
"""
return self.cap.get("same_code")
def get_cap_post_to_facebook(self):
"""
Whether to post the alerts in facebook
"""
return self.cap.get("post_to_facebook", False)
def get_cap_rss_use_links(self):
"""
Whether to use links of entry element if link fail
"""
return self.cap.get("rss_use_links", False)
def get_cap_use_ack(self):
"""
Whether CAP Alerts have workflow for Acknowledgement
"""
return self.cap.get("use_ack", False)
def get_cap_alert_hub_title(self):
"""
Title for the Alert Hub Page
"""
return self.cap.get("alert_hub_title", current.T("SAMBRO Alert Hub Common Operating Picture"))
def get_cap_area_default(self):
    """
    Which element(s) to use for the cap_area_location record
    when importing from XML; possible elements are <polygon>
    and <geocode>
    """
    defaults = ["geocode", "polygon"]
    return self.cap.get("area_default", defaults)
# -------------------------------------------------------------------------
# CMS: Content Management System
#
def get_cms_bookmarks(self):
"""
Whether to allow users to bookmark Posts in News feed
"""
return self.cms.get("bookmarks", False)
def get_cms_filter_open(self):
"""
Whether the filter form on the Newsfeed should default to Open or Closed
"""
return self.cms.get("filter_open", False)
def get_cms_location_click_filters(self):
"""
Whether clicking on a location in the Newsfeed should activate
the filter to that location, instead of opening the profile page
"""
return self.cms.get("location_click_filters", False)
def get_cms_organisation(self):
"""
Which field to use for the Organisation of Posts:
* None
* created_by$organisation_id
* post_organisation.organisation_id
"""
return self.cms.get("organisation", "created_by$organisation_id")
def get_cms_organisation_group(self):
"""
Which field to use for the Organisation Group of Posts:
* None
* created_by$org_group_id
* post_organisation_group.group_id
"""
return self.cms.get("organisation_group")
def get_cms_person(self):
"""
Which field to use for the Author of Posts:
* None
* created_by
* person_id
"""
return self.cms.get("person", "created_by")
def get_cms_richtext(self):
"""
Whether to use RichText editor in News feed
"""
return self.cms.get("richtext", False)
def get_cms_show_events(self):
"""
Whether to show Events in News Feed
"""
return self.cms.get("show_events", False)
def get_cms_show_attachments(self):
"""
Whether to show Attachments (such as Sources) in News Feed
"""
return self.cms.get("show_attachments", True)
def get_cms_show_links(self):
"""
Whether to show Links (such as Sources) in News Feed
"""
return self.cms.get("show_links", False)
def get_cms_show_tags(self):
"""
Whether to show Tags in News Feed
"""
return self.cms.get("show_tags", False)
def get_cms_show_titles(self):
"""
Whether to show post Titles in News Feed
"""
return self.cms.get("show_titles", False)
def get_cms_hide_index(self, module):
    """
    Whether to hide CMS from module index pages; can be
    configured either as a boolean, or as a dict per module
    (with "_default" defining the default behavior)
    """
    setting = self.cms.get("hide_index", {})
    if not isinstance(setting, dict):
        # Global boolean setting
        return setting
    # Per-module setting, with "_default" fallback
    if module in setting:
        return setting[module]
    return setting.get("_default", False)
# -------------------------------------------------------------------------
# Shelters
#
def get_cr_day_and_night(self):
"""
Whether Shelter Capacities/Registrations are different for Day and Night
"""
return self.cr.get("day_and_night", False)
def get_cr_shelter_people_registration(self):
"""
Enable functionality to track individuals in shelters
"""
return self.cr.get("people_registration", True)
def get_cr_shelter_population_dynamic(self):
    """
    Whether shelter population is evaluated automatically from
    the registrations (True) or entered manually (False);
    automatic evaluation displays all fields it uses:
        "available_capacity_day",
        "available_capacity_night",
        "population_day",
        "population_night"
    """
    registration = self.get_cr_shelter_people_registration()
    if not registration:
        # Only relevant when using people registration
        return False
    return self.cr.get("shelter_population_dynamic", False)
def get_cr_shelter_housing_unit_management(self):
"""
Enable the use of tab "Housing Unit" and enable the housing unit
selection during client registration.
"""
return self.cr.get("shelter_housing_unit_management", False)
def get_cr_check_out_is_final(self):
"""
Whether checking out of a Shelter frees up the place or is just leaving the site temporarily
"""
return self.cr.get("check_out_is_final", True)
def get_cr_tags(self):
"""
Whether Shelters should show a Tags tab
"""
return self.cr.get("tags", False)
def get_cr_shelter_inspection_tasks(self):
    """
    Whether to generate tasks from shelter inspections
    (requires the project module to be enabled)
    """
    if not self.has_module("project"):
        return False
    return self.cr.get("shelter_inspection_tasks", False)
def get_cr_shelter_inspection_task_active_statuses(self):
    """
    List of active statuses of shelter inspection tasks
    (subset of project_task_status_opts)
    """
    active_statuses = (1, 2, 3, 4, 5, 6, 11)
    return self.cr.get("shelter_inspection_tasks_active_statuses",
                       active_statuses,
                       )
def get_cr_shelter_inspection_task_completed_status(self):
"""
Completed-status for shelter inspection tasks (one value
of project_task_status_opts), will be set when inspection
flag is marked as resolved
"""
return self.cr.get("shelter_inspection_tasks_completed_status", 12)
# -------------------------------------------------------------------------
# DC: Data Collection
#
def get_dc_mobile_data(self):
"""
Whether Mobile Clients should download Assessments (Data not just Forms)
- e.g. when these are created through Targetting
"""
return self.dc.get("mobile_data", False)
def get_dc_mobile_inserts(self):
"""
Whether Mobile Clients should create Assessments locally
"""
return self.dc.get("mobile_inserts", True)
def get_dc_response_label(self):
"""
Label for Responses
- 'Assessment;
- 'Response' (default if set to None)
- 'Survey'
"""
return self.dc.get("response_label", "Assessment")
def get_dc_response_mobile(self):
"""
Whether Assessments are filled-out on the EdenMobile App
"""
return self.dc.get("response_mobile", True)
def get_dc_response_web(self):
"""
Whether Assessments are filled-out on the Web interface
"""
return self.dc.get("response_web", True)
def get_dc_target_status(self):
"""
Whether Assessment Targets have Statuses
"""
return self.dc.get("target_status", False)
def get_dc_unique_question_names_per_template(self):
"""
Deduplicate Questions by Name/Template
- needed for importing multiple translations
"""
return self.dc.get("unique_question_names_per_template", False)
def get_dc_likert_options(self):
    """
    Likert scales and their answer options, as a dict
    {scale_id: [option, ...]}; options are listed in the
    order they are presented

    Scales 6 and 7 use smiley image names instead of text
    labels
    """
    return self.dc.get("likert_options", {1: ["Very appropriate",
                                              "Somewhat appropriate",
                                              "Neither appropriate nor inappropriate",
                                              "Somewhat inappropriate",
                                              "Very inappropriate",
                                              ],
                                          2: ["Extremely confident",
                                              "Very confident",
                                              "Moderately confident",
                                              "Slightly confident",
                                              "Not confident at all",
                                              ],
                                          3: ["Always",
                                              "Often",
                                              "Occasionally",
                                              "Rarely",
                                              "Never",
                                              ],
                                          4: ["Extremely safe",
                                              "Very safe",
                                              "Moderately safe",
                                              "Slightly safe",
                                              "Not safe at all",
                                              ],
                                          5: ["Very satisfied",
                                              "Somewhat satisfied",
                                              "Neither satisfied nor dissatisfied",
                                              "Somewhat dissatisfied",
                                              "Very dissatisfied",
                                              ],
                                          # NOTE(review): scale 6 skips "smiley-5" — presumably
                                          # intentional, verify against the smiley image set
                                          6: ["smiley-1",
                                              "smiley-2",
                                              "smiley-3",
                                              "smiley-4",
                                              "smiley-6",
                                              ],
                                          7: ["smiley-3",
                                              "smiley-4",
                                              "smiley-5",
                                              ],
                                          })
# -------------------------------------------------------------------------
# Deployments
#
def get_deploy_alerts(self):
    """
    Whether the system is used to send Alerts
    (lazy setting "deploy.alerts", defaults to True)
    """
    return self.__lazy("deploy", "alerts", default=True)
def get_deploy_cc_groups(self):
"""
List of Group names that are cc'd on Alerts
"""
return self.__lazy("deploy", "cc_groups", default=[])
def get_deploy_hr_label(self):
"""
Label for deployable Human Resources
e.g. 'Staff', 'Volunteer' (CERT), 'Member' (RDRT)
"""
return self.deploy.get("hr_label", "Staff")
def get_deploy_manual_recipients(self):
"""
Whether Alert recipients should be selected manually
"""
return self.deploy.get("manual_recipients", True)
def get_deploy_member_filters(self):
"""
Custom set of filter_widgets for members (hrm_human_resource),
used in custom methods for member selection, e.g. deploy_apply
or deploy_alert_select_recipients
"""
return self.__lazy("deploy", "member_filters", default=None)
def get_deploy_post_to_twitter(self):
"""
Whether to post the alerts in twitter
@ToDo: enhance this by as well as True,
being able to specify a specific Twitter channel to tweet on
"""
return self.deploy.get("post_to_twitter", False)
def get_deploy_responses_via_web(self):
"""
Whether Responses to Alerts come in via the Web
"""
return self.deploy.get("responses_via_web", True)
def get_deploy_select_ratings(self):
"""
Whether to have filters for Ratings when selecting deployables for an Alert
"""
return self.deploy.get("select_ratings", False)
def get_deploy_team_label(self):
"""
Label for deployable Team
e.g. 'RDRT', 'RIT'
"""
return self.deploy.get("team_label", "Deployable")
# -------------------------------------------------------------------------
# Disease Tracking and Monitoring
#
def get_disease_case_number(self):
"""
Use case numbers in disease tracking
"""
return self.disease.get("case_number", False)
def get_disease_case_id(self):
"""
Use personal ID (pe_label) in disease tracking
"""
return self.disease.get("case_id", True)
def get_disease_treatment(self):
"""
Use a treatment notes journal for cases
"""
return self.disease.get("treatment", False)
# -------------------------------------------------------------------------
# Doc Options
#
def get_doc_label(self):
"""
label for Document/Attachment
"""
return self.doc.get("label", "Document")
def get_doc_mailmerge_fields(self):
"""
Dictionary of mailmerge fields
- assumes starting from pr_person
"""
return self.doc.get("mailmerge_fields", {"First Name": "first_name",
"Last Name": "last_name",
"Date of Birth": "date_of_birth",
})
# -------------------------------------------------------------------------
# DVR Options
#
def get_dvr_label(self):
    """
    Terminology for cases:
    - default: None = "Case"
    - valid options: "Beneficiary"
    """
    return self.dvr.get("label")
def get_dvr_case_flags(self):
"""
Enable features to manage case flags
"""
return self.dvr.get("case_flags", False)
def get_dvr_track_transfer_sites(self):
"""
Enable features to track transfer origin/destination sites
"""
return self.dvr.get("track_transfer_sites", False)
def get_dvr_transfer_site_types(self):
    """
    Site types which can be used as case transfer
    origin/destination
    """
    site_types = ("cr_shelter",
                  "org_office",
                  "org_facility",
                  )
    return self.dvr.get("transfer_site_types", site_types)
def get_dvr_manage_transferability(self):
"""
Enable features to manage transferability of cases
"""
return self.dvr.get("manage_transferability", False)
def get_dvr_household_size(self):
"""
Register number of persons per household (family)
False = off
True = manual
"auto" = count family members automatically
"""
return self.dvr.get("household_size", False)
def get_dvr_mandatory_appointments(self):
"""
Expose flags to mark appointment types as mandatory
"""
return self.dvr.get("mandatory_appointments", False)
def get_dvr_case_events_close_appointments(self):
"""
Whether case events automatically close appointments
"""
return self.dvr.get("case_events_close_appointments", False)
def get_dvr_appointments_update_last_seen_on(self):
"""
Whether appointments which require presence shall
automatically update the "last seen on" date when
set to "completed"
"""
return self.dvr.get("appointments_update_last_seen_on", False)
def get_dvr_appointments_update_case_status(self):
"""
Whether appointments automatically update the case
status when set to "completed"
"""
return self.dvr.get("appointments_update_case_status", False)
def get_dvr_payments_update_last_seen_on(self):
"""
Whether payments (e.g. allowance) shall automatically update
the "last seen on" date when set to "paid"
"""
return self.dvr.get("payments_update_last_seen_on", False)
def get_dvr_id_code_pattern(self):
    """
    A regular expression pattern to parse ID Codes (QR codes),
    or None to disable ID code parsing

    The pattern should define the following groups:
        label          the PE label, mandatory
        family         the PE label of the head of family, optional
        first_name     optional
        last_name      optional
        date_of_birth  optional

    Example:
        "(?P<label>[^,]*),(?P<first_name>[^,]*),(?P<last_name>[^,]*),(?P<date_of_birth>[^,]*)"
    """
    return self.dvr.get("id_code_pattern")
def get_dvr_event_registration_checkin_warning(self):
"""
Warn during event registration when the person is currently
not checked-in
"""
return self.dvr.get("event_registration_checkin_warning", False)
def get_dvr_event_registration_show_picture(self):
"""
Event registration UI to show profile picture
by default (True), or only on demand (False):
- can be set to False (selectively) in order to improve
responsiveness of the UI and reduce network traffic
"""
return self.dvr.get("event_registration_show_picture", True)
def get_dvr_event_registration_exclude_codes(self):
"""
List of case event type codes to exclude from
the event registration UI; can use * as wildcard
Example:
settings.dvr.event_registration_exclude_codes = ("FOOD*",)
"""
return self.dvr.get("event_registration_exclude_codes", None)
def get_dvr_activity_use_service_type(self):
"""
Use service type in group/case activities
"""
return self.dvr.get("activity_use_service_type", False)
def get_dvr_activity_sectors(self):
"""
Use sectors in group/case activities
"""
return self.dvr.get("activity_sectors", False)
def get_dvr_case_activity_use_status(self):
"""
Use configurable statuses in case activities
instead of simple completed-flag
"""
return self.dvr.get("case_activity_use_status", False)
def get_dvr_case_activity_needs_multiple(self):
"""
Whether Case Activities link to Multiple Needs
- e.g. DRK: False
- e.g. STL: True
"""
return self.dvr.get("case_activity_needs_multiple", False)
def get_dvr_case_activity_follow_up(self):
"""
Enable/disable fields to schedule case activities for follow-up
"""
return self.__lazy("dvr", "case_activity_follow_up", default=True)
def get_dvr_case_include_activity_docs(self):
"""
Documents-tab of beneficiaries includes case activity attachments
"""
return self.dvr.get("case_include_activity_docs", False)
def get_dvr_case_include_group_docs(self):
"""
Documents-tab of beneficiaries includes case group attachments
"""
return self.dvr.get("case_include_group_docs", False)
def get_dvr_needs_use_service_type(self):
"""
Use service type in needs
"""
return self.dvr.get("needs_use_service_type", False)
def get_dvr_needs_hierarchical(self):
"""
Need types are hierarchical
"""
return self.dvr.get("needs_hierarchical", False)
def get_dvr_vulnerability_types_hierarchical(self):
"""
Vulnerability types are hierarchical
"""
return self.dvr.get("vulnerability_types_hierarchical", False)
def get_dvr_manage_response_actions(self):
"""
Manage individual response actions in case activities
"""
return self.dvr.get("manage_response_actions", False)
def get_dvr_response_planning(self):
"""
Response actions can be planned
(as opposed to being documented in hindsight)
"""
return self.__lazy("dvr", "response_planning", default=False)
def get_dvr_response_due_date(self):
    """
    Whether response planning uses a separate due-date field
    (only relevant when response planning is enabled)
    """
    planning = self.get_dvr_response_planning()
    return planning and self.__lazy("dvr", "response_due_date", default=False)
def get_dvr_response_use_time(self):
"""
Use date+time for responses rather than just date
"""
return self.__lazy("dvr", "response_use_time", default=False)
def get_dvr_response_types(self):
"""
Use response type categories
"""
return self.__lazy("dvr", "response_types", default=True)
def get_dvr_response_types_hierarchical(self):
"""
Response types are hierarchical
"""
return self.dvr.get("response_types_hierarchical", False)
def get_dvr_response_themes(self):
"""
Use themes for response actions
"""
return self.dvr.get("response_themes", False)
def get_dvr_response_themes_org_specific(self):
"""
Response themes are org-specific
"""
return self.dvr.get("response_themes_org_specific", True)
def get_dvr_response_themes_sectors(self):
"""
Response themes are organized per org sector
"""
return self.__lazy("dvr", "response_themes_sectors", default=False)
def get_dvr_response_themes_needs(self):
"""
Response themes are linked to needs
"""
return self.__lazy("dvr", "response_themes_needs", default=False)
def get_dvr_response_themes_details(self):
"""
Record response details per theme
"""
return self.__lazy("dvr", "response_themes_details", default=False)
def get_dvr_response_activity_autolink(self):
"""
Automatically link response actions to case activities
based on matching needs
"""
return self.get_dvr_response_themes_needs() and \
self.__lazy("dvr", "response_activity_autolink", default=False)
# -------------------------------------------------------------------------
# Education
#
def get_edu_school_code_unique(self):
"""
Validate for Unique School Codes
"""
return self.edu.get("school_code_unique", False)
# -------------------------------------------------------------------------
# Events
#
def get_event_label(self):
    """
    Terminology for events:
    - default: None = "Event"
    - valid options: "Disaster"
    """
    return self.event.get("label")
def get_event_incident(self):
"""
Whether Events have Incidents
"""
return self.event.get("incident", True)
def get_event_cascade_delete_incidents(self):
    """
    Whether deleting an Event cascades to deleting all its
    Incidents (True), or sets their event link to NULL (False)

    - the 'normal' workflow is that an Event is created and
      various Incidents within it, so cascading the delete
      makes sense there ("delete everything associated with
      this event")
    - WA COP uses Events to group pre-existing Incidents, so
      there the Incidents must survive deletion of the Event

    NB Changing this setting requires a DB migration
    """
    return self.event.get("cascade_delete_incidents", True)
def get_event_exercise(self):
"""
Whether Events can be Exercises
"""
return self.event.get("exercise", False)
def get_event_sitrep_dynamic(self):
"""
Whether the SitRep resource should include a Dynamic Table section
"""
return self.event.get("sitrep_dynamic", False)
def get_event_sitrep_edxl(self):
"""
Whether the SitRep resource should be configured for EDXL-Sitrep mode
"""
return self.event.get("sitrep_edxl", False)
def get_event_types_hierarchical(self):
"""
Whether Event Types are Hierarchical or not
"""
return self.event.get("types_hierarchical", False)
def get_incident_types_hierarchical(self):
"""
Whether Incident Types are Hierarchical or not
"""
return self.event.get("incident_types_hierarchical", False)
def get_event_task_notification(self):
"""
Whether to send Notifications for Tasks linked to Events
- only used in SaFiRe template currently
Options: None, contact_method (e.g. "SMS", "EMAIL")
"""
return self.event.get("task_notification", "EMAIL")
def get_event_dc_response_tab(self):
"""
Whether to show the DC response tab for events
"""
return self.event.get("dc_response_tab", True)
def get_event_dc_target_tab(self):
"""
Whether to show the DC target tab for events
"""
return self.event.get("dc_target_tab", True)
def get_event_dispatch_tab(self):
    """
    Whether to show the dispatch tab for events
    (requires the msg module; defaults to False)
    """
    if not self.has_module("msg"):
        return False
    return self.event.get("dispatch_tab", False)
def get_event_impact_tab(self):
"""
Whether to show the impact tab for events
"""
return self.event.get("impact_tab", True)
def get_incident_label(self):
"""
Whether Incidents are called Incidents or Tickets
- default: None = Incident
- valid options: "Ticket"
"""
return self.event.get("incident_label", None)
def get_incident_dispatch_tab(self):
"""
Whether to show the dispatch tab for incidents
"""
if self.has_module("msg"):
return self.event.get("incident_dispatch_tab", True)
else:
return False
def get_incident_impact_tab(self):
"""
Whether to show the impact tab for incidents
"""
return self.event.get("incident_impact_tab", False)
def get_incident_teams_tab(self):
"""
Show tab with teams assigned for incidents, string to
define the label of the tab or True to use default label
"""
return self.event.get("incident_teams_tab", False)
# -------------------------------------------------------------------------
# Fire
#
def get_fire_station_code_unique(self):
    """
    Whether Fire Station codes are validated as unique
    (defaults to False)

    NOTE(review): the setting key is "fire_station_unique",
    which does not match the method name — presumably kept
    for backwards-compatibility; verify against templates
    before renaming the key
    """
    return self.fire.get("fire_station_unique", False)
# -------------------------------------------------------------------------
# Hospital Registry
#
def get_hms_track_ctc(self):
    """
    Whether to track the CTC status of hospitals
    (setting "hms.track_ctc", defaults to False)
    """
    return self.hms.get("track_ctc", False)
def get_hms_activity_reports(self):
    """
    Whether to enable activity reports for hospitals
    (setting "hms.activity_reports", defaults to False)
    """
    return self.hms.get("activity_reports", False)
# -------------------------------------------------------------------------
# Human Resource Management
def get_hrm_course_grades(self):
"""
Grade options for Courses
NB Best to keep Pass/Fail on these numbers but can add additional values if-required, e.g.:
{0: T("No Show"),
1: T("Left Early"),
8: T("Pass"),
9: T("Fail"),
}
"""
T = current.T
return self.__lazy("hrm", "course_grades", default={8: T("Pass"),
9: T("Fail"),
})
def get_hrm_course_pass_marks(self):
"""
Whether the Pass Mark for a course is defined by the Grade Details
"""
return self.hrm.get("course_pass_marks", False)
def get_hrm_course_types(self):
"""
Which Types to use for Courses
- allow all by default for prepop
"""
T = current.T
return self.__lazy("hrm", "course_types", default={1: T("Staff"),
2: T("Volunteers"),
3: T("Deployables"),
4: T("Members"),
})
def get_hrm_event_course_mandatory(self):
"""
Whether (Training) Events have a Mandatory Course
"""
return self.__lazy("hrm", "event_course_mandatory", default=True)
#def get_hrm_event_programme(self):
# """
# Whether (Training) Events should be linked to Programmes
# """
# return self.__lazy("hrm", "event_programme", default=False)
def get_hrm_event_site(self):
"""
How (Training) Events should be Located:
- True: use Site
- False: use Location (e.g. Country or Country/L1)
"""
return self.__lazy("hrm", "event_site", default=True)
def get_hrm_staff_label(self):
"""
Label for 'Staff'
e.g. 'Contacts'
"""
return current.T(self.hrm.get("staff_label", "Staff"))
def get_hrm_organisation_label(self):
    """
    Label for Organisations in Human Resources; defaults to
    "Organization / Branch" when branches are enabled, else
    "Organization"
    """
    label = self.hrm.get("organisation_label")
    if not label:
        if self.get_org_branches():
            label = "Organization / Branch"
        else:
            label = "Organization"
    return current.T(label)
def get_hrm_root_organisation_label(self):
"""
Label for Root Organisations in Human Resources
"""
return current.T(self.hrm.get("root_organisation_label", "Top-level Organization"))
def get_hrm_email_required(self):
"""
If set to True then Staff & Volunteers require an email address
NB Currently this also acts on Members & Beneficiaries!
"""
return self.hrm.get("email_required", True)
def get_hrm_location_staff(self):
"""
What to use to position Staff on the Map when not Tracking them
- valid options are:
"site_id" - Use the HR's Site Location
"person_id" - Use the HR's Person Location (i.e. Home Address)
("person_id", "site_id") - Use the HR's Person Location if-available, fallback to the Site if-not
("site_id","person_id") - Use the HR's Site Location if-available, fallback to the Person's Home Address if-not
NB This is read onaccept of editing Home Addresses & Assigning Staff to Sites so is not a fully-dynamic change
- onaccept is used for performance (avoiding joins)
"""
return self.hrm.get("location_staff", "site_id")
def get_hrm_location_vol(self):
"""
What to use to position Volunteers on the Map when not Tracking them
- valid options are:
"site_id" - Use the HR's Site Location
"person_id" - Use the HR's Person Location (i.e. Home Address)
("person_id", "site_id") - Use the HR's Person Location if-available, fallback to the Site if-not
("site_id", "person_id") - Use the HR's Site Location if-available, fallback to the Person's Home Address if-not
NB This is read onaccept of editing Home Addresses & Assigning Volunteers to Sites so is not a fully-dynamic change
- onaccept is used for performance (avoiding joins)
"""
return self.hrm.get("location_vol", "person_id")
def get_hrm_multiple_contracts(self):
"""
Whether Staff have multiple contracts recorded
"""
return self.__lazy("hrm", "multiple_contracts", default=False)
def get_hrm_org_dependent_job_titles(self):
"""
If set to True then the Job Titles Catalog is Organisation-dependent (i.e. each root org sees a different Catalog)
"""
return self.hrm.get("org_dependent_job_titles", False)
def get_hrm_org_required(self):
"""
If set to True then Staff & Volunteers require an Organisation
"""
return self.hrm.get("org_required", True)
def get_hrm_multiple_orgs(self):
"""
True: Human Resources are being managed across multiple Organisations
False: Human Resources are only being managed internally within a single Organisation with no Branches
"""
return self.hrm.get("multiple_orgs", True)
def get_hrm_compose_button(self):
"""
If set to True then HRM dataTables have a 'Send Message' button
if the messaging module is enabled & users have the permission to access hrm/compose
"""
return self.hrm.get("compose_button", True)
def get_hrm_deletable(self):
"""
If set to True then HRM records are deletable rather than just being able to be marked as obsolete
"""
return self.hrm.get("deletable", True)
def get_hrm_event_types(self):
"""
Whether (Training) Events should be of different Types
"""
return self.__lazy("hrm", "event_types", default=False)
def get_hrm_id_cards(self):
"""
Show buttons to download printable ID cards for staff/volunteers
"""
return self.__lazy("hrm", "id_cards", default=False)
def get_hrm_job_title_deploy(self):
    """
    Whether the 'deploy' Job Title type should be used;
    defaults to whether the deploy module is enabled
    """
    setting = self.hrm.get("job_title_deploy", None)
    if setting is None:
        setting = self.has_module("deploy")
    return setting
def get_hrm_multiple_job_titles(self):
"""
If set to True then HRs can have multiple Job Titles
"""
return self.hrm.get("multi_job_titles", False)
def get_hrm_show_staff(self):
"""
If set to True then show 'Staff' options when HRM enabled
- needs a separate setting as vol requires hrm, but we may only wish to show Volunteers
"""
return self.hrm.get("show_staff", True)
def get_hrm_mix_staff(self):
"""
If set to True then Staff and Volunteers are shown together
"""
return self.hrm.get("mix_staff", False)
def get_hrm_site_contact_unique(self):
"""
Whether there can be multiple site contacts per site
- disable this if needing a separate contact per sector
"""
return self.hrm.get("site_contact_unique", True)
def get_hrm_skill_types(self):
"""
If set to True then Skill Types are exposed to the UI
- each skill_type needs it's own set of competency levels
If set to False then Skill Types are hidden from the UI
- all skills use the same skill_type & hence the same set of competency levels
"""
return self.hrm.get("skill_types", False)
def get_hrm_staff_departments(self):
"""
Whether Staff should use Departments
"""
return self.__lazy("hrm", "staff_departments", default=True)
def get_hrm_staff_experience(self):
"""
Whether to use Experience for Staff &, if so, which table to use
- options are: False, "experience", "missions", "both"
"""
return self.hrm.get("staff_experience", "experience")
def get_hrm_salary(self):
"""
Whether to track salaries of staff
"""
return self.hrm.get("salary", False)
def get_hrm_show_organisation(self):
"""
Whether Human Resource representations should include the Organisation
"""
return self.hrm.get("show_organisation", False)
def get_hrm_teams(self):
"""
Whether Human Resources should use Teams
& what to call them (Teams or Groups currently supported)
"""
return self.__lazy("hrm", "teams", default="Teams")
def get_hrm_teams_orgs(self):
"""
Whether Human Resource Teams should link to Organisations
& whether this is a Single Org or Multiple Orgs
Options:
None: disable link
1: single Org
2: multiple Orgs
"""
return self.__lazy("hrm", "teams_orgs", default=1)
def get_hrm_trainings_external(self):
"""
Whether Training Courses should be split into Internal & External
"""
return self.__lazy("hrm", "trainings_external", default=False)
def get_hrm_cv_tab(self):
"""
Whether Human Resources should consolidate tabs into 1x CV page:
* Awards
* Education
* Experience
* Training
* Skills
"""
return self.hrm.get("cv_tab", False)
def get_hrm_record_tab(self):
"""
Whether or not to show the HR record as tab, and with which
method:
True = show the default tab (human_resource)
"record" = consolidate tabs into 1x CV page:
* Staff Record
* Group Membership
False = do not show the tab (e.g. when HR record is inline)
"""
return self.hrm.get("record_tab", True)
def get_hrm_record_label(self):
    """
    Label to use for the HR record tab
    - string not LazyT
    """
    # Explicitly configured label wins
    label = self.__lazy("hrm", "record_label", default=None)
    if label:
        return label
    # Otherwise derive a default from the controller context
    if current.request.controller == "vol":
        return "Volunteer Record"
    if self.get_hrm_mix_staff():
        return "Staff/Volunteer Record"
    return "Staff Record"
def get_hrm_use_awards(self):
"""
Whether Volunteers should use Awards
"""
return self.hrm.get("use_awards", True)
def get_hrm_use_certificates(self):
"""
Whether Human Resources should use Certificates
"""
return self.__lazy("hrm", "use_certificates", default=True)
def get_hrm_create_certificates_from_courses(self):
"""
If set Truthy then Certificates are created automatically for each Course
True: Create Certificates without an organisation_id
"organisation_id": Create Certificates with the organisation_id of the Course
"""
return self.hrm.get("create_certificates_from_courses", False)
def get_hrm_filter_certificates(self):
"""
If set to True then Certificates are filtered by (Root) Organisation
& hence certificates from other Organisations cannot be added to an HR's profile (except by Admins)
"""
return self.hrm.get("filter_certificates", False)
def get_hrm_use_address(self):
    """
    Whether Human Resources should show address tab
    - falls back to the PR module setting when not configured for HRM
    """
    configured = self.hrm.get("use_address", None)
    return self.get_pr_use_address() if configured is None else configured
def get_hrm_use_code(self):
"""
Whether Human Resources should use Staff/Volunteer IDs,
either True or False, or "staff" to use code for staff
only
"""
return self.__lazy("hrm", "use_code", default=False)
def get_hrm_use_credentials(self):
"""
Whether Human Resources should use Credentials
"""
return self.hrm.get("use_credentials", True)
def get_hrm_use_description(self):
"""
Whether Human Resources should use Physical Description
and what the name of the Tab should be.
Set to None to disable
"""
return self.hrm.get("use_description", "Description")
def get_hrm_use_education(self):
"""
Whether Human Resources should show Education
"""
return self.hrm.get("use_education", False)
def get_hrm_use_id(self):
"""
Whether Human Resources should show ID Tab
"""
return self.hrm.get("use_id", True)
def get_hrm_use_job_titles(self):
"""
Whether Human Resources should show Job Titles
"""
return self.hrm.get("use_job_titles", True)
def get_hrm_use_medical(self):
"""
Whether Human Resources should use Medical Information tab
and what the name of the Tab should be.
Set to None to disable
"""
return self.hrm.get("use_medical", None)
def get_hrm_use_national_id(self):
"""
Whether Human Resources should show National IDs in list_fields
& text_search_fields
either True or False
"""
return self.__lazy("hrm", "use_national_id", default=False)
def get_hrm_use_skills(self):
"""
Whether Human Resources should use Skills
"""
return self.__lazy("hrm", "use_skills", default=True)
def get_hrm_certificate_skill(self):
"""
Whether Human Resources should use Skill Equivalence for Certificates
"""
return self.__lazy("hrm", "certificate_skill", default=False)
def get_hrm_use_trainings(self):
"""
Whether Human Resources should use Trainings
"""
return self.hrm.get("use_trainings", True)
def get_hrm_training_instructors(self):
"""
How training instructors are managed:
None: Don't track instructors at all
internal: Use persons from the registry
external: Just use free-text Names
both: Use both fields
multiple: Use multiple persons from the registry
"""
return self.__lazy("hrm", "training_instructors", "external")
def get_hrm_training_filter_and(self):
"""
How people are filtered based on their Trainings:
False (default): Std options filter where we do an OR
- i.e. we see all people who have done either (or both) Course A or Course B
True: Contains options filter (basically an AND)
- i.e. we see only people who have done both Course A and Course B
"""
return self.__lazy("hrm", "training_filter_and", False)
def get_hrm_activity_types(self):
"""
HRM Activity Types (for experience record),
a dict {"code": "label"}, None to deactivate (default)
"""
return self.hrm.get("activity_types")
def get_hrm_vol_active(self):
"""
Whether to use a 'Active' field for Volunteers &, if so, whether
this is set manually or calculated by a function
- options are: False, True or a function
NB If using a function, put this inside a Lazy lookup
"""
return self.__lazy("hrm", "vol_active", default=False)
def get_hrm_vol_active_tooltip(self):
"""
The tooltip to show when viewing the Active status in the Volunteer RHeader
"""
return self.hrm.get("vol_active_tooltip")
#def get_hrm_vol_affiliation(self):
# """
# Which affiliation type Volunteers use:
# 1 = Organisational Unit (=> Hierarchy)
# 9 = 'Other Role'
# None = default ('Other Role')
# """
# return self.__lazy("hrm", "vol_affiliation", default=None)
def get_hrm_vol_availability_tab(self):
"""
@ToDo: Deprecate
Whether to use Availability Tab for Volunteers
Options:
None
True
"""
return self.__lazy("hrm", "vol_availability_tab", default=None)
def get_hrm_unavailability(self):
"""
Whether to use Unavailability for Staff/Volunteers
- shows tab/profile widget
- adds filter
Options:
None
True
"""
return self.__lazy("hrm", "unavailability", default=None)
def get_hrm_vol_experience(self):
"""
Whether to use Experience for Volunteers &, if so, which table to use
- options are: False, "experience", "activity", "programme" or "both"
"""
return self.__lazy("hrm", "vol_experience", default="programme")
def get_hrm_vol_departments(self):
"""
Whether Volunteers should use Departments
"""
return self.__lazy("hrm", "vol_departments", default=False)
def get_hrm_vol_roles(self):
"""
Whether Volunteers should use Roles
"""
return self.__lazy("hrm", "vol_roles", default=True)
def get_hrm_vol_service_record_manager(self):
"""
What should be put into the 'Manager' field of the Volunteer Service Record
"""
return self.__lazy("hrm", "vol_service_record_manager",
default=current.T("Branch Coordinator"))
def get_hrm_delegation_workflow(self):
"""
The type of workflow used for delegations:
- "Application": the person applies for the delegation
- "Request" : the receiving org requests the delegation
"""
return self.hrm.get("delegation_workflow", "Request")
# -------------------------------------------------------------------------
# Inventory Management Settings
#
def get_inv_collapse_tabs(self):
    """
    Whether Inventory tabs are collapsed
    """
    collapse = self.inv.get("collapse_tabs", True)
    return collapse
def get_inv_facility_label(self):
return self.inv.get("facility_label", "Warehouse")
def get_inv_facility_manage_staff(self):
"""
Show Staff Management Tabs for Facilities in Inventory Module
"""
return self.inv.get("facility_manage_staff", True)
def get_inv_recv_tab_label(self):
    """
    Label for the Receive-shipments tab
    - explicit setting wins, otherwise derived from the shipment name
    """
    label = self.inv.get("recv_tab_label")
    if label:
        return label
    return "Orders" if self.get_inv_shipment_name() == "order" else "Receive"
def get_inv_send_tab_label(self):
return self.inv.get("send_tab_label", "Send")
def get_inv_direct_stock_edits(self):
"""
Can Stock levels be adjusted directly?
- defaults to False
"""
return self.inv.get("direct_stock_edits", False)
def get_inv_org_dependent_warehouse_types(self):
"""
Whether Warehouse Types vary by Organisation
"""
return self.inv.get("org_dependent_warehouse_types", False)
def get_inv_send_show_mode_of_transport(self):
"""
Show mode of transport on Sent Shipments
"""
return self.inv.get("show_mode_of_transport", False)
def get_inv_send_show_org(self):
"""
Show Organisation on Sent Shipments
"""
return self.inv.get("send_show_org", True)
def get_inv_send_show_time_in(self):
"""
Show Time In on Sent Shipments
"""
return self.inv.get("send_show_time_in", False)
def get_inv_stock_count(self):
"""
Call Stock Adjustments 'Stock Counts'
"""
return self.inv.get("stock_count", True)
def get_inv_track_pack_values(self):
"""
Whether or not Pack values are tracked
"""
return self.inv.get("track_pack_values", True)
def get_inv_item_status(self):
    """
    Item Statuses which can also be Sent Shipment Types
    """
    T = current.T
    # Only Items with status 0 (NONE) can be allocated to shipments
    defaults = {0: current.messages["NONE"],
                1: T("Dump"),
                2: T("Sale"),
                3: T("Reject"),
                4: T("Surplus"),
                }
    return self.inv.get("item_status", defaults)
def get_inv_shipment_name(self):
"""
Get the name of Shipments
- currently supported options are:
* shipment
* order
"""
return self.inv.get("shipment_name", "shipment")
def get_inv_shipment_types(self):
"""
Shipment types which are common to both Send & Receive
"""
return self.inv.get("shipment_types", {
0 : current.messages["NONE"],
11: current.T("Internal Shipment"),
})
def get_inv_send_types(self):
"""
Shipment types which are just for Send
"""
return self.inv.get("send_types", {21: current.T("Distribution"),
})
def get_inv_send_type_default(self):
"""
Which Shipment type is default
"""
return self.inv.get("send_type_default", 0)
def get_inv_recv_types(self):
"""
Shipment types which are just for Receive
"""
T = current.T
return self.inv.get("recv_types", {#31: T("Other Warehouse"), Same as Internal Shipment
32: T("Donation"),
#33: T("Foreign Donation"),
34: T("Purchase"),
})
def get_inv_send_form_name(self):
return self.inv.get("send_form_name", "Waybill")
def get_inv_send_ref_field_name(self):
return self.inv.get("send_ref_field_name", "Waybill Number")
def get_inv_send_shortname(self):
return self.inv.get("send_shortname", "WB")
def get_inv_recv_form_name(self):
return self.inv.get("recv_form_name", "Goods Received Note")
def get_inv_recv_shortname(self):
return self.inv.get("recv_shortname", "GRN")
def get_inv_warehouse_code_unique(self):
"""
Validate for Unique Warehouse Codes
"""
return self.inv.get("warehouse_code_unique", False)
# -------------------------------------------------------------------------
# IRS
#
def get_irs_vehicle(self):
"""
Use Vehicles to respond to Incident Reports?
"""
return self.irs.get("vehicle", False)
# -------------------------------------------------------------------------
# Members
#
def get_member_cv_tab(self):
"""
Whether Members should consolidate tabs into 1x CV page:
* Awards
* Education
* Experience
* Training
* Skills
"""
return self.member.get("cv_tab", False)
def get_member_membership_types(self):
"""
Whether to have Membership Types
"""
return self.__lazy("member", "membership_types", default=True)
# -------------------------------------------------------------------------
# Mobile Forms
#
def get_mobile_forms(self):
"""
Configure mobile forms - a list of items, or a callable accepting
a auth_masterkey.id as parameter and returning a list of items.
Item formats:
"tablename"
("Title", "tablename")
("Title", "tablename", options)
Format for options:
{
name = name, ...form name (optional)
c = controller, ...use this controller for form handling
f = function, ...use this function for form handling
vars = vars, ...add these vars to the download URL
}
Example:
settings.mobile.forms = [("Request", "req_req")]
"""
return self.mobile.get("forms", [])
def get_mobile_dynamic_tables(self):
"""
Expose mobile forms for dynamic tables
"""
return self.mobile.get("dynamic_tables", True)
def get_mobile_masterkey_filter(self):
"""
Filter mobile forms by master key
"""
return self.mobile.get("masterkey_filter", False)
# -------------------------------------------------------------------------
# Organisations
#
def get_org_autocomplete(self):
"""
Whether organisation_id fields should use an Autocomplete instead of a dropdown
"""
return self.org.get("autocomplete", False)
def get_org_default_organisation(self):
    """
    If the system is only used by a single Organisation then this can be defaulted/hidden
    - if-appropriate can also use lazy settings to set this from the user.organisation_id

    Returns the organisation record ID, or None when a configured
    name cannot be resolved against the database.
    """
    default_organisation = self.__lazy("org", "default_organisation", default=None)
    if default_organisation:
        # Setting may be a record ID (used as-is) or an Organisation name
        if not isinstance(default_organisation, INTEGER_TYPES):
            # Check Session cache
            default_organisation_id = current.session.s3.default_organisation_id
            if default_organisation_id:
                default_organisation = default_organisation_id
            else:
                # Convert Name to ID
                table = current.s3db.org_organisation
                org = current.db(table.name == default_organisation).select(table.id,
                                                                            limitby=(0, 1),
                                                                            ).first()
                try:
                    default_organisation = org.id
                except AttributeError:
                    # Prepop not done? (org is None => no such record yet)
                    current.log.error("Default Organisation not found: %s" % default_organisation)
                    default_organisation = None
                else:
                    # Cache the resolved ID in the session to avoid
                    # repeating the DB lookup on subsequent requests
                    current.session.s3.default_organisation_id = default_organisation
    return default_organisation
def get_org_default_site(self):
    """
    If the system is only used by a single Site then this can be defaulted/hidden
    - if-appropriate can also use lazy settings to set this from the user.site_id

    Returns the site record ID, or None when a configured name
    cannot be resolved against the database.
    """
    default_site = self.org.get("default_site", None)
    if default_site:
        # Setting may be a record ID (used as-is) or a Site name
        if not isinstance(default_site, INTEGER_TYPES):
            # Check Session cache
            default_site_id = current.session.s3.default_site_id
            if default_site_id:
                default_site = default_site_id
            else:
                # Convert Name to ID
                table = current.s3db.org_site
                site = current.db(table.name == default_site).select(table.site_id,
                                                                     limitby=(0, 1),
                                                                     ).first()
                try:
                    default_site = site.site_id
                except AttributeError:
                    # Prepop not done? (site is None => no such record yet)
                    current.log.error("Default Site not found: %s" % default_site)
                    default_site = None
                else:
                    # Cache the resolved ID in the session to avoid
                    # repeating the DB lookup on subsequent requests
                    current.session.s3.default_site_id = default_site
    return default_site
def get_org_country(self):
"""
Whether to expose the "country" field of organisations
"""
return self.org.get("country", True)
def get_org_sector(self):
"""
Whether to use an Organization Sector field
"""
return self.org.get("sector", False)
def get_org_sector_rheader(self):
"""
Whether Sectors should be visible in the rheader
"""
return self.org.get("sector_rheader", self.get_org_sector())
def get_org_branches(self):
"""
Whether to support Organisation Branches or not
"""
return self.org.get("branches", False)
def get_org_branches_tree_view(self):
"""
Show branches of an organisation as tree rather than as table
"""
return self.org.get("branches_tree_view", False)
def get_org_facility_types_hierarchical(self):
"""
Whether Facility Types are Hierarchical or not
"""
return self.org.get("facility_types_hierarchical", False)
def get_org_organisation_location_context(self):
"""
The Context to use for displaying Organisation Locations
- defaults to the Organisation's Sites
- can also set to "organisation_location.location_id"
"""
return self.org.get("organisation_location_context", "site.location_id")
def get_org_organisation_types_hierarchical(self):
"""
Whether Organisation Types are Hierarchical or not
"""
return self.org.get("organisation_types_hierarchical", False)
def get_org_organisation_types_multiple(self):
"""
Whether Organisation Types are Multiple or not
"""
return self.org.get("organisation_types_multiple", False)
def get_org_organisation_type_rheader(self):
"""
Whether Organisation Types are visible in the rheader
"""
return self.org.get("organisation_type_rheader", False)
def get_org_facilities_tab(self):
"""
Whether to show a Tab for Facilities on Organisations
"""
return self.org.get("facilities_tab", True)
def get_org_facility_shifts(self):
"""
Whether to show a Tab for Shifts on Offices & Facilities
"""
return self.org.get("facility_shifts", True)
def get_org_groups(self):
"""
Whether to support Organisation Groups or not
& what their name is:
'Coalition'
'Network'
NB If changing this after 1st_run then need to
create the OrgGroupAdmin role manually if-needed
"""
return self.org.get("groups", False)
def get_org_group_team_represent(self):
"""
Whether to represent org_group affiliation in team
references (pr_group_id)
"""
return self.org.get("group_team_represent", False)
def get_org_pdf_card_configs(self):
"""
Show a tab in organisation rheader to manage PDF card configurations
"""
return self.__lazy("org", "pdf_card_configs", default=False)
def get_org_documents_tab(self):
"""
Whether to show a Tab for Documents
"""
return self.org.get("documents_tab", False)
def get_org_needs_tab(self):
"""
Whether to show a Tab for Organisation Needs
"""
return self.org.get("needs_tab", False)
def get_org_offices_tab(self):
"""
Whether to show a Tab for Offices
"""
return self.org.get("offices_tab", True)
def get_org_projects_tab(self):
"""
Whether to show a Tab for Projects
"""
return self.org.get("projects_tab", True) # Will be hidden anyway if Projects module disabled
def get_org_regions(self):
"""
Whether to support Organisation Regions or not
"""
return self.org.get("regions", False)
def get_org_region_countries(self):
"""
Whether Organisation Regions maintain a list of countries
"""
return self.org.get("region_countries", False)
def get_org_regions_hierarchical(self):
"""
Whether Organisation Regions are Hierarchical or not
"""
return self.org.get("regions_hierarchical", False)
def get_org_resources_tab(self):
"""
Whether to show a Tab for Organisation Resources
"""
return self.org.get("resources_tab", False)
def get_org_services_hierarchical(self):
"""
Whether Organisation Servics are Hierarchical or not
"""
return self.org.get("services_hierarchical", False)
def get_org_service_locations(self):
"""
Whether to expose the service locations tab for organisations
"""
return self.__lazy("org", "service_locations", default=False)
def get_org_site_code_len(self):
    """
    Length of auto-generated Codes for Facilities (org_site)
    """
    code_len = self.org.get("site_code_len", 10)
    return code_len
def get_org_site_label(self):
"""
Label for site_id fields
"""
return current.T(self.org.get("site_label", "Facility"))
def get_org_site_inv_req_tabs(self):
"""
Whether Sites should have Tabs for Inv/Req
"""
return self.org.get("site_inv_req_tabs", True)
def get_org_site_autocomplete(self):
"""
Whether site_id fields should use an Autocomplete instead of a dropdown
"""
return self.org.get("site_autocomplete", False)
def get_org_site_autocomplete_fields(self):
"""
Which extra fields should be returned in S3SiteAutocompleteWidget
"""
return self.org.get("site_autocomplete_fields", ("instance_type",))
def get_org_site_last_contacted(self):
"""
Whether to display the last_contacted field for a Site
"""
return self.org.get("site_last_contacted", False)
def get_org_site_volunteers(self):
"""
Whether volunteers can be assigned to Sites
"""
return self.org.get("site_volunteers", False)
def get_org_site_check(self):
"""
Get custom tasks for scheduled site checks
"""
return self.org.get("site_check")
def set_org_dependent_field(self,
                            tablename=None,
                            fieldname=None,
                            enable_field=True):
    """
    Enables/Disables optional fields according to a user's Organisation
    - must specify either field or tablename/fieldname
    (e.g. for virtual fields)
    """
    dependent_fields = self.org.get("dependent_fields")
    if not dependent_fields:
        # No org-dependent fields configured at all => disabled
        enabled = False
    else:
        org_names = dependent_fields.get("%s.%s" % (tablename,
                                                    fieldname),
                                         None)
        if not org_names:
            # Empty/missing list => enable for everyone
            enabled = True
        else:
            auth = current.auth
            if auth.s3_has_role(auth.get_system_roles().ADMIN):
                # Admins see all fields unless disabled for all orgs in this deployment
                enabled = True
            else:
                enabled = auth.root_org_name() in org_names
    if enable_field:
        field = current.s3db[tablename][fieldname]
        field.readable = enabled
        field.writable = enabled
    return enabled
def get_org_office_code_unique(self):
"""
Whether Office code is unique
"""
return self.org.get("office_code_unique", False)
def get_org_facility_code_unique(self):
"""
Whether Facility code is unique
"""
return self.org.get("facility_code_unique", False)
def get_org_tags(self):
"""
Whether Organisations, Offices & Facilities should show a Tags tab
"""
return self.org.get("tags", False)
# -------------------------------------------------------------------------
# Police
#
def get_police_station_code_unique(self):
    """
    Whether Police Station code is unique
    """
    # NOTE(review): reads key "police_station_unique", although the
    # getter name and the office/facility analogues
    # (get_org_office_code_unique / get_org_facility_code_unique)
    # suggest "police_station_code_unique" — confirm which key
    # deployment templates actually set before renaming
    return self.police.get("police_station_unique", False)
# -------------------------------------------------------------------------
# Persons
#
def get_pr_age_group(self, age):
    """
    Function to provide the age group for an age
    - a deployment-provided callable takes precedence
    """
    fn = self.pr.get("age_group")
    if fn:
        return fn(age)
    # Default bands ("-17" rather than "< 18"/" < 18" so labels sort correctly)
    bands = ((18, "-17"),
             (25, "18-24"),
             (40, "25-39"),
             (60, "40-59"),
             )
    for upper, label in bands:
        if age < upper:
            return label
    return "60+"
def get_pr_person_availability_options(self):
"""
Dict of integer-keyed options for Person Availability
"""
return self.__lazy("pr", "person_availability_options", default=None)
def get_pr_availability_json_rules(self):
"""
Generate availability rules from schedule_json
"""
return self.pr.get("availability_json_rules", False)
def get_pr_editable_fields(self):
"""
Fields which are editable in the AddPersonWidget
"""
return self.pr.get("editable_fields", [])
def get_pr_hide_third_gender(self):
"""
Whether to hide the third gender ("Other")
"""
return self.__lazy("pr", "hide_third_gender", default=True)
def get_pr_nationality_explicit_unclear(self):
"""
Have an explicit "unclear" option for nationality, useful
when the field shall be mandatory yet allow for cases
where the nationality of a person is unclear
"""
return self.pr.get("nationality_explicit_unclear", False)
def get_pr_import_update_requires_email(self):
"""
During imports, records are only updated if the import
item contains a (matching) email address
"""
return self.pr.get("import_update_requires_email", True)
def get_pr_label_fullname(self):
"""
Label for the AddPersonWidget's 'Name' field
"""
return self.__lazy("pr", "label_fullname", default="Name")
def get_pr_lookup_duplicates(self):
"""
Whether the AddPersonWidget does a fuzzy search for duplicates
"""
return self.pr.get("lookup_duplicates", False)
def get_pr_request_dob(self):
""" Include Date of Birth in the AddPersonWidget """
return self.__lazy("pr", "request_dob", default=True)
def get_pr_dob_required(self):
""" Whether Date of Birth is Mandatory, including in the AddPersonWidget """
return self.__lazy("pr", "dob_required", default=False)
def get_pr_request_email(self):
""" Include Email in the AddPersonWidget """
return self.__lazy("pr", "request_email", default=True)
def get_pr_request_father_name(self):
""" Include Father Name in the AddPersonWidget """
return self.__lazy("pr", "request_father_name", default=False)
def get_pr_request_grandfather_name(self):
""" Include GrandFather Name in the AddPersonWidget """
return self.__lazy("pr", "request_grandfather_name", default=False)
def get_pr_request_gender(self):
""" Include Gender in the AddPersonWidget """
return self.__lazy("pr", "request_gender", default=True)
def get_pr_request_home_phone(self):
""" Include Home Phone in the AddPersonWidget """
return self.__lazy("pr", "request_home_phone", default=False)
def get_pr_request_mobile_phone(self):
""" Include Mobile Phone in the AddPersonWidget """
return self.__lazy("pr", "request_mobile_phone", default=True)
def get_pr_request_tags(self):
"""
Include Tags in the AddPersonWidget
List of Tuples: (label, tag)
"""
return self.__lazy("pr", "request_tags", default=[])
def get_pr_request_year_of_birth(self):
""" Include Year of Birth in the AddPersonWidget """
return self.__lazy("pr", "request_year_of_birth", default=False)
def get_pr_name_format(self):
"""
Format with which to represent Person Names
Generally want an option in AddPersonWidget to handle the input like this too
"""
return self.__lazy("pr", "name_format", default="%(first_name)s %(middle_name)s %(last_name)s")
def get_pr_search_shows_hr_details(self):
"""
Whether S3PersonAutocompleteWidget results show the details of their HR record
"""
return self.pr.get("search_shows_hr_details", True)
def get_pr_separate_name_fields(self):
"""
Whether the AddPersonWidget provides separate name fields or not
Options:
False (single field)
2 (first/last)
3 (first/middle/last)
"""
return self.__lazy("pr", "separate_name_fields", False)
def get_pr_use_address(self):
"""
Whether or not to show an address tab in person details
"""
return self.pr.get("use_address", True)
def get_pr_show_emergency_contacts(self):
"""
Show emergency contacts as well as standard contacts in Person Contacts page
"""
return self.pr.get("show_emergency_contacts", True)
def get_pr_contacts_tabs(self):
    """
    Which tabs to show for contacts: all, public &/or private
    - a tuple or list with all|private|public, or
    - a dict with labels per contacts group
    (defaults see get_pr_contacts_tab_label)
    """
    # Falsy settings are normalised to () since callers expect an iterable
    return self.pr.get("contacts_tabs", ("all",)) or ()
def get_pr_contacts_tab_label(self, group="all"):
    """
    Labels for contacts tabs
    """
    tabs = self.get_pr_contacts_tabs()
    # A dict-setting carries per-group labels; other types do not
    if type(tabs) is dict:
        label = tabs.get(group)
    else:
        label = None
    if label is None:
        # Use default label
        label = {"all": "Contacts",
                 "private_contacts": "Private Contacts",
                 "public_contacts": "Public Contacts",
                 }.get(group)
    return current.T(label) if label else label
def get_pr_multiple_case_groups(self):
"""
Whether a person can belong to multiple case groups at the same time
"""
return self.pr.get("multiple_case_groups", False)
# -------------------------------------------------------------------------
# Proc
#
def get_proc_form_name(self):
    """
    Name of the Procurement form
    """
    form_name = self.proc.get("form_name", "Purchase Order")
    return form_name
def get_proc_shortname(self):
    """
    Short name for the Procurement form (default "PO")
    """
    # Fix: previously read the "form_name" key (copy-paste from
    # get_proc_form_name), so a deployment customising proc.form_name
    # clobbered the shortname and proc.shortname was ignored
    return self.proc.get("shortname", "PO")
# -------------------------------------------------------------------------
# Projects
#
def get_project_mode_3w(self):
"""
Enable 3W mode in the projects module
"""
return self.project.get("mode_3w", False)
def get_project_mode_task(self):
"""
Enable Tasks mode in the projects module
"""
return self.project.get("mode_task", False)
def get_project_mode_drr(self):
"""
Enable DRR extensions in the projects module
"""
return self.project.get("mode_drr", False)
def get_project_activities(self):
"""
Use Activities in Projects & Tasks
"""
return self.project.get("activities", False)
def get_project_activity_beneficiaries(self):
    """
    Use Beneficiaries in Activities
    - defaults to whether the stats module is enabled
    """
    configured = self.project.get("activity_beneficiaries", None)
    return self.has_module("stats") if configured is None else configured
def get_project_activity_items(self):
"""
Use Items in Activities
"""
setting = self.project.get("activity_items", None)
if setting is None:
setting = self.has_module("supply")
return setting
def get_project_activity_sectors(self):
"""
Use Sectors in Activities
"""
return self.project.get("activity_sectors", False)
def get_project_activity_types(self):
"""
Use Activity Types in Activities & Projects
"""
return self.project.get("activity_types", False)
def get_project_activity_filter_year(self):
"""
Filter according to Year in Activities
"""
return self.project.get("activity_filter_year", False)
def get_project_assign_staff_tab(self):
"""
Show the 'Assign Staff' tab in Projects (if the user has permission to do so)
"""
return self.__lazy("project", "assign_staff_tab", default=True)
def get_project_budget_monitoring(self):
"""
Whether to Monitor Project Budgets
"""
return self.project.get("budget_monitoring", False)
def get_project_codes(self):
"""
Use Codes in Projects
"""
return self.project.get("codes", False)
def get_project_community(self):
"""
Label project_location as 'Community'
"""
return self.project.get("community", False)
def get_project_community_volunteers(self):
"""
Manage Community Volunteers in Projects
"""
return self.project.get("community_volunteers", False)
def get_project_demographics(self):
"""
Use Demographics in Projects
"""
return self.project.get("demographics", False)
def get_project_details_tab(self):
"""
Group Tabs on Projects into a single 'Details' page
"""
return self.project.get("details_tab", False)
def get_project_event_activities(self):
"""
Link Activities to Events
"""
return self.project.get("event_activities", False)
def get_project_event_projects(self):
"""
Link Projects to Events
"""
return self.project.get("event_projects", False)
def get_project_goals(self):
"""
Use Goals in Projects
"""
return self.project.get("goals", False)
def get_project_hazards(self):
    """
    Use Hazards in DRR Projects
    - defaults to True when mode_drr is enabled
    """
    configured = self.project.get("hazards")
    return self.get_project_mode_drr() if configured is None else configured
def get_project_hfa(self):
    """
    Use HFA Priorities in DRR Projects
    - defaults to True when mode_drr is enabled
    """
    configured = self.project.get("hfa")
    return self.get_project_mode_drr() if configured is None else configured
def get_project_indicators(self):
"""
Use Indicators in Projects
"""
return self.project.get("indicators", False)
def get_project_indicator_criteria(self):
"""
Use Indicator Criteria in Projects
"""
return self.project.get("indicator_criteria", False)
def get_project_status_from_activities(self):
"""
Use Activity Statuses to build Project Status (instead of Indicator Data)
"""
return self.project.get("status_from_activities", False)
#def get_project_locations_from_countries(self):
# """
# Create a project_location for each country that a Project is
# implemented in
# """
# return self.project.get("locations_from_countries", False)
def get_project_milestones(self):
"""
Use Milestones in Projects & Tasks
"""
return self.project.get("milestones", False)
def get_project_outcomes(self):
"""
Use Outcomes in Projects
"""
return self.project.get("outcomes", False)
def get_project_outputs(self):
"""
Use Outputs in Projects
"""
return self.project.get("outputs", "inline")
def get_project_planning_ondelete(self):
"""
Whether the Project Planning data should CASCADE ondelete or RESTRICT
NB This cannot be edited on the fly, or vary by context
It needs defining before the database is created.
"""
return self.project.get("planning_ondelete", "CASCADE")
def get_project_projects(self):
"""
Link Activities & Tasks to Projects
"""
return self.project.get("projects", False)
def get_project_programmes(self):
"""
Use Programmes in Projects
"""
return self.project.get("programmes", False)
def get_project_programme_budget(self):
"""
Use Budgets in Programmes
"""
return self.project.get("programme_budget", False)
def get_project_sectors(self):
"""
Use Sectors in Projects
"""
return self.project.get("sectors", True)
def get_project_themes(self):
"""
Use Themes in 3W Projects
"""
return self.project.get("themes", False)
def get_project_theme_percentages(self):
"""
Use Theme Percentages in Projects
"""
return self.project.get("theme_percentages", False)
def get_project_multiple_budgets(self):
"""
Use Multiple Budgets in Projects
"""
return self.project.get("multiple_budgets", False)
def get_project_multiple_organisations(self):
"""
Use Multiple Organisations in Projects
"""
return self.project.get("multiple_organisations", False)
def get_project_organisation_roles(self):
"""
Organisation roles within projects
"""
T = current.T
return self.project.get("organisation_roles", {
1: T("Lead Implementer"), # T("Host National Society")
2: T("Partner"), # T("Partner National Society")
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier") # T("Beneficiary")?
})
def get_project_organisation_lead_role(self):
"""
The lead role of organisations within projects
"""
return self.project.get("organisation_lead_role", 1)
def get_project_task_status_opts(self):
"""
The list of options for the Status of a Task.
NB Whilst the list can be customised, doing so makes it harder to
do synchronization.
There are also hard-coded elements within XSL & styling of
project_task_list_layout which will break if these are changed.
Best bet is simply to comment statuses that you don't wish to use
& tweak the label (whilst keeping the meaning) of those you retain
Those which are deemed as 'active' are currently not customisable
for this reason.
"""
T = current.T
return self.project.get("task_status_opts", {1: T("Draft"),
2: T("New"),
3: T("Assigned"),
4: T("Feedback"),
5: T("Blocked"),
6: T("On Hold"),
7: T("Canceled"),
8: T("Duplicate"),
9: T("Ready"),
10: T("Verified"),
11: T("Reopened"),
12: T("Completed"),
})
def get_project_task_priority_opts(self):
"""
The list of options for the Priority of a Task.
NB Whilst the list can be customised, doing so makes it harder to
do synchronization.
There are also hard-coded elements within XSL & styling of
project_task_list_layout which will break if these are changed.
Best bet is simply to comment statuses that you don't wish to use
& tweak the label (whilst keeping the meaning) of those you retain
"""
T = current.T
return self.project.get("task_priority_opts", {1: T("Urgent"),
2: T("High"),
3: T("Normal"),
4: T("Low")
})
def get_project_task_time(self):
"""
Whether to use hours logging for tasks
"""
return self.project.get("task_time", True)
def get_project_my_tasks_include_team_tasks(self):
"""
"My Open Tasks" to include team tasks
"""
return self.project.get("my_tasks_include_team_tasks", False)
# -------------------------------------------------------------------------
# Requests Management Settings
#
def get_req_req_type(self):
"""
The Types of Request which can be made.
Select one or more from:
* People
* Stock
* Other
tbc: Assets, Shelter, Food
"""
return self.req.get("req_type", ("Stock", "People", "Other"))
def get_req_type_inv_label(self):
return current.T(self.req.get("type_inv_label", "Warehouse Stock"))
def get_req_type_hrm_label(self):
return current.T(self.req.get("type_hrm_label", "People"))
def get_req_copyable(self):
"""
Provide a Copy button for Requests?
"""
return self.req.get("copyable", False)
def get_req_recurring(self):
"""
Do we allow creation of recurring requests?
"""
return self.req.get("recurring", True)
def get_req_requester_label(self):
return current.T(self.req.get("requester_label", "Requester"))
def get_req_requester_optional(self):
return self.req.get("requester_optional", False)
def get_req_requester_is_author(self):
"""
Whether the User Account logging the Request is normally the Requester
"""
return self.req.get("requester_is_author", True)
def get_req_requester_from_site(self):
"""
Whether the Requester has to be a staff of the site making the Request
"""
return self.req.get("requester_from_site", False)
def get_req_requester_to_site(self):
"""
Whether to set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
"""
return self.req.get("requester_to_site", False)
def get_req_date_writable(self):
""" Whether Request Date should be manually editable """
return self.req.get("date_writable", True)
def get_req_status_writable(self):
""" Whether Request Status should be manually editable """
return self.req.get("status_writable", True)
def get_req_item_quantities_writable(self):
""" Whether Item Quantities should be manually editable """
return self.req.get("item_quantities_writable", False)
def get_req_skill_quantities_writable(self):
""" Whether People Quantities should be manually editable """
return self.req.get("skill_quantities_writable", False)
def get_req_summary(self):
# Whether to use Summary page for Requests
return self.req.get("summary", False)
def get_req_pack_values(self):
"""
Do we show pack values in Requests?
"""
return self.req.get("pack_values", True)
def get_req_multiple_req_items(self):
"""
Can a Request have multiple line items?
- e.g. ICS says that each request should be just for items of a single Type
"""
return self.req.get("multiple_req_items", True)
def get_req_show_quantity_transit(self):
return self.req.get("show_quantity_transit", True)
def get_req_inline_forms(self):
"""
Whether Requests module should use inline forms for Items/Skills
"""
return self.req.get("inline_forms", True)
def get_req_prompt_match(self):
"""
Whether a Requester is prompted to match each line item in an Item request
"""
return self.req.get("prompt_match", True)
#def get_req_summary(self):
# """
# Whether to use Summary Needs for Sites (Office/Facility currently):
# """
# return self.req.get("summary", False)
def get_req_use_commit(self):
"""
Whether there is a Commit step in Requests Management
"""
return self.req.get("use_commit", True)
def get_req_commit_people(self):
"""
Whether Skills Requests should be Committed with Named Indviduals
or just Anonymous Skill
@ToDo: Make this do something
"""
return self.req.get("commit_people", False)
def get_req_commit_value(self):
"""
Whether Donations should have a Value field
"""
return self.req.get("commit_value", False)
def get_req_commit_without_request(self):
"""
Whether to allow Donations to be made without a matching Request
"""
return self.req.get("commit_without_request", False)
def get_req_committer_is_author(self):
""" Whether the User Account logging the Commitment is normally the Committer """
return self.req.get("committer_is_author", True)
def get_req_ask_security(self):
"""
Should Requests ask whether Security is required?
"""
return self.req.get("ask_security", False)
def get_req_ask_transport(self):
"""
Should Requests ask whether Transportation is required?
"""
return self.req.get("ask_transport", False)
def get_req_items_ask_purpose(self):
"""
Should Requests for Items ask for Purpose?
"""
return self.req.get("items_ask_purpose", True)
def get_req_req_crud_strings(self, req_type=None):
return self.req.get("req_crud_strings") and \
self.req.req_crud_strings.get(req_type)
def get_req_use_req_number(self):
return self.req.get("use_req_number", True)
def get_req_generate_req_number(self):
return self.req.get("generate_req_number", True)
def get_req_form_name(self):
return self.req.get("req_form_name", "Requisition Form")
def get_req_shortname(self):
return self.req.get("req_shortname", "REQ")
def get_req_restrict_on_complete(self):
"""
To restrict adding new commits to the Completed commits.
"""
return self.req.get("req_restrict_on_complete", False)
# -------------------------------------------------------------------------
# Supply
#
def get_supply_catalog_default(self):
"""
The name of the Default Item Catalog
"""
return self.supply.get("catalog_default", "Default")
def get_supply_catalog_multi(self):
"""
Whether to use Multiple Item Catalogs
"""
return self.supply.get("catalog_multi", True)
def get_supply_use_alt_name(self):
"""
Whether to allow Alternative Items to be defined
"""
return self.supply.get("use_alt_name", True)
def get_supply_shipping_code(self):
"""
Custom shipping code generator (REQ, WB, GRN etc)
- function(prefix, site_id, field)
"""
return self.supply.get("shipping_code")
# -------------------------------------------------------------------------
# Vulnerability
#
def get_vulnerability_indicator_hierarchical(self):
return self.vulnerability.get("indicator_hierarchical", False)
# -------------------------------------------------------------------------
# Transport
#
def get_transport_airport_code_unique(self):
"""
Whether Airport code is unique
"""
return self.transport.get("airport_code_unique", False)
def get_transport_heliport_code_unique(self):
"""
Whether Heliport code is unique
"""
return self.transport.get("heliport_code_unique", False)
def get_transport_seaport_code_unique(self):
"""
Whether Seaport code is unique
"""
return self.transport.get("seaport_code_unique", False)
# -------------------------------------------------------------------------
# XForms
#
def get_xforms_resources(self):
"""
A list of xform resources
Item formats:
"tablename"
("Title", "tablename")
("Title", "tablename", options)
Format for options:
{c=controller, ...use this controller for form handling
f=function, ...use this function for form handling
vars=vars, ...add these vars to the download URL
title=title_field, ...use this field in template for form title
public=public_flag, ...check this field whether the template is
public or not (must be boolean)
}
Example:
settings.xforms.resources = [("Request", "req_req")]
@todo: move this documentation to the wiki?
"""
return self.xforms.get("resources")
# -------------------------------------------------------------------------
# Frontpage Options
#
def get_frontpage(self, key=None, default=None):
"""
Template-specific frontpage configuration options
"""
if key:
return self.frontpage.get(key, default)
else:
return default
# -------------------------------------------------------------------------
# Custom template options
#
def get_custom(self, key=None, default=None):
"""
Template-specific configuration options
"""
if key:
return self.custom.get(key, default)
else:
return default
# -------------------------------------------------------------------------
# Utilities
#
    def __lazy(self, subset, key, default=None):
        """
            Resolve a "lazy" setting: when the config setting is callable,
            call it once (passing the default value as its argument) and
            store the result back into the settings, so subsequent lookups
            return the resolved value.

            This allows settings to depend on state that only exists
            /after/ the initial config.py run (e.g. user authentication
            or database lookups).

            Normal pattern:
                return self.<subset>.get(key, default)
            Lazy pattern:
                return self.__lazy(subset, key, default)

            @param subset: the name of the subset of settings (typically
                           the module); requires self[subset] item access
            @param key: the setting name
            @param default: the default value
        """
        setting = self[subset].get(key, default)
        if callable(setting):
            # Check to see if we have already done the lazy lookup
            # (Some settings options are callables themselves)
            _key = "%s_%s" % (subset, key)
            if _key not in self._lazy_unwrapped:
                # Unwrap: replace the callable with its result in-place
                self[subset][key] = setting = setting(default)
                # Mark as unwrapped, so we don't do it a 2nd time
                self._lazy_unwrapped.append(_key)
        return setting
# END =========================================================================
| 35.03497 | 215 | 0.552156 |
426e0a73305be75190fc3c4536a37137c05ef431 | 3,690 | py | Python | Validation/CaloTowers/test/run_onRelVal_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Validation/CaloTowers/test/run_onRelVal_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Validation/CaloTowers/test/run_onRelVal_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import os
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
process = cms.Process("hcalval")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['startup']
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.load("DQMServices.Core.DQM_cfg")
process.DQM.collectorHost = ''
process.load("DQMServices.Components.MEtoEDMConverter_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
),
inputCommands = cms.untracked.vstring('keep *', 'drop *_MEtoEDMConverter_*_*')
)
process.FEVT = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *', 'keep *_MEtoEDMConverter_*_*'),
fileName = cms.untracked.string("HcalValHarvestingEDM.root")
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
process.hcalTowerAnalyzer = DQMEDAnalyzer('CaloTowersValidation',
outputFile = cms.untracked.string('CaloTowersValidationRelVal.root'),
CaloTowerCollectionLabel = cms.untracked.InputTag('towerMaker'),
hcalselector = cms.untracked.string('all'),
mc = cms.untracked.string('no'),
useAllHistos = cms.untracked.bool(False)
)
process.hcalNoiseRates = DQMEDHarvester('NoiseRates',
outputFile = cms.untracked.string('NoiseRatesRelVal.root'),
rbxCollName = cms.untracked.InputTag('hcalnoise'),
minRBXEnergy = cms.untracked.double(20.0),
minHitEnergy = cms.untracked.double(1.5),
useAllHistos = cms.untracked.bool(False)
)
process.hcalRecoAnalyzer = DQMEDAnalyzer('HcalRecHitsValidation',
outputFile = cms.untracked.string('HcalRecHitValidationRelVal.root'),
HBHERecHitCollectionLabel = cms.untracked.InputTag("hbhereco"),
HFRecHitCollectionLabel = cms.untracked.InputTag("hfreco"),
HORecHitCollectionLabel = cms.untracked.InputTag("horeco"),
eventype = cms.untracked.string('multi'),
ecalselector = cms.untracked.string('yes'),
hcalselector = cms.untracked.string('all'),
mc = cms.untracked.string('no'),
)
# Harvesting step: convert EDM-embedded monitoring elements back into DQM
# format at end of run (EDMtoMEAtRunEnd), keeping all reference histograms.
process.load('Configuration/StandardSequences/EDMtoMEAtRunEnd_cff')
process.dqmSaver.referenceHandling = cms.untracked.string('all')
# Tag the saved DQM output with the current CMSSW release; falls back to a
# placeholder when run outside a release environment.
cmssw_version = os.environ.get('CMSSW_VERSION','CMSSW_X_Y_Z')
Workflow = '/HcalValidation/'+'Harvesting/'+str(cmssw_version)
process.dqmSaver.workflow = Workflow
process.calotowersClient = DQMEDHarvester("CaloTowersClient",
outputFile = cms.untracked.string('CaloTowersHarvestingME.root'),
DQMDirName = cms.string("/") # root directory
)
process.noiseratesClient = DQMEDHarvester("NoiseRatesClient",
outputFile = cms.untracked.string('NoiseRatesHarvestingME.root'),
DQMDirName = cms.string("/") # root directory
)
process.hcalrechitsClient = DQMEDHarvester("HcalRecHitsClient",
outputFile = cms.untracked.string('HcalRecHitsHarvestingME.root'),
DQMDirName = cms.string("/") # root directory
)
process.p2 = cms.Path( process.hcalTowerAnalyzer * process.hcalNoiseRates * process.hcalRecoAnalyzer
* process.calotowersClient * process.noiseratesClient * process.hcalrechitsClient * process.dqmSaver)
| 40.108696 | 124 | 0.725203 |
731b5992e78b7079dc5d23cc0996a2e6851552e8 | 5,678 | py | Python | lib/bridgedb/configure.py | pagea/bridgedb | a5409eef0f358e4aaaaa252f06679b3088c80f98 | [
"BSD-3-Clause-Clear"
] | null | null | null | lib/bridgedb/configure.py | pagea/bridgedb | a5409eef0f358e4aaaaa252f06679b3088c80f98 | [
"BSD-3-Clause-Clear"
] | null | null | null | lib/bridgedb/configure.py | pagea/bridgedb | a5409eef0f358e4aaaaa252f06679b3088c80f98 | [
"BSD-3-Clause-Clear"
] | null | null | null | # -*- coding: utf-8 ; test-case-name: bridgedb.test.test_configure -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: please see the AUTHORS file for attributions
# :copyright: (c) 2013-2014, Isis Lovecruft
# (c) 2013-2014, Matthew Finkel
# (c) 2007-2014, Nick Mathewson
# (c) 2007-2014, The Tor Project, Inc.
# :license: see LICENSE for licensing information
"""Utilities for dealing with configuration files for BridgeDB."""
import logging
import os
def loadConfig(configFile=None, configCls=None):
    """Load configuration settings on top of the current settings.

    All pathnames and filenames within settings in the ``configFile`` will be
    expanded, and their expanded values will be stored in the returned
    :class:`config <Conf>` object.

    On the strange-looking use of
    ``exec compile(open(configFile).read(), '<string>', 'exec') in dict()``:
    the config file contents are compiled first and then ``exec``ed inside
    the ``configuration`` dictionary so that the settings land in that
    dict's scope -- otherwise Python would default_ to executing the file
    directly within the ``globals()`` scope. Additionally, it is roughly
    20-30 times faster_ to ``compile`` the string before ``exec``ing it
    than to use ``execfile`` directly on the file.

    .. _default: http://stackoverflow.com/q/17470193
    .. _faster: http://lucumr.pocoo.org/2011/2/1/exec-in-python/

    :ivar boolean itsSafeToUseLogging: This function is called in
        :func:`startup` before :func:`safelog.configureLogging`. On that
        first call no ``configCls`` is given (no :class:`Conf` exists yet),
        so logging is not configured and must not be used.
    :type configFile: string or None
    :param configFile: If given, the filename of the config file to load.
    :type configCls: :class:`Conf` or None
    :param configCls: The current configuration instance, if one already
        exists.
    :returns: A new :class:`configuration <bridgedb.configure.Conf>`, with
        the old settings as defaults, and the settings from the
        **configFile** (if given) overriding those defaults.
    """
    itsSafeToUseLogging = False
    configuration = {}
    if configCls:
        # A Conf already exists, so this is a *re*load and logging has
        # already been configured.
        itsSafeToUseLogging = True
        oldConfig = configCls.__dict__
        configuration.update(**oldConfig) # Load current settings
        logging.info("Reloading over in-memory configurations...")
    # Prefer the explicitly-given filename; otherwise fall back to the
    # CONFIG_FILE remembered from a previous load.
    conffile = configFile
    if (configFile is None) and ('CONFIG_FILE' in configuration):
        conffile = configuration['CONFIG_FILE']
    if conffile is not None:
        if itsSafeToUseLogging:
            logging.info("Loading settings from config file: '%s'" % conffile)
        compiled = compile(open(conffile).read(), '<string>', 'exec')
        exec compiled in configuration
    if itsSafeToUseLogging:
        logging.debug("New configuration settings:")
        logging.debug("\n".join(["{0} = {1}".format(key, value)
                                 for key, value in configuration.items()]))
    # Create a :class:`Conf` from the settings stored within the local scope
    # of the ``configuration`` dictionary:
    config = Conf(**configuration)
    # We want to set the updated/expanded paths for files on the ``config``,
    # because the copy of this config, `state.config` is used later to compare
    # with a new :class:`Conf` instance, to see if there were any changes.
    #
    # See :meth:`bridgedb.persistent.State.useUpdatedSettings`.
    for attr in ["PROXY_LIST_FILES", "BRIDGE_FILES", "EXTRA_INFO_FILES"]:
        setting = getattr(config, attr, None)
        if setting is None: # pragma: no cover
            setattr(config, attr, []) # If they weren't set, make them lists
        else:
            setattr(config, attr, # If they were set, expand the paths:
                    [os.path.abspath(os.path.expanduser(f)) for f in setting])
    # Single-path settings: expand ~ and make absolute, leaving None alone.
    for attr in ["DB_FILE", "DB_LOG_FILE", "MASTER_KEY_FILE", "PIDFILE",
                 "ASSIGNMENTS_FILE", "HTTPS_CERT_FILE", "HTTPS_KEY_FILE",
                 "LOG_FILE", "STATUS_FILE", "COUNTRY_BLOCK_FILE",
                 "GIMP_CAPTCHA_DIR", "GIMP_CAPTCHA_HMAC_KEYFILE",
                 "GIMP_CAPTCHA_RSA_KEYFILE"]:
        setting = getattr(config, attr, None)
        if setting is None:
            setattr(config, attr, setting)
        else:
            setattr(config, attr, os.path.abspath(os.path.expanduser(setting)))
    for attr in ["FORCE_PORTS", "FORCE_FLAGS"]:
        setting = getattr(config, attr, []) # Default to empty lists
        setattr(config, attr, setting)
    # Map every configured email domain onto itself (identity mapping),
    # on top of any aliases already present in EMAIL_DOMAIN_MAP.
    for domain in config.EMAIL_DOMAINS:
        config.EMAIL_DOMAIN_MAP[domain] = domain
    if conffile: # Store the pathname of the config file, if one was used
        config.CONFIG_FILE = os.path.abspath(os.path.expanduser(conffile))
    return config
class Conf(object):
    """A configuration object. Holds unvalidated attributes.

    Only keyword arguments whose names look like settings constants are
    kept: fully upper-case names that do not begin with a double
    underscore. Everything else is silently dropped.
    """

    def __init__(self, **attrs):
        for name, value in attrs.items():
            if name != name.upper():
                continue  # not an ALL_CAPS settings name
            if name.startswith('__'):
                continue  # skip private/dunder-style keys
            setattr(self, name, value)
2c616b178f8e5e8660d53992a866c0d26b43e391 | 6,647 | py | Python | proj/surveys/junk.py | glasst/CS491-Research-Survey-Tool- | 4e379de8989ecd35cdad5875d9afb74906ed6009 | [
"MIT"
] | 1 | 2022-03-02T06:06:34.000Z | 2022-03-02T06:06:34.000Z | proj/surveys/junk.py | glasst/CS491-Research-Survey-Tool- | 4e379de8989ecd35cdad5875d9afb74906ed6009 | [
"MIT"
] | null | null | null | proj/surveys/junk.py | glasst/CS491-Research-Survey-Tool- | 4e379de8989ecd35cdad5875d9afb74906ed6009 | [
"MIT"
] | null | null | null | # For dead code
########################################################################################################################
# FROM views.py -- superseded view functions kept for reference only; the
# triple-quoted string blocks below are inert and never executed.
'''
def surveycompletion(request):
survey_Id = request.session.get('survey_Id')
#questions = Question.objects.filter(question_survey_Id=surveyid)
#mclist = MCQuestion.objects.filter(question_survey_Id=survey_Id)
#telist = TEQuestion.objects.filter(question_survey_Id=survey_Id)
#cblist = CBQuestion.objects.filter(question_survey_Id=survey_Id)
# Still need to get cross-Question table querying
# for q in questions:
# qid = q.question_Id
#
# if q.question_type == 'MC':
# qq = MCQuestion.objects.filter(question_Id=qid)
# mclist.append(qq)
#
# if q.question_type == 'TE':
# qq = MCQuestion.objects.filter(question_Id=qid)
# telist.append(qq)
#
# if q.question_type == 'CB':
# qq = MCQuestion.objects.filter(question_Id=qid)
# cblist.append(qq)
#context = {'survey_Id': survey_Id, 'mclist': mclist, 'telist': telist, 'cblist': cblist}
context = {'survey_Id': survey_Id}
return render(
request,
'survey-completion.html',
context
)
'''
'''
# prints list of all survey objects
def index(request):
user_surveys = Survey.objects.filter(creator_Id=request.user)
if request.method == 'POST':
form = SurveyForm(request.POST)
if form.is_valid():
survey = form.save(commit=False)
survey.survey_Id = uuid.uuid4()
survey.save()
# return redirect(reverse('surveys:add_survey'))
return redirect(reverse('surveys:detail', kwargs={'survey_Id': survey.survey_Id}))
else:
form = SurveyForm()
return render(request, 'surveys/index.html', {'user_surveys': user_surveys})
'''
'''
# page of specific survey listing its questions
def detail(request, survey_Id):
survey = get_object_or_404(Survey, survey_Id=survey_Id)
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
QUESTIONPAGES = {
'MC': 'multiplechoice.html',
'TE': 'textentry.html',
'CB': 'checkbox.html',
}
nextpage = '/surveys/'
question = form.save(commit=False)
question.question_Id = uuid.uuid4()
question.question_type = request.POST.get('question_type')
# question.question_survey_Id = request.POST.get('question_survey_Id')
type = request.POST.get('question_type')
nextpage += QUESTIONPAGES[type]
question.save() # save to DB
# return HttpResponseRedirect(nextpage)
#if type is 'MC':
# return redirect(reverse('surveys:multiplechoice', kwargs={'survey_Id': survey.survey_Id}))
#elif type is 'TE':
# return redirect(reverse('surveys:textentry', kwargs={'survey_Id': survey.survey_Id}))
#else:
# return redirect(reverse('surveys:checkbox', kwargs={'survey_Id': survey.survey_Id}))
# else:
# print(form.errors, len(form.errors))
else:
form = QuestionForm()
# return render(request, 'surveys/add_question.html', {'survey': survey_Id})
return render(request, 'surveys/detail.html', {'survey': survey})
'''
'''
def add_question(request, survey_Id):
QUESTIONPAGES = {
'MC': 'multiplechoice.html',
'TE': 'textentry.html',
'CB': 'checkbox.html',
}
nextpage = '/surveys/'
question_form = QuestionForm()
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
# owningsurvey = request.
# set_survey_foreign_key(owningsurvey)
# https://chriskief.com/2013/05/24/django-form-wizard-and-getting-data-from-previous-steps/
# https://docs.djangoproject.com/en/1.7/ref/contrib/formtools/form-wizard/
question = form.save(commit=False)
question.question_Id = uuid.uuid4()
question.question_survey_Id = survey_Id
nextpage += QUESTIONPAGES[request.POST.get('question_type')]
question.save() # save to DB
# return HttpResponseRedirect(nextpage)
return redirect(reverse('surveys:detail', args=(survey.survey_Id)))
else:
form = QuestionForm()
return render(request, 'surveys/add_question.html', {'survey': survey_Id})
def new_question(request, survey_Id):
if request.method == 'POST':
form = QuestionForm(request.POST)
if form.is_valid():
question = form.save(commit=False)
question.set(question_survey_Id=survey_Id)
nextpage += QUESTIONPAGES[request.POST.get('question_type')]
question.save() # save to DB
return redirect(reverse('surveys:detail', args=(survey.survey_Id,)))
else:
form = QuestionForm()
return render(request('add_question.html', context={'survey': survey.survey_Id, 'form': form}))
def delete_question(request, survey_Id):
survey = get_object_or_404(Survey, survey_Id=survey_Id)
# question = None
try:
# question = request.POST.get('question')
question = survey.question_set.get(question_Id=request.POST['question_Id'])
except (KeyError, Question.DoesNotExist):
return render(request, 'surveys/detail.html', {
'question': question,
'error_message': "You did not select a valid question",
})
else:
question.delete()
return render(request, 'surveys/detail.html', {'survey': survey})
def delete_survey(request, survey_Id):
survey = get_object_or_404(Survey, survey_Id=survey_Id)
survey.delete()
user_surveys = Survey.objects.filter(creator_Id=request.user)
return render(request, 'surveys/index.html', {'user_surveys': user_surveys})
'''
'''
def multiplechoice(request, survey_Id):
#return render(request('multiplechoice.html', context={'survey': survey.survey_Id, 'form': form}))
return redirect(reverse('surveys:multiplechoice', args=(survey.survey_Id,)))
def textentry(request, survey_Id):
return render(request('textentry.html', context={'survey': survey.survey_Id, 'form': form}))
def checkbox(request, survey_Id):
return render(request('checkbox.html', context={'survey': survey.survey_Id, 'form': form}))
'''
########################################################################################################################
| 36.125 | 120 | 0.605837 |
06ced43adceed7837f61942fc4aa2b6ed39f16f4 | 1,646 | py | Python | backend/AXIOME3_app/report/pcoa/views.py | neufeld/AXIOME3-GUI | 80b87753b47fab116324b4f0e4151c21ab3b1725 | [
"BSD-3-Clause"
] | 2 | 2021-02-25T16:59:12.000Z | 2021-02-25T20:06:15.000Z | backend/AXIOME3_app/report/pcoa/views.py | neufeld/AXIOME3-GUI | 80b87753b47fab116324b4f0e4151c21ab3b1725 | [
"BSD-3-Clause"
] | 7 | 2020-11-18T08:05:52.000Z | 2022-02-17T20:45:10.000Z | backend/AXIOME3_app/report/pcoa/views.py | neufeld/AXIOME3-GUI | 80b87753b47fab116324b4f0e4151c21ab3b1725 | [
"BSD-3-Clause"
] | null | null | null | from flask import Blueprint, request, send_file
import sys
import os
import io
from AXIOME3_app.utils import get_pcoa_plots_dir
blueprint = Blueprint("pcoa", __name__, url_prefix="/pcoa")
@blueprint.route("/columns", methods=['POST'])
def pcoa_columns():
	# Serve the JSON file listing the metadata columns available for
	# colouring PCoA plots (written by a previous analysis run).
	# NOTE(review): uid comes straight from the POST form; this assumes
	# get_pcoa_plots_dir() sanitises/validates it -- verify.
	uid = request.form["uid"]
	PCOA_DIR = get_pcoa_plots_dir(uid)
	if(uid == ''):
		# sample output
		json_file = os.path.join('/data/output/post_analysis/pcoa_plots/', 'pcoa_columns.json') # TEMP
	else:
		json_file = os.path.join(PCOA_DIR, 'pcoa_columns.json')
	return send_file(json_file, mimetype='application/json')
@blueprint.route("/jpeg", methods=['POST'])
def pcoa_jpeg():
	"""Serve a single PCoA plot image (JPEG) as a download.

	Expects POST form fields: uid (analysis run id; '' selects the sample
	output), distance (distance-metric subdirectory) and column (plot
	filename).
	"""
	uid = request.form["uid"]
	# SECURITY FIX: distance/column are untrusted form values that were
	# joined directly into a filesystem path; strip any directory
	# components to block path traversal (e.g. "../../etc/passwd").
	distance_type = os.path.basename(request.form["distance"])
	file_name = os.path.basename(request.form["column"])
	PCOA_DIR = get_pcoa_plots_dir(uid)
	if(uid == ''):
		# sample output
		pcoa_plot = os.path.join('/data/output/post_analysis/pcoa_plots/', distance_type, file_name)
	else:
		pcoa_plot = os.path.join(PCOA_DIR, distance_type, file_name)
	with open(pcoa_plot, 'rb') as bytes_obj:
		return send_file(
			io.BytesIO(bytes_obj.read()),
			as_attachment=True,
			attachment_filename=distance_type+"_"+file_name,
			mimetype='image/jpeg'
		)
@blueprint.route("/pdf", methods=['POST'])
def pcoa_pdf():
	"""Serve the combined PCoA plots PDF for one distance metric.

	Expects POST form fields: uid (analysis run id; '' selects the sample
	output) and distance (distance-metric name used in the filename).
	"""
	uid = request.form["uid"]
	# SECURITY FIX: distance is an untrusted form value joined into a
	# filesystem path; strip directory components to block path traversal.
	distance_type = os.path.basename(request.form["distance"])
	PCOA_DIR = get_pcoa_plots_dir(uid)
	if(uid == ''):
		# sample output
		pdf_file = os.path.join('/data/output/post_analysis/pcoa_plots/', distance_type + '_pcoa_plots.pdf')
	else:
		pdf_file = os.path.join(PCOA_DIR, distance_type + '_pcoa_plots.pdf')
	return send_file(pdf_file, mimetype='application/octet-stream', as_attachment=True)
e60c435e95644ffdc73119bbb530423aeaa07102 | 458 | py | Python | opensource/opencv/workpixels.py | marciojv/hacks-cognitives-plataforms | 5b43f52d6afde4ad2768ad5b85e376578e2c9b2f | [
"Apache-2.0"
] | 1 | 2021-05-14T18:43:51.000Z | 2021-05-14T18:43:51.000Z | opensource/opencv/workpixels.py | marciojv/hacks-cognitives-plataforms | 5b43f52d6afde4ad2768ad5b85e376578e2c9b2f | [
"Apache-2.0"
] | null | null | null | opensource/opencv/workpixels.py | marciojv/hacks-cognitives-plataforms | 5b43f52d6afde4ad2768ad5b85e376578e2c9b2f | [
"Apache-2.0"
] | 9 | 2019-02-04T22:08:08.000Z | 2021-07-17T12:12:12.000Z | import cv2
# Load the colour image from disk
imagem = cv2.imread("datasets/fotos/reuniao-professores.jpeg")
print imagem.shape
# OpenCV stores pixels in BGR order (channel indices 0 to 2):
# red = channel 2, green = channel 1, blue = channel 0
print imagem.item(0,0,2),imagem.item(0,0,1),imagem.item(0,0,0)
# Change the single pixel at position (0,0) to pure red
imagem.itemset((0,0,2),255)
imagem.itemset((0,0,1),0)
imagem.itemset((0,0,0),0)
cv2.imwrite("results/imagemvermelha.jpeg",imagem)
| 25.444444 | 62 | 0.71179 |
0bb68e18c8fb7814cc70c3327c0e963a47e4f3d9 | 11,486 | py | Python | generate.py | remcohaszing/calendar-generator | 6229241ab47d6d961fab7db4eceaf38e8deb872e | [
"WTFPL"
] | null | null | null | generate.py | remcohaszing/calendar-generator | 6229241ab47d6d961fab7db4eceaf38e8deb872e | [
"WTFPL"
] | null | null | null | generate.py | remcohaszing/calendar-generator | 6229241ab47d6d961fab7db4eceaf38e8deb872e | [
"WTFPL"
] | 1 | 2016-03-26T23:26:53.000Z | 2016-03-26T23:26:53.000Z | #!/usr/bin/env python3
"""
Generate a Dutch week calendar for an entire year.
The generated calendar is in odt format.
This script accepts a YAML data file which must contain the fields
``special dates``, ``birthdays`` and ``weddings``.
Example data file
.. code-block:: yaml
year: 2016
special dates:
03-04: May the Fourth be with you
03-05: Revenge of the Fifth
birthdays:
1991-01-11:
- Remco
1991-08-25:
- Linux
weddings:
2006-06-06
- - Husband
- Wife
"""
import argparse
import contextlib
import datetime
import locale
import os
import warnings
import yaml
from dateutil import easter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from relatorio.templates.opendocument import Template
class BadConfigError(Exception):
    """
    Raised when an invalid configuration is found.

    Intended for problems with the YAML data file, which must contain the
    ``special dates``, ``birthdays`` and ``weddings`` fields.
    """
def start_date(year):
    """
    Return the Monday that starts the week containing January 1st.

    Args:
        year (int): The year whose first calendar week is wanted.

    Returns:
        datetime.date: The Monday on or before January 1st of ``year``.
    """
    new_years_day = datetime.date(year=year, month=1, day=1)
    days_since_monday = new_years_day.weekday()
    return new_years_day - datetime.timedelta(days=days_since_monday)
def holiday(date):
    """
    Return if the given date is a holiday.

    Args:
        date (datetime.date): The date for which to check if it's a holiday.

    Returns:
        str: The Dutch name of the holiday on that date or an empty string.
    """
    # Fixed-date holidays, checked month by month.
    if date.month == 1:
        if date.day == 1:
            return 'Nieuwjaar'
        if date.day == 6:
            return 'Drie Koningen'
    if date.month == 2:
        if date.day == 14:
            return 'Valentijn'
    if date.month == 4:
        if date.day == 27:
            return 'Koningsdag'
    if date.month == 5:
        if date.day == 4:
            return 'Dodenherdenking'
        if date.day == 5:
            return 'Bevrijdingsdag'
    if date.month == 7:
        if date.day == 29:
            return 'Frikandellendag'
    if date.month == 10:
        if date.day == 4:
            return 'Dierendag'
    if date.month == 12:
        if date.day == 5:
            return 'Sinterklaas'
        if date.day == 25:
            return 'Eerste Kerstdag'
        if date.day == 26:
            return 'Tweede Kerstdag'
        if date.day == 31:
            return 'Oudjaar'
    # Holidays tied to the Nth weekday of a month.
    # DST starts on the last Sunday of March (day 25..31).
    # TEXT FIX: the reminder said "niet ... vooruit" (do NOT set forward);
    # the doubled "niet" is removed, matching the Wintertijd message below.
    if date.month == 3 and date.weekday() == 6 and date.day > 24:
        return ('Zomertijd\n'
                'Vergeet niet je klok een uur vooruit te zetten!')
    # Mother's Day: second Sunday of May (day 8..14).
    if date.month == 5 and date.weekday() == 6 and 7 < date.day < 15:
        return 'Moederdag'
    # Father's Day: third Sunday of June (day 15..21).
    if date.month == 6 and date.weekday() == 6 and 14 < date.day < 22:
        return 'Vaderdag'
    # Prinsjesdag: third Tuesday of September (day 15..21).
    # BUG FIX: the old bound (15 < day < 23) skipped the 15th and wrongly
    # matched the *fourth* Tuesday (the 22nd) instead, e.g. in 2015 when
    # Prinsjesdag fell on September 15th.
    if date.month == 9 and date.weekday() == 1 and 14 < date.day < 22:
        return 'Prinsjesdag'
    # DST ends on the last Sunday of October (day 25..31).
    if date.month == 10 and date.weekday() == 6 and date.day > 24:
        return 'Wintertijd\nVergeet niet je klok een uur terug te zetten!'
    # Easter-related holidays; easter.easter() yields Easter Sunday.
    easter_date = easter.easter(date.year)
    if date == easter_date:
        return 'Eerste Paasdag'
    if date == easter_date + datetime.timedelta(days=-2):
        return 'Goede Vrijdag'
    if date == easter_date + datetime.timedelta(days=1):
        return 'Tweede Paasdag'
    if date == easter_date + datetime.timedelta(days=39):
        return 'Hemelvaart'
    if date == easter_date + datetime.timedelta(days=49):
        return 'Eerste Pinksterdag'
    if date == easter_date + datetime.timedelta(days=50):
        return 'Tweede Pinksterdag'
    # Carnaval: count back 40 days from Easter, not counting Sundays.
    carnaval_date = easter_date
    i = 40  # Carnaval is 40 days before easter
    while i:
        # Sundays don't count towards these 40 days
        if carnaval_date.weekday() != 6:
            i -= 1
        carnaval_date -= datetime.timedelta(days=1)
    # Carnaval spans three days ending on carnaval_date.
    for i in range(3):
        if date == carnaval_date - datetime.timedelta(days=i):
            return 'Carnaval'
    return ''
def process_birthdays(date, birthdays):
    """
    Yield everyone whose birthday falls on the given date.

    Args:
        date (datetime.date): The calendar day being rendered.
        birthdays (dict): Maps a birth date to an iterable of names
            born on that date.

    Yields:
        dict: ``{'name': ..., 'age': ...}`` for each matching person,
        where age is the difference in calendar years.
    """
    for birthdate, names in birthdays.items():
        # Compare month/day only; the year is used to compute the age.
        if (birthdate.month, birthdate.day) == (date.month, date.day):
            for name in names:
                yield {'name': name, 'age': date.year - birthdate.year}
def process_weddings(date, weddings):
    """
    Yield every wedding anniversary that falls on the given date.

    Args:
        date (datetime.date): The calendar day being rendered.
        weddings (dict): Maps a wedding date to an iterable of couples,
            each couple itself an iterable of names.

    Yields:
        dict: ``{'names': ..., 'age': ...}`` with the couple's names
        joined by ' & ' and the anniversary count in calendar years.
    """
    for wedding_date, couples in weddings.items():
        # Anniversaries match on month/day; the year gives the count.
        if (wedding_date.month, wedding_date.day) == (date.month, date.day):
            for couple in couples:
                yield {'names': ' & '.join(couple),
                       'age': date.year - wedding_date.year}
def day_to_dict(date, birthdays, weddings, special_dates):
    """
    Convert a single date into the template-ready dict for that day.

    Args:
        date (datetime.date): The date to process.
        birthdays (dict): A dict of dates mapped to birthdays.
        weddings (dict): A dict of dates mapped to wedding dates.
        special_dates (dict): Maps a '%m-%d' string to a special
            description to render on that day.

    Returns:
        dict: With keys:
            :day: Day of the month (int).
            :month: Full month name (locale-dependent).
            :short_month: Abbreviated month name.
            :week_day: Capitalized weekday name.
            :events: List of event strings for that day.
    """
    events = []
    holiday_name = holiday(date)
    if holiday_name:
        events.append(holiday_name)
    # Special dates are keyed on the month-day string, e.g. '12-25'.
    month_day = date.strftime('%m-%d')
    if month_day in special_dates:
        events.append(special_dates[month_day])
    events.extend('{0[name]} {0[age]} jaar'.format(birthday)
                  for birthday in process_birthdays(date, birthdays))
    events.extend('{0[names]} {0[age]} jaar getrouwd'.format(wedding)
                  for wedding in process_weddings(date, weddings))
    result = {
        'day': date.day,
        'month': date.strftime('%B'),
        'short_month': date.strftime('%b'),
        'week_day': date.strftime('%A').capitalize(),
        'events': events,
    }
    # Progress log: one line per day, events appended in parentheses.
    line = '{0[week_day]:<10} {0[day]:>2} {0[month]}'.format(result)
    if events:
        line += ' ({})'.format(', '.join(events)).replace('\n', ': ')
    print(line)
    return result
def create_week(start_date, birthdays, weddings, special_dates):
    """
    Generate a dictionary representing an entire week.

    Args:
        start_date (datetime.date): The first day (Monday) of the week
            to render.
        birthdays (dict): A dict of dates mapped to birthdays.
        weddings (dict): A dict of dates mapped to wedding dates.
        special_dates (dict): A dict mapping a date in the form '%m-%d'
            to a special string to render.

    Returns:
        dict: A dict containing all days of the week (keys 'mon'..'sun'),
            the weeknumber and the month as a string (a combined
            'X / Y' string when the week spans two months).
    """
    week_number = start_date.isocalendar()[1]
    # Progress header for the console log, one per week.
    print('\n Week {: <2d}'.format(week_number))
    args = birthdays, weddings, special_dates
    week = dict(
        weeknumber=week_number,
        mon=day_to_dict(start_date, *args),
        tue=day_to_dict(start_date + datetime.timedelta(days=1), *args),
        wed=day_to_dict(start_date + datetime.timedelta(days=2), *args),
        thu=day_to_dict(start_date + datetime.timedelta(days=3), *args),
        fri=day_to_dict(start_date + datetime.timedelta(days=4), *args),
        sat=day_to_dict(start_date + datetime.timedelta(days=5), *args),
        sun=day_to_dict(start_date + datetime.timedelta(days=6), *args),
    )
    # NOTE(review): 'first_month' is taken from Sunday (the *last* day of
    # the week) and 'last_month' from Monday (the first), so a week that
    # spans a month boundary renders as "<later month> / <earlier month>".
    # This may be intentional for the template layout -- confirm before
    # renaming or swapping these.
    first_month = week['sun']['month'].capitalize()
    last_month = week['mon']['month'].capitalize()
    if first_month == last_month:
        week['month'] = first_month
    else:
        week['month'] = '{} / {}'.format(first_month, last_month)
    return week
def create_weeks_for_year(year, birthdays, weddings, special_dates):
    """
    Lazily produce the week dicts covering an entire year.

    Args:
        year (int): The year to generate week data for.
        birthdays (dict): A dict of dates mapped to birthdays.
        weddings (dict): A dict of dates mapped to wedding dates.
        special_dates (dict): A dict mapping a date in the form '%m-%d'
            to a special string to render.

    Yields:
        dict: One week at a time, as built by :func:`.create_week`.
    """
    one_week = datetime.timedelta(days=7)
    monday = start_date(year)
    # Keep emitting weeks until the week's Monday rolls into next year.
    while monday.year <= year:
        yield create_week(monday, birthdays, weddings, special_dates)
        monday = monday + one_week
def generate(template_path, data_file, out_file=None, year=None):
    """
    Generate a week calendar for an entire year in odt format.

    Args:
        template_path (str): The file path of the template to render.
        data_file (str): The path of the configuration to load.
        out_file (io.IOBase): A file-like object to write the calendar
            to. When omitted, a file named ``calendar-{year}.odt`` is
            created in the working directory.
        year (int): The year to render the calendar for. A year
            specified in the data_file is used as a fallback value.

    Raises:
        .BadConfigError: If the given configuration file is missing
            configurations.
    """
    with open(data_file) as f:
        # NOTE(review): the full Loader executes arbitrary YAML tags, so
        # only trusted config files should be loaded here; kept as-is for
        # backward compatibility with existing configs.
        calendar_data = yaml.load(f, yaml.Loader)
    try:
        year = year or calendar_data['year']
        birthdays = calendar_data['birthdays']
        weddings = calendar_data['weddings']
        special_dates = calendar_data['special dates']
    except KeyError:
        raise BadConfigError()
    weeks = create_weeks_for_year(year, birthdays, weddings, special_dates)
    template = Template(source=None, filepath=template_path)
    generated = template.generate(weeks=weeks)
    data = generated.render().getvalue()
    if out_file:
        out_file.write(data)
    else:
        # Bug fix: the fallback file handle was previously opened but never
        # closed; use a context manager so it is flushed and closed.
        with open('calendar-{:d}.odt'.format(year), 'wb') as fallback:
            fallback.write(data)
def main():
    """Command-line entry point: parse arguments and render the calendar."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('config', help='The config file to use.')
    parser.add_argument(
        '-y', '--year', type=int,
        help='The year to render the calendar for.'
             ' (default: year specified in config file)')
    parser.add_argument(
        '-o', '--output', type=argparse.FileType('wb'),
        help='The output file to write the calendar to.'
             ' (default: calendar-{year}.odt)')
    options = parser.parse_args()
    # Dutch month/weekday names come from the locale, not from a table.
    locale.setlocale(locale.LC_TIME, 'nl_NL.utf8')
    # The template ships alongside this module.
    template_path = os.path.join(os.path.dirname(__file__), 'template.odt')
    try:
        generate(template_path, options.config, options.output, options.year)
    except BadConfigError:
        # An incomplete config is a usage error: show the help text.
        parser.print_help()
if __name__ == '__main__':
    main()
| 31.468493 | 79 | 0.614835 |
5668835d23c61f8c8ae4ae66b5912cb0224a0677 | 24,921 | py | Python | gevent/_sslgte279.py | huskar-org/gevent | f2e15119575eb5d75e3bfee8871db9f058c7322b | [
"MIT"
] | 1 | 2019-11-06T17:36:16.000Z | 2019-11-06T17:36:16.000Z | gevent/_sslgte279.py | huskar-org/gevent | f2e15119575eb5d75e3bfee8871db9f058c7322b | [
"MIT"
] | null | null | null | gevent/_sslgte279.py | huskar-org/gevent | f2e15119575eb5d75e3bfee8871db9f058c7322b | [
"MIT"
] | 1 | 2019-10-03T06:12:11.000Z | 2019-10-03T06:12:11.000Z | # Wrapper module for _ssl. Written by Bill Janssen.
# Ported to gevent by Denis Bilenko.
"""SSL wrapper for socket objects.
For the documentation, refer to :mod:`ssl` module manual.
This module implements cooperative SSL socket wrappers.
"""
from __future__ import absolute_import
import ssl as __ssl__
_ssl = __ssl__._ssl
import errno
from gevent.socket import socket, timeout_default
from gevent.socket import error as socket_error
from gevent.hub import PYPY
# Names this module re-implements cooperatively; everything else is
# re-exported verbatim from the stdlib ssl module below.
__implements__ = ['SSLContext',
                  'SSLSocket',
                  'wrap_socket',
                  'get_server_certificate',
                  'create_default_context',
                  '_create_unverified_context',
                  '_create_default_https_context',
                  '_create_stdlib_context']
__imports__ = []
# Import all symbols from Python's ssl.py, except those that we are implementing
# and "private" symbols.
for name in dir(__ssl__):
    if name in __implements__:
        continue
    if name.startswith('__'):
        continue
    if name == 'socket':
        continue
    value = getattr(__ssl__, name)
    globals()[name] = value
    __imports__.append(name)
# Don't leak the loop variables as module attributes.
del name, value
__all__ = __implements__ + __imports__
# Keep a handle on the stdlib class so our SSLContext can subclass it.
orig_SSLContext = __ssl__.SSLContext
class SSLContext(orig_SSLContext):
    """Subclass of the stdlib SSLContext whose :meth:`wrap_socket`
    returns gevent's cooperative :class:`SSLSocket` instead of the
    stdlib one. All other behavior is inherited unchanged."""

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap *sock* in a cooperative :class:`SSLSocket` bound to this
        context. Signature matches the stdlib method."""
        return SSLSocket(sock=sock, server_side=server_side,
                         do_handshake_on_connect=do_handshake_on_connect,
                         suppress_ragged_eofs=suppress_ragged_eofs,
                         server_hostname=server_hostname,
                         _context=self)
# NOTE: this function is a vendored copy of CPython's ssl.py (2.7.9+)
# implementation, kept textually close to upstream to ease future syncing.
def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None,
                           capath=None, cadata=None):
    """Create a SSLContext object with default settings.

    NOTE: The protocol and settings may change anytime without prior
    deprecation. The values represent a fair balance between maximum
    compatibility and security.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(PROTOCOL_SSLv23)

    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2

    # SSLv3 has problematic security and is only required for really old
    # clients such as IE6 on Windows XP
    context.options |= OP_NO_SSLv3

    # disable compression to prevent CRIME attacks (OpenSSL 1.0+)
    context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)

    if purpose == Purpose.SERVER_AUTH:
        # verify certs and host name in client mode
        context.verify_mode = CERT_REQUIRED
        context.check_hostname = True
    elif purpose == Purpose.CLIENT_AUTH:
        # Prefer the server's ciphers by default so that we get stronger
        # encryption
        context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)

        # Use single use keys in order to improve forward secrecy
        context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
        context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)

        # disallow ciphers with known vulnerabilities
        context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)

    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)

    return context
# NOTE: like create_default_context above, this is a vendored copy of the
# CPython ssl.py implementation and deliberately tracks upstream.
def _create_unverified_context(protocol=PROTOCOL_SSLv23, cert_reqs=None,
                               check_hostname=False, purpose=Purpose.SERVER_AUTH,
                               certfile=None, keyfile=None,
                               cafile=None, capath=None, cadata=None):
    """Create a SSLContext object for Python stdlib modules

    All Python stdlib modules shall use this function to create SSLContext
    objects in order to keep common settings in one place. The configuration
    is less restrict than create_default_context()'s to increase backward
    compatibility.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(protocol)
    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2
    # SSLv3 has problematic security and is only required for really old
    # clients such as IE6 on Windows XP
    context.options |= OP_NO_SSLv3

    if cert_reqs is not None:
        context.verify_mode = cert_reqs
    context.check_hostname = check_hostname

    if keyfile and not certfile:
        raise ValueError("certfile must be specified")
    if certfile or keyfile:
        context.load_cert_chain(certfile, keyfile)

    # load CA root certs
    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)

    return context

# Used by http.client if no context is explicitly passed.
_create_default_https_context = create_default_context

# Backwards compatibility alias, even though it's not a public name.
_create_stdlib_context = _create_unverified_context
class SSLSocket(socket):
    """gevent-cooperative SSL socket.

    Mirrors the stdlib ``ssl.SSLSocket`` API (Python 2.7.9+), but instead of
    blocking the whole process on SSL_WANT_READ/SSL_WANT_WRITE it yields to
    the gevent hub and waits on the underlying fd's read/write events.
    """

    def __init__(self, sock=None, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
                 suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
                 server_hostname=None,
                 _context=None):
        self._makefile_refs = 0
        if _context:
            self._context = _context
        else:
            if server_side and not certfile:
                raise ValueError("certfile must be specified for server-side "
                                 "operations")
            if keyfile and not certfile:
                raise ValueError("certfile must be specified")
            if certfile and not keyfile:
                keyfile = certfile
            self._context = SSLContext(ssl_version)
            self._context.verify_mode = cert_reqs
            if ca_certs:
                self._context.load_verify_locations(ca_certs)
            if certfile:
                self._context.load_cert_chain(certfile, keyfile)
            if npn_protocols:
                self._context.set_npn_protocols(npn_protocols)
            if ciphers:
                self._context.set_ciphers(ciphers)
            self.keyfile = keyfile
            self.certfile = certfile
            self.cert_reqs = cert_reqs
            self.ssl_version = ssl_version
            self.ca_certs = ca_certs
            self.ciphers = ciphers
        # Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
        # mixed in.
        if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
            raise NotImplementedError("only stream sockets are supported")
        socket.__init__(self, _sock=sock._sock)
        # The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
        # methods defined in SSLSocket.
        for attr in _delegate_methods:
            try:
                delattr(self, attr)
            except AttributeError:
                pass
        if server_side and server_hostname:
            raise ValueError("server_hostname can only be specified "
                             "in client mode")
        if self._context.check_hostname and not server_hostname:
            raise ValueError("check_hostname requires server_hostname")
        self.server_side = server_side
        self.server_hostname = server_hostname
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self.settimeout(sock.gettimeout())

        # See if we are connected
        try:
            self.getpeername()
        except socket_error as e:
            # Modernized from py2-only "except socket_error, e" syntax.
            if e.errno != errno.ENOTCONN:
                raise
            connected = False
        else:
            connected = True

        self._closed = False
        self._sslobj = None
        self._connected = connected
        if connected:
            # create the SSL object
            try:
                self._sslobj = self._context._wrap_socket(self._sock, server_side,
                                                          server_hostname, ssl_sock=self)
                if do_handshake_on_connect:
                    timeout = self.gettimeout()
                    if timeout == 0.0:
                        # non-blocking
                        raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
                    self.do_handshake()

            except socket_error as x:
                self.close()
                raise x

    @property
    def context(self):
        return self._context

    @context.setter
    def context(self, ctx):
        self._context = ctx
        self._sslobj.context = ctx

    def dup(self):
        # Bug fix: this previously raised the ``NotImplemented`` constant,
        # which is not an exception type and produced a confusing TypeError.
        raise NotImplementedError("Can't dup() %s instances" %
                                  self.__class__.__name__)

    def _checkClosed(self, msg=None):
        # raise an exception here if you wish to check for spurious closes
        pass

    def _check_connected(self):
        if not self._connected:
            # getpeername() will raise ENOTCONN if the socket is really
            # not connected; note that we can be connected even without
            # _connected being set, e.g. if connect() first returned
            # EAGAIN.
            self.getpeername()

    def read(self, len=0, buffer=None):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        while True:
            try:
                if buffer is not None:
                    return self._sslobj.read(len, buffer)
                else:
                    return self._sslobj.read(len or 1024)
            except SSLWantReadError:
                if self.timeout == 0.0:
                    raise
                self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
            except SSLWantWriteError:
                if self.timeout == 0.0:
                    raise
                # note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
                self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                    if buffer is not None:
                        return 0
                    else:
                        return b''
                else:
                    raise

    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        while True:
            try:
                return self._sslobj.write(data)
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
                else:
                    raise

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        self._checkClosed()
        self._check_connected()
        return self._sslobj.peer_certificate(binary_form)

    def selected_npn_protocol(self):
        self._checkClosed()
        if not self._sslobj or not _ssl.HAS_NPN:
            return None
        else:
            return self._sslobj.selected_npn_protocol()

    def cipher(self):
        self._checkClosed()
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()

    def compression(self):
        self._checkClosed()
        if not self._sslobj:
            return None
        else:
            return self._sslobj.compression()

    def send(self, data, flags=0, timeout=timeout_default):
        self._checkClosed()
        if timeout is timeout_default:
            timeout = self.timeout
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            while True:
                try:
                    return self._sslobj.write(data)
                except SSLWantReadError:
                    if self.timeout == 0.0:
                        return 0
                    self._wait(self._read_event)
                except SSLWantWriteError:
                    if self.timeout == 0.0:
                        return 0
                    self._wait(self._write_event)
        else:
            return socket.send(self, data, flags, timeout)

    def sendto(self, data, flags_or_addr, addr=None):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        elif addr is None:
            return socket.sendto(self, data, flags_or_addr)
        else:
            return socket.sendto(self, data, flags_or_addr, addr)

    def sendmsg(self, *args, **kwargs):
        # Ensure programs don't send data unencrypted if they try to
        # use this method.
        raise NotImplementedError("sendmsg not allowed on instances of %s" %
                                  self.__class__)

    def sendall(self, data, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            amount = len(data)
            count = 0
            while (count < amount):
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)

    def recv(self, buflen=1024, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            return self.read(buflen)
        else:
            return socket.recv(self, buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        self._checkClosed()
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            return self.read(nbytes, buffer)
        else:
            return socket.recv_into(self, buffer, nbytes, flags)

    def recvfrom(self, buflen=1024, flags=0):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, buffer, nbytes, flags)

    def recvmsg(self, *args, **kwargs):
        raise NotImplementedError("recvmsg not allowed on instances of %s" %
                                  self.__class__)

    def recvmsg_into(self, *args, **kwargs):
        raise NotImplementedError("recvmsg_into not allowed on instances of "
                                  "%s" % self.__class__)

    def pending(self):
        self._checkClosed()
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0

    def shutdown(self, how):
        self._checkClosed()
        self._sslobj = None
        socket.shutdown(self, how)

    def close(self):
        if self._makefile_refs < 1:
            self._sslobj = None
            socket.close(self)
        else:
            self._makefile_refs -= 1

    def _sslobj_shutdown(self):
        while True:
            try:
                return self._sslobj.shutdown()
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                    return ''
                elif ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
                else:
                    raise

    def unwrap(self):
        if self._sslobj:
            s = self._sslobj_shutdown()
            self._sslobj = None
            return socket(_sock=s)  # match _ssl2; critical to drop/reuse here on PyPy
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def _real_close(self):
        self._sslobj = None
        socket._real_close(self)

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        while True:
            try:
                return self._sslobj.do_handshake()
            except SSLWantReadError:
                if self.timeout == 0.0:
                    raise
                self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
            except SSLWantWriteError:
                if self.timeout == 0.0:
                    raise
                self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)

        if self.context.check_hostname:
            if not self.server_hostname:
                raise ValueError("check_hostname needs server_hostname "
                                 "argument")
            match_hostname(self.getpeercert(), self.server_hostname)

    def _real_connect(self, addr, connect_ex):
        if self.server_side:
            raise ValueError("can't connect in server-side mode")
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call.  We connect it, then wrap it.
        if self._connected:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname, ssl_sock=self)
        try:
            if connect_ex:
                rc = socket.connect_ex(self, addr)
            else:
                rc = None
                socket.connect(self, addr)
            if not rc:
                self._connected = True
                if self.do_handshake_on_connect:
                    self.do_handshake()
            return rc
        except socket_error:
            self._sslobj = None
            raise

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        self._real_connect(addr, False)

    def connect_ex(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        return self._real_connect(addr, True)

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        newsock = self.context.wrap_socket(newsock,
                                           do_handshake_on_connect=self.do_handshake_on_connect,
                                           suppress_ragged_eofs=self.suppress_ragged_eofs,
                                           server_side=True)
        return newsock, addr

    def makefile(self, mode='r', bufsize=-1):
        """Make and return a file-like object that
        works with the SSL connection.  Just use the code
        from the socket module."""
        self._makefile_refs += 1
        # close=True so as to decrement the reference count when done with
        # the file-like object.
        return _fileobject(self, mode, bufsize, close=True)

    def get_channel_binding(self, cb_type="tls-unique"):
        """Get channel binding data for current connection.  Raise ValueError
        if the requested `cb_type` is not supported.  Return bytes of the data
        or None if the data is not available (e.g. before the handshake).
        """
        if cb_type not in CHANNEL_BINDING_TYPES:
            raise ValueError("Unsupported channel binding type")
        if cb_type != "tls-unique":
            raise NotImplementedError(
                "{0} channel binding type not implemented"
                .format(cb_type))
        if self._sslobj is None:
            return None
        return self._sslobj.tls_unique_cb()

    def version(self):
        """
        Return a string identifying the protocol version used by the
        current SSL channel, or None if there is no established channel.
        """
        if self._sslobj is None:
            return None
        return self._sslobj.version()
if PYPY or not hasattr(SSLSocket, 'timeout'):
    # PyPy (and certain versions of CPython) doesn't have a direct
    # 'timeout' property on raw sockets, because that's not part of
    # the documented specification. We may wind up wrapping a raw
    # socket (when ssl is used with PyWSGI) or a gevent socket, which
    # does have a read/write timeout property as an alias for
    # get/settimeout, so make sure that's always the case because
    # pywsgi can depend on that.
    SSLSocket.timeout = property(lambda self: self.gettimeout(),
                                 lambda self, value: self.settimeout(value))

# Shared timeout exception instances used by SSLSocket's cooperative
# waits above; created once so every timeout raises the same message.
_SSLErrorReadTimeout = SSLError('The read operation timed out')
_SSLErrorWriteTimeout = SSLError('The write operation timed out')
_SSLErrorHandshakeTimeout = SSLError('The handshake operation timed out')
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True,
                ciphers=None):
    """Wrap *sock* in a gevent-cooperative :class:`SSLSocket`.

    Drop-in replacement for the stdlib ``ssl.wrap_socket``; all keyword
    arguments are forwarded unchanged.
    """
    options = dict(sock=sock,
                   keyfile=keyfile,
                   certfile=certfile,
                   server_side=server_side,
                   cert_reqs=cert_reqs,
                   ssl_version=ssl_version,
                   ca_certs=ca_certs,
                   do_handshake_on_connect=do_handshake_on_connect,
                   suppress_ragged_eofs=suppress_ragged_eofs,
                   ciphers=ciphers)
    return SSLSocket(**options)
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
    """Fetch the certificate of the server at *addr* as a PEM string.

    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt.
    """
    # Validate the address shape up front (must be a (host, port) pair).
    host, port = addr
    cert_reqs = CERT_REQUIRED if ca_certs is not None else CERT_NONE
    context = _create_stdlib_context(ssl_version,
                                     cert_reqs=cert_reqs,
                                     cafile=ca_certs)
    with closing(create_connection(addr)) as sock:
        with closing(context.wrap_socket(sock)) as sslsock:
            der_cert = sslsock.getpeercert(True)
    return DER_cert_to_PEM_cert(der_cert)
| 37.816388 | 116 | 0.592833 |
48a0d96f858f5db0a218ed456cf98bf540d31ad8 | 3,737 | py | Python | src/ansiblelint/rules/OctalPermissionsRule.py | ResilienceCare/ansible-lint | 90e7449ab16305813bc4ff892ecf8ae47d5a79f0 | [
"MIT"
] | 484 | 2020-12-12T06:34:22.000Z | 2022-03-21T15:30:35.000Z | src/ansiblelint/rules/OctalPermissionsRule.py | ResilienceCare/ansible-lint | 90e7449ab16305813bc4ff892ecf8ae47d5a79f0 | [
"MIT"
] | 488 | 2020-12-11T14:57:26.000Z | 2022-03-21T11:33:01.000Z | src/ansiblelint/rules/OctalPermissionsRule.py | ResilienceCare/ansible-lint | 90e7449ab16305813bc4ff892ecf8ae47d5a79f0 | [
"MIT"
] | 132 | 2020-12-13T16:58:47.000Z | 2022-03-20T15:52:24.000Z | # Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from typing import TYPE_CHECKING, Any, Dict, Union
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from typing import Optional
from ansiblelint.file_utils import Lintable
class OctalPermissionsRule(AnsibleLintRule):
    """Flag numeric file permissions without a leading zero.

    A mode like ``644`` (decimal) is silently interpreted differently
    from ``0644`` (octal); such modes must be strings or octal literals.
    """

    id = 'risky-octal'
    shortdesc = 'Octal file permissions must contain leading zero or be a string'
    description = (
        'Numeric file permissions without leading zero can behave '
        'in unexpected ways. See '
        'https://docs.ansible.com/ansible/latest/collections/ansible/builtin/file_module.html'
    )
    severity = 'VERY_HIGH'
    tags = ['formatting']
    version_added = 'historic'

    # Modules that accept a ``mode`` parameter.
    _modules = [
        'assemble',
        'copy',
        'file',
        'ini_file',
        'lineinfile',
        'replace',
        'synchronize',
        'template',
        'unarchive',
    ]

    def is_invalid_permission(self, mode: int) -> bool:
        """Return True when *mode*, read as decimal, is not a sensible
        permission set (and therefore was probably meant to be octal).

        Sensible modes never grant write without read, never grant
        execute without the user execute bit, and are never more
        generous for group/other than for the user.
        """
        # Split the mode into the three octal permission digits.
        other = mode % 8
        group = (mode >> 3) % 8
        user = (mode >> 6) % 8
        user_has_exec = user % 2 == 1

        # Write bit set without the read bit (value 1-3), excluding the
        # special "execute only, mirroring user execute" case.
        bad_other = 0 < other < 4 and not (other == 1 and user_has_exec)
        bad_group = 0 < group < 4 and not (group == 1 and user_has_exec)
        bad_user = 0 < user < 4 and user != 1

        # Less-privileged classes must never exceed more-privileged ones.
        return bool(
            bad_other
            or bad_group
            or bad_user
            or other > group
            or other > user
            or group > user
        )

    def matchtask(
        self, task: Dict[str, Any], file: 'Optional[Lintable]' = None
    ) -> Union[bool, str]:
        if task["action"]["__ansible_module__"] not in self._modules:
            return False
        mode = task['action'].get('mode', None)
        # String modes (e.g. "0644", "u+rw") are always acceptable here.
        if isinstance(mode, str):
            return False
        if isinstance(mode, int):
            return self.is_invalid_permission(mode)
        return False
| 37.37 | 94 | 0.641424 |
43c2dbdaef584ebd2ff5caa5dce1bfdc2dd23de4 | 6,970 | py | Python | py/desitarget/test/make_testdata.py | jinyiY/desitarget | 546a85a3feb9754a2406ebfb2b9890514f47b3bf | [
"BSD-3-Clause"
] | null | null | null | py/desitarget/test/make_testdata.py | jinyiY/desitarget | 546a85a3feb9754a2406ebfb2b9890514f47b3bf | [
"BSD-3-Clause"
] | null | null | null | py/desitarget/test/make_testdata.py | jinyiY/desitarget | 546a85a3feb9754a2406ebfb2b9890514f47b3bf | [
"BSD-3-Clause"
] | null | null | null | # - For the record (and future updates):
# ADM This code generates tractor, sweep, targets, pixweight, mask
# ADM file subsets for testing.
# - The hardcoded paths are for NERSC, but you can swap out any
# - legacy survey data release path as needed.
# ADM Now (10/04/19) based off DR8 sweeps and Tractor files.
import fitsio
import numpy as np
import numpy.lib.recfunctions as rfn
import healpy as hp
from os.path import basename
from time import time
# from astropy.io import fits
from desitarget.cuts import apply_cuts
from desitarget.cmx import cmx_cuts
from desitarget.io import read_tractor
from desitarget.targets import finalize
from desitarget.QA import _load_systematics
# from desitarget.gaiamatch import find_gaia_files
# Subset-generation script: read full Legacy Survey Tractor/sweep files and
# write small FITS subsets into t/ and t2/ for the desitarget unit tests.
# NOTE(review): assumes the NERSC /project paths below are mounted — the
# script is not runnable outside that environment.
start = time()
tractordir = '/project/projectdirs/cosmo/data/legacysurvey/dr8/south/tractor/330/'
# tractordir = '/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/330/'
# tractordir = '/project/projectdirs/cosmo/data/legacysurvey/dr3.1/tractor/330'
# tractordir = '/data/legacysurvey/dr3.1/tractor/330/'
for brick in ['3301m002', '3301m007', '3303p000']:
    filepath = '{}/tractor-{}.fits'.format(tractordir, brick)
    desi_target, bgs_target, mws_target = apply_cuts(filepath)
    # ADM as nobody is testing the MWS in the sandbox, yet, we need to
    # ADM ensure we ignore MWS targets for testing the main algorithms.
    yes = np.where((desi_target != 0) & (mws_target == 0))[0]
    no = np.where(desi_target == 0)[0]
    # Keep 3 rows that pass the cuts and 3 that fail so tests see both cases.
    keep = np.concatenate([yes[0:3], no[0:3]])
    data, hdr = read_tractor(filepath, header=True)
    # ADM the FRACDEV and FRACDEV_IVAR columns can
    # ADM contain some NaNs, which break testing.
    # x != x is the NaN self-inequality test (equivalent to np.isnan here).
    wnan = np.where(data["FRACDEV"] != data["FRACDEV"])
    if len(wnan[0]) > 0:
        data["FRACDEV"][wnan] = 0.
    wnan = np.where(data["FRACDEV_IVAR"] != data["FRACDEV_IVAR"])
    if len(wnan[0]) > 0:
        data["FRACDEV_IVAR"][wnan] = 0.
    # ADM the "CONTINUE" comment keyword is not yet implemented
    # ADM in fitsio, so delete it to prevent fitsio barfing on headers.
    hdr.delete("CONTINUE")
    fitsio.write('t/'+basename(filepath), data[keep], header=hdr, clobber=True)
    print('made Tractor file for brick {}...t={:.2f}s'.format(brick, time()-start))
sweepdir = '/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0/'
# sweepdir = '/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
# sweepdir = '/project/projectdirs/cosmo/data/legacysurvey/dr3.1/sweep/3.1'
# sweepdir = '/data/legacysurvey/dr2p/sweep/'
for radec in ['310m005-320p000', '320m005-330p000', '330m005-340p000']:
    filepath = '{}/sweep-{}.fits'.format(sweepdir, radec)
    desi_target, bgs_target, mws_target = apply_cuts(filepath)
    # NOTE(review): cmx_target is computed but never used below; the cmx
    # targets file is built from finalize(..., survey='cmx') instead.
    cmx_target = cmx_cuts.apply_cuts(filepath)
    # ADM as nobody is testing the MWS in the sandbox, yet, we need to.
    # ADM ensure we ignore MWS targets for testing the main algorithms.
    yes = np.where((desi_target != 0) & (mws_target == 0))[0]
    no = np.where(desi_target == 0)[0]
    keep = np.concatenate([yes[0:3], no[0:3]])
    data, hdr = read_tractor(filepath, header=True)
    # ADM the "CONTINUE" comment keyword is not yet implemented
    # ADM in fitsio, so delete it to prevent fitsio barfing on headers.
    hdr.delete("CONTINUE")
    fitsio.write('t/'+basename(filepath), data[keep], header=hdr, clobber=True)
    print('made sweeps file for range {}...t={:.2f}s'.format(radec, time()-start))
# ADM only need to write out one set of targets. So fine outside of loop.
# ADM create a targets file for testing QA (main survey and commissioning)
# ADM we get more test coverage if one file has > 1000 targets.
# NOTE: yes/keep/data/hdr below are leftovers from the LAST sweep iteration.
many = yes[:1001]
targets = finalize(data[many], desi_target[many],
                   bgs_target[many], mws_target[many])
cmx_targets = finalize(data[keep], desi_target[keep],
                       bgs_target[keep], mws_target[keep], survey='cmx')
# ADM remove some columns from the target file that aren't needed for
# ADM testing. It's a big file.
needtargs = np.empty(
    len(many), dtype=[('RA', '>f8'), ('DEC', '>f8'), ('RELEASE', '>i2'),
                      ('FLUX_G', '>f4'), ('FLUX_R', '>f4'), ('FLUX_Z', '>f4'),
                      ('FLUX_W1', '>f4'), ('FLUX_W2', '>f4'), ('MW_TRANSMISSION_G', '>f4'),
                      ('MW_TRANSMISSION_R', '>f4'), ('MW_TRANSMISSION_Z', '>f4'),
                      ('MW_TRANSMISSION_W1', '>f4'), ('MW_TRANSMISSION_W2', '>f4'),
                      ('PARALLAX', '>f4'), ('PMRA', '>f4'), ('PMDEC', '>f4'),
                      ('DESI_TARGET', '<i8'), ('BGS_TARGET', '<i8'), ('MWS_TARGET', '<i8')]
)
for col in needtargs.dtype.names:
    needtargs[col] = targets[col]
fitsio.write('t/targets.fits', needtargs, extname='TARGETS', header=hdr, clobber=True)
fitsio.write('t/cmx-targets.fits', cmx_targets, extname='TARGETS', header=hdr, clobber=True)
# ADM as of DR7, ignore the Gaia files
# ADM adding Gaia files to which to match
# for brick in ['3301m002', '3301m007', '3303p000']:
#    filepath = '{}/tractor-{}.fits'.format(tractordir, brick)
#    data = fitsio.read('t/'+basename(filepath))
#    # ADM use find_gaia_files to determine which Gaia files potentially
#    # ADM match the sweeps objects of interest
#    for gaiafile in find_gaia_files(data):
#        # ADM for each of the relevant Gaia files, read the first 5 rows
#        gaiadata = fitsio.read(gaiafile, rows=range(5))
#        # ADM and write them to a special Gaia directory
#        fitsio.write('tgaia/'+basename(gaiafile), gaiadata, clobber=True)
# for radec in ['310m005-320p000', '320m005-330p000', '330m005-340p000']:
#    filepath = '{}/sweep-{}.fits'.format(sweepdir, radec)
#    data = fitsio.read('t/'+basename(filepath))
#    # ADM use find_gaia_files to determine which Gaia files potentially
#    # ADM match the sweeps objects of interest
#    for gaiafile in find_gaia_files(data):
#        # ADM for each of the relevant Gaia files, read the first 5 rows
#        gaiadata = fitsio.read(gaiafile, rows=range(5))
#        # ADM and write them to a special Gaia directory
#        fitsio.write('tgaia/'+basename(gaiafile), gaiadata, clobber=True)
# ADM adding a file to make a mask for bright stars
# ADM this should go in its own directory /t2 (others are in t1)
filepath = '{}/sweep-{}.fits'.format(sweepdir, '190m005-200p000')
data, hdr = read_tractor(filepath, header=True)
# ADM the "CONTINUE" comment keyword is not yet implemented
# ADM in fitsio, so delete it to prevent fitsio barfing on headers
hdr.delete("CONTINUE")
# Bright stars only: rows with very large z-band flux.
keep = np.where(data["FLUX_Z"] > 100000)
fitsio.write('t2/'+basename(filepath), data[keep], header=hdr, clobber=True)
# ADM adding a fake pixel weight map (all-ones, nside=2) for QA tests.
sysdic = _load_systematics()
npix = hp.nside2npix(2)
pixmap = np.ones(npix, dtype=[(k, '>f4') for k in sysdic.keys()])
pixmap = rfn.append_fields(pixmap, "ALL", np.ones(npix), dtypes='>f4')
fitsio.write('t/pixweight.fits', pixmap, clobber=True)
print('Done...t={:.2f}s'.format(time()-start))
| 49.084507 | 92 | 0.678479 |
651417713c5247fb342b5d877e2b1430a15cc08f | 1,315 | py | Python | downloader.py | ikaruswill/sg-bus-router | ec25d7d0f20fe622967b9ec53faab880511d7d46 | [
"MIT"
] | 2 | 2018-10-24T06:51:41.000Z | 2018-10-25T10:49:26.000Z | downloader.py | ikaruswill/sg-bus-router | ec25d7d0f20fe622967b9ec53faab880511d7d46 | [
"MIT"
] | null | null | null | downloader.py | ikaruswill/sg-bus-router | ec25d7d0f20fe622967b9ec53faab880511d7d46 | [
"MIT"
] | null | null | null | import requests
from time import sleep
import pandas as pd
from sqlalchemy import create_engine
from keys import api_key
# Variables
# Remote LTA DataMall endpoints and the matching local table, paired by index.
API_PATHS = ['BusStops', 'BusRoutes']
DB_TABLES = ['bus_stops', 'bus_routes']
# Options
DB_PATH = 'sqlite:///sg-bus-router.db'  # SQLAlchemy URL for the local SQLite DB
LOG_INTERVAL = 1000  # print progress roughly every N downloaded records
# Constants
API_URL_FORMAT = 'http://datamall2.mytransport.sg/ltaodataservice/{}'
API_JSON_KEY = 'value'  # DataMall wraps each response payload under this key
def main():
    """Mirror each LTA DataMall endpoint into its local SQLite table.

    Pages through every endpoint in API_PATHS using the OData ``$skip``
    parameter and appends each page of records to the table with the
    same index position in DB_TABLES.
    """
    engine = create_engine(DB_PATH)
    request_headers = {
        'AccountKey': api_key,
        'Accept': 'application/json',
    }
    for api_path, table_name in zip(API_PATHS, DB_TABLES):
        endpoint = API_URL_FORMAT.format(api_path)
        query = {'$skip': 0}
        while True:
            response = requests.get(
                url=endpoint, headers=request_headers, params=query)
            records = response.json()[API_JSON_KEY]
            # An empty page means the endpoint is exhausted.
            if not records:
                break
            pd.DataFrame(records).to_sql(
                name=table_name, con=engine, if_exists='append', index=False)
            # Log progress
            query['$skip'] += len(records)
            if query['$skip'] % LOG_INTERVAL == 0:
                print('Downloaded {}'.format(query['$skip']))
            # # Avoid triggering rate limits
            # sleep(1)
# Run the download only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 24.811321 | 77 | 0.580989 |
a3eb9632a34e8e117070fc9cb24900c4aa64261a | 37,704 | py | Python | tests/requests/tests.py | andreip/django | c61d1361d027a729d07d277879950ff133c19f4c | [
"PSF-2.0",
"BSD-3-Clause"
] | 4 | 2020-04-08T17:57:46.000Z | 2021-11-08T08:56:16.000Z | tests/requests/tests.py | andreip/django | c61d1361d027a729d07d277879950ff133c19f4c | [
"PSF-2.0",
"BSD-3-Clause"
] | 7 | 2018-06-18T17:56:50.000Z | 2020-06-24T16:51:04.000Z | tests/requests/tests.py | andreip/django | c61d1361d027a729d07d277879950ff133c19f4c | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2019-08-19T20:41:48.000Z | 2019-10-10T17:29:52.000Z | import time
from datetime import datetime, timedelta
from http import cookies
from io import BytesIO
from itertools import chain
from urllib.parse import urlencode
from django.core.exceptions import DisallowedHost
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.http import (
HttpRequest, HttpResponse, RawPostDataException, UnreadablePostError,
)
from django.http.request import split_domain_port
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.client import FakePayload
from django.test.utils import freeze_time
from django.utils.http import cookie_date
from django.utils.timezone import utc
class RequestsTests(SimpleTestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(list(request.GET), [])
self.assertEqual(list(request.POST), [])
self.assertEqual(list(request.COOKIES), [])
self.assertEqual(list(request.META), [])
# .GET and .POST should be QueryDicts
self.assertEqual(request.GET.urlencode(), '')
self.assertEqual(request.POST.urlencode(), '')
# and FILES should be MultiValueDict
self.assertEqual(request.FILES.getlist('foo'), [])
self.assertIsNone(request.content_type)
self.assertIsNone(request.content_params)
def test_httprequest_full_path(self):
request = HttpRequest()
request.path = request.path_info = '/;some/?awful/=path/foo:bar/'
request.META['QUERY_STRING'] = ';some=query&+query=string'
expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string'
self.assertEqual(request.get_full_path(), expected)
def test_httprequest_full_path_with_query_string_and_fragment(self):
request = HttpRequest()
request.path = request.path_info = '/foo#bar'
request.META['QUERY_STRING'] = 'baz#quux'
self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux')
def test_httprequest_repr(self):
request = HttpRequest()
request.path = '/somepath/'
request.method = 'GET'
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), "<HttpRequest: GET '/somepath/'>")
def test_httprequest_repr_invalid_method_and_path(self):
request = HttpRequest()
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.method = "GET"
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.path = ""
self.assertEqual(repr(request), "<HttpRequest>")
def test_wsgirequest(self):
request = WSGIRequest({
'PATH_INFO': 'bogus',
'REQUEST_METHOD': 'bogus',
'CONTENT_TYPE': 'text/html; charset=utf8',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(list(request.GET), [])
self.assertEqual(list(request.POST), [])
self.assertEqual(list(request.COOKIES), [])
self.assertEqual(
set(request.META),
{'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'CONTENT_TYPE', 'wsgi.input'}
)
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
self.assertEqual(request.content_type, 'text/html')
self.assertEqual(request.content_params, {'charset': 'utf8'})
def test_wsgirequest_with_script_name(self):
"""
The request's path is correctly assembled, regardless of whether or
not the SCRIPT_NAME has a trailing slash (#20169).
"""
# With trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
# Without trailing slash
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/PREFIX/somepath/')
def test_wsgirequest_script_url_double_slashes(self):
"""
WSGI squashes multiple successive slashes in PATH_INFO, WSGIRequest
should take that into account when populating request.path and
request.META['SCRIPT_NAME'] (#17133).
"""
request = WSGIRequest({
'SCRIPT_URL': '/mst/milestones//accounts/login//help',
'PATH_INFO': '/milestones/accounts/login/help',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/mst/milestones/accounts/login/help')
self.assertEqual(request.META['SCRIPT_NAME'], '/mst')
def test_wsgirequest_with_force_script_name(self):
"""
The FORCE_SCRIPT_NAME setting takes precedence over the request's
SCRIPT_NAME environment parameter (#20169).
"""
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({
'PATH_INFO': '/somepath/',
'SCRIPT_NAME': '/PREFIX/',
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
"""
The request's path is correctly assembled, regardless of whether or not
the FORCE_SCRIPT_NAME setting has a trailing slash (#20169).
"""
# With trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
# Without trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_repr(self):
request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(repr(request), "<WSGIRequest: GET '/'>")
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), "<WSGIRequest: GET '/somepath/'>")
def test_wsgirequest_path_info(self):
def wsgi_str(path_info, encoding='utf-8'):
path_info = path_info.encode(encoding) # Actual URL sent by the browser (bytestring)
path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string)
return path_info
# Regression for #19468
request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, "/سلام/")
# The URL may be incorrectly encoded in a non-UTF-8 encoding (#26971)
request = WSGIRequest({
'PATH_INFO': wsgi_str("/café/", encoding='iso-8859-1'),
'REQUEST_METHOD': 'get',
'wsgi.input': BytesIO(b''),
})
# Since it's impossible to decide the (wrong) encoding of the URL, it's
# left percent-encoded in the path.
self.assertEqual(request.path, "/caf%E9/")
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(
request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf'
)
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(
request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons'
)
    def test_near_expiration(self):
        """Cookie will expire when a near expiration time is provided."""
        response = HttpResponse()
        # There is a timing weakness in this test; The
        # expected result for max-age requires that there be
        # a very slight difference between the evaluated expiration
        # time, and the time evaluated in set_cookie(). If this
        # difference doesn't exist, the cookie time will be
        # 1 second larger. To avoid the problem, put in a quick sleep,
        # which guarantees that there will be a time difference.
        expires = datetime.utcnow() + timedelta(seconds=10)
        time.sleep(0.001)
        response.set_cookie('datetime', expires=expires)
        datetime_cookie = response.cookies['datetime']
        # set_cookie() derives max-age from the expires datetime.
        self.assertEqual(datetime_cookie['max-age'], 10)
def test_aware_expiration(self):
"Cookie accepts an aware datetime as expiration time"
response = HttpResponse()
expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_create_cookie_after_deleting_cookie(self):
"""
Setting a cookie after deletion should clear the expiry date.
"""
response = HttpResponse()
response.set_cookie('c', 'old-value')
self.assertEqual(response.cookies['c']['expires'], '')
response.delete_cookie('c')
self.assertEqual(response.cookies['c']['expires'], 'Thu, 01-Jan-1970 00:00:00 GMT')
response.set_cookie('c', 'new-value')
self.assertEqual(response.cookies['c']['expires'], '')
    def test_far_expiration(self):
        """Cookie will expire when a distant expiration time is provided."""
        response = HttpResponse()
        response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
        datetime_cookie = response.cookies['datetime']
        self.assertIn(
            datetime_cookie['expires'],
            # assertIn accounts for slight time dependency (#23450)
            ('Sat, 01-Jan-2028 04:05:06 GMT', 'Sat, 01-Jan-2028 04:05:07 GMT')
        )
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
set_cookie_time = time.time()
with freeze_time(set_cookie_time):
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(set_cookie_time + 10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertIn('; %s' % cookies.Morsel._reserved['httponly'], str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_unicode_cookie(self):
"Verify HttpResponse.set_cookie() works with unicode data."
response = HttpResponse()
cookie_value = '清風'
response.set_cookie('test', cookie_value)
self.assertEqual(cookie_value, response.cookies['test'].value)
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(), b'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), b'')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(5), b'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read sequentially from a stream
stream = LimitedStream(BytesIO(b'12345678'), 8)
self.assertEqual(stream.read(5), b'12345')
self.assertEqual(stream.read(5), b'678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read lines from a stream
stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), b'1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), b'56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), b'78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), b'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), b'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), b'\n')
# Read everything else.
self.assertEqual(stream.readline(), b'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), b'1234\n')
self.assertEqual(stream.readline(3), b'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), b'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), b'')
# Same test, but with read, not readline.
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.read(6), b'1234\na')
self.assertEqual(stream.read(2), b'bc')
self.assertEqual(stream.read(2), b'd')
self.assertEqual(stream.read(2), b'')
self.assertEqual(stream.read(), b'')
def test_stream(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(), b'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
self.assertEqual(request.body, b'name=value')
self.assertEqual(request.read(), b'name=value')
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(2), b'na')
with self.assertRaises(RawPostDataException):
request.body
self.assertEqual(request.POST, {})
def test_non_ascii_POST(self):
payload = FakePayload(urlencode({'key': 'España'}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_alternate_charset_POST(self):
"""
Test a POST with non-utf-8 payload encoding.
"""
payload = FakePayload(urlencode({'key': 'España'.encode('latin-1')}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_body_after_POST_multipart_form_data(self):
"""
Reading body after parsing multipart/form-data is not allowed
"""
# Because multipart is used for large amounts of data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
with self.assertRaises(RawPostDataException):
request.body
def test_body_after_POST_multipart_related(self):
"""
Reading body after parsing multipart that isn't form-data is allowed
"""
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
b''])
payload = FakePayload(payload_data)
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': payload})
self.assertEqual(request.POST, {})
def test_POST_binary_only(self):
payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
environ = {'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/octet-stream',
'CONTENT_LENGTH': len(payload),
'wsgi.input': BytesIO(payload)}
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
# Same test without specifying content-type
environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)})
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
def test_read_by_lines(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(list(request), [b'name=value'])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.read(1), b'n')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), b'--boundary\r\nC')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_immutable_for_mutipart(self):
"""
MultiPartParser.parse() leaves request.POST immutable.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--',
]))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertFalse(request.POST._mutable)
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.body
def test_set_encoding_clears_POST(self):
payload = FakePayload('name=Hello Günter')
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'name': ['Hello Günter']})
request.encoding = 'iso-8859-16'
self.assertEqual(request.POST, {'name': ['Hello GĂŒnter']})
def test_set_encoding_clears_GET(self):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': '',
'QUERY_STRING': 'name=Hello%20G%C3%BCnter',
})
self.assertEqual(request.GET, {'name': ['Hello Günter']})
request.encoding = 'iso-8859-16'
self.assertEqual(request.GET, {'name': ['Hello G\u0102\u0152nter']})
def test_FILES_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
FILES, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'x'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.FILES
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_raw_uri(self):
factory = RequestFactory(HTTP_HOST='evil.com')
request = factory.get('////absolute-uri')
self.assertEqual(request.get_raw_uri(), 'http://evil.com//absolute-uri')
request = factory.get('/?foo=bar')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/?foo=bar')
request = factory.get('/path/with:colons')
self.assertEqual(request.get_raw_uri(), 'http://evil.com/path/with:colons')
class HostValidationTests(SimpleTestCase):
poisoned_hosts = [
'example.com@evil.tld',
'example.com:dr.frankenstein@evil.tld',
'example.com:dr.frankenstein@evil.tld:80',
'example.com:80/badpath',
'example.com: recovermypassword.com',
]
@override_settings(
USE_X_FORWARDED_HOST=False,
ALLOWED_HOSTS=[
'forward.com', 'example.com', 'internal.com', '12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com',
'.multitenant.com', 'INSENSITIVE.com', '[::ffff:169.254.169.254]',
])
def test_http_get_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), 'example.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
'anything.multitenant.com',
'multitenant.com',
'insensitive.com',
'example.com.',
'example.com.:80',
'[::ffff:169.254.169.254]',
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
# Poisoned host headers are rejected as suspicious
for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']):
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*'])
def test_http_get_host_with_x_forwarded_host(self):
    """
    With USE_X_FORWARDED_HOST enabled, get_host() prefers the
    X-Forwarded-Host header over HTTP_HOST/SERVER_NAME, but still rejects
    poisoned host headers.
    """
    # Check if X_FORWARDED_HOST is provided.
    request = HttpRequest()
    request.META = {
        'HTTP_X_FORWARDED_HOST': 'forward.com',
        'HTTP_HOST': 'example.com',
        'SERVER_NAME': 'internal.com',
        'SERVER_PORT': 80,
    }
    # X_FORWARDED_HOST is obeyed.
    self.assertEqual(request.get_host(), 'forward.com')

    # Check if X_FORWARDED_HOST isn't provided.
    request = HttpRequest()
    request.META = {
        'HTTP_HOST': 'example.com',
        'SERVER_NAME': 'internal.com',
        'SERVER_PORT': 80,
    }
    self.assertEqual(request.get_host(), 'example.com')

    # Check if HTTP_HOST isn't provided.
    request = HttpRequest()
    request.META = {
        'SERVER_NAME': 'internal.com',
        'SERVER_PORT': 80,
    }
    self.assertEqual(request.get_host(), 'internal.com')

    # Check if HTTP_HOST isn't provided, and we're on a nonstandard port
    request = HttpRequest()
    request.META = {
        'SERVER_NAME': 'internal.com',
        'SERVER_PORT': 8042,
    }
    self.assertEqual(request.get_host(), 'internal.com:8042')

    # Poisoned host headers are rejected as suspicious
    legit_hosts = [
        'example.com',
        'example.com:80',
        '12.34.56.78',
        '12.34.56.78:443',
        '[2001:19f0:feee::dead:beef:cafe]',
        '[2001:19f0:feee::dead:beef:cafe]:8080',
        'xn--4ca9at.com',  # Punycode for öäü.com
    ]
    for host in legit_hosts:
        request = HttpRequest()
        request.META = {
            'HTTP_HOST': host,
        }
        request.get_host()

    for host in self.poisoned_hosts:
        with self.assertRaises(DisallowedHost):
            request = HttpRequest()
            request.META = {
                'HTTP_HOST': host,
            }
            request.get_host()
@override_settings(USE_X_FORWARDED_PORT=False)
def test_get_port(self):
    """With USE_X_FORWARDED_PORT disabled, get_port() always reports SERVER_PORT."""
    # The X-Forwarded-Port header must be ignored whether present or not.
    for meta in (
        {'SERVER_PORT': '8080', 'HTTP_X_FORWARDED_PORT': '80'},
        {'SERVER_PORT': '8080'},
    ):
        request = HttpRequest()
        request.META = meta
        self.assertEqual(request.get_port(), '8080')
@override_settings(USE_X_FORWARDED_PORT=True)
def test_get_port_with_x_forwarded_port(self):
    """With USE_X_FORWARDED_PORT enabled, the X-Forwarded-Port header wins."""
    cases = [
        # (META fixture, expected port)
        ({'SERVER_PORT': '8080', 'HTTP_X_FORWARDED_PORT': '80'}, '80'),
        ({'SERVER_PORT': '8080'}, '8080'),
    ]
    for meta, expected in cases:
        request = HttpRequest()
        request.META = meta
        self.assertEqual(request.get_port(), expected)
@override_settings(DEBUG=True, ALLOWED_HOSTS=[])
def test_host_validation_in_debug_mode(self):
    """
    If ALLOWED_HOSTS is empty and DEBUG is True, variants of localhost are
    allowed.
    """
    for localhost_alias in ('localhost', '127.0.0.1', '[::1]'):
        request = HttpRequest()
        request.META = {'HTTP_HOST': localhost_alias}
        self.assertEqual(request.get_host(), localhost_alias)

    # Other hostnames raise a DisallowedHost.
    request = HttpRequest()
    request.META = {'HTTP_HOST': 'example.com'}
    with self.assertRaises(DisallowedHost):
        request.get_host()
@override_settings(ALLOWED_HOSTS=[])
def test_get_host_suggestion_of_allowed_host(self):
    """get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS."""
    msg_invalid_host = "Invalid HTTP_HOST header: %r."
    msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
    msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035"

    for host in [  # Valid-looking hosts
        'example.com',
        '12.34.56.78',
        '[2001:19f0:feee::dead:beef:cafe]',
        'xn--4ca9at.com',  # Punycode for öäü.com
    ]:
        request = HttpRequest()
        request.META = {'HTTP_HOST': host}
        # Valid hosts get suggested verbatim for ALLOWED_HOSTS.
        with self.assertRaisesMessage(DisallowedHost, msg_suggestion % (host, host)):
            request.get_host()

    for domain, port in [  # Valid-looking hosts with a port number
        ('example.com', 80),
        ('12.34.56.78', 443),
        ('[2001:19f0:feee::dead:beef:cafe]', 8080),
    ]:
        host = '%s:%s' % (domain, port)
        request = HttpRequest()
        request.META = {'HTTP_HOST': host}
        # The suggestion strips the port: only the bare domain is proposed.
        with self.assertRaisesMessage(DisallowedHost, msg_suggestion % (host, domain)):
            request.get_host()

    for host in self.poisoned_hosts:
        request = HttpRequest()
        request.META = {'HTTP_HOST': host}
        # Poisoned hosts get the plain error with no suggestion.
        with self.assertRaisesMessage(DisallowedHost, msg_invalid_host % host):
            request.get_host()

    request = HttpRequest()
    request.META = {'HTTP_HOST': "invalid_hostname.com"}
    # Underscores make the name RFC-invalid, so a different hint is used.
    with self.assertRaisesMessage(DisallowedHost, msg_suggestion2 % "invalid_hostname.com"):
        request.get_host()
def test_split_domain_port_removes_trailing_dot(self):
    """A trailing dot on the domain is stripped by split_domain_port()."""
    parts = split_domain_port('example.com.:8080')
    self.assertEqual(parts, ('example.com', '8080'))
class BuildAbsoluteURITestCase(SimpleTestCase):
    """
    Regression tests for ticket #18314.
    """

    def setUp(self):
        self.factory = RequestFactory()

    def _absolute_uri(self, **kwargs):
        # //// is needed to create a request whose path begins with //.
        request = self.factory.get('////absolute-uri')
        return request.build_absolute_uri(**kwargs)

    def test_build_absolute_uri_no_location(self):
        """
        ``request.build_absolute_uri()`` returns the proper value when
        the ``location`` argument is not provided, and ``request.path``
        begins with //.
        """
        self.assertEqual(self._absolute_uri(), 'http://testserver//absolute-uri')

    def test_build_absolute_uri_absolute_location(self):
        """
        An absolute ``location`` argument is returned unchanged, even when
        ``request.path`` begins with //.
        """
        self.assertEqual(
            self._absolute_uri(location='http://example.com/?foo=bar'),
            'http://example.com/?foo=bar'
        )

    def test_build_absolute_uri_schema_relative_location(self):
        """
        A schema-relative ``location`` argument inherits the request's
        scheme, even when ``request.path`` begins with //.
        """
        self.assertEqual(
            self._absolute_uri(location='//example.com/?foo=bar'),
            'http://example.com/?foo=bar'
        )

    def test_build_absolute_uri_relative_location(self):
        """
        A relative ``location`` argument is resolved against the server
        root, even when ``request.path`` begins with //.
        """
        self.assertEqual(
            self._absolute_uri(location='/foo/bar/'),
            'http://testserver/foo/bar/'
        )
| 40.805195 | 117 | 0.588107 |
44dc199f909488d0318fa47d24a511210b9266a5 | 603 | py | Python | tronapi/utils/crypto.py | sonicskye/tron-api-python | eba4d5d1eb27b95ea69e2f9e0fbf05c819012b7e | [
"MIT"
] | 1 | 2018-12-20T22:09:39.000Z | 2018-12-20T22:09:39.000Z | tronapi/utils/crypto.py | sonicskye/tron-api-python | eba4d5d1eb27b95ea69e2f9e0fbf05c819012b7e | [
"MIT"
] | null | null | null | tronapi/utils/crypto.py | sonicskye/tron-api-python | eba4d5d1eb27b95ea69e2f9e0fbf05c819012b7e | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) iEXBase. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Union
from eth_hash.auto import keccak as keccak_256
from eth_utils import to_bytes
def keccak(
    primitive: Union[bytes, int, bool] = None, hexstr: str = None, text: str = None
) -> bytes:
    """Return the Keccak-256 digest of the given primitive/hexstr/text input."""
    # Normalize whichever input form was supplied to bytes, then hash.
    payload = to_bytes(primitive, hexstr, text)
    return keccak_256(payload)
| 37.6875 | 94 | 0.512438 |
dba4c1ec411c29f7d978e2358ca0393138738dab | 3,152 | py | Python | tools/network/resolve.py | wxyyxc1992/ChaosWarden | 831ab3eaa232af7c8e9803c496a9ed1af7494552 | [
"MIT"
] | 123 | 2019-03-28T04:36:08.000Z | 2019-11-02T12:30:43.000Z | tools/network/resolve.py | wxyyxc1992/ChaosWarden | 831ab3eaa232af7c8e9803c496a9ed1af7494552 | [
"MIT"
] | 1 | 2020-10-12T07:37:12.000Z | 2020-10-12T07:37:45.000Z | tools/network/resolve.py | wxyyxc1992/ChaosWarden | 831ab3eaa232af7c8e9803c496a9ed1af7494552 | [
"MIT"
] | 15 | 2020-11-11T18:47:58.000Z | 2022-03-11T14:59:53.000Z | #!/usr/bin/python3.5
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import socket
import argparse
from colored import fg, bg, attr
from threading import Thread
from queue import Queue
from multiprocessing.dummy import Pool
def banner():
    """Print the tool's ASCII-art banner to stdout."""
    art = """
    _
 _ __ ___  ___  ___ | |_   _____  _ __  _   _
| '__/ _ \/ __|/ _ \| \ \ / / _ \ | '_ \| | | |
| | | __/\__ \ (_) | |\ V / __/  _  | |_) | |_| |
|_| \___||___/\___/|_| \_/ \___| (_) | .__/ \__, |
|_| |___/

by @gwendallecoguic

"""
    print(art)
banner()

parser = argparse.ArgumentParser()
parser.add_argument("-o", "--host", help="set hosts file list")
parser.add_argument("-t", "--threads", help="threads, default 10")
parser.add_argument("-i", "--ip", help="also store the ip address", action="store_true")
# Fixed: the original called parser.parse_args() twice and discarded the
# first result; parse exactly once.
args = parser.parse_args()

# Worker count, defaulting to 10 when -t is not given.
_threads = int(args.threads) if args.threads else 10
# Whether hosts_alive entries should include the resolved IP (-i flag).
_store_ip = bool(args.ip)

# Load the hosts list (one hostname per line) from the -o file.
t_hosts = []
if args.host:
    if os.path.isfile(args.host):
        with open(args.host, 'r') as fp:
            t_hosts = fp.read().split("\n")

n_host = len(t_hosts)

if not n_host:
    parser.error('hosts list missing')

sys.stdout.write('%s[+] %d hosts loaded: %s%s\n' % (fg('green'), n_host, args.host, attr(0)))
def resolve(host, store_ip):
    """Resolve one hostname and record it in t_alive (with its IP) or t_dead."""
    hostname = host.strip()
    if not hostname:
        return

    done = t_multiproc['n_current']
    # Periodically flush partial results to disk.
    if done % 5000 == 0:
        save(store_ip)

    sys.stdout.write('progress: %d/%d\r' % (done, t_multiproc['n_total']))
    t_multiproc['n_current'] = done + 1

    try:
        t_alive[hostname] = socket.gethostbyname(hostname)
    except Exception:
        # Any resolution failure counts the host as dead.
        t_dead.append(hostname)
# Shared result containers, mutated by the worker threads:
# resolved hostname -> IP address, and hosts that failed to resolve.
t_alive = {}
t_dead = []

# Progress counters read/updated by resolve() for the status line and
# the periodic partial saves.
t_multiproc = {
    'n_current': 0,
    'n_total': n_host
}

# Earlier multiprocessing-pool implementation, kept for reference:
# pool = Pool( _threads )
# pool.map( resolve, t_hosts )
# pool.close()
# pool.join()
def save(store_ip):
    """Dump current results: live hosts (optionally host:ip) and dead hosts."""
    with open('hosts_alive', 'w') as out:
        for hostname in sorted(t_alive):
            line = "%s:%s\n" % (hostname, t_alive[hostname]) if store_ip else "%s\n" % hostname
            out.write(line)

    with open('hosts_dead', 'w') as out:
        for hostname in t_dead:
            out.write("%s\n" % hostname)
def doWork():
    """Worker loop: keep pulling hostnames off the queue until the process exits."""
    while True:
        entry = q.get()
        resolve(entry, _store_ip)
        q.task_done()
# Bounded queue so the feeder cannot run arbitrarily far ahead of the workers.
q = Queue( _threads*2 )

# Start the daemon worker threads; they exit with the main thread.
for i in range(_threads):
    t = Thread( target=doWork )
    t.daemon = True
    t.start()

try:
    # Feed every hostname, then wait for the workers to drain the queue.
    for host in t_hosts:
        q.put( host )
    q.join()
except KeyboardInterrupt:
    sys.exit(1)

# print( t_alive)
# print( t_dead)

sys.stdout.write( '%s[+] %d hosts alive, %d dead hosts%s\n' % (fg('green'),len(t_alive),len(t_dead),attr(0)) )

# Final flush of the result files before exiting.
save( _store_ip )

exit()
56fabac8f04a875b3de43d10b742e56d8d928e55 | 339 | py | Python | scrapy_statsd/tests/test_project/test_project/spiders/test.py | vinayan3/scrapy-statsd | 12d90c6ac8c20f3c26e24b603e8b3c732ee6d4fc | [
"MIT"
] | 8 | 2016-08-14T22:17:37.000Z | 2019-01-03T08:45:17.000Z | scrapy_statsd/tests/test_project/test_project/spiders/test.py | TeamHG-Memex/scrapy-statsd | 7bbf8c6aca71ee8a8481a560c63bead880d7843d | [
"MIT"
] | 2 | 2016-09-10T19:46:01.000Z | 2018-04-12T22:53:24.000Z | scrapy_statsd/tests/test_project/test_project/spiders/test.py | TeamHG-Memex/scrapy-statsd | 7bbf8c6aca71ee8a8481a560c63bead880d7843d | [
"MIT"
] | 6 | 2016-08-22T10:21:06.000Z | 2022-03-16T00:51:53.000Z | # -*- coding: utf-8 -*-
import scrapy
from scrapy_statsd.tests.test_project.test_project.items import TestProjectItem
class TestSpider(scrapy.Spider):
    """Minimal spider used to exercise scrapy-statsd against a single page."""
    name = "test"
    allowed_domains = ["github.com"]
    start_urls = (
        'https://github.com/scrapy/scrapy',
    )

    def parse(self, response):
        # Emit a single (empty) item so item-scraped stats are generated.
        yield TestProjectItem()
bef5cd36989bc522d176248bbd3d6395eeafe6d4 | 361 | py | Python | community/migrations/0002_rename_business_name_business_name.py | kasparov-creat/Neighborhood-Watch | 9d1ee930adaf640155fea27f686c0b231989ff1a | [
"MIT"
] | null | null | null | community/migrations/0002_rename_business_name_business_name.py | kasparov-creat/Neighborhood-Watch | 9d1ee930adaf640155fea27f686c0b231989ff1a | [
"MIT"
] | null | null | null | community/migrations/0002_rename_business_name_business_name.py | kasparov-creat/Neighborhood-Watch | 9d1ee930adaf640155fea27f686c0b231989ff1a | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-05 11:15
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames Business.business_name to Business.name."""

    dependencies = [
        ('community', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='business',
            old_name='business_name',
            new_name='name',
        ),
    ]
| 19 | 47 | 0.581717 |
b2afa5f13770df11ffa9e11eb45019226e18f84f | 8,139 | py | Python | pymatgen/analysis/nmr.py | oxana-a/pymatgen | 69393c5fbf88c3e9f148a91c090b4e2f02ac664d | [
"MIT"
] | 2 | 2019-03-14T17:58:33.000Z | 2021-01-26T13:17:59.000Z | pymatgen/analysis/nmr.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | null | null | null | pymatgen/analysis/nmr.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 1 | 2015-10-12T20:03:27.000Z | 2015-10-12T20:03:27.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen.core.tensors import SquareTensor
from collections import namedtuple
from pymatgen.core.units import FloatWithUnit
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Site
import numpy as np
"""
A module for NMR analysis
"""
__author__ = "Shyam Dwaraknath"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.2"
__maintainer__ = "Shyam Dwaraknath"
__credits__ = "Xiaohui Qu"
__email__ = "shyamd@lbl.gov"
__date__ = "Mar 1, 2018"
class ChemicalShielding(SquareTensor):
    """
    This class extends the SquareTensor to perform extra analysis unique to
    NMR Chemical shielding tensors.

    Three notations to describe chemical shielding tensor (RK Harris; Magn. Reson.
    Chem. 2008, 46, 582–598; DOI: 10.1002/mrc.2225) are supported.

    Authors: Shyam Dwaraknath, Xiaohui Qu
    """

    # NOTE(review): typename "HaeberlenNotion" looks like a typo for
    # "HaeberlenNotation" but is kept for backward compatibility of reprs.
    HaeberlenNotation = namedtuple(typename="HaeberlenNotion", field_names="sigma_iso, delta_sigma_iso, zeta, eta")
    MehringNotation = namedtuple(typename="MehringNotation", field_names="sigma_iso, sigma_11, sigma_22, sigma_33")
    MarylandNotation = namedtuple(typename="MarylandNotation", field_names="sigma_iso, omega, kappa")

    def __new__(cls, cs_matrix, vscale=None):
        """
        Create a Chemical Shielding tensor.
        Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays.

        Args:
            cs_matrix (1x3 or 3x3 array-like): the 3x3 array-like
                representing the chemical shielding tensor
                or a 1x3 array of the primary sigma values corresponding
                to the principal axis system
            vscale (6x1 array-like): 6x1 array-like scaling the
                voigt-notation vector with the tensor entries

        Raises:
            ValueError: if cs_matrix has neither shape (3,) nor (3, 3).
                (Previously any other shape silently returned None.)
        """
        t_array = np.array(cs_matrix)

        if t_array.shape == (3,):
            return super().__new__(cls, np.diag(cs_matrix), vscale)
        if t_array.shape == (3, 3):
            return super().__new__(cls, cs_matrix, vscale)
        raise ValueError(
            "cs_matrix must be a 3-vector of principal values or a 3x3 "
            "tensor, got shape %s" % (t_array.shape,))

    @property
    def principal_axis_system(self):
        """
        Returns a chemical shielding tensor aligned to the principle axis system
        so that only the 3 diagonal components are non-zero.
        """
        # Eigenvalues of the symmetric part, sorted ascending, on the diagonal.
        return ChemicalShielding(np.diag(np.sort(np.linalg.eigvals(self.symmetrized))))

    @property
    def haeberlen_values(self):
        """
        Returns: the Chemical shielding tensor in Haeberlen Notation
        """
        pas = self.principal_axis_system
        sigma_iso = pas.trace() / 3

        # Haeberlen ordering: |s_zz - iso| >= |s_xx - iso| >= |s_yy - iso|,
        # so sorting by distance from the isotropic value yields (yy, xx, zz).
        sigmas = np.diag(pas)
        sigmas = sorted(sigmas, key=lambda x: np.abs(x - sigma_iso))
        sigma_yy, sigma_xx, sigma_zz = sigmas

        delta_sigma = sigma_zz - 0.5 * (sigma_xx + sigma_yy)
        zeta = sigma_zz - sigma_iso
        eta = (sigma_yy - sigma_xx) / zeta

        return self.HaeberlenNotation(sigma_iso, delta_sigma, zeta, eta)

    @property
    def mehring_values(self):
        """
        Returns: the Chemical shielding tensor in Mehring Notation
        """
        pas = self.principal_axis_system
        sigma_iso = pas.trace() / 3
        sigma_11, sigma_22, sigma_33 = np.diag(pas)
        return self.MehringNotation(sigma_iso, sigma_11, sigma_22, sigma_33)

    @property
    def maryland_values(self):
        """
        Returns: the Chemical shielding tensor in Maryland Notation
        """
        pas = self.principal_axis_system
        sigma_iso = pas.trace() / 3
        omega = np.diag(pas)[2] - np.diag(pas)[0]
        # There is a typo in equation 20 from Magn. Reson. Chem. 2008, 46, 582–598,
        # the sign is wrong. The correct order is presented in
        # Solid State Nucl. Magn. Reson. 1993, 2, 285-288.
        kappa = 3.0 * (np.diag(pas)[1] - sigma_iso) / omega
        return self.MarylandNotation(sigma_iso, omega, kappa)

    @classmethod
    def from_maryland_notation(cls, sigma_iso, omega, kappa):
        """
        Build the (diagonal) chemical shielding tensor back from its
        Maryland-notation parameters.

        Args:
            sigma_iso: isotropic chemical shielding
            omega: span of the tensor
            kappa: skew of the tensor

        Returns:
            ChemicalShielding: diagonal tensor in the principal axis system
        """
        sigma_22 = sigma_iso + kappa * omega / 3.0
        sigma_11 = (3.0 * sigma_iso - omega - sigma_22) / 2.0
        sigma_33 = 3.0 * sigma_iso - sigma_22 - sigma_11
        return cls(np.diag([sigma_11, sigma_22, sigma_33]))
class ElectricFieldGradient(SquareTensor):
    """
    This class extends the SquareTensor to perform extra analysis unique to
    NMR Electric Field Gradient tensors in units of V/Angstrom^2.

    Authors: Shyam Dwaraknath, Xiaohui Qu
    """

    def __new__(cls, efg_matrix, vscale=None):
        """
        Create an Electric Field Gradient tensor.
        Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays.

        Args:
            efg_matrix (1x3 or 3x3 array-like): the 3x3 array-like
                representing the electric field tensor
                or a 1x3 array of the primary values corresponding
                to the principal axis system
            vscale (6x1 array-like): 6x1 array-like scaling the
                voigt-notation vector with the tensor entries

        Raises:
            ValueError: if efg_matrix has neither shape (3,) nor (3, 3).
                (Previously any other shape silently returned None.)
        """
        t_array = np.array(efg_matrix)

        if t_array.shape == (3,):
            return super().__new__(cls, np.diag(efg_matrix), vscale)
        if t_array.shape == (3, 3):
            return super().__new__(cls, efg_matrix, vscale)
        raise ValueError(
            "efg_matrix must be a 3-vector of principal values or a 3x3 "
            "tensor, got shape %s" % (t_array.shape,))

    @property
    def principal_axis_system(self):
        """
        Returns a electric field gradient tensor aligned to the principle axis
        system so that only the 3 diagonal components are non-zero.
        """
        return ElectricFieldGradient(np.diag(np.sort(np.linalg.eigvals(self))))

    @property
    def V_xx(self):
        """Principal component with the smallest absolute value (|V_xx| <= |V_yy| <= |V_zz|)."""
        diags = np.diag(self.principal_axis_system)
        return sorted(diags, key=np.abs)[0]

    @property
    def V_yy(self):
        """Principal component with the middle absolute value."""
        diags = np.diag(self.principal_axis_system)
        return sorted(diags, key=np.abs)[1]

    @property
    def V_zz(self):
        """Principal component with the largest absolute value."""
        diags = np.diag(self.principal_axis_system)
        return sorted(diags, key=np.abs)[2]

    @property
    def asymmetry(self):
        """
        Asymmetry of the electric field tensor defined as:
        (V_yy - V_xx)/V_zz
        """
        diags = np.diag(self.principal_axis_system)
        V = sorted(diags, key=np.abs)
        return np.abs((V[1] - V[0]) / V[2])

    def coupling_constant(self, specie):
        """
        Computes the coupling constant C_q as defined in:
        Wasylishen R E, Ashbrook S E, Wimperis S. NMR of quadrupolar nuclei
        in solid materials[M]. John Wiley & Sons, 2012. (Chapter 3.2)

        C_q for a specific atom type for this electric field tensor:
            C_q = e*Q*V_zz/h

        h: Planck's constant
        Q: nuclear electric quadrupole moment in mb (millibarn)
        e: elementary proton charge

        Args:
            specie: flexible input to specify the species at this site.
                Can take an isotope or element string, Specie object,
                or Site object

        Returns:
            the coupling constant as a FloatWithUnit in MHz

        Raises:
            ValueError: if specie is not a str, Site, or Specie.
        """
        planks_constant = FloatWithUnit(6.62607004E-34, "m^2 kg s^-1")
        Vzz = FloatWithUnit(self.V_zz, "V ang^-2")
        # NOTE(review): value is negative although the comment said
        # "elementary proton charge" — kept as-is to preserve results.
        e = FloatWithUnit(-1.60217662E-19, "C")

        # Convert from string to Specie object
        if isinstance(specie, str):
            # isotope was provided in string format, e.g. "Li-7"
            if len(specie.split("-")) > 1:
                isotope = str(specie)
                specie = Specie(specie.split("-")[0])
                Q = specie.get_nmr_quadrupole_moment(isotope)
            else:
                specie = Specie(specie)
                Q = specie.get_nmr_quadrupole_moment()
        elif isinstance(specie, Site):
            specie = specie.specie
            Q = specie.get_nmr_quadrupole_moment()
        elif isinstance(specie, Specie):
            Q = specie.get_nmr_quadrupole_moment()
        else:
            # Typos fixed in this message ("speciie", "calcuations").
            raise ValueError("Invalid specie provided for quadrupolar coupling constant calculations")

        return (e * Q * Vzz / planks_constant).to("MHz")
| 36.334821 | 136 | 0.640128 |
b624b88f0a445fd19b5c2b241266cb863e45d590 | 2,927 | py | Python | concordia/management/commands/print_frontend_test_urls.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | 1 | 2020-07-17T14:41:41.000Z | 2020-07-17T14:41:41.000Z | concordia/management/commands/print_frontend_test_urls.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | null | null | null | concordia/management/commands/print_frontend_test_urls.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | null | null | null | """
Print a list of URLs using the local database suitable for front-end testing
"""
from urllib.parse import urljoin
from django.core.management.base import BaseCommand
from django.urls import reverse
from concordia.models import Asset
class Command(BaseCommand):
    """Print one absolute URL per line: static pages first, then one
    published campaign/project/item/asset chain if any exists."""
    help = "Print URLs for front-end testing"

    def add_arguments(self, parser):
        parser.add_argument(
            "--base-url",
            default="http://localhost:8000/",
            help="Change the base URL for all generated URLs from %(default)s",
        )

    def handle(self, *, base_url, **options):
        # Static routes that exist regardless of database content.
        paths = [
            reverse("homepage"),
            reverse("about"),
            reverse("contact"),
            # Help pages
            reverse("help-center"),
            reverse("welcome-guide"),
            reverse("how-to-transcribe"),
            reverse("how-to-review"),
            reverse("how-to-tag"),
            reverse("for-educators"),
            reverse("questions"),
            # Account pages
            reverse("registration_register"),
            reverse("registration_login"),
            reverse("password_reset"),
            reverse("login"),
            reverse("transcriptions:campaign-list"),
        ]

        # Database content
        # First we'll find an asset which is actually visible:
        asset_qs = Asset.objects.filter(
            published=True,
            item__published=True,
            item__project__published=True,
            item__project__campaign__published=True,
        )

        if asset_qs.exists():
            asset = asset_qs.first()
            item = asset.item
            project = item.project
            campaign = project.campaign

            # One detail URL for each level of the content hierarchy.
            paths.extend(
                [
                    reverse(
                        "transcriptions:asset-detail",
                        kwargs={
                            "campaign_slug": campaign.slug,
                            "project_slug": project.slug,
                            "item_id": item.item_id,
                            "slug": asset.slug,
                        },
                    ),
                    reverse(
                        "transcriptions:item-detail",
                        kwargs={
                            "campaign_slug": campaign.slug,
                            "project_slug": project.slug,
                            "item_id": item.item_id,
                        },
                    ),
                    reverse(
                        "transcriptions:project-detail",
                        kwargs={"campaign_slug": campaign.slug, "slug": project.slug},
                    ),
                    reverse(
                        "transcriptions:campaign-detail", kwargs={"slug": campaign.slug}
                    ),
                ]
            )

        for path in sorted(paths):
            print(urljoin(base_url, path))
| 33.261364 | 88 | 0.476256 |
ca775201be75e978a96e3c53e4b593751f70f062 | 4,621 | py | Python | alf/torchattacks/attacks/multiattack.py | Aminullah6264/gpvi_plus_updated_adv | 449cb2594a1a9ee158af19984c4caaf7d86f1e7f | [
"Apache-2.0"
] | 1 | 2022-02-15T07:18:32.000Z | 2022-02-15T07:18:32.000Z | alf/torchattacks/attacks/multiattack.py | Aminullah6264/gpvi_plus_updated_adv | 449cb2594a1a9ee158af19984c4caaf7d86f1e7f | [
"Apache-2.0"
] | null | null | null | alf/torchattacks/attacks/multiattack.py | Aminullah6264/gpvi_plus_updated_adv | 449cb2594a1a9ee158af19984c4caaf7d86f1e7f | [
"Apache-2.0"
] | null | null | null | import copy
import torch
from ..attack import Attack
class MultiAttack(Attack):
    r"""
    MultiAttack is a class to attack a model with various attacks agains same images and labels.

    Arguments:
        model (nn.Module): model to attack.
        attacks (list): list of attacks.

    Examples::
        >>> atk1 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
        >>> atk2 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
        >>> atk = torchattacks.MultiAttack([atk1, atk2])
        >>> adv_images = attack(images, labels)
    """

    def __init__(self, attacks, verbose=False):
        # Check validity: every sub-attack must reference the same model object.
        ids = []
        for attack in attacks:
            ids.append(id(attack.model))

        if len(set(ids)) != 1:
            raise ValueError("At least one of attacks is referencing a different model.")

        super().__init__("MultiAttack", attack.model)
        self.attacks = attacks
        self.verbose = verbose
        self._accumulate_multi_atk_records = False
        # _multi_atk_records[0] holds the total image count; entry i+1 holds
        # the number of images still unbroken after attack i.
        self._multi_atk_records = [0.0]
        self._supported_mode = ['default']

    def forward(self, images, labels):
        r"""
        Overridden. Apply each sub-attack in turn, only to the images that
        every previous attack failed to fool; return the final batch.
        """
        batch_size = images.shape[0]
        # Indices (into the full batch) of images not yet fooled.
        fails = torch.arange(batch_size).to(self.device)
        final_images = images.clone().detach().to(self.device)
        labels = labels.clone().detach().to(self.device)

        multi_atk_records = [batch_size]

        for _, attack in enumerate(self.attacks):
            adv_images = attack(images[fails], labels[fails])

            outputs = self.model(adv_images)
            _, pre = torch.max(outputs.data, 1)

            corrects = (pre == labels[fails])
            wrongs = ~corrects

            # Batch indices newly fooled by this attack, and their positions
            # within the current `fails` subset (for indexing adv_images).
            succeeds = torch.masked_select(fails, wrongs)
            succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), wrongs)

            final_images[succeeds] = adv_images[succeeds_of_fails]

            fails = torch.masked_select(fails, corrects)
            multi_atk_records.append(len(fails))

            # Stop early once every image has been fooled.
            if len(fails) == 0:
                break

        if self.verbose:
            print(self._return_sr_record(multi_atk_records))

        if self._accumulate_multi_atk_records:
            self._update_multi_atk_records(multi_atk_records)

        return final_images

    def _clear_multi_atk_records(self):
        # Reset the accumulated counters (keep only the leading total slot).
        self._multi_atk_records = [0.0]

    def _covert_to_success_rates(self, multi_atk_records):
        # Convert surviving-image counts into cumulative success percentages.
        sr = [((1-multi_atk_records[i]/multi_atk_records[0])*100) for i in range(1, len(multi_atk_records))]
        return sr

    def _return_sr_record(self, multi_atk_records):
        # Human-readable "Attack success rate: a | b | ..." line.
        sr = self._covert_to_success_rates(multi_atk_records)
        return "Attack success rate: "+" | ".join(["%2.2f %%"%item for item in sr])

    def _update_multi_atk_records(self, multi_atk_records):
        # Element-wise accumulation across batches (used while save() runs).
        for i, item in enumerate(multi_atk_records):
            self._multi_atk_records[i] += item

    def save(self, data_loader, save_path=None, verbose=True, return_verbose=False, save_pred=False):
        r"""
        Overridden. Run the base-class save() over the whole loader while
        accumulating per-attack success-rate records.
        """
        if (verbose==False) and (return_verbose==True):
            raise ValueError("Verobse should be True if return_verbose==True.")

        self._clear_multi_atk_records()
        prev_verbose = self.verbose
        self.verbose = False
        self._accumulate_multi_atk_records = True

        # One accumulator slot per sub-attack (after the leading total slot).
        for i, attack in enumerate(self.attacks):
            self._multi_atk_records.append(0.0)

        if verbose:
            rob_acc, l2, elapsed_time = super().save(data_loader, save_path,
                                                     verbose=True, return_verbose=True,
                                                     save_pred=save_pred)
            sr = self._covert_to_success_rates(self._multi_atk_records)
        else:
            super().save(data_loader, save_path, verbose=False,
                         return_verbose=False, save_pred=save_pred)

        # Restore pre-save state.
        self._clear_multi_atk_records()
        self._accumulate_multi_atk_records = False
        self.verbose = prev_verbose

        if return_verbose:
            return rob_acc, sr, l2, elapsed_time

    def _save_print(self, progress, rob_acc, l2, elapsed_time, end):
        r"""
        Overridden. Print save progress together with the per-attack
        success-rate record.
        """
        print("- Save progress: %2.2f %% / Robust accuracy: %2.2f %%"%(progress, rob_acc)+\
              " / "+self._return_sr_record(self._multi_atk_records)+\
              ' / L2: %1.5f (%2.3f it/s) \t'%(l2, elapsed_time), end=end)
| 35.274809 | 108 | 0.608093 |
34f7706cd23cc181bdb5708b2f9cdaa817556e5e | 3,248 | py | Python | pychemia/runner/pbs.py | quanshengwu/PyChemia | 98e9f7a1118b694dbda3ee75411ff8f8d7b9688b | [
"MIT"
] | 1 | 2021-03-26T12:34:45.000Z | 2021-03-26T12:34:45.000Z | pychemia/runner/pbs.py | quanshengwu/PyChemia | 98e9f7a1118b694dbda3ee75411ff8f8d7b9688b | [
"MIT"
] | null | null | null | pychemia/runner/pbs.py | quanshengwu/PyChemia | 98e9f7a1118b694dbda3ee75411ff8f8d7b9688b | [
"MIT"
] | null | null | null | import datetime
import os
import socket
import subprocess
import xml.etree.ElementTree as ElementTree
class PBSRunner:
    """Builds, writes and submits a PBS/Torque batch script for one job directory."""

    def __init__(self, workdir, filename='batch.pbs'):
        """Remember the work directory; the job name is its basename."""
        # Scheduler parameters stay unset until initialize() is called.
        self.template = None
        self.walltime = None
        self.queue = None
        self.mail = None
        self.message = None
        self.ppn = None
        self.nodes = None
        self.workdir = workdir
        self.filename = filename
        if self.workdir[-1] == os.sep:
            self.workdir = self.workdir[:-1]
        self.name = os.path.basename(self.workdir)

    def initialize(self, nodes=1, ppn=2, walltime=None, message='ae', mail=None, queue=None):
        """Set scheduler parameters; walltime defaults to 12 hours."""
        self.set_walltime([12, 0, 0] if walltime is None else walltime)
        self.nodes = nodes
        self.ppn = ppn
        self.message = message
        self.mail = mail
        self.queue = queue

    def set_walltime(self, walltime):
        """Left-pad with zeros until walltime is [days, hours, minutes, seconds]."""
        while 0 < len(walltime) < 4:
            walltime = [0] + walltime
        self.walltime = walltime

    def set_template(self, template):
        """Accept either a path to a template file or the template text itself."""
        if os.path.isfile(template):
            self.template = open(template).read()
        elif isinstance(template, str):
            self.template = template

    def write_pbs(self):
        """Write the PBS script to <workdir>/<filename>."""
        days, hours, minutes, seconds = self.walltime
        script = """#!/bin/sh
#PBS -N %s
#PBS -l nodes=%d:ppn=%d
#PBS -l walltime=%d:%02d:%02d
#PBS -m %s
#PBS -k n
""" % (self.name, self.nodes, self.ppn, days * 24 + hours, minutes, seconds, self.message)
        if self.mail is not None:
            script += "#PBS -M %s\n" % self.mail
        if self.queue is not None:
            script += "#PBS -q %s\n" % self.queue
        script += '\ncd $PBS_O_WORKDIR\n'
        if self.template is not None:
            script += "%s\n" % self.template
        with open(self.workdir + os.sep + self.filename, 'w') as handle:
            handle.write(script)

    def submit(self, priority=0):
        """Run qsub on the script from inside workdir, then restore the cwd."""
        previous_dir = os.getcwd()
        os.chdir(self.workdir)
        returncode = subprocess.call(["qsub", "%s" % self.filename, '-p', '%d' % priority])
        if returncode != 0:
            print('Some error happended:', returncode)
        os.chdir(previous_dir)
def get_jobs(user):
    """Return the list of job names that qstat reports for *user*."""
    output = subprocess.check_output(['qstat', '-x', '-f', '-u', user])
    root = ElementTree.fromstring(output)
    return [job.find('Job_Name').text for job in root.findall('Job')]
def report_cover():
    """Return a plain-text report header: hostname, timestamp, PBS env vars."""
    now = datetime.datetime.now()
    parts = [
        'PyChemia Execution Report\n',
        '=========================\n\n',
        'Hostname\n',
        '--------\n\n',
        socket.gethostname() + '\n\n',
        'Date\n',
        '----\n',
        now.strftime("%A, %d. %B %Y %I:%M%p") + '\n\n',
        'PBS VARIABLES\n',
        '-------------\n\n',
    ]
    pbs_variables = ['PBS_O_HOST', 'PBS_SERVER', 'PBS_O_QUEUE', 'PBS_O_WORKDIR',
                     'PBS_ARRAYID', 'PBS_ENVIRONMENT', 'PBS_JOBID', 'PBS_JOBNAME',
                     'PBS_NODEFILE', 'PBS_QUEUE']
    # Only variables actually present in the environment are listed.
    for name in pbs_variables:
        value = os.getenv(name)
        if value is not None:
            parts.append(name + ' = ' + value + '\n')
    return ''.join(parts)
| 29.261261 | 107 | 0.547414 |
d8bfb149911a94b3cb766951b8aa6b639adc36ae | 2,380 | py | Python | samples/cli/accelbyte_py_sdk_cli/dsmc/_get_all_pod_config.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | samples/cli/accelbyte_py_sdk_cli/dsmc/_get_all_pod_config.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | samples/cli/accelbyte_py_sdk_cli/dsmc/_get_all_pod_config.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-dsm-controller-service (3.2.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.dsmc import get_all_pod_config as get_all_pod_config_internal
from accelbyte_py_sdk.api.dsmc.models import ModelsListPodConfigResponse
from accelbyte_py_sdk.api.dsmc.models import ResponseError
# CLI wrapper for the DSMC GetAllPodConfig endpoint.
# NOTE: no docstring on purpose — click would surface it as --help text.
@click.command()
@click.argument("count", type=int)
@click.argument("offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def get_all_pod_config(
    count: int,
    offset: int,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # --doc: print the wrapped SDK operation's docstring and exit.
    if doc:
        click.echo(get_all_pod_config_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # Caller supplied a ready-made Authorization header value.
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        # Otherwise authenticate as the requested principal first.
        login_as_internal(login_as)
    result, error = get_all_pod_config_internal(
        count=count,
        offset=offset,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"GetAllPodConfig failed: {str(error)}")
    # Dump the response model as YAML for readability.
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))


# Metadata consumed by the CLI framework's command registry.
get_all_pod_config.operation_id = "GetAllPodConfig"
get_all_pod_config.is_deprecated = False
| 31.315789 | 88 | 0.735294 |
f593bb68ee913653a1cd7e2f1613da7e930dafb3 | 5,600 | py | Python | ultracart/models/webhook_log_summaries_response.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | ultracart/models/webhook_log_summaries_response.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | ultracart/models/webhook_log_summaries_response.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WebhookLogSummariesResponse(object):
    """Swagger model: the UltraCart "webhook log summaries" API response.

    NOTE: originally produced by the swagger code generator; this is a
    behaviorally identical, hand-formatted version of the same model.
    """
    # attribute name -> swagger type (drives the to_dict() traversal)
    swagger_types = {
        'error': 'Error',
        'metadata': 'ResponseMetadata',
        'success': 'bool',
        'webhook_log_summaries': 'list[WebhookLogSummary]'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'error': 'error',
        'metadata': 'metadata',
        'success': 'success',
        'webhook_log_summaries': 'webhook_log_summaries'
    }

    def __init__(self, error=None, metadata=None, success=None, webhook_log_summaries=None):
        """Construct the model; every field is optional and defaults to None."""
        self.discriminator = None
        for field, supplied in (
                ('error', error),
                ('metadata', metadata),
                ('success', success),
                ('webhook_log_summaries', webhook_log_summaries)):
            # back every public property with a private slot, then route any
            # supplied value through the corresponding property setter
            setattr(self, '_' + field, None)
            if supplied is not None:
                setattr(self, field, supplied)

    @property
    def error(self):
        """Error object of this response, if any (type: Error)."""
        return self._error

    @error.setter
    def error(self, error):
        self._error = error

    @property
    def metadata(self):
        """Response metadata (type: ResponseMetadata)."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata

    @property
    def success(self):
        """Indicates if the API call was successful (type: bool)."""
        return self._success

    @success.setter
    def success(self, success):
        self._success = success

    @property
    def webhook_log_summaries(self):
        """Webhook log summaries (type: list[WebhookLogSummary])."""
        return self._webhook_log_summaries

    @webhook_log_summaries.setter
    def webhook_log_summaries(self, webhook_log_summaries):
        self._webhook_log_summaries = webhook_log_summaries

    def to_dict(self):
        """Return the model properties as a dict.

        One level deep: nested model objects are converted via their own
        to_dict(); other values are passed through unchanged.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when both are this type and all attributes match."""
        return (isinstance(other, WebhookLogSummariesResponse)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 27.05314 | 100 | 0.593036 |
5f615ceb91178f50bd6476afe9e24dfaf6e0f26f | 14,848 | py | Python | tests/test_transforms.py | ssoheily/pytorchvideo | 25dac74dbcf90f9918a1bd5da5299fd251fdfd89 | [
"Apache-2.0"
] | 3 | 2021-05-27T16:01:23.000Z | 2021-12-08T14:54:58.000Z | tests/test_transforms.py | ssoheily/pytorchvideo | 25dac74dbcf90f9918a1bd5da5299fd251fdfd89 | [
"Apache-2.0"
] | null | null | null | tests/test_transforms.py | ssoheily/pytorchvideo | 25dac74dbcf90f9918a1bd5da5299fd251fdfd89 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from collections import Counter
import numpy as np
import torch
from pytorchvideo.data.utils import thwc_to_cthw
from pytorchvideo.transforms import (
ApplyTransformToKey,
MixUp,
Normalize,
OpSampler,
RandomShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from pytorchvideo.transforms.functional import (
convert_to_one_hot,
uniform_temporal_subsample_repeated,
short_side_scale,
uniform_crop,
uniform_temporal_subsample,
)
from torchvision.transforms import Compose
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
from utils import create_dummy_video_frames
class TestTransforms(unittest.TestCase):
    """Unit tests for pytorchvideo transforms and their functional equivalents.

    Dummy clips come from create_dummy_video_frames(num_frames, height, width)
    and are converted to (C, T, H, W) float tensors via thwc_to_cthw before
    being passed to the transforms under test.
    """
    def test_compose_with_video_transforms(self):
        """End-to-end Compose pipeline mixing torchvision and pytorchvideo ops."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
            dtype=torch.float32
        )
        test_clip = {"video": video, "label": 0}
        # Compose using torchvision and pytorchvideo transforms to ensure they interact
        # correctly.
        num_subsample = 10
        transform = Compose(
            [
                ApplyTransformToKey(
                    key="video",
                    transform=Compose(
                        [
                            UniformTemporalSubsample(num_subsample),
                            NormalizeVideo([video.mean()] * 3, [video.std()] * 3),
                            RandomShortSideScale(min_size=15, max_size=25),
                            RandomCropVideo(10),
                            RandomHorizontalFlipVideo(p=0.5),
                        ]
                    ),
                )
            ]
        )
        actual = transform(test_clip)
        c, t, h, w = actual["video"].shape
        self.assertEqual(c, 3)
        self.assertEqual(t, num_subsample)
        self.assertEqual(h, 10)
        self.assertEqual(w, 10)
    def test_uniform_temporal_subsample(self):
        """Subsampling all / half / one frame picks evenly-spaced indices."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
            dtype=torch.float32
        )
        # Sampling every frame is the identity.
        actual = uniform_temporal_subsample(video, video.shape[1])
        self.assertTrue(actual.equal(video))
        video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
            dtype=torch.float32
        )
        # Half the frames: indices are evenly spread over [0, T-1].
        actual = uniform_temporal_subsample(video, video.shape[1] // 2)
        self.assertTrue(actual.equal(video[:, [0, 2, 4, 6, 8, 10, 12, 14, 16, 19]]))
        video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
            dtype=torch.float32
        )
        # A single frame: the first one.
        actual = uniform_temporal_subsample(video, 1)
        self.assertTrue(actual.equal(video[:, 0:1]))
    def test_short_side_scale_width_shorter_pytorch(self):
        """Scaling (H=20, W=10) to short side 5 halves both dims (pytorch backend)."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 20, 10)).to(
            dtype=torch.float32
        )
        actual = short_side_scale(video, 5, backend="pytorch")
        self.assertEqual(actual.shape, (3, 20, 10, 5))
    def test_short_side_scale_height_shorter_pytorch(self):
        """Scaling (H=10, W=20) to short side 5 halves both dims (pytorch backend)."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 10, 20)).to(
            dtype=torch.float32
        )
        actual = short_side_scale(video, 5, backend="pytorch")
        self.assertEqual(actual.shape, (3, 20, 5, 10))
    def test_short_side_scale_equal_size_pytorch(self):
        """Scaling a square clip to its own side length is a no-op shape-wise."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 10, 10)).to(
            dtype=torch.float32
        )
        actual = short_side_scale(video, 10, backend="pytorch")
        self.assertEqual(actual.shape, (3, 20, 10, 10))
    def test_short_side_scale_width_shorter_opencv(self):
        """Same as the pytorch-backend test but with the opencv backend."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 20, 10)).to(
            dtype=torch.float32
        )
        actual = short_side_scale(video, 5, backend="opencv")
        self.assertEqual(actual.shape, (3, 20, 10, 5))
    def test_short_side_scale_height_shorter_opencv(self):
        """Same as the pytorch-backend test but with the opencv backend."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 10, 20)).to(
            dtype=torch.float32
        )
        actual = short_side_scale(video, 5, backend="opencv")
        self.assertEqual(actual.shape, (3, 20, 5, 10))
    def test_short_side_scale_equal_size_opencv(self):
        """Square no-op scaling with the opencv backend."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 10, 10)).to(
            dtype=torch.float32
        )
        actual = short_side_scale(video, 10, backend="opencv")
        self.assertEqual(actual.shape, (3, 20, 10, 10))
    def test_torchscriptable_input_output(self):
        """Scripted transforms must produce the same output as eager ones."""
        video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
            dtype=torch.float32
        )
        # Test all the torchscriptable tensors.
        for transform in [UniformTemporalSubsample(10), RandomShortSideScale(10, 20)]:
            transform_script = torch.jit.script(transform)
            self.assertTrue(isinstance(transform_script, torch.jit.ScriptModule))
            # Seed before each transform to force determinism.
            torch.manual_seed(0)
            output = transform(video)
            torch.manual_seed(0)
            script_output = transform_script(video)
            self.assertTrue(output.equal(script_output))
    def test_uniform_temporal_subsample_repeated(self):
        """Repeated subsampling returns one clip per requested rate."""
        video = thwc_to_cthw(create_dummy_video_frames(32, 10, 10)).to(
            dtype=torch.float32
        )
        actual = uniform_temporal_subsample_repeated(video, (1, 4))
        expected_shape = ((3, 32, 10, 10), (3, 8, 10, 10))
        for idx in range(len(actual)):
            self.assertEqual(actual[idx].shape, expected_shape[idx])
    def test_uniform_crop(self):
        """spatial_idx selects left/center/right (landscape) or top/center/bottom (portrait)."""
        # For videos with height < width.
        video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
            dtype=torch.float32
        )
        # Left crop.
        actual = uniform_crop(video, size=20, spatial_idx=0)
        self.assertTrue(actual.equal(video[:, :, 5:25, :20]))
        # Center crop.
        actual = uniform_crop(video, size=20, spatial_idx=1)
        self.assertTrue(actual.equal(video[:, :, 5:25, 10:30]))
        # Right crop.
        actual = uniform_crop(video, size=20, spatial_idx=2)
        self.assertTrue(actual.equal(video[:, :, 5:25, 20:]))
        # For videos with height > width.
        video = thwc_to_cthw(create_dummy_video_frames(20, 40, 30)).to(
            dtype=torch.float32
        )
        # Top crop.
        actual = uniform_crop(video, size=20, spatial_idx=0)
        self.assertTrue(actual.equal(video[:, :, :20, 5:25]))
        # Center crop.
        actual = uniform_crop(video, size=20, spatial_idx=1)
        self.assertTrue(actual.equal(video[:, :, 10:30, 5:25]))
        # Bottom crop.
        actual = uniform_crop(video, size=20, spatial_idx=2)
        self.assertTrue(actual.equal(video[:, :, 20:, 5:25]))
    def test_uniform_crop_transform(self):
        """Dict-based UniformCropVideo reads aug_index and crops accordingly."""
        video = thwc_to_cthw(create_dummy_video_frames(10, 30, 40)).to(
            dtype=torch.float32
        )
        test_clip = {"video": video, "aug_index": 1, "label": 0}
        transform = UniformCropVideo(20)
        actual = transform(test_clip)
        c, t, h, w = actual["video"].shape
        self.assertEqual(c, 3)
        self.assertEqual(t, 10)
        self.assertEqual(h, 20)
        self.assertEqual(w, 20)
        # aug_index == 1 is the center crop of a landscape clip.
        self.assertTrue(actual["video"].equal(video[:, :, 5:25, 10:30]))
    def test_normalize(self):
        """After Normalize(mean, std), the clip has ~zero mean and unit std."""
        video = thwc_to_cthw(create_dummy_video_frames(10, 30, 40)).to(
            dtype=torch.float32
        )
        transform = Normalize(video.mean(), video.std())
        actual = transform(video)
        self.assertAlmostEqual(actual.mean().item(), 0)
        self.assertAlmostEqual(actual.std().item(), 1)
    def test_convert_to_one_hot(self):
        """One-hot conversion with and without label smoothing."""
        # Test without label smooth.
        num_class = 5
        num_samples = 10
        labels = torch.arange(0, num_samples) % num_class
        one_hot = convert_to_one_hot(labels, num_class)
        self.assertEqual(one_hot.sum(), num_samples)
        label_value = 1.0
        for index in range(num_samples):
            label = labels[index]
            self.assertEqual(one_hot[index][label], label_value)
        # Test with label smooth.
        labels = torch.arange(0, num_samples) % num_class
        label_smooth = 0.1
        one_hot_smooth = convert_to_one_hot(
            labels, num_class, label_smooth=label_smooth
        )
        self.assertEqual(one_hot_smooth.sum(), num_samples)
        # Smoothed target for the true class: 1 - eps + eps / num_class.
        label_value_smooth = 1 - label_smooth + label_smooth / num_class
        for index in range(num_samples):
            label = labels[index]
            self.assertEqual(one_hot_smooth[index][label], label_value_smooth)
    def test_OpSampler(self):
        """OpSampler picks ops with/without weights and with/without replacement."""
        # Test with weights.
        n_transform = 3
        transform_list = [lambda x, i=i: x.fill_(i) for i in range(n_transform)]
        transform_weight = [1] * n_transform
        transform = OpSampler(transform_list, transform_weight)
        input_tensor = torch.rand(1)
        out_tensor = transform(input_tensor)
        self.assertTrue(out_tensor.sum() in list(range(n_transform)))
        # Test without weights.
        input_tensor = torch.rand(1)
        transform_no_weight = OpSampler(transform_list)
        out_tensor = transform_no_weight(input_tensor)
        self.assertTrue(out_tensor.sum() in list(range(n_transform)))
        # Make sure each transform is sampled without replacement.
        transform_op_values = [3, 5, 7]
        all_possible_out = [15, 21, 35]
        transform_list = [lambda x, i=i: x * i for i in transform_op_values]
        test_time = 100
        transform_no_replacement = OpSampler(transform_list, num_sample_op=2)
        for _ in range(test_time):
            input_tensor = torch.ones(1)
            out_tensor = transform_no_replacement(input_tensor)
            # Products of two *distinct* factors only.
            self.assertTrue(out_tensor.sum() in all_possible_out)
        # Make sure each transform is sampled with replacement.
        transform_op_values = [3, 5, 7]
        possible_replacement_out = [9, 25, 49]
        input_tensor = torch.ones(1)
        transform_list = [lambda x, i=i: x * i for i in transform_op_values]
        test_time = 100
        transform_no_replacement = OpSampler(
            transform_list, replacement=True, num_sample_op=2
        )
        replace_time = 0
        for _ in range(test_time):
            input_tensor = torch.ones(1)
            out_tensor = transform_no_replacement(input_tensor)
            # Squares can only appear when the same op is drawn twice.
            if out_tensor.sum() in possible_replacement_out:
                replace_time += 1
        self.assertTrue(replace_time > 0)
        # Test without weights.
        transform_op_values = [3.0, 5.0, 7.0]
        input_tensor = torch.ones(1)
        transform_list = [lambda x, i=i: x * i for i in transform_op_values]
        test_time = 10000
        weights = [10.0, 2.0, 1.0]
        transform_no_replacement = OpSampler(transform_list, weights)
        weight_counter = Counter()
        for _ in range(test_time):
            input_tensor = torch.ones(1)
            out_tensor = transform_no_replacement(input_tensor)
            weight_counter[out_tensor.sum().item()] += 1
        # Empirical pick frequencies should roughly follow the given weights.
        for index, w in enumerate(weights):
            gt_dis = w / sum(weights)
            out_key = transform_op_values[index]
            self.assertTrue(
                np.allclose(weight_counter[out_key] / test_time, gt_dis, rtol=0.2)
            )
    def test_mixup(self):
        """MixUp preserves total mass of inputs and labels for images and videos."""
        # Test images.
        batch_size = 2
        h_size = 10
        w_size = 10
        c_size = 3
        input_images = torch.rand(batch_size, c_size, h_size, w_size)
        # One all-zeros and one all-ones sample: any convex mix sums to C*H*W.
        input_images[0, :].fill_(0)
        input_images[1, :].fill_(1)
        alpha = 1.0
        label_smoothing = 0.0
        num_classes = 5
        transform_mixup = MixUp(
            alpha=alpha,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
        )
        labels = torch.arange(0, batch_size) % num_classes
        mixed_images, mixed_labels = transform_mixup(input_images, labels)
        gt_image_sum = h_size * w_size * c_size
        label_sum = batch_size
        self.assertTrue(
            np.allclose(mixed_images.sum().item(), gt_image_sum, rtol=0.001)
        )
        self.assertTrue(np.allclose(mixed_labels.sum().item(), label_sum, rtol=0.001))
        self.assertEqual(mixed_labels.size(0), batch_size)
        self.assertEqual(mixed_labels.size(1), num_classes)
        self.assertEqual(mixed_labels.size(1), num_classes)
        # Test videos.
        batch_size = 2
        h_size = 10
        w_size = 10
        c_size = 3
        t_size = 2
        input_video = torch.rand(batch_size, c_size, t_size, h_size, w_size)
        input_video[0, :].fill_(0)
        input_video[1, :].fill_(1)
        alpha = 1.0
        label_smoothing = 0.0
        num_classes = 5
        transform_mixup = MixUp(
            alpha=alpha,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
        )
        labels = torch.arange(0, batch_size) % num_classes
        mixed_videos, mixed_labels = transform_mixup(input_video, labels)
        gt_video_sum = h_size * w_size * c_size * t_size
        label_sum = batch_size
        self.assertTrue(
            np.allclose(mixed_videos.sum().item(), gt_video_sum, rtol=0.001)
        )
        self.assertTrue(np.allclose(mixed_labels.sum().item(), label_sum, rtol=0.001))
        self.assertEqual(mixed_labels.size(0), batch_size)
        self.assertEqual(mixed_labels.size(1), num_classes)
        self.assertEqual(mixed_labels.size(1), num_classes)
        # Test videos with label smoothing.
        input_video = torch.rand(batch_size, c_size, t_size, h_size, w_size)
        input_video[0, :].fill_(0)
        input_video[1, :].fill_(1)
        alpha = 1.0
        label_smoothing = 0.2
        num_classes = 5
        transform_mixup = MixUp(
            alpha=alpha,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
        )
        labels = torch.arange(0, batch_size) % num_classes
        mixed_videos, mixed_labels = transform_mixup(input_video, labels)
        gt_video_sum = h_size * w_size * c_size * t_size
        label_sum = batch_size
        self.assertTrue(
            np.allclose(mixed_videos.sum().item(), gt_video_sum, rtol=0.001)
        )
        self.assertTrue(np.allclose(mixed_labels.sum().item(), label_sum, rtol=0.001))
        self.assertEqual(mixed_labels.size(0), batch_size)
        self.assertEqual(mixed_labels.size(1), num_classes)
        self.assertEqual(mixed_labels.size(1), num_classes)
        # Check smoothing value is in label.
        smooth_value = label_smoothing / num_classes
        self.assertTrue(smooth_value in torch.unique(mixed_labels))
| 38.268041 | 88 | 0.622575 |
c76aa4728141d63366524935fc149d6ea0442358 | 28,168 | py | Python | app/services/dialog/node_op_menu.py | tirinox/thorchainmonitorbot | 9f4604ea3c4ce91239e570b51381209096fc769f | [
"MIT"
] | 3 | 2020-10-13T16:48:35.000Z | 2021-06-02T19:11:51.000Z | app/services/dialog/node_op_menu.py | tirinox/thorchainmonitorbot | 9f4604ea3c4ce91239e570b51381209096fc769f | [
"MIT"
] | null | null | null | app/services/dialog/node_op_menu.py | tirinox/thorchainmonitorbot | 9f4604ea3c4ce91239e570b51381209096fc769f | [
"MIT"
] | 1 | 2021-02-28T19:22:18.000Z | 2021-02-28T19:22:18.000Z | from typing import List
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import Message, CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardRemove
from aiogram.utils.exceptions import MessageNotModified
from aiogram.utils.helper import HelperMode
from services.dialog.base import BaseDialog, message_handler, query_handler
from services.jobs.node_churn import NodeStateDatabase
from services.lib.date_utils import parse_timespan_to_seconds, HOUR
from services.lib.telegram import TelegramInlineList
from services.lib.texts import join_as_numbered_list, grouper
from services.lib.utils import parse_list_from_string, fuzzy_search
from services.models.node_info import NodeInfo
from services.models.node_watchers import NodeWatcherStorage
from services.notify.personal.helpers import NodeOpSetting, STANDARD_INTERVALS
class NodeOpStates(StatesGroup):
    """Aiogram FSM states for the node-operator (NodeOp) Telegram dialog."""
    mode = HelperMode.snake_case  # state identifiers are serialized in snake_case
    # top-level navigation states
    MAIN_MENU = State()
    ADDING = State()
    MANAGE_MENU = State()
    SETTINGS = State()
    # per-alert settings sub-dialog states
    SETT_SLASH_ENABLED = State()
    SETT_SLASH_PERIOD = State()
    SETT_SLASH_THRESHOLD = State()
    SETT_BOND_ENABLED = State()
    SETT_NEW_VERSION_ENABLED = State()
    SETT_UPDATE_VERSION_ENABLED = State()
    SETT_CHURNING_ENABLED = State()
    SETT_OFFLINE_ENABLED = State()
    SETT_OFFLINE_INTERVAL = State()
    SETT_HEIGHT_ENABLED = State()
    SETT_HEIGHT_LAG_TIME = State()
    SETT_IP_ADDRESS = State()
class NodeOpDialog(BaseDialog):
# ----------- MAIN ------------
# @message_handler(state=NodeOpStates.MAIN_MENU)
# async def on_handle_main_menu(self, message: Message):
# if message.text == self.loc.BUTTON_BACK:
# await self.go_back(message)
# else:
# return False
# return True
    async def show_main_menu(self, message: Message, with_welcome=True):
        """Enter MAIN_MENU state and render the NodeOp main menu.

        When with_welcome is True, sends two fresh messages (an intro heading
        that also removes the reply keyboard, then the menu); otherwise edits
        the given message in place.
        """
        await NodeOpStates.MAIN_MENU.set()
        watch_list = await self.storage(message.chat.id).all_nodes_with_names_for_user()
        # Two rows of buttons; callback data is dispatched in on_main_menu_callback.
        inline_kbd = [
            [
                InlineKeyboardButton(self.loc.BUTTON_NOP_ADD_NODES, callback_data='mm:add'),
                InlineKeyboardButton(self.loc.BUTTON_NOP_MANAGE_NODES, callback_data='mm:edit')
            ],
            [
                InlineKeyboardButton(self.loc.BUTTON_NOP_SETTINGS, callback_data='mm:settings'),
                InlineKeyboardButton(self.loc.BUTTON_BACK, callback_data='back')
            ]
        ]
        text = self.loc.text_node_op_welcome_text_part2(watch_list, self.deps.node_op_notifier.last_signal_sec_ago)
        if with_welcome:
            await message.answer(self.loc.TEXT_NOP_INTRO_HEADING,
                                 reply_markup=ReplyKeyboardRemove(),
                                 disable_notification=True)
            await message.answer(text,
                                 reply_markup=InlineKeyboardMarkup(inline_keyboard=inline_kbd),
                                 disable_notification=True)
        else:
            await message.edit_text(text, reply_markup=InlineKeyboardMarkup(inline_keyboard=inline_kbd))
@query_handler(state=NodeOpStates.MAIN_MENU)
async def on_main_menu_callback(self, query: CallbackQuery):
if query.data == 'mm:add':
await self.on_add_node_menu(query.message)
elif query.data == 'mm:edit':
await self.on_manage_menu(query.message)
elif query.data == 'mm:settings':
await self.on_settings_menu(query)
else:
await self.safe_delete(query.message)
await self.go_back(query.message) # fixme: asking lang because query message is bot's message, not user's!
await query.answer()
# -------- ADDING ---------
    async def all_nodes_list_maker(self, user_id):
        """Build the paginated TelegramInlineList of all known nodes.

        Each entry is a (button text, node_address) pair; nodes already in the
        user's watch list are rendered with a "watching" mark. Two extra
        buttons ('add:all' / 'add:active') allow bulk subscription.
        """
        watch_list = set(await self.storage(user_id).all_nodes_for_user())
        last_nodes = await self.get_all_nodes()
        last_node_texts = [
            # add node_address as a tag
            (self.loc.short_node_desc(n, watching=(n.node_address in watch_list)), n.node_address) for n in last_nodes
        ]
        return TelegramInlineList(last_node_texts, data_proxy=self.data, back_text=self.loc.BUTTON_BACK,
                                  data_prefix='all_nodes').set_extra_buttons_above(
            [
                [
                    InlineKeyboardButton(self.loc.BUTTON_NOP_ADD_ALL_NODES, callback_data='add:all'),
                    InlineKeyboardButton(self.loc.BUTTON_NOP_ADD_ALL_ACTIVE_NODES, callback_data='add:active')
                ]
            ])
async def on_add_node_menu(self, message: Message):
await NodeOpStates.ADDING.set()
tg_list = await self.all_nodes_list_maker(message.chat.id)
# to hide KB
# await message.answer(self.loc.TEXT_NOP_ADD_INSTRUCTIONS_PRE, reply_markup=ReplyKeyboardRemove())
# await message.answer(self.loc.TEXT_NOP_ADD_INSTRUCTIONS, reply_markup=tg_list.reset_page().keyboard())
await message.edit_text(
self.loc.TEXT_NOP_ADD_INSTRUCTIONS_PRE + '\n\n' +
self.loc.TEXT_NOP_ADD_INSTRUCTIONS, reply_markup=tg_list.reset_page().keyboard())
@message_handler(state=NodeOpStates.ADDING)
async def on_add_got_message(self, message: Message):
if message.text == self.loc.BUTTON_BACK:
await self.show_main_menu(message)
return
nodes = await self.parse_nodes_from_text_list(message.text)
if not nodes:
await message.answer(self.loc.TEXT_NOP_SEARCH_NO_VARIANTS)
else:
variants = join_as_numbered_list(map(self.loc.pretty_node_desc, nodes))
await message.answer(self.loc.TEXT_NOP_SEARCH_VARIANTS + '\n\n' + variants)
    @query_handler(state=NodeOpStates.ADDING)
    async def on_add_list_callback(self, query: CallbackQuery):
        """Handle button presses on the "add nodes" inline list.

        Selecting an entry toggles its watched state; 'add:all' / 'add:active'
        subscribe to all known / all active nodes at once; Back returns to the
        main menu.
        """
        user_id = query.message.chat.id
        tg_list = await self.all_nodes_list_maker(user_id)
        result = await tg_list.handle_query(query)
        if result.result == result.BACK:
            await self.show_main_menu(query.message, with_welcome=False)
        elif result.result == result.SELECTED:
            node_to_add = result.selected_data_tag
            current_node_set = set(await self.storage(user_id).all_nodes_for_user())
            # Toggle behavior: already watched -> remove, otherwise add.
            if node_to_add in current_node_set:
                await self.storage(user_id).remove_user_nodes([node_to_add])
            else:
                await self.add_nodes_for_user(query, [node_to_add], user_id, go_back=False)
        elif query.data == 'add:all':
            last_nodes = await self.get_all_nodes()
            await self.add_nodes_for_user(query, [n.node_address for n in last_nodes], user_id)
        elif query.data == 'add:active':
            last_nodes = await self.get_all_active_nodes()
            await self.add_nodes_for_user(query, [n.node_address for n in last_nodes], user_id)
async def add_nodes_for_user(self, query: CallbackQuery, node_list: list, user_id, go_back=True):
if not node_list:
return
await self.storage(user_id).add_user_to_node_list(node_list)
await query.answer(self.loc.text_nop_success_add_banner(node_list))
if go_back:
await self.show_main_menu(query.message, with_welcome=False)
# -------- MANAGE ---------
    async def my_node_list_maker(self, user_id):
        """Build the TelegramInlineList of the user's watched nodes.

        Above the list, bulk-removal buttons are shown only when the matching
        subsets are non-empty: clear all ('del:all'), remove inactive
        ('del:inactive') and remove disconnected ('del:disconnected').
        """
        watch_list = await self.storage(user_id).all_nodes_with_names_for_user()
        disconnected_addresses, inactive_addresses = await self.filter_user_nodes_by_category(list(watch_list.keys()))
        my_nodes_names = [
            # add node_address as a tag
            (self.loc.short_node_name(address, name), address) for address, name in watch_list.items()
        ]
        extra_row = []
        if watch_list:
            extra_row.append(InlineKeyboardButton(
                self.loc.BUTTON_NOP_CLEAR_LIST.format(n=len(watch_list)),
                callback_data='del:all'
            ))
        if inactive_addresses:
            extra_row.append(InlineKeyboardButton(
                self.loc.BUTTON_NOP_REMOVE_INACTIVE.format(n=len(inactive_addresses)),
                callback_data='del:inactive'
            ))
        if disconnected_addresses:
            extra_row.append(InlineKeyboardButton(
                self.loc.BUTTON_NOP_REMOVE_DISCONNECTED.format(n=len(disconnected_addresses)),
                callback_data='del:disconnected'
            ))
        return TelegramInlineList(
            my_nodes_names, data_proxy=self.data,
            max_rows=4, back_text=self.loc.BUTTON_BACK, data_prefix='my_nodes'
        ).set_extra_buttons_above([extra_row])
async def on_manage_menu(self, message: Message):
await NodeOpStates.MANAGE_MENU.set()
tg_list = await self.my_node_list_maker(message.chat.id)
keyboard = tg_list.reset_page().keyboard()
await message.edit_text(self.loc.TEXT_NOP_MANAGE_LIST_TITLE.format(n=len(tg_list)), reply_markup=keyboard)
    @query_handler(state=NodeOpStates.MANAGE_MENU)
    async def on_manage_callback(self, query: CallbackQuery):
        """Handle button presses on the watch-list management screen.

        Selecting an entry removes that node; the bulk buttons remove all,
        all inactive, or all disconnected watched nodes.
        """
        user_id = query.message.chat.id
        tg_list = await self.my_node_list_maker(user_id)
        result = await tg_list.handle_query(query)
        watch_list = await self.storage(user_id).all_nodes_for_user()
        disconnected_addresses, inactive_addresses = await self.filter_user_nodes_by_category(list(watch_list))
        if result.result == result.BACK:
            await self.show_main_menu(query.message, with_welcome=False)
        elif result.result == result.SELECTED:
            await self.remove_nodes_for_user(query, [result.selected_data_tag], user_id, go_back=False)
        elif query.data == 'del:all':
            await self.remove_nodes_for_user(query, watch_list, user_id)
        elif query.data == 'del:inactive':
            await self.remove_nodes_for_user(query, inactive_addresses, user_id)
        elif query.data == 'del:disconnected':
            await self.remove_nodes_for_user(query, disconnected_addresses, user_id)
async def remove_nodes_for_user(self, query: CallbackQuery, node_list: iter, user_id, go_back=True):
if not node_list:
return
await self.storage(user_id).remove_user_nodes(node_list)
await query.answer(self.loc.text_nop_success_remove_banner(node_list))
if go_back:
await self.show_main_menu(query.message, with_welcome=False)
else:
await self.on_manage_menu(query.message)
# -------- SETTINGS ---------
    def settings_kb(self):
        """Build the inline keyboard for the alert-settings screen.

        Each alert button's callback data is its NodeOpSetting key (the
        version button uses the combined 'setting:version' key); the last
        rows hold the pause-all toggle and the Back button.
        """
        loc = self.loc
        return InlineKeyboardMarkup(
            inline_keyboard=[
                [
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_SLASHING, NodeOpSetting.SLASH_ON),
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_VERSION,
                                              (NodeOpSetting.VERSION_ON, NodeOpSetting.NEW_VERSION_ON),
                                              data='setting:version'),
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_OFFLINE, NodeOpSetting.OFFLINE_ON),
                ],
                [
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_CHURNING, NodeOpSetting.CHURNING_ON),
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_BOND, NodeOpSetting.BOND_ON),
                ],
                [
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_HEIGHT, NodeOpSetting.CHAIN_HEIGHT_ON),
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_IP_ADDR, NodeOpSetting.IP_ADDRESS_ON),
                ],
                [
                    self.alert_setting_button(loc.BUTTON_NOP_SETT_PAUSE_ALL, NodeOpSetting.PAUSE_ALL_ON, default=False),
                ],
                [
                    InlineKeyboardButton(self.loc.BUTTON_BACK, callback_data='setting:back')
                ]
            ]
        )
async def on_settings_menu(self, query: CallbackQuery):
loc = self.loc
await NodeOpStates.SETTINGS.set()
await query.message.edit_text(loc.TEXT_NOP_SETTINGS_TITLE, reply_markup=self.settings_kb())
    @query_handler(state=NodeOpStates.SETTINGS)
    async def on_setting_callback(self, query: CallbackQuery):
        """Route a settings-screen button press to the matching sub-dialog.

        Unrecognized callback data falls through to query.answer() without
        any other effect.
        """
        if query.data == 'setting:back':
            await self.show_main_menu(query.message, with_welcome=False)
        elif query.data == NodeOpSetting.SLASH_ON:
            await self.ask_slash_enabled(query)
        elif query.data == 'setting:version':
            await self.ask_new_version_enabled(query)
        elif query.data == NodeOpSetting.BOND_ON:
            await self.ask_bond_enabled(query)
        elif query.data == NodeOpSetting.OFFLINE_ON:
            await self.ask_offline_enabled(query)
        elif query.data == NodeOpSetting.CHAIN_HEIGHT_ON:
            await self.ask_chain_height_enabled(query)
        elif query.data == NodeOpSetting.CHURNING_ON:
            await self.ask_churning_enabled(query)
        elif query.data == NodeOpSetting.IP_ADDRESS_ON:
            await self.ask_ip_address_tracker_enabled(query)
        elif query.data == NodeOpSetting.PAUSE_ALL_ON:
            await self.toggle_pause_all(query)
        await query.answer()
# -------- SETTINGS : PAUSE ALL ---------
async def toggle_pause_all(self, query: CallbackQuery):
is_on = self.is_alert_on(NodeOpSetting.PAUSE_ALL_ON)
self.data[NodeOpSetting.PAUSE_ALL_ON] = not is_on
try:
await query.message.edit_reply_markup(reply_markup=self.settings_kb())
except MessageNotModified:
pass
# -------- SETTINGS : SLASH ---------
    async def ask_slash_enabled(self, query: CallbackQuery):
        """Show the on/off dialog for slash-point alerts with the current state."""
        is_on = self.is_alert_on(NodeOpSetting.SLASH_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_SLASH_ENABLED,
                                         self.loc.text_nop_slash_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_SLASH_ENABLED)
    async def slash_enabled_answer_query(self, query: CallbackQuery):
        """Apply the on/off answer; routes either to ask_slash_threshold or back
        to the settings menu (exact routing decided by handle_query_for_something_on,
        defined elsewhere in this class)."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.SLASH_ON,
                                                 self.ask_slash_threshold,
                                                 self.on_settings_menu)
    async def ask_slash_threshold(self, query: CallbackQuery):
        """Ask how many slash points should trigger an alert.

        Shows a 3x3 grid of preset point values (callback data is the integer
        as a string, parsed in slash_threshold_answer_query) plus a Back button.
        """
        await NodeOpStates.SETT_SLASH_THRESHOLD.set()
        await query.message.edit_text(self.loc.TEXT_NOP_SLASH_THRESHOLD, reply_markup=InlineKeyboardMarkup(
            inline_keyboard=[
                [
                    InlineKeyboardButton('1 pt', callback_data='1'),
                    InlineKeyboardButton('2 pts', callback_data='2'),
                    InlineKeyboardButton('5 pts', callback_data='5'),
                ],
                [
                    InlineKeyboardButton('10 pts', callback_data='10'),
                    InlineKeyboardButton('15 pts', callback_data='15'),
                    InlineKeyboardButton('20 pts', callback_data='20'),
                ],
                [
                    InlineKeyboardButton('50 pts', callback_data='50'),
                    InlineKeyboardButton('100 pts', callback_data='100'),
                    InlineKeyboardButton('200 pts', callback_data='200'),
                ],
                [
                    InlineKeyboardButton(self.loc.BUTTON_BACK, callback_data='back')
                ]
            ]
        ))
@query_handler(state=NodeOpStates.SETT_SLASH_THRESHOLD)
async def slash_threshold_answer_query(self, query: CallbackQuery):
if query.data == 'back':
await self.on_settings_menu(query)
await query.answer()
else:
threshold = int(query.data)
self.data[NodeOpSetting.SLASH_THRESHOLD] = threshold
await self.ask_slash_period(query)
await query.answer(self.loc.SUCCESS)
async def ask_slash_period(self, query: CallbackQuery):
await NodeOpStates.SETT_SLASH_PERIOD.set()
keyboard = self.inline_keyboard_time_selector()
text = self.loc.text_nop_ask_slash_period(self.data.get(NodeOpSetting.SLASH_THRESHOLD, 1))
await query.message.edit_text(text, reply_markup=keyboard)
@query_handler(state=NodeOpStates.SETT_SLASH_PERIOD)
async def slash_period_answer_query(self, query: CallbackQuery):
if query.data == 'back':
await self.on_settings_menu(query)
await query.answer()
else:
self.data[NodeOpSetting.SLASH_PERIOD] = parse_timespan_to_seconds(query.data)
await self.on_settings_menu(query)
await query.answer(self.loc.SUCCESS)
# -------- SETTINGS : VERSION ---------
    async def ask_new_version_enabled(self, query: CallbackQuery):
        """Ask whether to alert when a new node software version is available."""
        is_on = self.is_alert_on(NodeOpSetting.NEW_VERSION_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_NEW_VERSION_ENABLED,
                                         self.loc.text_nop_new_version_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_NEW_VERSION_ENABLED)
    async def new_version_query_handle(self, query: CallbackQuery):
        """Toggle new-version alerts; continue to the version-upgrade question either way."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.NEW_VERSION_ON,
                                                 self.ask_version_up_enabled,
                                                 self.ask_version_up_enabled)
    async def ask_version_up_enabled(self, query: CallbackQuery):
        """Ask whether to alert when a watched node changes its version."""
        is_on = self.is_alert_on(NodeOpSetting.VERSION_ON)
        await self.ask_something_enabled(query,
                                         NodeOpStates.SETT_UPDATE_VERSION_ENABLED,
                                         self.loc.text_nop_version_up_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_UPDATE_VERSION_ENABLED)
    async def version_up_query_handle(self, query: CallbackQuery):
        """Toggle version-upgrade alerts and return to the settings menu."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.VERSION_ON,
                                                 self.on_settings_menu,
                                                 self.on_settings_menu)
# -------- SETTINGS : BOND ---------
    async def ask_bond_enabled(self, query: CallbackQuery):
        """Ask whether bond alerts should be enabled."""
        is_on = self.is_alert_on(NodeOpSetting.BOND_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_BOND_ENABLED,
                                         self.loc.text_nop_bond_is_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_BOND_ENABLED)
    async def bond_enabled_query_handle(self, query: CallbackQuery):
        """Toggle bond alerts and return to the settings menu."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.BOND_ON,
                                                 self.on_settings_menu,
                                                 self.on_settings_menu)
# -------- SETTINGS : OFFLINE ---------
    async def ask_offline_enabled(self, query: CallbackQuery):
        """Ask whether offline alerts should be enabled."""
        is_on = self.is_alert_on(NodeOpSetting.OFFLINE_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_OFFLINE_ENABLED,
                                         self.loc.text_nop_offline_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_OFFLINE_ENABLED)
    async def offline_enabled_query_handle(self, query: CallbackQuery):
        """Toggle offline alerts; if turned on, ask for the offline interval next."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.OFFLINE_ON,
                                                 self.ask_offline_interval,
                                                 self.on_settings_menu)
    async def ask_offline_interval(self, query: CallbackQuery):
        """Ask for the offline-alert interval; current value (default HOUR) is shown."""
        await NodeOpStates.SETT_OFFLINE_INTERVAL.set()
        keyboard = self.inline_keyboard_time_selector()
        text = self.loc.text_nop_ask_offline_period(self.data.get(NodeOpSetting.OFFLINE_INTERVAL, HOUR))
        await query.message.edit_text(text, reply_markup=keyboard)
@query_handler(state=NodeOpStates.SETT_OFFLINE_INTERVAL)
async def offline_period_answer_query(self, query: CallbackQuery):
if query.data == 'back':
await self.on_settings_menu(query)
await query.answer()
else:
self.data[NodeOpSetting.OFFLINE_INTERVAL] = parse_timespan_to_seconds(query.data)
await self.on_settings_menu(query)
await query.answer(self.loc.SUCCESS)
# -------- SETTINGS : CHAIN HEIGHT ---------
    async def ask_chain_height_enabled(self, query: CallbackQuery):
        """Ask whether chain-height alerts should be enabled."""
        is_on = self.is_alert_on(NodeOpSetting.CHAIN_HEIGHT_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_HEIGHT_ENABLED,
                                         self.loc.text_nop_chain_height_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_HEIGHT_ENABLED)
    async def chain_height_enabled_query_handle(self, query: CallbackQuery):
        """Toggle chain-height alerts; if turned on, ask for the block lag time next."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.CHAIN_HEIGHT_ON,
                                                 self.ask_block_lag_time,
                                                 self.on_settings_menu)
    async def ask_block_lag_time(self, query: CallbackQuery):
        """Ask for the acceptable chain-height lag time before alerting."""
        await NodeOpStates.SETT_HEIGHT_LAG_TIME.set()
        keyboard = self.inline_keyboard_time_selector()
        text = self.loc.text_nop_ask_chain_height_lag_time(self.data.get(NodeOpSetting.CHAIN_HEIGHT_INTERVAL, 1))
        await query.message.edit_text(text, reply_markup=keyboard)
@query_handler(state=NodeOpStates.SETT_HEIGHT_LAG_TIME)
async def block_height_lag_time_answer_query(self, query: CallbackQuery):
if query.data == 'back':
await self.on_settings_menu(query)
await query.answer()
else:
period = parse_timespan_to_seconds(query.data)
self.data[NodeOpSetting.CHAIN_HEIGHT_INTERVAL] = period
await self.on_settings_menu(query)
await query.answer(self.loc.SUCCESS)
# -------- SETTINGS : IP ADDRESS ---------
    async def ask_ip_address_tracker_enabled(self, query: CallbackQuery):
        """Ask whether IP-address-change alerts should be enabled."""
        is_on = self.is_alert_on(NodeOpSetting.IP_ADDRESS_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_IP_ADDRESS,
                                         self.loc.text_nop_ip_address_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_IP_ADDRESS)
    # NOTE(review): method name carries a triple-"s" typo; kept as-is for safety.
    async def ip_addresss_enabled_query_handle(self, query: CallbackQuery):
        """Toggle IP-address alerts and return to the settings menu."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.IP_ADDRESS_ON,
                                                 self.on_settings_menu,
                                                 self.on_settings_menu)
# -------- SETTINGS : CHURNING ---------
    async def ask_churning_enabled(self, query: CallbackQuery):
        """Ask whether churning alerts should be enabled."""
        is_on = self.is_alert_on(NodeOpSetting.CHURNING_ON)
        await self.ask_something_enabled(query, NodeOpStates.SETT_CHURNING_ENABLED,
                                         self.loc.text_nop_churning_enabled(is_on),
                                         is_on)
    @query_handler(state=NodeOpStates.SETT_CHURNING_ENABLED)
    async def churning_enabled_query_handle(self, query: CallbackQuery):
        """Toggle churning alerts and return to the settings menu."""
        await self.handle_query_for_something_on(query,
                                                 NodeOpSetting.CHURNING_ON,
                                                 self.on_settings_menu,
                                                 self.on_settings_menu)
# ---- UTILS ---
    def storage(self, user_id):
        """Return the per-user NodeWatcherStorage."""
        return NodeWatcherStorage(self.deps, user_id)
    async def get_all_nodes(self):
        """Fetch the most recently stored node info list from the database."""
        return await NodeStateDatabase(self.deps).get_last_node_info_list()
async def get_all_active_nodes(self):
nodes = await self.get_all_nodes()
return [n for n in nodes if n.is_active]
async def get_all_inactive_nodes(self):
nodes = await self.get_all_nodes()
return [n for n in nodes if not n.is_active]
    async def parse_nodes_from_text_list(self, message: str) -> List[NodeInfo]:
        """Resolve a free-form user message into known NodeInfo objects.

        The message is split into items, items shorter than 3 characters are
        dropped, and each remaining item is fuzzy-matched against the
        upper-cased addresses of all known nodes.
        """
        user_items = parse_list_from_string(message, upper=True)  # parse
        user_items = [item for item in user_items if len(item) >= 3]  # filter short
        # run fuzzy search
        nodes = await self.get_all_nodes()
        node_addresses = [n.node_address.upper() for n in nodes]
        results = set()
        for query in user_items:
            variants = fuzzy_search(query, node_addresses)
            results.update(set(variants))
        # pick node info; drop any address that cannot be mapped back to a node
        nodes_dic = {node.node_address.upper(): node for node in nodes}
        return list(filter(bool, (nodes_dic.get(address) for address in results)))
async def filter_user_nodes_by_category(self, node_addresses):
real_nodes = await self.get_all_nodes()
real_nodes_map = {n.node_address: n for n in real_nodes}
disconnected_addresses = set()
inactive_addresses = set()
for address in node_addresses:
node_info: NodeInfo = real_nodes_map.get(address)
if node_info is None:
disconnected_addresses.add(address)
elif not node_info.is_active:
inactive_addresses.add(address)
return disconnected_addresses, inactive_addresses
    @classmethod
    def is_enabled(cls, cfg):
        """Whether the NodeOp tools menu is enabled in the bot configuration."""
        return bool(cfg.get('telegram.menu.node_op_tools.enabled', default=False))
def is_alert_on(self, name, default=True):
return bool(self.data.get(name, default))
    def alert_setting_button(self, orig, setting, data=None, default=True):
        """Build a menu button with a check mark appended when the alert is on.

        *setting* may be one key or a list/tuple of keys; for a collection the
        check mark is shown if any key is enabled. *data* defaults to *setting*.
        """
        data = data or setting
        if isinstance(setting, (list, tuple)):
            is_on = any(self.is_alert_on(s, default) for s in setting)
        else:
            is_on = self.is_alert_on(setting, default)
        return InlineKeyboardButton(orig + (f' ✔' if is_on else ''), callback_data=data)
    async def ask_something_enabled(self, query: CallbackQuery, state: State, text: str, is_on: bool):
        """Render a generic on/off question for one alert setting.

        Sets the FSM *state*, then shows on/off buttons whose captions reflect
        the current value plus a Back button.
        """
        await state.set()
        loc = self.loc
        await query.message.edit_text(
            text,
            reply_markup=InlineKeyboardMarkup(inline_keyboard=[
                [
                    InlineKeyboardButton(loc.BUTTON_NOP_LEAVE_ON if is_on else loc.BUTTON_NOP_TURN_ON,
                                         callback_data='on'),
                    InlineKeyboardButton(loc.BUTTON_NOP_LEAVE_OFF if not is_on else loc.BUTTON_NOP_TURN_OFF,
                                         callback_data='off')
                ],
                [InlineKeyboardButton(loc.BUTTON_BACK, callback_data='back')]
            ]))
async def handle_query_for_something_on(self, query: CallbackQuery, setting, next_on_func, next_off_func):
if query.data == 'back':
await self.on_settings_menu(query)
elif query.data == 'on':
self.data[setting] = True
await next_on_func(query)
elif query.data == 'off':
self.data[setting] = False
await next_off_func(query)
await query.answer()
    def inline_keyboard_time_selector(self):
        """Build an inline keyboard of standard time intervals plus a Back button."""
        localization = self.loc.BUTTON_NOP_INTERVALS
        buttons = [
            InlineKeyboardButton(localization.get(t, t), callback_data=t) for t in STANDARD_INTERVALS
        ]
        # 5 interval buttons per row; the Back button gets its own trailing row.
        butt_groups = list(grouper(5, buttons))
        butt_groups += [[
            InlineKeyboardButton(self.loc.BUTTON_BACK, callback_data='back')
        ]]
        return InlineKeyboardMarkup(inline_keyboard=butt_groups)
| 46.481848 | 120 | 0.630219 |
8107f8447d4614c78105d164706b3f467c519a35 | 1,000 | py | Python | setup.py | DalavanCloud/SyntaViz | e1d09da1df058c4757d39cade4a5cc9e750eb8b8 | [
"Apache-2.0"
] | 1 | 2019-02-02T05:20:38.000Z | 2019-02-02T05:20:38.000Z | setup.py | DalavanCloud/SyntaViz | e1d09da1df058c4757d39cade4a5cc9e750eb8b8 | [
"Apache-2.0"
] | null | null | null | setup.py | DalavanCloud/SyntaViz | e1d09da1df058c4757d39cade4a5cc9e750eb8b8 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
import re
def get_version():
    """
    Extract the version from the module's root __init__.py file.

    Looks for a ``__version__ = "..."`` assignment and returns the captured
    string, or "unknown" when no such assignment is found.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle open until garbage collection).
    with open("syntaviz/__init__.py") as init_file:
        root_init_file = init_file.read()
    match = re.search(r"__version__[ ]+=[ ]+[\"'](.+)[\"']", root_init_file)
    return match.group(1) if match is not None else "unknown"
setup(
    name="syntaviz",
    version=get_version(),
    description="SyntaViz",
    packages=find_packages(),
    package_data={},
    # Python 2 only; the codebase has not been ported to Python 3.
    python_requires='>=2.7, <3',
    # NOTE(review): dependencies are pinned to quite old releases
    # (e.g. flask 0.12.3) — confirm these pins are still intentional.
    install_requires=["flask==0.12.3",
                      "matplotlib==2.0.2",
                      "numpy==1.8.2",
                      "scikit-learn==0.18.2",
                      "scipy==0.19.1",
                      "ipython==5.1.0",
                      "bokeh==0.12.5",
                      "nltk==3.2.3",
                      "pandas==0.20.2",
                      "torch"],
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
)
| 26.315789 | 75 | 0.501 |
e67d94e9717aff8d4f50d2ed56eefc6f994ec359 | 4,163 | py | Python | benchmark/startQiskit3384.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit3384.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit3384.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings.

    Note: the result string is reversed relative to the inputs — index 0 of
    the output corresponds to the last input position.
    """
    width = len(s)
    flipped = [str(int(s[i]) ^ int(t[i])) for i in range(width)]
    return ''.join(reversed(flipped))
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings over GF(2), returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f: flip the target qubit exactly when f(x) == "1".

    For each n-bit string with f-value "1", X gates map that bit pattern onto
    the all-ones control state, a multi-controlled Toffoli flips the target,
    and the X gates are then undone.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the n-qubit benchmark circuit around the oracle for *f* and
    measure every qubit into the classical register.

    NOTE(review): the `# number=...` tags look like auto-generated gate ids
    from a circuit-mutation tool; they are kept verbatim.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=28
    prog.cz(input_qubit[0],input_qubit[3]) # number=29
    prog.h(input_qubit[3]) # number=30
    prog.x(input_qubit[3]) # number=15
    prog.rx(1.8001325905069514,input_qubit[3]) # number=18
    prog.z(input_qubit[1]) # number=27
    prog.cx(input_qubit[0],input_qubit[3]) # number=16
    prog.h(input_qubit[1]) # number=22
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    prog.cx(input_qubit[0],input_qubit[3]) # number=31
    prog.cx(input_qubit[0],input_qubit[3]) # number=34
    prog.x(input_qubit[3]) # number=35
    prog.cx(input_qubit[0],input_qubit[3]) # number=36
    prog.cx(input_qubit[0],input_qubit[3]) # number=33
    # The oracle acts on the first n-1 qubits as controls, last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.x(input_qubit[1]) # number=25
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.z(input_qubit[1]) # number=21
    prog.h(input_qubit[0]) # number=9
    prog.cx(input_qubit[2],input_qubit[0]) # number=10
    prog.x(input_qubit[1]) # number=17
    prog.cx(input_qubit[2],input_qubit[0]) # number=11
    prog.y(input_qubit[0]) # number=12
    prog.y(input_qubit[0]) # number=13
    prog.z(input_qubit[2]) # number=26
    prog.cx(input_qubit[2],input_qubit[1]) # number=23
    prog.x(input_qubit[0]) # number=19
    prog.x(input_qubit[0]) # number=20
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # Oracle function: f(x) = (a . x) XOR b over bit strings.
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock device to record the compiled circuit size.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit3384.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
17c776c3cb093dbe49a207771dfe21b6c851222e | 1,325 | py | Python | src/get_data/run.py | iDataist/Building-a-Reproducible-Model-Workflow | c338443d508b6310e1e8ef03346b25d8bf292a5a | [
"MIT"
] | 1 | 2021-07-23T14:10:19.000Z | 2021-07-23T14:10:19.000Z | src/get_data/run.py | iDataist/Building-a-Reproducible-Model-Workflow | c338443d508b6310e1e8ef03346b25d8bf292a5a | [
"MIT"
] | null | null | null | src/get_data/run.py | iDataist/Building-a-Reproducible-Model-Workflow | c338443d508b6310e1e8ef03346b25d8bf292a5a | [
"MIT"
] | null | null | null | import argparse
import logging
import wandb
import sys
import os
# NOTE(review): hard-coded absolute path to one developer's machine — this
# import only works there. Consider installing the package or using a
# relative path instead.
sys.path.insert(
    1,
    "/Users/huiren/Downloads/Building a Reproducible Model"
    " Workflow/src/wandb_utils",
)
from log_artifact import log_artifact
# Timestamped log lines.
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
def go(args):
    """Start a W&B run and upload the given sample file as a named artifact.

    *args* must carry sample, artifact_name, artifact_type and
    artifact_description (see the CLI parser below).
    """
    run = wandb.init(job_type="download_file")
    run.config.update(args)
    logger.info(f"Returning sample {args.sample}")
    logger.info(f"Uploading {args.artifact_name} to Weights & Biases")
    # The sample file is expected under the local "data" directory.
    log_artifact(
        args.artifact_name,
        args.artifact_type,
        args.artifact_description,
        os.path.join("data", args.sample),
        run,
    )
if __name__ == "__main__":
    # CLI: four positional arguments describing the sample and the artifact.
    parser = argparse.ArgumentParser(
        description="Download URL to a local destination"
    )
    parser.add_argument(
        "sample", type=str, help="Name of the sample to download"
    )
    parser.add_argument(
        "artifact_name", type=str, help="Name for the output artifact"
    )
    parser.add_argument(
        "artifact_type", type=str, help="Output artifact type."
    )
    parser.add_argument(
        "artifact_description",
        type=str,
        help="A brief description of this artifact",
    )
    args = parser.parse_args()
    go(args)
| 22.083333 | 76 | 0.661132 |
bea90e2fd45f189132587856075fe1c026047e53 | 4,061 | py | Python | modules/remind_me_bot.py | ju135/HeinzBot | 17d4f46034176c9d8917fe8d8c750f9897526018 | [
"Apache-2.0"
] | 6 | 2019-05-12T13:30:48.000Z | 2020-07-30T08:58:10.000Z | modules/remind_me_bot.py | ju135/HeinzBot | 17d4f46034176c9d8917fe8d8c750f9897526018 | [
"Apache-2.0"
] | 16 | 2019-05-11T14:07:06.000Z | 2021-11-29T22:13:35.000Z | modules/remind_me_bot.py | ju135/HeinzBot | 17d4f46034176c9d8917fe8d8c750f9897526018 | [
"Apache-2.0"
] | 5 | 2019-05-11T13:29:47.000Z | 2020-01-15T12:18:40.000Z | import datetime
import dateparser
import pytz
import telegram
from telegram import Update, Message, User
from telegram.ext import CallbackContext
from modules.abstract_module import AbstractModule
from utils.decorators import register_module, register_command, log_errors
from utils.random_text import get_random_string_of_messages_file
def command(context: CallbackContext):
    """Job callback: post the reminder as a reply to the stored message.

    The job context is ``[message, specified_message, from_user]``:
    *specified_message* is the optional custom reminder text and *from_user*
    an optional user to mention in the reply.
    """
    message: Message = context.job.context[0]  # The message object is stored as job data
    specified_message = context.job.context[1]
    from_user: User = context.job.context[2]
    text = ""
    if from_user is not None:
        # Prefer @username, fall back to the full name, then a generic greeting.
        name = from_user.name
        name = from_user.full_name if name is None else name
        name = "Kollege" if name is None else name
        user_tag = f"[{name}](tg://user?id={from_user.id})\n"
        text += user_tag
    if specified_message is not None:
        text += f"🚨 {specified_message} 🚨"
    else:
        text += get_random_string_of_messages_file("constants/messages/reminder_messages.json")
    context.bot.send_message(chat_id=message.chat_id,
                             text=text,
                             reply_to_message_id=message.message_id,
                             parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)
@register_module()
class RemindMeBot(AbstractModule):
    @register_command(command="remindme",
                      short_desc="Reminds you of important stuff ⏰",
                      long_desc=f"Specify a time or time-interval together with an optional message and "
                                f"I will remind you by replying to your command at the specified time.",
                      usage=["/remindme $time [$message]", "/remindme 2h", "/remindme 30min Time for coffee",
                             "/remindme 1h30min Drink some water", "/remindme 31.12.2021 New year"])
    @log_errors()
    def remind_me_command(self, update: Update, context: CallbackContext):
        """Parse '/remindme <time> [message]' and schedule a one-shot reminder job."""
        query = self.get_command_parameter("/remindme", update)
        if not query:
            update.message.reply_text("Jetzt glei oda wos? Sunst miassast ma a Zeit augem.")
            return
        query_parts = query.split(" ")
        date_part = query_parts[0]
        specified_message = None
        # Everything after the first space counts as message to be reminded of
        if len(query_parts) > 1:
            specified_message = " ".join(query_parts[1:])
        parsed_date = dateparser.parse(date_part, locales=["de-AT", "en-AT"], settings={'TIMEZONE': 'Europe/Vienna',
                                                                                       'PREFER_DAY_OF_MONTH': 'first',
                                                                                       'PREFER_DATES_FROM': 'future'})
        if parsed_date is None:
            update.message.reply_text("I versteh de Zeitangabe leider ned.. Bitte formuliers a bissl ondas.")
            return
        formatted_date = parsed_date.strftime("%d.%m.%Y, %H:%M:%S")
        if parsed_date < datetime.datetime.now():
            update.message.reply_text(f"Wüst mi pflanzen? Der Zeitpunkt ({formatted_date}) is jo scho vorbei.. "
                                      f"Do kau i kan Reminder mochn.")
            return
        parsed_date = pytz.timezone('Europe/Vienna').localize(parsed_date)  # Set the timezone
        message_to_reply = update.message
        user_to_tag = None
        if update.message.reply_to_message is not None:
            message_to_reply = update.message.reply_to_message
            # BUG FIX: was `is not` (object identity). The two User objects are
            # distinct instances even for the same person, so the check was
            # effectively always True. `!=` uses telegram's id-based equality,
            # tagging the requester only when replying to someone else's message.
            if message_to_reply.from_user != update.message.from_user:
                # tag the requesting user when replying to a different message
                user_to_tag = update.message.from_user
        update.message.reply_text(f"Passt, bitte oida - i möd mi dann zu dem Zeitpunkt: {formatted_date}")
        context.dispatcher.job_queue.run_once(callback=command,
                                              when=parsed_date,
                                              context=[message_to_reply, specified_message, user_to_tag])
69ffa9cab799505462f33d39e69764267bbfcb88 | 7,127 | py | Python | lib/galaxy/auth/providers/ldap_ad.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 1 | 2019-11-03T11:45:43.000Z | 2019-11-03T11:45:43.000Z | lib/galaxy/auth/providers/ldap_ad.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 4 | 2017-05-24T19:36:34.000Z | 2019-08-23T02:49:18.000Z | lib/galaxy/auth/providers/ldap_ad.py | abretaud/galaxy | 1ad89511540e6800cd2d0da5d878c1c77d8ccfe9 | [
"CC-BY-3.0"
] | null | null | null | """
Created on 15/07/2014
@author: Andrew Robinson
"""
import logging
from galaxy.auth import _get_bool
from galaxy.exceptions import ConfigurationError
from ..providers import AuthProvider
log = logging.getLogger(__name__)
def _get_subs(d, k, params):
if k not in d:
raise ConfigurationError("Missing '%s' parameter in LDAP options" % k)
return str(d[k]).format(**params)
def _parse_ldap_options(ldap, options_unparsed):
    """Parse a comma-separated ``OPT_X=OPT_Y`` string into ldap option pairs.

    Both sides of each pair must name ``OPT_*`` constants on the *ldap*
    module; invalid pairs are logged and skipped. Returns a list of
    ``[option, value]`` pairs suitable for ``ldap.set_option(*pair)``.
    """
    # Tag is defined in the XML but is empty
    if not options_unparsed:
        return []
    if "=" not in options_unparsed:
        log.error("LDAP authenticate: Invalid syntax in <ldap-options>. Syntax should be option1=value1,option2=value2")
        return []
    ldap_options = []
    # Valid options must start with this prefix. See help(ldap)
    prefix = "OPT_"
    for opt in options_unparsed.split(","):
        key, value = opt.split("=")
        try:
            pair = []
            for n in (key, value):
                if not n.startswith(prefix):
                    raise ValueError
                name = getattr(ldap, n)
                pair.append(name)
        except ValueError:
            # `n` is whichever of (key, value) failed the prefix check.
            log.warning("LDAP authenticate: Invalid parameter pair %s=%s. '%s' doesn't start with prefix %s", key, value, n, prefix)
            continue
        except AttributeError:
            # `n` is whichever of (key, value) is not an attribute of `ldap`.
            log.warning("LDAP authenticate: Invalid parameter pair %s=%s. '%s' is not available in module ldap", key, value, n)
            continue
        else:
            log.debug("LDAP authenticate: Valid LDAP option pair %s=%s -> %s=%s", key, value, *pair)
            ldap_options.append(pair)
    return ldap_options
class LDAP(AuthProvider):
    """
    Attempts to authenticate users against an LDAP server.

    If options include search-fields then it will attempt to search LDAP for
    those fields first. After that it will bind to LDAP with the username
    (formatted as specified).
    """
    plugin_type = 'ldap'

    def authenticate(self, email, username, password, options):
        """
        See abstract method documentation.

        Returns a 3-tuple ``(status, email, username)``: *status* is True on
        success, False to reject but let later providers run, or None to
        reject and stop.
        """
        log.debug("LDAP authenticate: email is %s" % email)
        log.debug("LDAP authenticate: username is %s" % username)
        log.debug("LDAP authenticate: options are %s" % options)

        # 'continue-on-failure' arrives as a string from the config, hence the
        # string comparison rather than a boolean test.
        failure_mode = False  # reject but continue
        if options.get('continue-on-failure', 'False') == 'False':
            failure_mode = None  # reject and do not continue

        if _get_bool(options, 'login-use-username', False):
            if username is None:
                log.debug('LDAP authenticate: username must be used to login, cannot be None')
                return (failure_mode, '', '')
        else:
            if email is None:
                log.debug('LDAP authenticate: email must be used to login, cannot be None')
                return (failure_mode, '', '')

        try:
            import ldap
        except ImportError:
            # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; only a failed import should be tolerated here.
            log.debug('LDAP authenticate: could not load ldap module')
            return (failure_mode, '', '')

        # do LDAP search (if required)
        params = {'email': email, 'username': username, 'password': password}

        try:
            ldap_options_raw = _get_subs(options, 'ldap-options', params)
        except ConfigurationError:
            ldap_options = ()
        else:
            ldap_options = _parse_ldap_options(ldap, ldap_options_raw)

        if 'search-fields' in options:
            try:
                # setup connection
                ldap.set_option(ldap.OPT_REFERRALS, 0)
                for opt in ldap_options:
                    ldap.set_option(*opt)
                l = ldap.initialize(_get_subs(options, 'server', params))
                l.protocol_version = 3

                if 'search-user' in options:
                    l.simple_bind_s(_get_subs(options, 'search-user', params),
                                    _get_subs(options, 'search-password', params))
                else:
                    l.simple_bind_s()

                # setup search
                attributes = [_.strip().format(**params)
                              for _ in options['search-fields'].split(',')]
                suser = l.search_ext_s(_get_subs(options, 'search-base', params),
                                       ldap.SCOPE_SUBTREE,
                                       _get_subs(options, 'search-filter', params), attributes,
                                       timeout=60, sizelimit=1)

                # parse results
                if suser is None or len(suser) == 0:
                    log.warning('LDAP authenticate: search returned no results')
                    return (failure_mode, '', '')
                dn, attrs = suser[0]
                log.debug(("LDAP authenticate: dn is %s" % dn))
                log.debug(("LDAP authenticate: search attributes are %s" % attrs))
                # `has_key` check: only dict-like attrs (Python 2 style) are
                # folded into the substitution params.
                if hasattr(attrs, 'has_key'):
                    for attr in attributes:
                        if attr in attrs:
                            params[attr] = str(attrs[attr][0])
                        else:
                            params[attr] = ""
                params['dn'] = dn
            except Exception:
                log.exception('LDAP authenticate: search exception')
                return (failure_mode, '', '')
            # end search

        # bind as user to check their credentials
        try:
            # setup connection
            ldap.set_option(ldap.OPT_REFERRALS, 0)
            for opt in ldap_options:
                ldap.set_option(*opt)
            l = ldap.initialize(_get_subs(options, 'server', params))
            l.protocol_version = 3
            bind_password = _get_subs(options, 'bind-password', params)
            if not bind_password:
                # An empty password would otherwise succeed as an anonymous bind.
                raise RuntimeError('LDAP authenticate: empty password')
            l.simple_bind_s(_get_subs(
                options, 'bind-user', params), bind_password)
            try:
                whoami = l.whoami_s()
            except ldap.PROTOCOL_ERROR:
                # The "Who am I?" extended operation is not supported by this LDAP server
                pass
            else:
                log.debug("LDAP authenticate: whoami is %s", whoami)
                if whoami is None:
                    raise RuntimeError('LDAP authenticate: anonymous bind')
        except Exception:
            log.warning('LDAP authenticate: bind exception', exc_info=True)
            return (failure_mode, '', '')

        log.debug('LDAP authentication successful')
        return (True,
                _get_subs(options, 'auto-register-email', params),
                _get_subs(options, 'auto-register-username', params))

    def authenticate_user(self, user, password, options):
        """
        See abstract method documentation.
        """
        return self.authenticate(user.email, user.username, password, options)[0]
class ActiveDirectory(LDAP):
    """Alias for LDAP auth; may grow Active-Directory-specific logic later."""
    plugin_type = 'activedirectory'
__all__ = ('LDAP', 'ActiveDirectory')
| 35.108374 | 132 | 0.56293 |
52ddee63b09c33c464158e019e4455e68d436e9c | 151 | py | Python | mybuiltinfunctions/my-divmod.py | d-mwenda/my-python-notebook | 13dcb30d808a3ed88741f7ea904d387e3c0a8baa | [
"MIT"
] | null | null | null | mybuiltinfunctions/my-divmod.py | d-mwenda/my-python-notebook | 13dcb30d808a3ed88741f7ea904d387e3c0a8baa | [
"MIT"
] | null | null | null | mybuiltinfunctions/my-divmod.py | d-mwenda/my-python-notebook | 13dcb30d808a3ed88741f7ea904d387e3c0a8baa | [
"MIT"
] | null | null | null | """
Takes 2 non complex numbers as arguments and returns a pair of numbers consisting
of their quotient and remainder when using integer division.
"""
| 30.2 | 81 | 0.788079 |
cc416396ced5c45e9492b71fe010551962a29b49 | 2,459 | py | Python | tools/kernel_names.py | datalayer-contrib/jupyterwidgets-tutorial | 81a4d143e456e988302c40ff4405dd5c33ce8313 | [
"BSD-3-Clause"
] | 342 | 2017-08-23T18:36:58.000Z | 2022-03-11T18:47:31.000Z | tools/kernel_names.py | maartenbreddels/tutorial | 4f576647fc5e9e1697e241f5207d25514d50255c | [
"BSD-3-Clause"
] | 118 | 2017-08-23T01:42:45.000Z | 2022-02-14T18:11:47.000Z | 01-pandas-ipywidgets/jupyter-widget-ecosystem/tools/kernel_names.py | dushyantkhosla/viz4ds | 05a004a390d180d87be2d09873c3f7283c2a2e27 | [
"MIT"
] | 152 | 2017-08-22T22:24:28.000Z | 2022-03-31T12:45:37.000Z | from argparse import ArgumentParser
from pathlib import Path
import nbformat
NB_VERSION = 4
def change_kernel_name(notebook_name, kernel_name, display_name=None):
    """
    Change the name of the notebook kernel.

    Parameters
    ----------
    notebook_name : str
        Path of the notebook file; rewritten in place.
    kernel_name : str
        New kernelspec name.
    display_name : str, optional
        New kernelspec display name; defaults to *kernel_name*.
    """
    dname = display_name if display_name else kernel_name
    notebook = nbformat.read(notebook_name, NB_VERSION)
    current_kname = notebook['metadata']['kernelspec']['name']
    current_dname = notebook['metadata']['kernelspec']['display_name']
    if current_kname == kernel_name and current_dname == dname:
        # Nothing would change; skip the rewrite.
        print('not changing kernel of {}'.format(notebook_name))
        return
    notebook['metadata']['kernelspec']['name'] = kernel_name
    notebook['metadata']['kernelspec']['display_name'] = dname
    nbformat.write(notebook, notebook_name)
def get_kernel_name(notebook_name):
    """Return the kernel name recorded in the notebook's kernelspec metadata."""
    nb = nbformat.read(notebook_name, NB_VERSION)
    return nb['metadata']['kernelspec']['name']
if __name__ == '__main__':
    parser = ArgumentParser(description='Get or set kernel names for all '
                            'notebooks in a directory.')
    parser.add_argument('-d', '--directory', default='.',
                        help='Directory in which to look for notebooks.')
    parser.add_argument('-s', '--set',
                        dest='kernel_name',
                        metavar='kernel_name',
                        help="Set the kernel to this name for each notebook.")
    parser.add_argument('--display-name',
                        help="Display name of the kernel (default is same as "
                             "kernel name).")
    args = parser.parse_args()
    directory = args.directory if args.directory else '.'
    p = Path(directory)
    # Recursive search for notebooks below the chosen directory.
    notebooks = list(p.glob('**/*.ipynb'))
    if not notebooks:
        raise RuntimeError('No notebooks found at path {}'.format(directory))
    for notebook in notebooks:
        nb_str = str(notebook)
        if args.kernel_name:
            # Set mode: rewrite each notebook's kernelspec.
            change_kernel_name(nb_str, args.kernel_name,
                               display_name=args.display_name)
        else:
            # Get mode: print one "path<TAB>kernel" line per notebook.
            kname = get_kernel_name(nb_str)
            print('{}\t\t\t\t{}'.format(nb_str, kname))
f467eeeb0c3f17e10601af47a893e87c654241c4 | 332 | py | Python | logic/not_gate.py | FPGA-4-all/FPGA4all_MyHDL | 20ab2b1ff86d7b13cfca947967a9b4618325fbb1 | [
"MIT"
] | null | null | null | logic/not_gate.py | FPGA-4-all/FPGA4all_MyHDL | 20ab2b1ff86d7b13cfca947967a9b4618325fbb1 | [
"MIT"
] | null | null | null | logic/not_gate.py | FPGA-4-all/FPGA4all_MyHDL | 20ab2b1ff86d7b13cfca947967a9b4618325fbb1 | [
"MIT"
] | null | null | null | #not gate class
from myhdl import *
class not_gate:
    """Factory for a MyHDL one-input NOT gate."""
    def __init__(self):
        # number of gate inputs
        self.inputs = 1
    def one_input_not(self, a, b):
        """one input not gate
        a -> input
        b -> output
        """
        @always_comb
        def not_logic():
            b.next = intbv(not(a))
        return not_logic
| 19.529412 | 34 | 0.506024 |
491a568ab82004596d406ef46047eed9a3affbdc | 5,470 | py | Python | stcSeg/data/builtin.py | ylqi/STC-Seg | 4c881fa285051b4e2879afb7907f7562973d0669 | [
"MIT"
] | null | null | null | stcSeg/data/builtin.py | ylqi/STC-Seg | 4c881fa285051b4e2879afb7907f7562973d0669 | [
"MIT"
] | null | null | null | stcSeg/data/builtin.py | ylqi/STC-Seg | 4c881fa285051b4e2879afb7907f7562973d0669 | [
"MIT"
] | null | null | null | import os
from detectron2.data.datasets.register_coco import register_coco_instances
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from .datasets.text import register_text_instances
from .datasets.kitti import get_kitti_instances_meta, register_kitti_instances
from .datasets.kitti_mots import register_kitti_mots_instances, get_kitti_mots_instances_meta
from .datasets.kitti_mot import register_kitti_mot_instances, get_kitti_mot_instances_meta
from .datasets.ytvis import register_ytvis_instances, get_ytvis_instances_meta
# register plane reconstruction
# (image_root, json_file) pairs, relative to the datasets root.
_PREDEFINED_SPLITS_PIC = {
    "pic_person_train": ("pic/image/train", "pic/annotations/train_person.json"),
    "pic_person_val": ("pic/image/val", "pic/annotations/val_person.json"),
}
# Single-category metadata for the person dataset.
metadata_pic = {
    "thing_classes": ["person"]
}
# Scene-text datasets: (image_root, json_file) pairs.
_PREDEFINED_SPLITS_TEXT = {
    "totaltext_train": ("totaltext/train_images", "totaltext/train.json"),
    "totaltext_val": ("totaltext/test_images", "totaltext/test.json"),
    "ctw1500_word_train": ("CTW1500/ctwtrain_text_image", "CTW1500/annotations/train_ctw1500_maxlen100_v2.json"),
    "ctw1500_word_test": ("CTW1500/ctwtest_text_image","CTW1500/annotations/test_ctw1500_maxlen100.json"),
    "syntext1_train": ("syntext1/images", "syntext1/annotations/train.json"),
    "syntext2_train": ("syntext2/images", "syntext2/annotations/train.json"),
    "mltbezier_word_train": ("mlt2017/images","mlt2017/annotations/train.json"),
}
# Single-category metadata for text detection.
metadata_text = {
    "thing_classes": ["text"]
}
def register_all_coco(root="datasets"):
    """Register the PIC person splits and the scene-text splits.

    Local paths are resolved against *root*; entries containing '://' are
    treated as remote URIs and used verbatim.
    """
    for key, (image_root, json_file) in _PREDEFINED_SPLITS_PIC.items():
        # Assume pre-defined datasets live in `./datasets`.
        register_coco_instances(
            key,
            metadata_pic,
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )
    for key, (image_root, json_file) in _PREDEFINED_SPLITS_TEXT.items():
        # Assume pre-defined datasets live in `./datasets`.
        register_text_instances(
            key,
            metadata_text,
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )
# ==== Predefined datasets and splits for YTVIS ==========
# (image_root, json_file) pairs for the YouTube-VIS splits.
_PREDEFINED_SPLITS_YTVIS = {
    "ytvis_train": ("ytvis/train/JPEGImages", "ytvis/train.json"),
    "ytvis_val": ("ytvis/train/JPEGImages", "ytvis/valid.json"),
    "ytvis_sub_train": ("ytvis/train/JPEGImages", "ytvis/train_sub-train.json"),
    "ytvis_sub_val": ("ytvis/train/JPEGImages", "ytvis/train_sub-val.json"),
}
def register_all_ytvis(root="datasets"):
    """Register every YouTube-VIS split listed in _PREDEFINED_SPLITS_YTVIS."""
    for name, (img_dir, ann_file) in _PREDEFINED_SPLITS_YTVIS.items():
        # URI-style annotation paths are used verbatim; otherwise resolve
        # them relative to *root*.
        json_path = ann_file if "://" in ann_file else os.path.join(root, ann_file)
        register_ytvis_instances(name,
                                 get_ytvis_instances_meta(name),
                                 json_path,
                                 os.path.join(root, img_dir))
# ==== Predefined datasets and splits for KITTI ==========
# Two-level mapping: dataset family -> {split name -> (image dir, annotations)}.
# kitti_object uses COCO-style JSON; kitti_mots / kitti_mot use per-sequence
# text annotation directories instead of a single JSON file.
_PREDEFINED_SPLITS_KITTI = {
    "kitti_object": {
        "kitti_object_train": ("kitti/training/image_2", "kitti/annotations/instances_train.json"),
        "kitti_object_val": ("kitti/training/image_2", "kitti/annotations/instances_val.json"),
    },
    "kitti_mots": {
        "kitti_mots_train": ("kitti_mots/training/image_02", "kitti_mots/instances_txt"),
        "kitti_mots_val": ("kitti_mots/training/image_02", "kitti_mots/instances_txt"),
        "kitti_mots_train_full": ("kitti_mots/training/image_02", "kitti_mots/instances_txt"),
    },
    "kitti_mot": {
        "kitti_mot_train": ("kitti_mot/training/image_02", "kitti_mot/training/label_02"),
        "kitti_mot_val": ("kitti_mot/training/image_02", "kitti_mot/training/label_02"),
    },
}
def register_all_kitti(root="datasets"):
    """Register the KITTI detection, MOTS and MOT splits under *root*.

    The registration helper used depends on the dataset family, since MOTS
    and MOT ship text annotation directories rather than COCO JSON.
    """
    def _resolve(path):
        # URI-style annotation paths are used verbatim.
        return path if "://" in path else os.path.join(root, path)

    for dataset_name, splits in _PREDEFINED_SPLITS_KITTI.items():
        for name, (img_dir, ann_path) in splits.items():
            image_root = os.path.join(root, img_dir)
            if dataset_name == "kitti_mots":
                register_kitti_mots_instances(
                    name,
                    get_kitti_mots_instances_meta(dataset_name),
                    _resolve(ann_path),
                    image_root,
                )
            elif dataset_name == "kitti_mot":
                register_kitti_mot_instances(
                    name,
                    get_kitti_mot_instances_meta(dataset_name),
                    _resolve(ann_path),
                    image_root,
                )
            else:
                register_kitti_instances(
                    name,
                    get_kitti_instances_meta(dataset_name),
                    _resolve(ann_path),
                    image_root,
                )
# Populate the dataset catalog as a side effect of importing this module.
register_all_coco()
register_all_kitti()
register_all_ytvis()
965e836ee8ba7ce69c5319c282699df9a4b58897 | 39,202 | py | Python | packstack/installer/run_setup.py | xbezdick/packstack | 6f7673436f24cb16d0dad46a1bf8a3dc0a21e18a | [
"Apache-2.0"
] | null | null | null | packstack/installer/run_setup.py | xbezdick/packstack | 6f7673436f24cb16d0dad46a1bf8a3dc0a21e18a | [
"Apache-2.0"
] | null | null | null | packstack/installer/run_setup.py | xbezdick/packstack | 6f7673436f24cb16d0dad46a1bf8a3dc0a21e18a | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import copy
import datetime
import getpass
import logging
import os
import re
import sys
from StringIO import StringIO
import traceback
import types
import textwrap
from optparse import OptionGroup
from optparse import OptionParser
import basedefs
import validators
from . import utils
import processors
import output_messages
from .exceptions import FlagValidationError
from .exceptions import ParamValidationError
from packstack import version
from packstack.modules.common import filtered_hosts
from setup_controller import Controller
# Global setup controller: holds CONF, parameter groups, plugins and messages.
controller = Controller()
# Values supplied via command-line flags, keyed by a parameter's CONF_NAME.
commandLineValues = {}
# List to hold all values to be masked in logging (i.e. passwords and sensitive data)
# TODO: read default values from conf_param?
masked_value_set = set()
# Temporary files created during the run; deleted by remove_temp_files().
tmpfiles = []
def initLogging(debug):
    """Create the packstack log file and configure the root logger.

    The file is created with mode 0600 up front because puppet output may
    contain passwords. Returns the path of the log file; raises Exception
    (ERR_EXP_FAILED_INIT_LOGGER) when setup fails.
    """
    try:
        logFile = os.path.join(basedefs.DIR_LOG, basedefs.FILE_LOG)
        # Create the log file with specific permissions, puppet has a habbit of putting
        # passwords in logs
        os.close(os.open(logFile, os.O_CREAT | os.O_EXCL, 0o600))
        hdlr = logging.FileHandler(filename=logFile, mode='w')
        level = logging.DEBUG if debug else logging.INFO
        fmts = '%(asctime)s::%(levelname)s::%(module)s::%(lineno)d::%(name)s:: %(message)s'
        dfmt = '%Y-%m-%d %H:%M:%S'
        hdlr.setFormatter(logging.Formatter(fmts, dfmt))
        # Replace any previously installed handlers so everything goes to our file.
        logging.root.handlers = []
        logging.root.addHandler(hdlr)
        logging.root.setLevel(level)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_FAILED_INIT_LOGGER)
    return logFile
def _getInputFromUser(param):
    """Resolve the value of a single parameter and store it in controller.CONF.

    Resolution order: the parameter default when USE_DEFAULT is set, then a
    command-line supplied value, then interactive input. The value is run
    through the param's processors and validators; on validation failure the
    user is re-prompted, or may accept the value when LOOSE_VALIDATION is set.
    """
    loop = True
    userInput = None

    try:
        if param.USE_DEFAULT:
            # Non-interactive parameter: take the default verbatim.
            logging.debug("setting default value (%s) for key (%s)" % (mask(param.DEFAULT_VALUE), param.CONF_NAME))
            controller.CONF[param.CONF_NAME] = param.DEFAULT_VALUE
        else:
            while loop:
                # If the value was not supplied by the command line flags
                if param.CONF_NAME not in commandLineValues:
                    message = StringIO()
                    message.write(param.PROMPT)

                    # Show the option list inline, unless a regexp validator
                    # makes enumerating options meaningless.
                    val_list = param.VALIDATORS or []
                    if(validators.validate_regexp not in val_list
                       and param.OPTION_LIST):
                        message.write(" [%s]" % "|".join(param.OPTION_LIST))

                    if param.DEFAULT_VALUE:
                        message.write(" [%s] " % (str(param.DEFAULT_VALUE)))

                    message.write(": ")
                    message.seek(0)
                    # mask password or hidden fields
                    if (param.MASK_INPUT):
                        userInput = getpass.getpass("%s :" % (param.PROMPT))
                    else:
                        userInput = raw_input(message.read())
                else:
                    userInput = commandLineValues[param.CONF_NAME]
                # If DEFAULT_VALUE is set and user did not input anything
                if userInput == "" and len(str(param.DEFAULT_VALUE)) > 0:
                    userInput = param.DEFAULT_VALUE

                # Param processing
                userInput = process_param_value(param, userInput)

                # If param requires validation
                try:
                    validate_param_value(param, userInput)
                    controller.CONF[param.CONF_NAME] = userInput
                    loop = False
                except ParamValidationError:
                    if param.LOOSE_VALIDATION:
                        # If validation failed but LOOSE_VALIDATION is true, ask user
                        answer = _askYesNo("User input failed validation, "
                                           "do you still wish to use it")
                        loop = not answer
                        if answer:
                            controller.CONF[param.CONF_NAME] = userInput
                            continue
                        else:
                            if param.CONF_NAME in commandLineValues:
                                del commandLineValues[param.CONF_NAME]
                    else:
                        # Delete value from commandLineValues so that we will prompt the user for input
                        if param.CONF_NAME in commandLineValues:
                            del commandLineValues[param.CONF_NAME]
                        loop = True
    except KeyboardInterrupt:
        # add the new line so messages wont be displayed in the same line as the question
        print("")
        raise
    except:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_READ_INPUT_PARAM % (param.CONF_NAME))
def input_param(param):
    """Read *param* from the user, asking twice when confirmation is required.

    Returns the param object. Confirmation is skipped for values that came
    from the command line.
    """
    # We need to check if a param needs confirmation, (i.e. ask user twice)
    # Do not validate if it was given from the command line
    if param.NEED_CONFIRM and param.CONF_NAME not in commandLineValues:
        # create a copy of the param so we can call it twice
        confirmedParam = copy.deepcopy(param)
        confirmedParamName = param.CONF_NAME + "_CONFIRMED"
        confirmedParam.CONF_NAME = confirmedParamName
        confirmedParam.PROMPT = output_messages.INFO_CONF_PARAMS_PASSWD_CONFIRM_PROMPT
        # Now get both values from user (with existing validations)
        while True:
            _getInputFromUser(param)
            _getInputFromUser(confirmedParam)
            if controller.CONF[param.CONF_NAME] == controller.CONF[confirmedParamName]:
                logging.debug("Param confirmation passed, value for both questions is identical")
                break
            else:
                print(output_messages.INFO_VAL_PASSWORD_DONT_MATCH)
    else:
        _getInputFromUser(param)

    return param
def _askYesNo(question=None):
    """Ask a yes/no question until an answer starting with 'y' or 'n' is given.

    Returns True for yes. The carriage-return prefix redraws the prompt on
    the same terminal line on repeated attempts.
    """
    message = StringIO()

    while True:
        askString = "\r%s? (yes|no): " % (question)
        logging.debug("asking user: %s" % askString)

        message.write(askString)
        message.seek(0)

        raw = raw_input(message.read())
        # Empty input: ask again.
        if not len(raw):
            continue

        # Only the first character matters; case-insensitive.
        answer = raw[0].lower()
        logging.debug("user answered read: %s" % (answer))

        if answer not in 'yn':
            continue

        return answer == 'y'
def _addDefaultsToMaskedValueSet():
    """Seed masked_value_set with every non-empty default of a masked param.

    Keeps default passwords out of logs even before the user is prompted.
    """
    global masked_value_set
    defaults = (p.DEFAULT_VALUE
                for g in controller.getAllGroups()
                for p in g.parameters.itervalues()
                # Keep default password values masked, but skip empty defaults.
                if p.MASK_INPUT is True and p.DEFAULT_VALUE != "")
    masked_value_set.update(defaults)
def _updateMaskedValueSet():
    """Record every CONF value whose parameter has MASK_INPUT enabled."""
    global masked_value_set
    for conf_name in controller.CONF:
        # Only values of masked parameters are hidden in logs/output.
        if controller.getParamKeyValue(conf_name, "MASK_INPUT") is True:
            masked_value_set.add(controller.CONF[conf_name])
def mask(input):
    """Return a deep copy of *input* with known secret values masked.

    Accepts a dict, list or string; every occurrence of a value recorded in
    masked_value_set is replaced with asterisks by utils.mask_string. Other
    value types inside containers are left untouched.
    """
    output = copy.deepcopy(input)
    if isinstance(input, types.DictType):
        # Only string values are masked; keys are left as-is.
        for key in input:
            if isinstance(input[key], types.StringType):
                output[key] = utils.mask_string(input[key],
                                                masked_value_set)
    if isinstance(input, types.ListType):
        for item in input:
            org = item
            # index() finds the first equal element; relies on positions in
            # the deep copy matching the original list.
            orgIndex = input.index(org)
            if isinstance(item, types.StringType):
                item = utils.mask_string(item, masked_value_set)
            if item != org:
                # Swap the masked string into the copy at the same position.
                output.remove(org)
                output.insert(orgIndex, item)
    if isinstance(input, types.StringType):
        output = utils.mask_string(input, masked_value_set)

    return output
def removeMaskString(maskedString):
    """Remove *maskedString* from masked_value_set if present.

    The original implementation iterated the whole set with a found-flag to
    avoid removing during iteration; set.discard does exactly that (remove
    if present, no error when absent) in one call.
    """
    global masked_value_set
    masked_value_set.discard(maskedString)
def validate_param_value(param, value):
    """Run every validator attached to *param* against *value*.

    Prints a notice and re-raises on the first ParamValidationError.
    """
    cname = param.CONF_NAME
    logging.debug("Validating parameter %s." % cname)
    for validator in (param.VALIDATORS or []):
        try:
            validator(value, param.OPTION_LIST)
        except ParamValidationError as ex:
            print('Parameter %s failed validation: %s' % (cname, ex))
            raise
def process_param_value(param, value):
    """Pipe *value* through each processor of *param* and return the result.

    A non-silent processor that changes an unmasked value announces the
    change; ParamProcessingError is reported and re-raised.
    """
    current = value
    for processor in (param.PROCESSORS or []):
        is_silent = getattr(processor, 'silent', False)
        logging.debug("Processing value of parameter "
                      "%s." % param.CONF_NAME)
        try:
            result = processor(current, param.CONF_NAME, controller.CONF)
        except processors.ParamProcessingError as ex:
            print("Value processing of parameter %s "
                  "failed.\n%s" % (param.CONF_NAME, ex))
            raise
        if result != current:
            # Announce the substitution unless the value is secret/silent.
            if param.MASK_INPUT is False and not is_silent:
                print(output_messages.INFO_CHANGED_VALUE % (current, result))
            current = result
        else:
            logging.debug("Processor returned the original "
                          "value: %s" % current)
    return current
def _handleGroupCondition(config, conditionName, conditionValue):
    """Evaluate a parameter group's pre/post condition and return its value.

    The condition is either a callable (invoked with the global CONF) or a
    string naming a parameter to load from the answer file; any other type
    raises TypeError. *conditionValue* is the caller's running value and is
    superseded by the evaluation result.
    """
    if callable(conditionName):
        # Call the function conditionName with conf as the arg.
        return conditionName(controller.CONF)
    # We assume a string names a member of conf_params to read from the file.
    if isinstance(conditionName, types.StringType):
        return _loadParamFromFile(config, "general", conditionName)
    # Any other type is invalid.
    raise TypeError("%s type (%s) is not supported" % (conditionName, type(conditionName)))
def _loadParamFromFile(config, section, param_name):
    """Read *param_name* from the answer file, validate it, and store it.

    Falls back to values of any deprecated aliases the parameter declares
    (raising ValueError when conflicting aliases are set), then to the
    parameter's default. The processed, validated value is written into
    controller.CONF and returned. Raises KeyError when no value is found.
    """
    param = controller.getParamByName(param_name)

    # Get value from answer file
    try:
        value = config.get(section, param_name)
    except ConfigParser.NoOptionError:
        value = None

    # Check for deprecated parameters
    deprecated = param.DEPRECATES if param.DEPRECATES is not None else []
    for old_name in deprecated:
        try:
            val = config.get(section, old_name)
        except ConfigParser.NoOptionError:
            continue
        if not val:
            # value is empty string
            continue
        if value is None:
            value = val
        if value != val:
            # Two deprecated aliases (or the alias and the new name) disagree.
            raise ValueError('Parameter %(param_name)s deprecates '
                             'following parameters:\n%(deprecated)s.\n'
                             'Please either use parameter %(param_name)s '
                             'or use same value for all deprecated '
                             'parameters.' % locals())
    if deprecated and value is not None:
        # Nudge the user towards the new parameter name next run.
        controller.MESSAGES.append('Deprecated parameter has been used '
                                   'in answer file. Please use parameter '
                                   '%(param_name)s next time. This '
                                   'parameter deprecates following '
                                   'parameters: %(deprecated)s.'
                                   % locals())

    if value is None:
        # Let's use default value if we have one
        value = getattr(param, 'DEFAULT_VALUE', None)
        if value is None:
            raise KeyError('Parser cannot find option %s in answer file.'
                           % param_name)

    # Validate param value using its validation func
    value = process_param_value(param, value)
    validate_param_value(param, value)

    # Keep param value in our never ending global conf
    controller.CONF[param.CONF_NAME] = value

    return value
def _handleAnswerFileParams(answerFile):
    """Load and validate all parameters from *answerFile*.

    Groups are processed in order: a group's parameters are read only when
    its pre-condition matches, and its post-condition (when present) must
    match afterwards or a ValueError is raised. Any failure is re-raised as
    a generic Exception with ERR_EXP_HANDLE_ANSWER_FILE.
    """
    try:
        logging.debug("Starting to handle config file")

        # Read answer file
        fconf = ConfigParser.ConfigParser()
        fconf.read(answerFile)

        # Iterate all the groups and check the pre/post conditions
        for group in controller.getAllGroups():
            # Get all params per group

            # Handle pre conditions for group
            preConditionValue = True
            if group.PRE_CONDITION:
                preConditionValue = _handleGroupCondition(fconf, group.PRE_CONDITION, preConditionValue)

            # Handle pre condition match with case insensitive values
            if preConditionValue == group.PRE_CONDITION_MATCH:
                for param in group.parameters.itervalues():
                    _loadParamFromFile(fconf, "general", param.CONF_NAME)

                # Handle post conditions for group only if pre condition passed
                postConditionValue = True
                if group.POST_CONDITION:
                    postConditionValue = _handleGroupCondition(fconf, group.POST_CONDITION, postConditionValue)

                    # Handle post condition match for group
                    if postConditionValue != group.POST_CONDITION_MATCH:
                        logging.error("The group condition (%s) returned: %s, which differs from the excpeted output: %s" %
                                      (group.GROUP_NAME, postConditionValue, group.POST_CONDITION_MATCH))
                        raise ValueError(output_messages.ERR_EXP_GROUP_VALIDATION_ANS_FILE %
                                         (group.GROUP_NAME, postConditionValue, group.POST_CONDITION_MATCH))
                    else:
                        logging.debug("condition (%s) passed" % group.POST_CONDITION)
                else:
                    logging.debug("no post condition check for group %s" % group.GROUP_NAME)
            else:
                logging.debug("skipping params group %s since value of group validation is %s" % (group.GROUP_NAME, preConditionValue))

    except Exception as e:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_HANDLE_ANSWER_FILE % (e))
def _getanswerfilepath():
    """Pick a timestamped path in $HOME for the generated answer file.

    Returns the path, or None when the home directory is not writable; in
    both cases an explanatory message is queued in controller.MESSAGES.
    """
    ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    home = os.path.expanduser("~/")
    if os.access(home, os.W_OK):
        path = os.path.abspath(os.path.join(home, "packstack-answers-%s.txt" % ts))
        msg = "A new answerfile was created in: %s" % path
    else:
        path = None
        msg = "Could not find a suitable path on which to create the answerfile"

    controller.MESSAGES.append(msg)
    return path
def _gettmpanswerfilepath():
    """Pick a timestamped path in $HOME for a temporary answer file.

    The path is recorded in *tmpfiles* so remove_temp_files() can delete it
    later. Returns None when the home directory is not writable. (A dead
    local failure message that was never used has been removed.)
    """
    path = None
    ts = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    home = os.path.expanduser("~/")
    if os.access(home, os.W_OK):
        path = os.path.abspath(os.path.join(home, "tmp-packstack-answers-%s.txt" % ts))
        tmpfiles.append(path)
    return path
def _handleInteractiveParams():
    """Prompt the user for every parameter group, honouring group conditions.

    A group is asked only when its pre-condition matches; its questions
    repeat until the post-condition matches, discarding previously entered
    values for the group on each retry. Finishes with a summary screen.
    """
    try:
        logging.debug("Groups: %s" % ', '.join([x.GROUP_NAME for x in controller.getAllGroups()]))

        for group in controller.getAllGroups():
            preConditionValue = True
            logging.debug("going over group %s" % group.GROUP_NAME)

            # If pre_condition is set, get Value
            if group.PRE_CONDITION:
                preConditionValue = _getConditionValue(group.PRE_CONDITION)

            inputLoop = True

            # If we have a match, i.e. condition returned True, go over all params in the group
            if preConditionValue == group.PRE_CONDITION_MATCH:
                while inputLoop:
                    for param in group.parameters.itervalues():
                        if not param.CONDITION:
                            input_param(param)
                            # update password list, so we know to mask them
                            _updateMaskedValueSet()

                    postConditionValue = True

                    # If group has a post condition, we check it after we get the input from
                    # all the params in the group. if the condition returns False, we loop over the group again
                    if group.POST_CONDITION:
                        postConditionValue = _getConditionValue(group.POST_CONDITION)

                        if postConditionValue == group.POST_CONDITION_MATCH:
                            inputLoop = False
                        else:
                            # we clear the value of all params in the group
                            # in order to re-input them by the user
                            for param in group.parameters.itervalues():
                                if param.CONF_NAME in controller.CONF:
                                    del controller.CONF[param.CONF_NAME]
                                if param.CONF_NAME in commandLineValues:
                                    del commandLineValues[param.CONF_NAME]
                    else:
                        inputLoop = False
            else:
                logging.debug("no post condition check for group %s" % group.GROUP_NAME)

        _displaySummary()

    except KeyboardInterrupt:
        logging.error("keyboard interrupt caught")
        raise Exception(output_messages.ERR_EXP_KEYBOARD_INTERRUPT)
    except Exception:
        logging.error(traceback.format_exc())
        raise
    # NOTE(review): this bare except is unreachable — the Exception clause
    # above already catches everything except BaseException subclasses.
    except:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_HANDLE_PARAMS)
def _handleParams(configFile):
    """Collect all parameter values: from *configFile* when given, otherwise
    interactively. Also seeds the masking set with masked defaults first."""
    _addDefaultsToMaskedValueSet()
    if configFile:
        _handleAnswerFileParams(configFile)
    else:
        _handleInteractiveParams()
def _getConditionValue(matchMember):
    """Resolve a group condition to its value.

    Functions are called with the global CONF; a string names a conf
    parameter (the user is prompted for it first when it is missing).
    Anything else raises TypeError.
    """
    if isinstance(matchMember, types.FunctionType):
        return matchMember(controller.CONF)
    if isinstance(matchMember, types.StringType):
        # We assume a string is the name of a member of conf_params;
        # prompt for it when it has not been collected yet.
        if matchMember not in controller.CONF:
            input_param(controller.getParamByName(matchMember))
        return controller.CONF[matchMember]
    raise TypeError("%s type (%s) is not supported" % (matchMember, type(matchMember)))
def _displaySummary():
    """Print every collected parameter (masked where needed), then ask the
    user to confirm; on 'no', restart interactive input with the entered
    values pre-set as defaults."""
    print(output_messages.INFO_DSPLY_PARAMS)
    print("=" * (len(output_messages.INFO_DSPLY_PARAMS) - 1))
    logging.info("*** User input summary ***")
    for group in controller.getAllGroups():
        for param in group.parameters.itervalues():
            if not param.USE_DEFAULT and param.CONF_NAME in controller.CONF:
                cmdOption = param.CMD_OPTION
                # Pad so values line up at column 30.
                l = 30 - len(cmdOption)
                maskParam = param.MASK_INPUT
                # Only call mask on a value if the param has MASK_INPUT set to True
                if maskParam:
                    logging.info("%s: %s" % (cmdOption, mask(controller.CONF[param.CONF_NAME])))
                    print("%s:" % (cmdOption) + " " * l + mask(controller.CONF[param.CONF_NAME]))
                else:
                    # Otherwise, log & display it as it is
                    logging.info("%s: %s" % (cmdOption, str(controller.CONF[param.CONF_NAME])))
                    print("%s:" % (cmdOption) + " " * l + str(controller.CONF[param.CONF_NAME]))
    logging.info("*** User input summary ***")
    answer = _askYesNo(output_messages.INFO_USE_PARAMS)
    if not answer:
        logging.debug("user chose to re-enter the user parameters")
        for group in controller.getAllGroups():
            for param in group.parameters.itervalues():
                if param.CONF_NAME in controller.CONF:
                    # Re-offer non-secret answers as the new defaults.
                    if not param.MASK_INPUT:
                        param.DEFAULT_VALUE = controller.CONF[param.CONF_NAME]
                    # Remove the string from mask_value_set in order
                    # to remove values that might be over overwritten.
                    removeMaskString(controller.CONF[param.CONF_NAME])
                    del controller.CONF[param.CONF_NAME]
                if param.CONF_NAME in commandLineValues:
                    del commandLineValues[param.CONF_NAME]
        print("")
        logging.debug("calling handleParams in interactive mode")
        return _handleParams(None)
    else:
        logging.debug("user chose to accept user parameters")
def _printAdditionalMessages():
    """Print any queued informational messages as a bulleted list."""
    if not controller.MESSAGES:
        return
    print(output_messages.INFO_ADDTIONAL_MSG)
    for message in controller.MESSAGES:
        print(output_messages.INFO_ADDTIONAL_MSG_BULLET % (message))
def _addFinalInfoMsg(logFile):
    """Queue the closing messages pointing at the log file and the
    generated puppet manifests."""
    controller.MESSAGES.extend([
        output_messages.INFO_LOG_FILE_PATH % (logFile),
        output_messages.INFO_MANIFEST_PATH % (basedefs.PUPPET_MANIFEST_DIR),
    ])
def _summaryParamsToLog():
    """Write every collected parameter value (masked) to the debug log."""
    if not controller.CONF:
        return
    logging.debug("*** The following params were used as user input:")
    for group in controller.getAllGroups():
        for param in group.parameters.itervalues():
            if param.CONF_NAME in controller.CONF:
                logging.debug("%s: %s" % (param.CMD_OPTION,
                                          mask(controller.CONF[param.CONF_NAME])))
def runSequences():
    """Execute every registered plugin sequence via the controller."""
    controller.runAllSequences()
def _main(options, configFile=None, logFile=None):
    """Run the full installation: gather parameters, persist the answer
    file, initialize plugin sequences and execute them."""
    print(output_messages.INFO_HEADER)
    print("\n" + output_messages.INFO_LOG_FILE_PATH % logFile)

    # Get parameters
    _handleParams(configFile)

    # Generate answer file, only if no answer file was provided
    if not options.answer_file:
        path = _getanswerfilepath()
        if path:
            generateAnswerFile(path)
    # If an answer file was provided, some options may have been overriden
    # Overwrite answer file with updated options
    else:
        generateAnswerFile(options.answer_file)

    # Update masked_value_list with user input values
    _updateMaskedValueSet()

    # Print masked conf
    logging.debug(mask(controller.CONF))

    # Start configuration stage
    print("\n" + output_messages.INFO_INSTALL)

    # Initialize Sequences
    initPluginsSequences()

    # Run main setup logic
    runSequences()

    # Lock rhevm version
    # _lockRpmVersion()

    # Print info
    _addFinalInfoMsg(logFile)
    print(output_messages.INFO_INSTALL_SUCCESS)
def remove_remote_var_dirs(options, config, messages):
    """
    Removes the temp directories on remote hosts,
    doesn't remove data on localhost.

    In debug mode the directories are kept and a note is appended to
    *messages*; removal failures are logged and reported in red.
    """
    for host in filtered_hosts(config):
        try:
            host_dir = config['HOST_DETAILS'][host]['tmpdir']
        except KeyError:
            # Nothing was added to this host yet, so we have nothing to delete
            continue
        if options.debug:
            # we keep temporary directories on hosts in debug mode
            messages.append(
                'Note temporary directory {host_dir} on host {host} was '
                'not deleted for debugging purposes.'.format(**locals())
            )
            continue

        logging.debug(output_messages.INFO_REMOVE_REMOTE_VAR % (host_dir, host))
        server = utils.ScriptRunner(host)
        server.append('rm -rf %s' % host_dir)
        try:
            server.execute()
        except Exception as e:
            # Best-effort cleanup: report but do not abort on failure.
            msg = output_messages.ERR_REMOVE_REMOTE_VAR % (host_dir, host)
            logging.error(msg)
            logging.exception(e)
            messages.append(utils.color_text(msg, 'red'))
def remove_temp_files():
    """Delete every tracked temporary file, reporting failures in red."""
    for tmpfile in tmpfiles:
        try:
            os.unlink(tmpfile)
        except Exception as err:
            # Best-effort cleanup: log and queue a red message, keep going.
            msg = output_messages.ERR_REMOVE_TMP_FILE % (tmpfile)
            logging.error(msg)
            logging.exception(err)
            controller.MESSAGES.append(utils.color_text(msg, 'red'))
def generateAnswerFile(outputFile, overrides=None):
    """Write the current configuration to *outputFile* as an answer file.

    Each parameter is emitted as a commented ``NAME=value`` line under one
    ``[general]`` section. Values come from controller.CONF (falling back to
    the parameter default) unless replaced via *overrides*. The file is
    recreated with mode 0600 since it may contain passwords.
    """
    # Fixed: was a mutable default argument (overrides={}). It was only ever
    # read, but None-with-fallback avoids the shared-default pitfall.
    if overrides is None:
        overrides = {}
    sep = os.linesep
    fmt = ("%(comment)s%(separator)s%(conf_name)s=%(default_value)s"
           "%(separator)s")
    outputFile = os.path.expanduser(outputFile)
    # Remove the answer file so it can be recreated as the current user with
    # the mode -rw-------
    if os.path.exists(outputFile):
        os.remove(outputFile)
    fd = os.open(outputFile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
    with os.fdopen(fd, "w") as ans_file:
        ans_file.write("[general]%s" % os.linesep)
        for group in controller.getAllGroups():
            for param in group.parameters.itervalues():
                # Wrap the usage text into a '#' comment block above the line.
                comm = param.USAGE or ''
                comm = textwrap.fill(comm,
                                     initial_indent='%s# ' % sep,
                                     subsequent_indent='# ',
                                     break_long_words=False)

                value = controller.CONF.get(param.CONF_NAME,
                                            param.DEFAULT_VALUE)

                args = {'comment': comm,
                        'separator': sep,
                        'default_value': overrides.get(param.CONF_NAME, value),
                        'conf_name': param.CONF_NAME}
                ans_file.write(fmt % args)
def single_step_aio_install(options, logFile):
    """Installs an All in One host on this host."""
    options.install_hosts = utils.get_localhost_ip()

    # Command-line values win: only fill in the AIO defaults the user
    # did not set explicitly.
    if not options.novanetwork_pubif:
        options.novanetwork_pubif = utils.device_from_ip(options.install_hosts)
    for attr in ('novacompute_privif', 'novanetwork_privif'):
        if not getattr(options, attr):
            setattr(options, attr, '')

    single_step_install(options, logFile)
def single_step_install(options, logFile):
    """Install on the hosts given in --install-hosts in one step.

    The first host becomes the controller (all *_HOST directives point at
    it); any remaining hosts become compute nodes. A temporary answer file
    is generated with these overrides plus any command-line values, then
    the normal flow is run against it.
    """
    answerfilepath = _gettmpanswerfilepath()
    if not answerfilepath:
        _printAdditionalMessages()
        return

    # We're going to generate the answerfile and run Packstack in a single step
    # todo this we generate the answerfile and pass in some override variables to
    # override the default hosts
    overrides = {}
    hosts = options.install_hosts
    hosts = [host.strip() for host in hosts.split(',')]

    for group in controller.getAllGroups():
        for param in group.parameters.itervalues():
            # and directives that contain _HOST are set to the controller node
            if param.CONF_NAME.find("_HOST") != -1:
                overrides[param.CONF_NAME] = hosts[0]
    # If there are more than one host, all but the first are a compute nodes
    if len(hosts) > 1:
        overrides["CONFIG_COMPUTE_HOSTS"] = ','.join(hosts[1:])

    # We can also override defaults with command line options
    _set_command_line_values(options)
    for key, value in commandLineValues.items():
        overrides[key] = value

    generateAnswerFile(answerfilepath, overrides)
    _main(options, answerfilepath, logFile)
def initCmdLineParser():
    """
    Initiate the optparse object, add all the groups and general command line flags
    and returns the optparse object.

    Besides the fixed global flags, one ``--<CMD_OPTION>`` flag is added per
    registered parameter that is not USE_DEFAULT. (An unused local that read
    OPTION_LIST per parameter has been removed.)
    """
    # Init parser and all general flags
    usage = "usage: %prog [options] [--help]"
    parser = OptionParser(usage=usage, version="%prog {0} {1}".format(version.release_string(), version.version_string()))
    parser.add_option("--gen-answer-file", help="Generate a template of an answer file.")
    parser.add_option("--answer-file", help="Runs the configuration in non-interactive mode, extracting all information from the"
                                            "configuration file. using this option excludes all other options")
    parser.add_option("--install-hosts", help="Install on a set of hosts in a single step. The format should be a comma separated list "
                                              "of hosts, the first is setup as a controller, and the others are setup as compute nodes."
                                              "if only a single host is supplied then it is setup as an all in one installation. An answerfile "
                                              "will also be generated and should be used if Packstack needs to be run a second time ")
    parser.add_option("--allinone", action="store_true", help="Shorthand for --install-hosts=<local ipaddr> --novanetwork-pubif=<dev> "
                                                              "--novacompute-privif=lo --novanetwork-privif=lo --os-swift-install=y --nagios-install=y "
                                                              ", this option can be used to install an all in one OpenStack on this host")

    parser.add_option("-t", "--timeout", default=300, help="The timeout for puppet Exec calls")
    parser.add_option("-o", "--options", action="store_true", dest="options", help="Print details on options available in answer file(rst format)")
    parser.add_option("-d", "--debug", action="store_true", default=False, help="Enable debug in logging")
    parser.add_option("-y", "--dry-run", action="store_true", default=False, help="Don't execute, just generate manifests")

    # For each group, create a group option
    for group in controller.getAllGroups():
        groupParser = OptionGroup(parser, group.DESCRIPTION)

        for param in group.parameters.itervalues():
            cmdOption = param.CMD_OPTION
            paramUsage = param.USAGE
            useDefault = param.USE_DEFAULT

            if not useDefault:
                groupParser.add_option("--%s" % cmdOption, help=paramUsage)

        # Add group parser to main parser
        parser.add_option_group(groupParser)

    return parser
def printOptions():
    """
    print and document the available options to the answer file (rst format).

    Each group becomes an rst section; each parameter a bold term followed
    by its usage text. (An unused local that read OPTION_LIST has been
    removed.)
    """
    # For each group, create a group option
    for group in controller.getAllGroups():
        print("%s" % group.DESCRIPTION)
        print("-" * len(group.DESCRIPTION) + "\n")
        for param in group.parameters.itervalues():
            cmdOption = param.CONF_NAME
            paramUsage = param.USAGE
            print("%s" % (("**%s**" % str(cmdOption)).ljust(30)))
            print(" %s" % paramUsage + "\n")
def plugin_compare(x, y):
    """
    Used to sort the plugin file list
    according to the number at the end of the plugin module
    """
    # Extract the trailing three-digit sequence number from each name.
    x_num = int(re.search(".+\_(\d\d\d)", x).group(1))
    y_num = int(re.search(".+\_(\d\d\d)", y).group(1))
    return x_num - y_num
def loadPlugins():
    """
    Load All plugins from ./plugins.

    Plugin modules must be named ``*_NNN.py`` (three trailing digits give
    the load order). Each module is imported, verified by checkPlugin() and
    registered with the controller; import failures abort with an Exception.
    """
    sys.path.append(basedefs.DIR_PLUGINS)
    sys.path.append(basedefs.DIR_MODULES)

    fileList = [f for f in os.listdir(basedefs.DIR_PLUGINS) if f[0] != "_"]
    # Order plugins by their numeric suffix so sequences run deterministically.
    fileList = sorted(fileList, cmp=plugin_compare)
    for item in fileList:
        # Looking for files that end with ###.py, example: a_plugin_100.py
        match = re.search("^(.+\_\d\d\d)\.py$", item)
        if match:
            try:
                moduleToLoad = match.group(1)
                logging.debug("importing module %s, from file %s", moduleToLoad, item)
                moduleobj = __import__(moduleToLoad)
                moduleobj.__file__ = os.path.join(basedefs.DIR_PLUGINS, item)
                # Expose the module under its own name for later lookups.
                globals()[moduleToLoad] = moduleobj
                checkPlugin(moduleobj)
                controller.addPlugin(moduleobj)
            except:
                logging.error("Failed to load plugin from file %s", item)
                logging.error(traceback.format_exc())
                raise Exception("Failed to load plugin from file %s" % item)
def checkPlugin(plugin):
    """Ensure *plugin* exposes the mandatory initConfig/initSequences hooks,
    raising ImportError when one is missing."""
    required = ('initConfig', 'initSequences')
    for funcName in required:
        if not hasattr(plugin, funcName):
            raise ImportError("Plugin %s does not contain the %s function" % (plugin.__class__, funcName))
def countCmdLineFlags(options, flag):
    """
    counts all command line flags that were supplied, excluding the supplied flag name.

    The global modifier flags (debug/timeout/dry_run/default_password) are
    never counted either. Fixed: the skip branch previously evaluated the
    bare name ``next`` (a harmless no-op); ``continue`` states the intent.
    """
    # Flags that never count as "extra": the flag itself plus global modifiers.
    ignored = (flag, 'debug', 'timeout', 'dry_run', 'default_password')
    counter = 0
    for key, value in options.__dict__.items():
        if key in ignored:
            continue
        # If anything but flag was called, increment
        if value:
            counter += 1
    return counter
def validateSingleFlag(options, flag):
    """Raise FlagValidationError when any flag besides *flag* (and the
    global modifiers) was supplied on the command line."""
    if countCmdLineFlags(options, flag) > 0:
        # Report the flag in its command-line spelling (dashes, not underscores).
        cli_name = flag.replace("_", "-")
        raise FlagValidationError(output_messages.ERR_ONLY_1_FLAG % ("--%s" % cli_name))
def initPluginsConfig():
    """Let every loaded plugin register its configuration groups."""
    for module in controller.getAllPlugins():
        module.initConfig(controller)
def initPluginsSequences():
    """Let every loaded plugin register its execution sequences."""
    for module in controller.getAllPlugins():
        module.initSequences(controller)
def _set_command_line_values(options):
    """Copy truthy optparse values into commandLineValues keyed by CONF_NAME."""
    for key, value in options.__dict__.items():
        # optparse stores --some-flag as some_flag; search groups by the
        # dashed CMD_OPTION spelling.
        cmd_option = key.replace("_", "-")
        for group in controller.getAllGroups():
            matches = group.search("CMD_OPTION", cmd_option)
            if matches and value:
                commandLineValues[matches[0].CONF_NAME] = value
def main():
    # Entry point: parse the command line, then dispatch to one of four
    # modes (answer-file generation, all-in-one install, single-step host
    # install, or the interactive/_main flow).
    # Placeholder so the finally-block can reference `options` even if an
    # exception fires before parse_args assigns it (it starts as a string,
    # not an optparse Values object -- pre-existing quirk).
    options = ""
    try:
        # Load Plugins
        loadPlugins()
        initPluginsConfig()
        optParser = initCmdLineParser()
        # Do the actual command line parsing
        # Try/Except are here to catch the silly sys.exit(0) when calling rhevm-setup --help
        (options, args) = optParser.parse_args()
        if options.options:
            printOptions()
            raise SystemExit
        # Initialize logging
        logFile = initLogging(options.debug)
        # Parse parameters
        # NOTE(review): runConfiguration is assigned but never read in this
        # function -- confirm it is dead and can be removed.
        runConfiguration = True
        confFile = None
        # Propagate global run-time settings to the shared controller config.
        controller.CONF['DEFAULT_EXEC_TIMEOUT'] = options.timeout
        controller.CONF['DRY_RUN'] = options.dry_run
        controller.CONF['DIR_LOG'] = basedefs.DIR_LOG
        # If --gen-answer-file was supplied, do not run main
        if options.gen_answer_file:
            answerfilepath = _gettmpanswerfilepath()
            if not answerfilepath:
                _printAdditionalMessages()
                return
            # We can also override defaults with command line options
            overrides = {}
            _set_command_line_values(options)
            for key, value in commandLineValues.items():
                overrides[key] = value
            generateAnswerFile(answerfilepath, overrides)
            _handleParams(answerfilepath)
            generateAnswerFile(options.gen_answer_file)
        # Are we installing an all in one
        elif options.allinone:
            # --allinone and --answer-file are mutually exclusive.
            if getattr(options, 'answer_file', None):
                msg = ('Please use either --allinone or --answer-file, '
                       'but not both.')
                raise FlagValidationError(msg)
            single_step_aio_install(options, logFile)
        # Are we installing in a single step
        elif options.install_hosts:
            single_step_install(options, logFile)
        # Otherwise, run main()
        else:
            # Make sure only --answer-file was supplied
            if options.answer_file:
                validateSingleFlag(options, "answer_file")
                # If using an answer file, setting a default password
                # does not really make sense
                if getattr(options, 'default_password', None):
                    msg = ('Please do not set --default-password '
                           'when specifying an answer file.')
                    raise FlagValidationError(msg)
                confFile = os.path.expanduser(options.answer_file)
                if not os.path.exists(confFile):
                    raise Exception(output_messages.ERR_NO_ANSWER_FILE % confFile)
            else:
                _set_command_line_values(options)
            _main(options, confFile, logFile)
    except FlagValidationError as ex:
        # NOTE(review): if initCmdLineParser() itself raised, optParser is
        # unbound here -- confirm FlagValidationError can only originate
        # after the parser exists.
        optParser.error(str(ex))
    except Exception as e:
        # NOTE(review): logFile may also be unbound if the failure happened
        # before initLogging() ran.
        logging.error(traceback.format_exc())
        print("\n" + utils.color_text("ERROR : " + str(e), 'red'))
        print(output_messages.ERR_CHECK_LOG_FILE_FOR_MORE_INFO % (logFile))
        sys.exit(1)
    finally:
        # Cleanup runs on every exit path, including --help/SystemExit.
        remove_remote_var_dirs(options, controller.CONF, controller.MESSAGES)
        remove_temp_files()
        # Always print user params to log
        _printAdditionalMessages()
        _summaryParamsToLog()
# Script entry point: only run the installer when executed directly.
if __name__ == "__main__":
    main()
| 37.912959 | 152 | 0.609306 |
a95ffe8d3d402851d617b81912e5709b044d68c4 | 82,848 | py | Python | sdk/tables/azure-data-tables/tests/test_table_entity_async.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 1 | 2021-04-30T04:44:41.000Z | 2021-04-30T04:44:41.000Z | sdk/tables/azure-data-tables/tests/test_table_entity_async.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | null | null | null | sdk/tables/azure-data-tables/tests/test_table_entity_async.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | null | null | null | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from base64 import b64encode
from datetime import datetime, timedelta
from dateutil.tz import tzutc, tzoffset
from math import isnan
import uuid
from devtools_testutils import AzureTestCase
from azure.core import MatchConditions
from azure.core.credentials import AzureSasCredential
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ResourceExistsError,
)
from azure.data.tables import (
TableSasPermissions,
AccessPolicy,
UpdateMode,
generate_table_sas,
TableEntity,
EntityProperty,
EdmType
)
from azure.data.tables.aio import TableServiceClient
from _shared.asynctestcase import AsyncTableTestCase
from preparers import TablesPreparer
class StorageTableEntityTest(AzureTestCase, AsyncTableTestCase):
    async def _set_up(self, tables_storage_account_name, tables_primary_storage_account_key):
        # Build a TableServiceClient against the given storage account and
        # ensure the per-test table exists (ignored when it already does).
        account_url = self.account_url(tables_storage_account_name, "table")
        self.ts = TableServiceClient(account_url, tables_primary_storage_account_key)
        self.table_name = self.get_resource_name('uttable')
        self.table = self.ts.get_table_client(self.table_name)
        if self.is_live:
            try:
                await self.ts.create_table(table_name=self.table_name)
            except ResourceExistsError:
                pass
        # Names of extra tables created by _create_query_table; removed in
        # _tear_down.
        self.query_tables = []
async def _tear_down(self):
if self.is_live:
try:
await self.ts.delete_table(self.table_name)
except:
pass
for table_name in self.query_tables:
try:
await self.ts.delete_table(table_name)
except:
pass
await self.ts.close()
# --Helpers-----------------------------------------------------------------
    async def _create_query_table(self, entity_count):
        """
        Creates a table with the specified name and adds entities with the
        default set of values. PartitionKey is set to 'MyPartition' and RowKey
        is set to a unique counter value starting at 1 (as a string).
        """
        table_name = self.get_resource_name('querytable')
        table = await self.ts.create_table(table_name)
        # Remember the table so _tear_down can remove it.
        self.query_tables.append(table_name)
        client = self.ts.get_table_client(table_name)
        entity = self._create_random_entity_dict()
        for i in range(1, entity_count + 1):
            # NOTE(review): RowKey accumulates each counter value ('rk1',
            # 'rk12', 'rk123', ...) instead of resetting per iteration; keys
            # stay unique either way, but confirm this is intentional.
            entity['RowKey'] = entity['RowKey'] + str(i)
            await client.create_entity(entity=entity)
        return client
def _create_random_base_entity_dict(self):
"""
Creates a dict-based entity with only pk and rk.
"""
partition = self.get_resource_name('pk')
row = self.get_resource_name('rk')
return {
'PartitionKey': partition,
'RowKey': row,
}
def _create_pk_rk(self, pk, rk):
try:
pk = pk if pk is not None else self.get_resource_name('pk').decode('utf-8')
rk = rk if rk is not None else self.get_resource_name('rk').decode('utf-8')
except AttributeError:
pk = pk if pk is not None else self.get_resource_name('pk')
rk = rk if rk is not None else self.get_resource_name('rk')
return pk, rk
    async def _insert_two_opposite_entities(self, pk=None, rk=None):
        # Insert the default entity plus a second one whose property values
        # differ from ('oppose') the defaults; returns the first entity and
        # its create response.
        entity1 = self._create_random_entity_dict()
        resp = await self.table.create_entity(entity1)
        partition, row = self._create_pk_rk(pk, rk)
        properties = {
            'PartitionKey': partition + u'1',
            'RowKey': row + u'1',
            'age': 49,
            'sex': u'female',
            'married': False,
            'deceased': True,
            'optional': None,
            'ratio': 5.2,
            'evenratio': 6.0,
            'large': 39999011,
            'Birthday': datetime(1993, 4, 1, tzinfo=tzutc()),
            'birthday': datetime(1990, 4, 1, tzinfo=tzutc()),
            'binary': b'binary-binary',
            'other': EntityProperty(value=40, type=EdmType.INT32),
            'clsid': uuid.UUID('c8da6455-213e-42d9-9b79-3f9149a57833')
        }
        entity = TableEntity(**properties)
        await self.table.create_entity(entity)
        return entity1, resp
    def _create_random_entity_dict(self, pk=None, rk=None):
        """
        Creates a dictionary-based entity with fixed values, using all
        of the supported data types.
        """
        partition = pk if pk is not None else self.get_resource_name('pk')
        row = rk if rk is not None else self.get_resource_name('rk')
        # One property per supported EDM type: bool, int32/int64, double,
        # datetime, binary, explicit EntityProperty and GUID.
        properties = {
            'PartitionKey': partition,
            'RowKey': row,
            'age': 39,
            'sex': 'male',
            'married': True,
            'deceased': False,
            'optional': None,
            'ratio': 3.1,
            'evenratio': 3.0,
            'large': 933311100,
            'Birthday': datetime(1973, 10, 4, tzinfo=tzutc()),
            'birthday': datetime(1970, 10, 4, tzinfo=tzutc()),
            'binary': b'binary',
            'other': EntityProperty(value=20, type=EdmType.INT32),
            'clsid': uuid.UUID('c9da6455-213d-42c9-9a79-3e9149a57833')
        }
        return TableEntity(**properties)
async def _insert_random_entity(self, pk=None, rk=None):
entity = self._create_random_entity_dict(pk, rk)
metadata = await self.table.create_entity(entity=entity)
return entity, metadata['etag']
    def _create_updated_entity_dict(self, partition, row):
        """
        Creates a dictionary-based entity with fixed values, with a
        different set of values than the default entity. It
        adds fields, changes field values, changes field types,
        and removes fields when compared to the default entity.
        """
        return {
            'PartitionKey': partition,
            'RowKey': row,
            'age': 'abc',            # changed type: int -> str
            'sex': 'female',         # changed value
            'sign': 'aquarius',      # added field
            'birthday': datetime(1991, 10, 4, tzinfo=tzutc())
        }
    def _assert_default_entity(self, entity, headers=None):
        '''
        Asserts that the entity passed in matches the default entity.
        '''
        assert entity['age'] == 39
        assert entity['sex'] == 'male'
        assert entity['married'] == True
        assert entity['deceased'] == False
        # None-valued properties are dropped by the service; removed fields
        # must not reappear.
        assert not "optional" in entity
        assert not "aquarius" in entity
        assert entity['ratio'] == 3.1
        assert entity['evenratio'] == 3.0
        assert entity['large'] == 933311100
        assert entity['Birthday'] == datetime(1973, 10, 4, tzinfo=tzutc())
        assert entity['birthday'] == datetime(1970, 10, 4, tzinfo=tzutc())
        # binary round-trips as a wrapper object; compare its .value payload.
        assert entity['binary'].value == b'binary'
        assert entity['other'] == 20
        assert entity['clsid'] == uuid.UUID('c9da6455-213d-42c9-9a79-3e9149a57833')
    def _assert_default_entity_json_full_metadata(self, entity, headers=None):
        '''
        Asserts that the entity passed in matches the default entity.
        '''
        # With odata=fullmetadata the deserialized values are identical to
        # the default representation checked by _assert_default_entity.
        assert entity['age'] == 39
        assert entity['sex'] == 'male'
        assert entity['married'] == True
        assert entity['deceased'] == False
        assert not "optional" in entity
        assert not "aquarius" in entity
        assert entity['ratio'] == 3.1
        assert entity['evenratio'] == 3.0
        assert entity['large'] == 933311100
        assert entity['Birthday'] == datetime(1973, 10, 4, tzinfo=tzutc())
        assert entity['birthday'] == datetime(1970, 10, 4, tzinfo=tzutc())
        assert entity['binary'].value == b'binary'
        assert entity['other'] == 20
        assert entity['clsid'] == uuid.UUID('c9da6455-213d-42c9-9a79-3e9149a57833')
    def _assert_default_entity_json_no_metadata(self, entity, headers=None):
        '''
        Asserts that the entity passed in matches the default entity.
        '''
        assert entity['age'] == 39
        assert entity['sex'] == 'male'
        assert entity['married'] == True
        assert entity['deceased'] == False
        assert not "optional" in entity
        assert not "aquarius" in entity
        assert entity['ratio'] == 3.1
        assert entity['evenratio'] == 3.0
        assert entity['large'] == 933311100
        # Without OData type metadata, datetimes stay ISO-8601 strings,
        # binary stays base64 text and GUIDs stay plain strings.
        assert entity['Birthday'].startswith('1973-10-04T00:00:00')
        assert entity['birthday'].startswith('1970-10-04T00:00:00')
        assert entity['Birthday'].endswith('00Z')
        assert entity['birthday'].endswith('00Z')
        assert entity['binary'] == b64encode(b'binary').decode('utf-8')
        assert entity['other'] == 20
        assert entity['clsid'] == 'c9da6455-213d-42c9-9a79-3e9149a57833'
def _assert_updated_entity(self, entity):
'''
Asserts that the entity passed in matches the updated entity.
'''
assert entity.age == 'abc'
assert entity.sex == 'female'
assert not hasattr(entity, "married")
assert not hasattr(entity, "deceased")
assert entity.sign == 'aquarius'
assert not hasattr(entity, "optional")
assert not hasattr(entity, "ratio")
assert not hasattr(entity, "evenratio")
assert not hasattr(entity, "large")
assert not hasattr(entity, "Birthday")
assert entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc())
assert not hasattr(entity, "other")
assert not hasattr(entity, "clsid")
def _assert_merged_entity(self, entity):
'''
Asserts that the entity passed in matches the default entity
merged with the updated entity.
'''
assert entity.age == 'abc'
assert entity.sex == 'female'
assert entity.sign == 'aquarius'
assert entity.married == True
assert entity.deceased == False
assert entity.ratio == 3.1
assert entity.evenratio == 3.0
assert entity.large == 933311100
assert entity.Birthday, datetime(1973, 10, 4, tzinfo=tzutc())
assert entity.birthday, datetime(1991, 10, 4, tzinfo=tzutc())
assert entity.other == 20
assert isinstance(entity.clsid, uuid.UUID)
assert str(entity.clsid) == 'c9da6455-213d-42c9-9a79-3e9149a57833'
def _assert_valid_metadata(self, metadata):
keys = metadata.keys()
assert "version" in keys
assert "date" in keys
assert "etag" in keys
assert len(keys) == 3
# --Test cases for entities ------------------------------------------
    @TablesPreparer()
    async def test_url_encoding_at_symbol(self, tables_storage_account_name, tables_primary_storage_account_key):
        """An entity whose RowKey contains '@' must round-trip through
        insert, filter query and delete without URL-encoding problems."""
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = {
                u"PartitionKey": u"PK",
                u"RowKey": u"table@storage.com",
                u"Value": 100
            }
            await self.table.create_entity(entity)
            f = u"RowKey eq '{}'".format(entity["RowKey"])
            entities = self.table.query_entities(f)
            count = 0
            async for e in entities:
                assert e.PartitionKey == entity[u"PartitionKey"]
                assert e.RowKey == entity[u"RowKey"]
                assert e.Value == entity[u"Value"]
                await self.table.delete_entity(e.PartitionKey, e.RowKey)
                count += 1
            assert count == 1
            # The entity was deleted above, so re-running the query must
            # yield no results.
            entities = self.table.query_entities(f)
            count = 0
            async for e in entities:
                count += 1
            assert count == 0
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_dictionary(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A plain dict can be inserted as an entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_entity_dict()
            # Act
            resp = await self.table.create_entity(entity=entity)
            # Assert
            assert resp is not None
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_with_hook(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Insert returns valid metadata and the entity reads back with the
        default values."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_entity_dict()
            # Act
            resp = await self.table.create_entity(entity=entity)
            received_entity = await self.table.get_entity(
                partition_key=entity["PartitionKey"],
                row_key=entity["RowKey"]
            )
            # Assert
            self._assert_valid_metadata(resp)
            self._assert_default_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_with_no_metadata(self, tables_storage_account_name, tables_primary_storage_account_key):
        """With 'odata=nometadata' the read-back entity keeps raw JSON
        representations (ISO strings, base64, plain GUID strings)."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_entity_dict()
            headers = {'Accept': 'application/json;odata=nometadata'}
            # Act
            # response_hook = lambda e, h: (e, h)
            resp = await self.table.create_entity(
                entity=entity,
                headers={'Accept': 'application/json;odata=nometadata'},
            )
            received_entity = await self.table.get_entity(
                partition_key=entity["PartitionKey"],
                row_key=entity["RowKey"],
                headers=headers
            )
            # Assert
            self._assert_valid_metadata(resp)
            self._assert_default_entity_json_no_metadata(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_with_full_metadata(self, tables_storage_account_name,
                                                    tables_primary_storage_account_key):
        """With 'odata=fullmetadata' the read-back entity carries fully typed
        property values."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_entity_dict()
            headers = {'Accept': 'application/json;odata=fullmetadata'}
            # Act
            # response_hook=lambda e, h: (e, h)
            resp = await self.table.create_entity(
                entity=entity,
                headers=headers
            )
            received_entity = await self.table.get_entity(
                partition_key=entity["PartitionKey"],
                row_key=entity["RowKey"],
                headers=headers
            )
            # Assert
            self._assert_valid_metadata(resp)
            self._assert_default_entity_json_full_metadata(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_conflict(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Inserting a duplicate (PartitionKey, RowKey) raises
        ResourceExistsError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            with pytest.raises(ResourceExistsError):
                # self.table.create_entity(entity)
                await self.table.create_entity(entity=entity)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_with_large_int32_value_throws(self, tables_storage_account_name,
                                                               tables_primary_storage_account_key):
        """Values outside the Int32 range are rejected client-side with
        TypeError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            # Act
            dict32 = self._create_random_base_entity_dict()
            dict32['large'] = EntityProperty(2 ** 31, EdmType.INT32) # TODO: this is outside the range of int32
            # Assert
            with pytest.raises(TypeError):
                await self.table.create_entity(entity=dict32)
            dict32['large'] = EntityProperty(-(2 ** 31 + 1), EdmType.INT32) # TODO: this is outside the range of int32
            with pytest.raises(TypeError):
                await self.table.create_entity(entity=dict32)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_with_large_int64_value_throws(self, tables_storage_account_name,
                                                               tables_primary_storage_account_key):
        """Values outside the Int64 range are rejected client-side with
        TypeError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            # Act
            dict64 = self._create_random_base_entity_dict()
            dict64['large'] = EntityProperty(2 ** 63, EdmType.INT64)
            # Assert
            with pytest.raises(TypeError):
                await self.table.create_entity(entity=dict64)
            dict64['large'] = EntityProperty(-(2 ** 63 + 1), EdmType.INT64)
            with pytest.raises(TypeError):
                await self.table.create_entity(entity=dict64)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_with_large_int_success(self, tables_storage_account_name,
                                                        tables_primary_storage_account_key):
        """Int64 values inside the valid range round-trip correctly, both
        positive and negative."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            # Act
            dict64 = self._create_random_base_entity_dict()
            dict64['large'] = EntityProperty(2 ** 50, EdmType.INT64)
            # Assert
            await self.table.create_entity(entity=dict64)
            received_entity = await self.table.get_entity(dict64['PartitionKey'], dict64['RowKey'])
            assert received_entity['large'].value == dict64['large'].value
            dict64['RowKey'] = 'negative'
            dict64['large'] = EntityProperty(-(2 ** 50 + 1), EdmType.INT64)
            await self.table.create_entity(entity=dict64)
            received_entity = await self.table.get_entity(dict64['PartitionKey'], dict64['RowKey'])
            assert received_entity['large'].value == dict64['large'].value
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_missing_pk(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Omitting PartitionKey raises ValueError before any request is sent."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = {'RowKey': 'rk'}
            # Act
            with pytest.raises(ValueError):
                resp = await self.table.create_entity(entity=entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_empty_string_pk(self, tables_storage_account_name, tables_primary_storage_account_key):
        """An empty-string PartitionKey is accepted by the service."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = {'RowKey': 'rk', 'PartitionKey': ''}
            # Act
            resp = await self.table.create_entity(entity=entity)
            self._assert_valid_metadata(resp)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_missing_rk(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Omitting RowKey raises ValueError before any request is sent."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = {'PartitionKey': 'pk'}
            # Act
            with pytest.raises(ValueError):
                resp = await self.table.create_entity(entity=entity)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_empty_string_rk(self, tables_storage_account_name, tables_primary_storage_account_key):
        """An empty-string RowKey is accepted by the service."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = {'PartitionKey': 'pk', 'RowKey': ''}
            # Act
            resp = await self.table.create_entity(entity=entity)
            self._assert_valid_metadata(resp)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_too_many_properties(self, tables_storage_account_name,
                                                     tables_primary_storage_account_key):
        """The service rejects entities that exceed the per-entity property
        limit with HttpResponseError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            for i in range(255):
                entity['key{0}'.format(i)] = 'value{0}'.format(i)
            # Act
            with pytest.raises(HttpResponseError):
                resp = await self.table.create_entity(entity=entity)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_entity_property_name_too_long(self, tables_storage_account_name,
                                                        tables_primary_storage_account_key):
        """The service rejects property names longer than 255 characters
        with HttpResponseError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            entity['a' * 256] = 'badval'
            # Act
            with pytest.raises(HttpResponseError):
                resp = await self.table.create_entity(entity=entity)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity(self, tables_storage_account_name, tables_primary_storage_account_key):
        """get_entity returns the inserted entity with all default values."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            resp = await self.table.get_entity(partition_key=entity['PartitionKey'],
                                               row_key=entity['RowKey'])
            # Assert
            assert resp['PartitionKey'] == entity['PartitionKey']
            assert resp['RowKey'] == entity['RowKey']
            self._assert_default_entity(resp)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity_with_hook(self, tables_storage_account_name, tables_primary_storage_account_key):
        """get_entity with keyword arguments returns the default entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            # resp, headers
            # response_hook=lambda e, h: (e, h)
            resp = await self.table.get_entity(
                partition_key=entity['PartitionKey'],
                row_key=entity['RowKey'],
            )
            # Assert
            assert resp['PartitionKey'] == entity['PartitionKey']
            assert resp['RowKey'] == entity['RowKey']
            self._assert_default_entity(resp)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity_if_match(self, tables_storage_account_name, tables_primary_storage_account_key):
        """The etag returned by insert can be used as an If-Match condition
        on a subsequent delete."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, etag = await self._insert_random_entity()
            # Act
            # Do a get and confirm the etag is parsed correctly by using it
            # as a condition to delete.
            resp = await self.table.get_entity(partition_key=entity['PartitionKey'],
                                               row_key=entity['RowKey'])
            await self.table.delete_entity(
                partition_key=resp['PartitionKey'],
                row_key=resp['RowKey'],
                etag=etag,
                match_condition=MatchConditions.IfNotModified
            )
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity_full_metadata(self, tables_storage_account_name, tables_primary_storage_account_key):
        """get_entity honours the fullmetadata Accept header."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            resp = await self.table.get_entity(
                entity.PartitionKey,
                entity.RowKey,
                headers={'accept': 'application/json;odata=fullmetadata'})
            # Assert
            assert resp.PartitionKey == entity.PartitionKey
            assert resp.RowKey == entity.RowKey
            self._assert_default_entity_json_full_metadata(resp)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity_no_metadata(self, tables_storage_account_name, tables_primary_storage_account_key):
        """get_entity honours the nometadata Accept header."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            resp = await self.table.get_entity(
                partition_key=entity.PartitionKey,
                row_key=entity.RowKey,
                headers={'accept': 'application/json;odata=nometadata'})
            # Assert
            assert resp.PartitionKey == entity.PartitionKey
            assert resp.RowKey == entity.RowKey
            self._assert_default_entity_json_no_metadata(resp)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity_not_existing(self, tables_storage_account_name, tables_primary_storage_account_key):
        """get_entity on a key that was never inserted raises
        ResourceNotFoundError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_entity_dict()
            # Act
            with pytest.raises(ResourceNotFoundError):
                await self.table.get_entity(partition_key=entity.PartitionKey,
                                            row_key=entity.RowKey)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_get_entity_with_special_doubles(self, tables_storage_account_name,
                                                   tables_primary_storage_account_key):
        """inf, -inf and NaN double values round-trip through the service."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            entity.update({
                'inf': float('inf'),
                'negativeinf': float('-inf'),
                'nan': float('nan')
            })
            await self.table.create_entity(entity=entity)
            # Act
            resp = await self.table.get_entity(partition_key=entity['PartitionKey'],
                                               row_key=entity['RowKey'])
            # Assert
            # NaN never compares equal to itself, hence isnan instead of ==.
            assert resp.inf == float('inf')
            assert resp.negativeinf == float('-inf')
            assert isnan(resp.nan)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_update_entity(self, tables_storage_account_name, tables_primary_storage_account_key):
        """REPLACE-mode update overwrites the entity completely."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.update_entity(mode=UpdateMode.REPLACE, entity=sent_entity)
            # Assert
            received_entity = await self.table.get_entity(
                partition_key=entity.PartitionKey,
                row_key=entity.RowKey)
            self._assert_valid_metadata(resp)
            self._assert_updated_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_update_entity_not_existing(self, tables_storage_account_name, tables_primary_storage_account_key):
        """REPLACE-mode update of a missing entity raises
        ResourceNotFoundError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            # Act
            sent_entity = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
            with pytest.raises(ResourceNotFoundError):
                await self.table.update_entity(mode=UpdateMode.REPLACE, entity=sent_entity)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_update_entity_with_if_matches(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Conditional update succeeds when the supplied etag matches the
        stored entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, etag = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.update_entity(
                mode=UpdateMode.REPLACE,
                entity=sent_entity, etag=etag,
                match_condition=MatchConditions.IfNotModified)
            # Assert
            received_entity = await self.table.get_entity(entity.PartitionKey,
                                                          entity.RowKey)
            self._assert_valid_metadata(resp)
            self._assert_updated_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_update_entity_with_if_doesnt_match(self, tables_storage_account_name,
                                                      tables_primary_storage_account_key):
        """Conditional update with a stale etag raises HttpResponseError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            with pytest.raises(HttpResponseError):
                await self.table.update_entity(
                    mode=UpdateMode.REPLACE,
                    entity=sent_entity,
                    etag=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"',
                    match_condition=MatchConditions.IfNotModified)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_or_merge_entity_with_existing_entity(self, tables_storage_account_name,
                                                               tables_primary_storage_account_key):
        """MERGE-mode upsert merges new properties into an existing entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.upsert_entity(mode=UpdateMode.MERGE, entity=sent_entity)
            # Assert
            received_entity = await self.table.get_entity(entity.PartitionKey,
                                                          entity.RowKey)
            self._assert_valid_metadata(resp)
            self._assert_merged_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_or_merge_entity_with_non_existing_entity(self, tables_storage_account_name,
                                                                   tables_primary_storage_account_key):
        """MERGE-mode upsert behaves like an insert when the entity does not
        exist yet."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            # Act
            sent_entity = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
            resp = await self.table.upsert_entity(mode=UpdateMode.MERGE, entity=sent_entity)
            # Assert
            received_entity = await self.table.get_entity(entity['PartitionKey'],
                                                          entity['RowKey'])
            self._assert_valid_metadata(resp)
            self._assert_updated_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_insert_or_replace_entity_with_existing_entity(self, tables_storage_account_name,
                                                                 tables_primary_storage_account_key):
        """REPLACE-mode upsert overwrites an existing entity completely."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.upsert_entity(mode=UpdateMode.REPLACE, entity=sent_entity)
            # Assert
            received_entity = await self.table.get_entity(entity.PartitionKey,
                                                          entity.RowKey)
            self._assert_valid_metadata(resp)
            self._assert_updated_entity(received_entity)
        finally:
            await self._tear_down()
@TablesPreparer()
async def test_insert_or_replace_entity_with_non_existing_entity(self, tables_storage_account_name,
tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = self._create_random_base_entity_dict()
# Act
sent_entity = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
resp = await self.table.upsert_entity(mode=UpdateMode.REPLACE, entity=sent_entity)
# Assert
received_entity = await self.table.get_entity(entity['PartitionKey'],
entity['RowKey'])
assert resp is not None
self._assert_updated_entity(received_entity)
finally:
await self._tear_down()
    @TablesPreparer()
    async def test_merge_entity(self, tables_storage_account_name, tables_primary_storage_account_key):
        """MERGE-mode update merges new properties into the stored entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.update_entity(mode=UpdateMode.MERGE, entity=sent_entity)
            # Assert
            received_entity = await self.table.get_entity(entity.PartitionKey,
                                                          entity.RowKey)
            self._assert_valid_metadata(resp)
            self._assert_merged_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_merge_entity_not_existing(self, tables_storage_account_name, tables_primary_storage_account_key):
        """MERGE-mode update of a missing entity raises
        ResourceNotFoundError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            # Act
            sent_entity = self._create_updated_entity_dict(entity['PartitionKey'], entity['RowKey'])
            with pytest.raises(ResourceNotFoundError):
                await self.table.update_entity(mode=UpdateMode.MERGE, entity=sent_entity)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_merge_entity_with_if_matches(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Conditional MERGE-mode update succeeds when the supplied etag
        matches the stored entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, etag = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.update_entity(mode=UpdateMode.MERGE,
                                                  entity=sent_entity, etag=etag,
                                                  match_condition=MatchConditions.IfNotModified)
            # Assert
            received_entity = await self.table.get_entity(entity.PartitionKey,
                                                          entity.RowKey)
            self._assert_valid_metadata(resp)
            self._assert_merged_entity(received_entity)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_merge_entity_with_if_doesnt_match(self, tables_storage_account_name,
                                                     tables_primary_storage_account_key):
        """Conditional MERGE-mode update with a stale etag raises
        HttpResponseError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            with pytest.raises(HttpResponseError):
                await self.table.update_entity(mode=UpdateMode.MERGE,
                                               entity=sent_entity,
                                               etag='W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"',
                                               match_condition=MatchConditions.IfNotModified)
            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_delete_entity(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Delete an existing entity; a subsequent get must raise ResourceNotFoundError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()

            # Act
            resp = await self.table.delete_entity(partition_key=entity.PartitionKey, row_key=entity.RowKey)

            # Assert
            assert resp is None
            with pytest.raises(ResourceNotFoundError):
                await self.table.get_entity(entity.PartitionKey, entity.RowKey)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_delete_entity_not_existing(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Deleting a non-existent entity must raise ResourceNotFoundError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()

            # Act
            with pytest.raises(ResourceNotFoundError):
                await self.table.delete_entity(entity['PartitionKey'], entity['RowKey'])

            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_delete_entity_with_if_matches(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Conditional delete with a matching etag (If-Match) succeeds and removes the entity."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, etag = await self._insert_random_entity()

            # Act
            resp = await self.table.delete_entity(entity.PartitionKey, entity.RowKey, etag=etag,
                                                  match_condition=MatchConditions.IfNotModified)

            # Assert
            assert resp is None
            with pytest.raises(ResourceNotFoundError):
                await self.table.get_entity(entity.PartitionKey, entity.RowKey)
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_delete_entity_with_if_doesnt_match(self, tables_storage_account_name,
                                                      tables_primary_storage_account_key):
        """Conditional delete with a stale etag must fail with HttpResponseError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()

            # Act
            # A fixed, never-current etag guarantees the precondition fails.
            with pytest.raises(HttpResponseError):
                await self.table.delete_entity(
                    entity.PartitionKey, entity.RowKey,
                    etag=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"',
                    match_condition=MatchConditions.IfNotModified)

            # Assert
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_unicode_property_value(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Regression test for GitHub issue #57: unicode property values round-trip intact."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            entity1 = entity.copy()
            entity1.update({'Description': u'ꀕ'})
            entity2 = entity.copy()
            entity2.update({'RowKey': 'test2', 'Description': 'ꀕ'})

            # Act
            await self.table.create_entity(entity=entity1)
            await self.table.create_entity(entity=entity2)
            entities = []
            async for e in self.table.query_entities(
                    "PartitionKey eq '{}'".format(entity['PartitionKey'])):
                entities.append(e)

            # Assert
            assert len(entities) ==  2
            assert entities[0].Description ==  u'ꀕ'
            assert entities[1].Description ==  u'ꀕ'
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_unicode_property_name(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Unicode property *names* round-trip intact through create/query."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            entity1 = entity.copy()
            entity1.update({u'啊齄丂狛狜': u'ꀕ'})
            entity2 = entity.copy()
            entity2.update({'RowKey': 'test2', u'啊齄丂狛狜': 'hello'})

            # Act
            await self.table.create_entity(entity=entity1)
            await self.table.create_entity(entity=entity2)
            entities = []
            async for e in self.table.query_entities(
                    "PartitionKey eq '{}'".format(entity['PartitionKey'])):
                entities.append(e)

            # Assert
            assert len(entities) ==  2
            assert entities[0][u'啊齄丂狛狜'] ==  u'ꀕ'
            assert entities[1][u'啊齄丂狛狜'] ==  u'hello'
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_operations_on_entity_with_partition_key_having_single_quote(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Upsert/update/get work when PartitionKey and RowKey contain single quotes."""
        # Quotes must survive the service round-trip without being mangled by OData escaping.
        partition_key_with_single_quote = u"a''''b"
        row_key_with_single_quote = u"a''''b"
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity(pk=partition_key_with_single_quote,
                                                         rk=row_key_with_single_quote)

            # Act
            sent_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
            resp = await self.table.upsert_entity(mode=UpdateMode.MERGE, entity=sent_entity)

            # Assert
            self._assert_valid_metadata(resp)
            received_entity = await self.table.get_entity(entity.PartitionKey, entity.RowKey)
            self._assert_updated_entity(received_entity)

            # And again with a newly added property
            sent_entity['newField'] = u'newFieldValue'
            resp = await self.table.update_entity(mode=UpdateMode.MERGE, entity=sent_entity)

            # Assert
            self._assert_valid_metadata(resp)
            received_entity = await self.table.get_entity(entity.PartitionKey, entity.RowKey)
            self._assert_updated_entity(received_entity)
            assert received_entity['newField'] ==  'newFieldValue'
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_empty_and_spaces_property_value(self, tables_storage_account_name,
                                                   tables_primary_storage_account_key):
        """Empty strings and leading/trailing whitespace in property values are preserved verbatim."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            entity.update({
                'EmptyByte': '',
                'EmptyUnicode': u'',
                'SpacesOnlyByte': '   ',
                'SpacesOnlyUnicode': u'   ',
                'SpacesBeforeByte': '   Text',
                'SpacesBeforeUnicode': u'   Text',
                'SpacesAfterByte': 'Text   ',
                'SpacesAfterUnicode': u'Text   ',
                'SpacesBeforeAndAfterByte': '   Text   ',
                'SpacesBeforeAndAfterUnicode': u'   Text   ',
            })

            # Act
            await self.table.create_entity(entity=entity)
            resp = await self.table.get_entity(entity['PartitionKey'], entity['RowKey'])

            # Assert
            assert resp is not None
            assert resp.EmptyByte ==  ''
            assert resp.EmptyUnicode ==  u''
            assert resp.SpacesOnlyByte ==  '   '
            assert resp.SpacesOnlyUnicode ==  u'   '
            assert resp.SpacesBeforeByte ==  '   Text'
            assert resp.SpacesBeforeUnicode ==  u'   Text'
            assert resp.SpacesAfterByte ==  'Text   '
            assert resp.SpacesAfterUnicode ==  u'Text   '
            assert resp.SpacesBeforeAndAfterByte ==  '   Text   '
            assert resp.SpacesBeforeAndAfterUnicode ==  u'   Text   '
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_none_property_value(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A property set to None is dropped by the service: it must be absent on read-back."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_base_entity_dict()
            entity.update({'NoneValue': None})

            # Act
            await self.table.create_entity(entity=entity)
            resp = await self.table.get_entity(entity['PartitionKey'], entity['RowKey'])

            # Assert
            assert resp is not None
            assert not hasattr(resp, 'NoneValue')
        finally:
            await self._tear_down()
@TablesPreparer()
async def test_binary_property_value(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
binary_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\t\n'
entity = self._create_random_base_entity_dict()
entity.update({'binary': b'\x01\x02\x03\x04\x05\x06\x07\x08\t\n'})
# Act
await self.table.create_entity(entity=entity)
resp = await self.table.get_entity(entity['PartitionKey'], entity['RowKey'])
# Assert
assert resp is not None
assert resp.binary.value == binary_data
finally:
await self._tear_down()
    @TablesPreparer()
    async def test_timezone(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A timezone-aware datetime property can be written and read back without error."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            local_tz = tzoffset('BRST', -10800)
            local_date = datetime(2003, 9, 27, 9, 52, 43, tzinfo=local_tz)
            entity = self._create_random_base_entity_dict()
            entity.update({'date': local_date})

            # Act
            await self.table.create_entity(entity=entity)
            resp = await self.table.get_entity(entity['PartitionKey'], entity['RowKey'])

            # Assert
            assert resp is not None
            # times are not equal because request is made after
            # assert resp.date.astimezone(tzutc()) ==  local_date.astimezone(tzutc())
            # assert resp.date.astimezone(local_tz) ==  local_date
        finally:
            await self._tear_down()
@TablesPreparer()
async def test_query_entities(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
table = await self._create_query_table(2)
# Act
entities = []
async for t in table.list_entities():
entities.append(t)
# Assert
assert len(entities) == 2
for entity in entities:
self._assert_default_entity(entity)
finally:
await self._tear_down()
    @TablesPreparer()
    async def test_query_entities_each_page(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Server-side paging: with results_per_page=2 and 6 matching rows, exactly 3 pages are returned."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            base_entity = {
                "PartitionKey": u"pk",
                "RowKey": u"1",
            }

            # Mutates base_entity in place: for i > 5 the PartitionKey also grows,
            # so only 6 of the 10 rows keep PartitionKey == 'pk'.
            for i in range(10):
                if i > 5:
                    base_entity['PartitionKey'] += str(i)
                base_entity['RowKey'] += str(i)
                base_entity['value'] = i
                try:
                    await self.table.create_entity(base_entity)
                except ResourceExistsError:
                    pass

            query_filter = u"PartitionKey eq 'pk'"

            entity_count = 0
            page_count = 0
            async for entity_page in self.table.query_entities(query_filter, results_per_page=2).by_page():

                temp_count = 0
                async for ent in entity_page:
                    temp_count += 1

                assert temp_count <= 2
                page_count += 1
                entity_count += temp_count

            assert entity_count == 6
            assert page_count == 3

        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_injection_async(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Parameterized query values are escaped, so a filter-injection payload cannot widen the result set."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            table_name = self.get_resource_name('queryasynctable')
            table = await self.ts.create_table_if_not_exists(table_name)
            entity_a = {'PartitionKey': 'foo', 'RowKey': 'bar1', 'IsAdmin': 'admin'}
            entity_b = {'PartitionKey': 'foo', 'RowKey': 'bar2', 'IsAdmin': ''}
            await table.create_entity(entity_a)
            await table.create_entity(entity_b)

            # Parameterized: matches only the admin row.
            is_user_admin = "PartitionKey eq @first and IsAdmin eq 'admin'"
            entity_query = table.query_entities(is_user_admin, parameters={'first': 'foo'})
            entities = []
            async for e in entity_query:
                entities.append(e)
            assert len(entities) ==  1

            # Raw string formatting: the injection widens the filter to both rows.
            injection = "foo' or RowKey eq 'bar2"
            injected_query = "PartitionKey eq '{}' and IsAdmin eq 'admin'".format(injection)
            entity_query = table.query_entities(injected_query)
            entities = []
            async for e in entity_query:
                entities.append(e)
            assert len(entities) ==  2

            # Same payload via parameters: escaped, matches nothing.
            entity_query = table.query_entities(is_user_admin, parameters={'first': injection})
            entities = []
            async for e in entity_query:
                entities.append(e)
            assert len(entities) ==  0
        finally:
            await self.ts.delete_table(table_name)
            await self._tear_down()
    @TablesPreparer()
    async def test_query_special_chars(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Keys and values containing OData-special characters are queryable both raw and parameterized."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            table_name = self.get_resource_name('querytable')
            table = await self.ts.create_table_if_not_exists(table_name)
            entity_a = {'PartitionKey': u':@', 'RowKey': u'+,$', 'Chars': u"?'/!_^#"}
            entity_b = {'PartitionKey': u':@', 'RowKey': u'=& ', 'Chars': u'?"\\{}<>%'}
            await table.create_entity(entity_a)
            await table.create_entity(entity_b)

            # Both rows match on the shared PartitionKey (raw filter).
            entities = []
            all_entities = table.query_entities("PartitionKey eq ':@'")
            async for e in all_entities:
                entities.append(e)

            assert len(entities) == 2

            # Same query, parameterized.
            entities = []
            parameters = {'key': ':@'}
            all_entities = table.query_entities("PartitionKey eq @key", parameters=parameters)
            async for e in all_entities:
                entities.append(e)

            assert len(entities) == 2

            # Single quote in the value must be doubled ('') in a raw filter.
            entities = []
            query = "PartitionKey eq ':@' and RowKey eq '+,$' and Chars eq '?''/!_^#'"
            query_entities = table.query_entities(query)
            async for e in query_entities:
                entities.append(e)

            assert len(entities) == 1

            # Parameterized version needs no manual quote doubling.
            entities = []
            query = "PartitionKey eq @key and RowKey eq @row and Chars eq @quote"
            parameters = {'key': ':@', 'row': '+,$', 'quote': "?'/!_^#"}
            query_entities = table.query_entities(query, parameters=parameters)
            async for e in query_entities:
                entities.append(e)

            assert len(entities) == 1

            # Backslash/brace/percent characters, raw filter.
            entities = []
            query = "PartitionKey eq ':@' and RowKey eq '=& ' and Chars eq '?\"\\{}<>%'"
            query_entities = table.query_entities(query)
            async for e in query_entities:
                entities.append(e)

            assert len(entities) == 1

            # Same characters via a raw-string parameter value.
            entities = []
            query = "PartitionKey eq @key and RowKey eq @row and Chars eq @quote"
            parameters = {'key': ':@', 'row': '=& ', 'quote': r'?"\{}<>%'}
            query_entities = table.query_entities(query, parameters=parameters)
            async for e in query_entities:
                entities.append(e)

            assert len(entities) == 1

        finally:
            await self.ts.delete_table(table_name)
            await self._tear_down()
@TablesPreparer()
async def test_query_user_filter(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity = await self._insert_two_opposite_entities()
# Act
entities = self.table.query_entities("married eq @my_param", parameters={'my_param': True})
assert entities is not None
length = 0
async for e in entities:
self._assert_default_entity(e)
length += 1
assert length == 1
finally:
await self._tear_down()
    @TablesPreparer()
    async def test_query_user_filter_multiple_params(self, tables_storage_account_name, tables_primary_storage_account_key):
        """Multiple query parameters (bool + string) combine correctly in one filter."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_two_opposite_entities()

            # Act
            parameters = {
                'my_param': True,
                'rk': entity['RowKey']
            }
            entities = self.table.query_entities("married eq @my_param and RowKey eq @rk", parameters=parameters)

            length = 0
            assert entities is not None
            async for entity in entities:
                self._assert_default_entity(entity)
                length += 1

            assert length == 1
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_user_filter_integers(self, tables_storage_account_name, tables_primary_storage_account_key):
        """An integer query parameter is typed correctly in the filter ('age lt 40')."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_two_opposite_entities()

            # Act
            parameters = {
                'my_param': 40,
            }
            entities = self.table.query_entities("age lt @my_param", parameters=parameters)

            length = 0
            assert entities is not None
            async for entity in entities:
                self._assert_default_entity(entity)
                length += 1

            assert length == 1
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_user_filter_floats(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A float query parameter is typed correctly in the filter ('ratio lt ...')."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_two_opposite_entities()

            # Act
            # One more than the inserted ratio, so exactly the default entity matches.
            parameters = {
                'my_param': entity['ratio'] + 1,
            }
            entities = self.table.query_entities("ratio lt @my_param", parameters=parameters)

            length = 0
            assert entities is not None
            async for entity in entities:
                self._assert_default_entity(entity)
                length += 1

            assert length == 1
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_user_filter_datetimes(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A datetime query parameter is typed correctly in the filter ('birthday eq ...')."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_two_opposite_entities()

            # Act
            parameters = {
                'my_param': entity['birthday'],
            }
            entities = self.table.query_entities("birthday eq @my_param", parameters=parameters)

            length = 0
            assert entities is not None
            async for entity in entities:
                self._assert_default_entity(entity)
                length += 1

            assert length == 1
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_user_filter_guids(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A GUID query parameter is typed correctly in the filter ('clsid eq ...')."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_two_opposite_entities()

            # Act
            parameters = {
                'my_param': entity['clsid']
            }
            entities = self.table.query_entities("clsid eq @my_param", parameters=parameters)

            length = 0
            assert entities is not None
            async for entity in entities:
                self._assert_default_entity(entity)
                length += 1

            assert length == 1
        finally:
            await self._tear_down()
@TablesPreparer()
async def test_query_user_filter_binary(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
entity, _ = await self._insert_two_opposite_entities()
# Act
parameters = {
'my_param': entity['binary']
}
entities = self.table.query_entities("binary eq @my_param", parameters=parameters)
length = 0
assert entities is not None
async for entity in entities:
self._assert_default_entity(entity)
length += 1
assert length == 1
finally:
self._tear_down()
    @TablesPreparer()
    async def test_query_user_filter_int64(self, tables_storage_account_name, tables_primary_storage_account_key):
        """An Edm.Int64 value (beyond 32-bit range) round-trips and is queryable by parameter."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_two_opposite_entities()
            # 2**40 exceeds Int32, forcing the INT64 code path.
            large_entity = {
                u"PartitionKey": u"pk001",
                u"RowKey": u"rk001",
                u"large_int": EntityProperty(2 ** 40, EdmType.INT64),
            }
            await self.table.create_entity(large_entity)

            # Act
            parameters = {
                'my_param': large_entity['large_int'].value
            }
            entities = self.table.query_entities("large_int eq @my_param", parameters=parameters)

            length = 0
            assert entities is not None
            async for entity in entities:
                # self._assert_default_entity(entity)
                assert large_entity['large_int'] == entity['large_int']
                length += 1

            assert length == 1
        finally:
            await self._tear_down()
@TablesPreparer()
async def test_query_zero_entities(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
table = await self._create_query_table(0)
# Act
entities = []
async for t in table.list_entities():
entities.append(t)
# Assert
assert len(entities) == 0
finally:
await self._tear_down()
@TablesPreparer()
async def test_query_entities_full_metadata(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
table = await self._create_query_table(2)
# Act
entities = []
async for t in table.list_entities(headers={'accept': 'application/json;odata=fullmetadata'}):
entities.append(t)
# Assert
assert len(entities) == 2
for entity in entities:
self._assert_default_entity_json_full_metadata(entity)
finally:
await self._tear_down()
@TablesPreparer()
async def test_query_entities_no_metadata(self, tables_storage_account_name, tables_primary_storage_account_key):
# Arrange
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
table = await self._create_query_table(2)
# Act
entities = []
async for t in table.list_entities(headers={'accept': 'application/json;odata=nometadata'}):
entities.append(t)
# Assert
assert len(entities) == 2
for entity in entities:
self._assert_default_entity_json_no_metadata(entity)
finally:
await self._tear_down()
    @TablesPreparer()
    async def test_query_entities_with_filter(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A PartitionKey filter returns only the exact-match entity, not prefix/suffix variants."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity, _ = await self._insert_random_entity()
            # Decoys sharing a substring of the target PartitionKey.
            entity2, _ = await self._insert_random_entity(pk="foo" + entity.PartitionKey)
            entity3, _ = await self._insert_random_entity(pk="bar" + entity.PartitionKey)

            # Act
            entities = []
            async for t in self.table.query_entities(
                    "PartitionKey eq '{}'".format(entity.PartitionKey)):
                entities.append(t)

            # Assert
            assert len(entities) ==  1
            assert entity.PartitionKey ==  entities[0].PartitionKey
            self._assert_default_entity(entities[0])
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_invalid_filter(self, tables_storage_account_name, tables_primary_storage_account_key):
        """A syntactically invalid filter string is rejected by the service with HttpResponseError."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            # Mutated in place each iteration: RowKey grows ('rk0', 'rk01', ...).
            base_entity = {
                u"PartitionKey": u"pk",
                u"RowKey": u"rk",
                u"value": 1
            }

            for i in range(5):
                base_entity[u"RowKey"] += str(i)
                base_entity[u"value"] += i
                await self.table.create_entity(base_entity)
            # Act
            with pytest.raises(HttpResponseError):
                async for t in self.table.query_entities("aaa bbb ccc"):
                    _ = t
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_entities_with_select(self, tables_storage_account_name, tables_primary_storage_account_key):
        """$select projection returns only the requested properties."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            table = await self._create_query_table(2)

            # Act
            # NOTE(review): this passes one comma-joined string rather than
            # ["age", "sex"]; it appears the client forwards it verbatim as the
            # $select value — confirm against the select= parameter contract.
            entities = []
            async for t in table.list_entities(select=["age, sex"]):
                entities.append(t)

            # Assert
            assert len(entities) ==  2
            assert entities[0].age ==  39
            assert entities[0].sex ==  'male'
            assert not hasattr(entities[0], "birthday")
            assert not hasattr(entities[0], "married")
            assert not hasattr(entities[0], "deceased")
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_entities_with_top(self, tables_storage_account_name, tables_primary_storage_account_key):
        """results_per_page=2 over 3 entities yields exactly 2 pages."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            table = await self._create_query_table(3)
            # circular dependencies made this return a list not an item paged - problem when calling by page
            # Act
            entities = []
            # Each iteration of by_page() yields one page, so we count pages here.
            async for t in table.list_entities(results_per_page=2).by_page():
                entities.append(t)

            # Assert
            assert len(entities) ==  2
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_query_entities_with_top_and_next(self, tables_storage_account_name,
                                                    tables_primary_storage_account_key):
        """Manual pagination: continuation tokens chain 5 entities across pages of 2, 2, and 1."""
        # Arrange
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            table = await self._create_query_table(5)

            # Act
            # Each pager is advanced exactly one page; its continuation_token
            # (only valid after __anext__) seeds the next pager.
            resp1 = table.list_entities(results_per_page=2).by_page()
            entities1 = []
            async for el in await resp1.__anext__():
                entities1.append(el)
            resp2 = table.list_entities(results_per_page=2).by_page(
                continuation_token=resp1.continuation_token)
            entities2 = []
            async for el in await resp2.__anext__():
                entities2.append(el)
            resp3 = table.list_entities(results_per_page=2).by_page(
                continuation_token=resp2.continuation_token)
            entities3 = []
            async for el in await resp3.__anext__():
                entities3.append(el)

            # Assert
            assert len(entities1) ==  2
            assert len(entities2) ==  2
            assert len(entities3) ==  1
            self._assert_default_entity(entities1[0])
            self._assert_default_entity(entities1[1])
            self._assert_default_entity(entities2[0])
            self._assert_default_entity(entities2[1])
            self._assert_default_entity(entities3[0])
        finally:
            await self._tear_down()
@TablesPreparer()
async def test_sas_query(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
entity, _ = await self._insert_random_entity()
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow() - timedelta(minutes=1),
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
entities = []
async for t in table.query_entities(
"PartitionKey eq '{}'".format(entity['PartitionKey'])):
entities.append(t)
# Assert
assert len(entities) == 1
self._assert_default_entity(entities[0])
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_add(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(add=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow() - timedelta(minutes=1),
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
entity = self._create_random_entity_dict()
await table.create_entity(entity=entity)
# Assert
resp = await self.table.get_entity(partition_key=entity['PartitionKey'],
row_key=entity['RowKey'])
self._assert_default_entity(resp)
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_add_inside_range(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(add=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start_pk='test', start_rk='test1',
end_pk='test', end_rk='test1',
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
entity = self._create_random_entity_dict('test', 'test1')
await table.create_entity(entity=entity)
# Assert
resp = await self.table.get_entity('test', 'test1')
self._assert_default_entity(resp)
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_add_outside_range(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(add=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start_pk='test', start_rk='test1',
end_pk='test', end_rk='test1',
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
with pytest.raises(HttpResponseError):
entity = self._create_random_entity_dict()
await table.create_entity(entity=entity)
# Assert
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_update(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
entity, _ = await self._insert_random_entity()
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(update=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
updated_entity = self._create_updated_entity_dict(entity.PartitionKey, entity.RowKey)
resp = await table.update_entity(mode=UpdateMode.REPLACE, entity=updated_entity)
received_entity = await self.table.get_entity(entity.PartitionKey,
entity.RowKey)
# Assert
self._assert_updated_entity(received_entity)
assert resp is not None
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_delete(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
entity, _ = await self._insert_random_entity()
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
permission=TableSasPermissions(delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
await table.delete_entity(entity.PartitionKey, entity.RowKey)
# Assert
with pytest.raises(ResourceNotFoundError):
await self.table.get_entity(entity.PartitionKey, entity.RowKey)
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_upper_case_table_name(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
entity, _ = await self._insert_random_entity()
# Table names are case insensitive, so simply upper case our existing table name to test
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name.upper(),
permission=TableSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
start=datetime.utcnow() - timedelta(minutes=1),
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(self.table_name)
entities = []
async for t in table.query_entities(
"PartitionKey eq '{}'".format(entity['PartitionKey'])):
entities.append(t)
# Assert
assert len(entities) == 1
self._assert_default_entity(entities[0])
finally:
await self._tear_down()
@TablesPreparer()
async def test_sas_signed_identifier(self, tables_storage_account_name, tables_primary_storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
url = self.account_url(tables_storage_account_name, "table")
await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
try:
# Arrange
entity, _ = await self._insert_random_entity()
access_policy = AccessPolicy()
access_policy.start = datetime(2011, 10, 11)
access_policy.expiry = datetime(2025, 10, 12)
access_policy.permission = TableSasPermissions(read=True)
identifiers = {'testid': access_policy}
await self.table.set_table_access_policy(identifiers)
token = self.generate_sas(
generate_table_sas,
tables_storage_account_name,
tables_primary_storage_account_key,
self.table_name,
policy_id='testid',
)
# Act
service = TableServiceClient(
self.account_url(tables_storage_account_name, "table"),
credential=AzureSasCredential(token),
)
table = service.get_table_client(table_name=self.table_name)
entities = []
async for t in table.query_entities(
"PartitionKey eq '{}'".format(entity.PartitionKey)):
entities.append(t)
# Assert
assert len(entities) == 1
self._assert_default_entity(entities[0])
finally:
await self._tear_down()
    @TablesPreparer()
    async def test_datetime_milliseconds(self, tables_storage_account_name, tables_primary_storage_account_key):
        # Sub-second datetime precision must round-trip through the service
        # unchanged.
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        try:
            entity = self._create_random_entity_dict()

            # 283000 microseconds == 283 ms; tzutc() keeps the value
            # timezone-aware, as the service requires.
            entity['milliseconds'] = datetime(2011, 11, 4, 0, 5, 23, 283000, tzinfo=tzutc())

            await self.table.create_entity(entity)

            received_entity = await self.table.get_entity(
                partition_key=entity['PartitionKey'],
                row_key=entity['RowKey']
            )

            assert entity['milliseconds'] == received_entity['milliseconds']
        finally:
            await self._tear_down()
    @TablesPreparer()
    async def test_datetime_str_passthrough(self, tables_storage_account_name, tables_primary_storage_account_key):
        # Verifies that a raw .NET-style timestamp string written as an
        # EdmType.DATETIME property is preserved verbatim by the service.
        await self._set_up(tables_storage_account_name, tables_primary_storage_account_key)
        partition, row = self._create_pk_rk(None, None)

        # 7-digit fractional seconds: more precision than Python's datetime
        # supports, so it can only survive via the service-value passthrough.
        dotnet_timestamp = "2013-08-22T01:12:06.2608595Z"
        entity = {
            'PartitionKey': partition,
            'RowKey': row,
            'datetime1': EntityProperty(dotnet_timestamp, EdmType.DATETIME)
        }
        try:
            await self.table.create_entity(entity)
            received = await self.table.get_entity(partition, row)
            assert isinstance(received['datetime1'], datetime)
            # The original wire value is kept alongside the parsed datetime.
            assert received.datetime1.tables_service_value == dotnet_timestamp

            # A datetime derived via replace() carries no service value.
            received['datetime2'] = received.datetime1.replace(year=2020)
            assert received['datetime2'].tables_service_value == ""

            await self.table.update_entity(received)
            updated = await self.table.get_entity(partition, row)
            assert isinstance(updated['datetime1'], datetime)
            assert isinstance(updated['datetime2'], datetime)
            # Updating the entity must not disturb the untouched property.
            assert updated.datetime1.tables_service_value == dotnet_timestamp
        finally:
            await self._tear_down()
94ddf52ddc6d4d7b5522fb33c5cdc69e1c8aaec3 | 297 | py | Python | examples/spot/sub_account/sub_account_enable_futures.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
] | 512 | 2021-06-15T08:52:44.000Z | 2022-03-31T09:49:53.000Z | examples/spot/sub_account/sub_account_enable_futures.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
] | 75 | 2021-06-20T13:49:50.000Z | 2022-03-30T02:45:31.000Z | examples/spot/sub_account/sub_account_enable_futures.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
] | 156 | 2021-06-18T11:56:36.000Z | 2022-03-29T16:34:22.000Z | #!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging

# Emit DEBUG-level logs so request/response details are visible when running
# this example.
config_logging(logging, logging.DEBUG)

# Binance API credentials — fill in before running.
key = ""
secret = ""

spot_client = Client(key, secret)

# Enable futures trading for the given sub-account and log the raw response.
logging.info(spot_client.sub_account_enable_futures(email="alice@test.com"))
| 21.214286 | 76 | 0.784512 |
201e2be568312d2137397e2a9f5683364a8cd3e6 | 16,905 | py | Python | recipes/CommonVoice/ASR/transformer/train.py | fadelmuli/asr-speechbrain | d4d66d1be373cb10645e474a17dcf0e2d2243e24 | [
"Apache-2.0"
] | null | null | null | recipes/CommonVoice/ASR/transformer/train.py | fadelmuli/asr-speechbrain | d4d66d1be373cb10645e474a17dcf0e2d2243e24 | [
"Apache-2.0"
] | null | null | null | recipes/CommonVoice/ASR/transformer/train.py | fadelmuli/asr-speechbrain | d4d66d1be373cb10645e474a17dcf0e2d2243e24 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Recipe for training a Transformer ASR system with CommonVoice
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with (CTC/Att joint) beamsearch.
To run this recipe, do the following:
> python train.py hparams/transformer.yaml
With the default hyperparameters, the system employs a convolutional frontend (ContextNet) and a transformer.
The decoder is based on a Transformer decoder.
The neural network is trained on both CTC and negative-log likelihood
targets and sub-word units estimated with Byte Pair Encoding (BPE)
are used as basic recognition tokens.
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g, characters instead of BPE),
training split (e.g, train-clean 100 rather than the full one), and many
other possible variations.
Authors
* Titouan Parcollet 2021
* Jianyuan Zhong 2020
"""
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.distributed import run_on_main
from speechbrain.utils.data_utils import undo_padding
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
    """Transformer ASR trainer: joint CTC + attention training, with beam
    search decoding during validation (periodically) and test."""

    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities.

        Returns ``(p_ctc, p_seq, wav_lens, hyps)``; ``hyps`` is None except
        when beam search runs (every ``valid_search_interval`` epochs on
        VALID, always on TEST).
        """
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
        tokens_bos, _ = batch.tokens_bos

        # compute features
        feats = self.hparams.compute_features(wavs)
        current_epoch = self.hparams.epoch_counter.current
        feats = self.hparams.normalize(feats, wav_lens, epoch=current_epoch)

        # Augmentation (training only, and only if configured)
        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                feats = self.hparams.augmentation(feats)

        # forward modules: CNN front-end, then Transformer encoder/decoder.
        # (Leftover debug prints of src/feats/enc_out/pred shapes removed.)
        src = self.modules.CNN(feats)
        enc_out, pred = self.modules.Transformer(
            src, tokens_bos, wav_lens, pad_idx=self.hparams.pad_index
        )

        # output layer for ctc log-probabilities
        logits = self.modules.ctc_lin(enc_out)
        p_ctc = self.hparams.log_softmax(logits)

        # output layer for seq2seq log-probabilities
        pred = self.modules.seq_lin(pred)
        p_seq = self.hparams.log_softmax(pred)

        # Compute outputs: beam search is expensive, so run it only when the
        # hypotheses are actually needed.
        hyps = None
        if stage == sb.Stage.TRAIN:
            hyps = None
        elif stage == sb.Stage.VALID:
            hyps = None
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch % self.hparams.valid_search_interval == 0:
                hyps, _ = self.hparams.beam_searcher(enc_out.detach(), wav_lens)
        elif stage == sb.Stage.TEST:
            hyps, _ = self.hparams.beam_searcher(enc_out.detach(), wav_lens)

        return p_ctc, p_seq, wav_lens, hyps

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""
        (p_ctc, p_seq, wav_lens, predicted_tokens,) = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        loss_seq = self.hparams.seq_cost(
            p_seq, tokens_eos, length=tokens_eos_lens
        )
        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)

        # Weighted interpolation of the two objectives.
        loss = (
            self.hparams.ctc_weight * loss_ctc
            + (1 - self.hparams.ctc_weight) * loss_seq
        )

        if stage != sb.Stage.TRAIN:
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            if current_epoch % valid_search_interval == 0 or (
                stage == sb.Stage.TEST
            ):
                # Decode token terms to words
                predicted_words = self.tokenizer(
                    predicted_tokens, task="decode_from_list"
                )

                # Convert indices to words
                target_words = undo_padding(tokens, tokens_lens)
                target_words = self.tokenizer(
                    target_words, task="decode_from_list"
                )

                self.wer_metric.append(ids, predicted_words, target_words)
                self.cer_metric.append(ids, predicted_words, target_words)

            # compute the accuracy of the one-step-forward prediction
            self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        # check if we need to switch optimizer
        # if so change the optimizer from Adam to SGD
        self.check_and_reset_optimizer()

        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)

        # normalize the loss by gradient_accumulation step
        (loss / self.hparams.gradient_accumulation).backward()

        if self.step % self.hparams.gradient_accumulation == 0:
            # gradient clipping & early stop if loss is not finite
            self.check_gradients(loss)

            self.optimizer.step()
            self.optimizer.zero_grad()

            # anneal lr every update
            self.hparams.noam_annealing(self.optimizer)

        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            # Fresh metric trackers for every validation/test pass.
            self.acc_metric = self.hparams.acc_computer()
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["ACC"] = self.acc_metric.summarize()
            current_epoch = self.hparams.epoch_counter.current
            valid_search_interval = self.hparams.valid_search_interval
            # WER/CER only exist when beam search actually ran this epoch.
            if (
                current_epoch % valid_search_interval == 0
                or stage == sb.Stage.TEST
            ):
                stage_stats["WER"] = self.wer_metric.summarize("error_rate")
                stage_stats["CER"] = self.cer_metric.summarize("error_rate")

        # log stats and save checkpoint at end-of-epoch
        if stage == sb.Stage.VALID and sb.utils.distributed.if_main_process():

            # report different epoch stages according current stage
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch <= self.hparams.stage_one_epochs:
                lr = self.hparams.noam_annealing.current_lr
                steps = self.hparams.noam_annealing.n_steps
                optimizer = self.optimizer.__class__.__name__
            else:
                lr = self.hparams.lr_sgd
                steps = -1
                optimizer = self.optimizer.__class__.__name__

            epoch_stats = {
                "epoch": epoch,
                "lr": lr,
                "steps": steps,
                "optimizer": optimizer,
            }
            self.hparams.train_logger.log_stats(
                stats_meta=epoch_stats,
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the best-accuracy checkpoint.
            self.checkpointer.save_and_keep_only(
                meta={"ACC": stage_stats["ACC"], "epoch": epoch},
                max_keys=["ACC"],
            )

        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

    def check_and_reset_optimizer(self):
        """reset the optimizer if training enters stage 2 (Adam -> SGD)"""
        current_epoch = self.hparams.epoch_counter.current
        if not hasattr(self, "switched"):
            self.switched = False
            # If a checkpoint already restored an SGD optimizer, we are
            # effectively past the switch point.
            if isinstance(self.optimizer, torch.optim.SGD):
                self.switched = True
        if self.switched is True:
            return
        if current_epoch > self.hparams.stage_one_epochs:
            self.optimizer = self.hparams.SGD(self.modules.parameters())

            if self.checkpointer is not None:
                self.checkpointer.add_recoverable("optimizer", self.optimizer)

            self.switched = True

    def on_fit_start(self):
        """Gets called at the beginning of ``fit()``, on multiple processes
        if ``distributed_count > 0`` and backend is ddp.

        Default implementation compiles the jit modules, initializes
        optimizers, and loads the latest checkpoint to resume training.
        """
        # Run this *after* starting all processes since jit modules cannot be
        # pickled.
        self._compile_jit()

        # Wrap modules with parallel backend after jit
        self._wrap_distributed()

        # Initialize optimizers after parameters are configured
        self.init_optimizers()

        # Load latest checkpoint to check to current epoch number
        if self.checkpointer is not None:
            self.checkpointer.recover_if_possible(
                device=torch.device(self.device)
            )

            # if the model is resumed from stage two, reinitialize the optimizer
            current_epoch = self.hparams.epoch_counter.current
            if current_epoch > self.hparams.stage_one_epochs:
                self.optimizer = self.hparams.SGD(self.modules.parameters())
                if self.checkpointer is not None:
                    self.checkpointer.add_recoverable("optimizer", self.optimizer)

            # Load latest checkpoint to resume training if interrupted
            if self.checkpointer is not None:
                self.checkpointer.recover_if_possible(
                    device=torch.device(self.device)
                )
# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide the CSV paths, sorting policy,
        sample_rate, bos/eos indices and the dataloader option dicts used below.
    tokenizer : SentencePiece
        Trained tokenizer whose ``sp`` model maps a transcript to token ids.

    Returns
    -------
    tuple
        ``(train_data, valid_data, test_data)`` DynamicItemDataset objects.
    """
    # 1. Define datasets
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )

    if hparams["sorting"] == "ascending":
        # we sort training data to speed up training and get better results.
        # Utterances longer than avoid_if_longer_than are filtered out.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False

    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # when sorting do not shuffle in dataloader ! otherwise is pointless
        hparams["train_dataloader_opts"]["shuffle"] = False

    elif hparams["sorting"] == "random":
        pass

    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so it is faster to evaluate
    test_data = test_data.filtered_sorted(sort_key="duration")

    datasets = [train_data, valid_data, test_data]

    # 2. Define audio pipeline: read each wav and resample it from the file's
    # native rate to the model's expected sample_rate.
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    # 3. Define text pipeline: yields the transcript plus its token-id
    # variants (plain, BOS-prefixed, EOS-suffixed) as separate dynamic items.
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    # 4. Set output: keys each batch will expose to the Brain class.
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
if __name__ == "__main__":
    # CLI: first positional argument is the hyperparameter YAML file.
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    # Dataset preparation (parsing CommonVoice)
    from common_voice_prepare import prepare_common_voice  # noqa

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    # Due to DDP, we do the preparation ONLY on the main python process
    run_on_main(
        prepare_common_voice,
        kwargs={
            "data_folder": hparams["data_folder"],
            "save_folder": hparams["save_folder"],
            "train_tsv_file": hparams["train_tsv_file"],
            "dev_tsv_file": hparams["dev_tsv_file"],
            "test_tsv_file": hparams["test_tsv_file"],
            "accented_letters": hparams["accented_letters"],
            "language": hparams["language"],
            "skip_prep": hparams["skip_prep"],
        },
    )

    # Defining tokenizer and loading it (trains a BPE model on the train CSV
    # transcripts if one does not already exist in save_folder).
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )

    # here we create the datasets objects as well as tokenization and encoding
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)

    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        opt_class=hparams["Adam"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )

    # Final evaluation on the test split (best checkpoint by WER).
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )

    # Also score the validation split with the same checkpoint.
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_valid.txt"
    asr_brain.evaluate(
        valid_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_opts"],
    )
| 36.830065 | 109 | 0.644957 |
0fd8874255c3344edf60e0ef705f553ebc824dc2 | 364 | py | Python | one_fm/tasks/execute.py | ks093/One-FM | d89b6739f12a8ad527355f7ba31184bb635e1dad | [
"MIT"
] | null | null | null | one_fm/tasks/execute.py | ks093/One-FM | d89b6739f12a8ad527355f7ba31184bb635e1dad | [
"MIT"
] | null | null | null | one_fm/tasks/execute.py | ks093/One-FM | d89b6739f12a8ad527355f7ba31184bb635e1dad | [
"MIT"
] | null | null | null | import frappe
from frappe import _
from frappe import enqueue
def daily():
    """Queue every background job that must run once per day."""
    daily_jobs = (
        "one_fm.tasks.erpnext.purchase_order.due_purchase_order_payment_terms",
        "one_fm.tasks.erpnext.issue.daily_open",
        "one_fm.tasks.erpnext.job_opening.uncheck_publish_job_opening_on_valid_till",
    )
    for job_path in daily_jobs:
        enqueue(job_path)
| 26 | 89 | 0.766484 |
0fbf536b58c577e69518a047bc459cbce79b89b0 | 5,676 | py | Python | airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/conftest.py | curanaj/airbyte-dbt-demo | f6b8ccd8f8e57b7ea84caf814b14d836338e8007 | [
"MIT"
] | null | null | null | airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/conftest.py | curanaj/airbyte-dbt-demo | f6b8ccd8f8e57b7ea84caf814b14d836338e8007 | [
"MIT"
] | null | null | null | airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/conftest.py | curanaj/airbyte-dbt-demo | f6b8ccd8f8e57b7ea84caf814b14d836338e8007 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
from pathlib import Path
from typing import Any, List, MutableMapping, Optional
import pytest
from airbyte_cdk.models import AirbyteCatalog, AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConnectorSpecification
from source_acceptance_test.config import Config
from source_acceptance_test.utils import ConnectorRunner, SecretDict, load_config
@pytest.fixture(name="base_path")
def base_path_fixture(pytestconfig, acceptance_test_config) -> Path:
    """Fixture to define base path for every path-like fixture.

    Prefers the ``base_path`` declared in the acceptance-test config;
    otherwise falls back to the --acceptance-test-config CLI option.
    """
    if acceptance_test_config.base_path:
        return Path(acceptance_test_config.base_path).absolute()
    return Path(pytestconfig.getoption("--acceptance-test-config")).absolute()
@pytest.fixture(name="acceptance_test_config", scope="session")
def acceptance_test_config_fixture(pytestconfig) -> Config:
    """Fixture with test's config, loaded once per session from the
    --acceptance-test-config option (skip=True skips the run if absent)."""
    return load_config(pytestconfig.getoption("--acceptance-test-config", skip=True))
@pytest.fixture(name="connector_config_path")
def connector_config_path_fixture(inputs, base_path) -> Path:
    """Fixture with connector's config path: ``inputs.config_path``
    resolved relative to ``base_path``."""
    return Path(base_path) / getattr(inputs, "config_path")
@pytest.fixture(name="invalid_connector_config_path")
def invalid_connector_config_path_fixture(inputs, base_path) -> Path:
    """Fixture with the path to a deliberately-invalid connector config
    (``inputs.invalid_config_path`` resolved relative to ``base_path``)."""
    return Path(base_path) / getattr(inputs, "invalid_config_path")
@pytest.fixture(name="connector_spec_path")
def connector_spec_path_fixture(inputs, base_path) -> Path:
    """Fixture with connector's specification path (``inputs.spec_path``
    resolved relative to ``base_path``)."""
    return Path(base_path) / getattr(inputs, "spec_path")
@pytest.fixture(name="configured_catalog_path")
def configured_catalog_path_fixture(inputs, base_path) -> Optional[str]:
    """Fixture with connector's configured_catalog path, or None when the
    test inputs do not declare one."""
    if getattr(inputs, "configured_catalog_path"):
        return Path(base_path) / getattr(inputs, "configured_catalog_path")
    return None
@pytest.fixture(name="configured_catalog")
def configured_catalog_fixture(configured_catalog_path) -> Optional[ConfiguredAirbyteCatalog]:
    """Parse the configured-catalog JSON file, if a path was provided."""
    if configured_catalog_path:
        return ConfiguredAirbyteCatalog.parse_file(configured_catalog_path)
    return None
@pytest.fixture(name="catalog")
def catalog_fixture(configured_catalog: ConfiguredAirbyteCatalog) -> Optional[AirbyteCatalog]:
    """Derive the plain AirbyteCatalog from the configured catalog's
    streams, or None when no configured catalog exists."""
    if configured_catalog:
        return AirbyteCatalog(streams=[stream.stream for stream in configured_catalog.streams])
    return None
@pytest.fixture(name="image_tag")
def image_tag_fixture(acceptance_test_config) -> str:
    """Docker image (name:tag) of the connector under test."""
    return acceptance_test_config.connector_image
@pytest.fixture(name="connector_config")
def connector_config_fixture(base_path, connector_config_path) -> SecretDict:
    """Load the connector config JSON, wrapped in SecretDict so secret
    values are masked if the mapping is printed or logged."""
    with open(str(connector_config_path), "r") as file:
        contents = file.read()
    return SecretDict(json.loads(contents))
@pytest.fixture(name="invalid_connector_config")
def invalid_connector_config_fixture(base_path, invalid_connector_config_path) -> MutableMapping[str, Any]:
    """Load a deliberately-invalid connector config from disk.

    TODO: implement default value - generate from valid config.
    """
    with open(str(invalid_connector_config_path), "r") as file:
        contents = file.read()
    return json.loads(contents)
@pytest.fixture(name="malformed_connector_config")
def malformed_connector_config_fixture(connector_config) -> MutableMapping[str, Any]:
    """Deep copy of the valid config, intended to be mutated into a
    malformed one.

    TODO: drop required field, add extra.
    """
    malformed_config = copy.deepcopy(connector_config)
    return malformed_config
@pytest.fixture(name="connector_spec")
def connector_spec_fixture(connector_spec_path) -> ConnectorSpecification:
    """Parse the connector's spec file into a ConnectorSpecification."""
    return ConnectorSpecification.parse_file(connector_spec_path)
@pytest.fixture(name="docker_runner")
def docker_runner_fixture(image_tag, tmp_path) -> ConnectorRunner:
    """Runner that executes the connector image, mounting a per-test
    temporary directory as its volume."""
    return ConnectorRunner(image_tag, volume=tmp_path)
@pytest.fixture(scope="session", autouse=True)
def pull_docker_image(acceptance_test_config) -> None:
    """Startup fixture to pull docker image.

    Runs automatically once per session; instantiating ConnectorRunner
    triggers the image pull as a side effect.
    """
    print("Pulling docker image", acceptance_test_config.connector_image)
    ConnectorRunner(image_name=acceptance_test_config.connector_image, volume=Path("."))
    print("Pulling completed")
@pytest.fixture(name="expected_records")
def expected_records_fixture(inputs, base_path) -> List[AirbyteRecordMessage]:
    """Parse the expected-records file (one JSON record per line), or
    return an empty list when the test inputs don't declare one."""
    expect_records = getattr(inputs, "expect_records")
    if not expect_records:
        return []

    with open(str(base_path / getattr(expect_records, "path"))) as f:
        return [AirbyteRecordMessage.parse_raw(line) for line in f]
ddc5c2f61fc7c4430c7f763b63db2479a0cfb4a2 | 19,856 | py | Python | tests/test_conferences.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | tests/test_conferences.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | tests/test_conferences.py | bdyetton/prettychart | e8b33a7dfdc8c33d15969586be7f68172795f76d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import mock
from nose.tools import * # noqa (PEP8 asserts)
import hmac
import hashlib
from StringIO import StringIO
import furl
from modularodm import Q
from modularodm.exceptions import ValidationError
from framework.auth.core import Auth
from website import settings
from website.models import User, Node
from website.conferences import views
from website.conferences.model import Conference
from website.conferences import utils, message
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase, fake
from tests.factories import ModularOdmFactory, FakerAttribute, ProjectFactory, UserFactory
from factory import Sequence, post_generation
def assert_absolute(url):
    # Fail unless ``url`` is on the same host as the configured OSF domain
    # (i.e. the URL is absolute, not a relative path).
    parsed_domain = furl.furl(settings.DOMAIN)
    parsed_url = furl.furl(url)
    assert_equal(parsed_domain.host, parsed_url.host)
def assert_equal_urls(first, second):
    # Compare two URLs while ignoring any explicit port component (test
    # server and production URLs may differ only in port).
    parsed_first = furl.furl(first)
    parsed_first.port = None
    parsed_second = furl.furl(second)
    parsed_second.port = None
    assert_equal(parsed_first, parsed_second)
class ConferenceFactory(ModularOdmFactory):
    # Factory producing active Conference documents with a unique endpoint.
    FACTORY_FOR = Conference

    endpoint = Sequence(lambda n: 'conference{0}'.format(n))
    name = FakerAttribute('catch_phrase')
    active = True

    @post_generation
    def admins(self, create, extracted, **kwargs):
        # Allows ConferenceFactory(admins=[...]); defaults to one new user.
        self.admins = extracted or [UserFactory()]
def create_fake_conference_nodes(n, endpoint):
    # Create ``n`` public projects tagged with the conference endpoint.
    tagged_nodes = []
    for _ in range(n):
        project = ProjectFactory(is_public=True)
        project.add_tag(endpoint, Auth(project.creator))
        project.save()
        tagged_nodes.append(project)
    return tagged_nodes
class TestConferenceUtils(OsfTestCase):
    # Tests for the get-or-create helpers in website.conferences.utils.

    def test_get_or_create_user_exists(self):
        user = UserFactory()
        fetched, created = utils.get_or_create_user(user.fullname, user.username, True)
        assert_false(created)
        assert_equal(user._id, fetched._id)
        # Existing users are not retroactively flagged as spam, even when
        # is_spam=True is passed.
        assert_false('is_spam' in fetched.system_tags)

    def test_get_or_create_user_not_exists(self):
        fullname = 'Roger Taylor'
        username = 'roger@queen.com'
        fetched, created = utils.get_or_create_user(fullname, username, False)
        assert_true(created)
        assert_equal(fetched.fullname, fullname)
        assert_equal(fetched.username, username)
        assert_false('is_spam' in fetched.system_tags)

    def test_get_or_create_user_is_spam(self):
        fullname = 'John Deacon'
        username = 'deacon@queen.com'
        fetched, created = utils.get_or_create_user(fullname, username, True)
        assert_true(created)
        assert_equal(fetched.fullname, fullname)
        assert_equal(fetched.username, username)
        # New users created from a spammy message carry the 'is_spam' tag.
        assert_true('is_spam' in fetched.system_tags)

    def test_get_or_create_node_exists(self):
        node = ProjectFactory()
        fetched, created = utils.get_or_create_node(node.title, node.creator)
        assert_false(created)
        assert_equal(node._id, fetched._id)

    def test_get_or_create_node_title_not_exists(self):
        # Same creator, different title -> a new node is created.
        title = 'Night at the Opera'
        creator = UserFactory()
        node = ProjectFactory(creator=creator)
        fetched, created = utils.get_or_create_node(title, creator)
        assert_true(created)
        assert_not_equal(node._id, fetched._id)

    def test_get_or_create_node_user_not_exists(self):
        # Same title, different creator -> a new node is created.
        title = 'Night at the Opera'
        creator = UserFactory()
        node = ProjectFactory(title=title)
        fetched, created = utils.get_or_create_node(title, creator)
        assert_true(created)
        assert_not_equal(node._id, fetched._id)
class ContextTestCase(OsfTestCase):
    # Base class providing a Flask test request context that mimics an
    # inbound Mailgun webhook POST (timestamp/token/signature trio).
    MAILGUN_API_KEY = 'mailkimp'

    @classmethod
    def setUpClass(cls):
        super(ContextTestCase, cls).setUpClass()
        # Swap in the fake API key, remembering the real one for teardown.
        settings.MAILGUN_API_KEY, cls._MAILGUN_API_KEY = cls.MAILGUN_API_KEY, settings.MAILGUN_API_KEY

    @classmethod
    def tearDownClass(cls):
        super(ContextTestCase, cls).tearDownClass()
        settings.MAILGUN_API_KEY = cls._MAILGUN_API_KEY

    def make_context(self, method='POST', **kwargs):
        # Build form data carrying a valid Mailgun HMAC-SHA256 signature of
        # timestamp+token; callers may override fields via data={...}.
        data = {
            'X-Mailgun-Sscore': 0,
            'timestamp': '123',
            'token': 'secret',
            'signature': hmac.new(
                key=settings.MAILGUN_API_KEY,
                msg='{}{}'.format('123', 'secret'),
                digestmod=hashlib.sha256,
            ).hexdigest(),
        }
        data.update(kwargs.pop('data', {}))
        # Drop keys explicitly set to None so tests can simulate missing
        # form fields.
        data = {
            key: value
            for key, value in data.iteritems()
            if value is not None
        }
        return self.app.app.test_request_context(method=method, data=data, **kwargs)
class TestProvisionNode(ContextTestCase):
    # Tests for utils.provision_node / utils.upload_attachment, which turn
    # an inbound conference email into a configured OSF project.

    def setUp(self):
        super(TestProvisionNode, self).setUp()
        self.node = ProjectFactory()
        self.user = self.node.creator
        self.conference = ConferenceFactory()
        self.body = 'dragon on my back'
        self.content = 'dragon attack'
        self.attachment = StringIO(self.content)
        # Conference inboxes are prefixed with 'test-' outside production.
        self.recipient = '{0}{1}-poster@osf.io'.format(
            'test-' if settings.DEV_MODE else '',
            self.conference.endpoint,
        )

    def make_context(self, **kwargs):
        # Extend the base Mailgun context with one attachment plus a valid
        # recipient/body for this conference.
        data = {
            'attachment-count': '1',
            'attachment-1': (self.attachment, 'attachment-1'),
            'X-Mailgun-Sscore': 0,
            'recipient': self.recipient,
            'stripped-text': self.body,
        }
        data.update(kwargs.pop('data', {}))
        return super(TestProvisionNode, self).make_context(data=data, **kwargs)

    def test_provision(self):
        with self.make_context():
            msg = message.ConferenceMessage()
            utils.provision_node(self.conference, msg, self.node, self.user)
        assert_true(self.node.is_public)
        assert_in(self.conference.admins[0], self.node.contributors)
        assert_in('emailed', self.node.system_tags)
        assert_in(self.conference.endpoint, self.node.system_tags)
        assert_in(self.conference.endpoint, self.node.tags)
        assert_not_in('spam', self.node.system_tags)

    def test_provision_private(self):
        # When the conference doesn't auto-publish, the node stays private.
        self.conference.public_projects = False
        self.conference.save()
        with self.make_context():
            msg = message.ConferenceMessage()
            utils.provision_node(self.conference, msg, self.node, self.user)
        assert_false(self.node.is_public)
        assert_in(self.conference.admins[0], self.node.contributors)
        assert_in('emailed', self.node.system_tags)
        assert_not_in('spam', self.node.system_tags)

    def test_provision_spam(self):
        # A spam score above the threshold keeps the node private and adds
        # the 'spam' system tag.
        with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}):
            msg = message.ConferenceMessage()
            utils.provision_node(self.conference, msg, self.node, self.user)
        assert_false(self.node.is_public)
        assert_in(self.conference.admins[0], self.node.contributors)
        assert_in('emailed', self.node.system_tags)
        assert_in('spam', self.node.system_tags)

    @mock.patch('website.util.waterbutler_url_for')
    @mock.patch('website.conferences.utils.requests.put')
    def test_upload(self, mock_put, mock_get_url):
        # Attachment is PUT to the waterbutler upload URL for osfstorage.
        mock_get_url.return_value = 'http://queen.com/'
        self.attachment.filename = 'hammer-to-fall'
        self.attachment.content_type = 'application/json'
        utils.upload_attachment(self.user, self.node, self.attachment)
        mock_get_url.assert_called_with(
            'upload',
            'osfstorage',
            '/' + self.attachment.filename,
            self.node,
            user=self.user,
        )
        mock_put.assert_called_with(
            mock_get_url.return_value,
            data=self.content,
        )

    @mock.patch('website.util.waterbutler_url_for')
    @mock.patch('website.conferences.utils.requests.put')
    def test_upload_no_file_name(self, mock_put, mock_get_url):
        # Missing filenames fall back to settings.MISSING_FILE_NAME.
        mock_get_url.return_value = 'http://queen.com/'
        self.attachment.filename = ''
        self.attachment.content_type = 'application/json'
        utils.upload_attachment(self.user, self.node, self.attachment)
        mock_get_url.assert_called_with(
            'upload',
            'osfstorage',
            '/' + settings.MISSING_FILE_NAME,
            self.node,
            user=self.user,
        )
        mock_put.assert_called_with(
            mock_get_url.return_value,
            data=self.content,
        )
class TestMessage(ContextTestCase):
    """Unit tests for ConferenceMessage parsing of Mailgun POST payloads.

    Each test builds a fake request context (via ContextTestCase.make_context)
    and checks how ConferenceMessage extracts or validates one field.
    """

    def test_verify_signature_valid(self):
        # The default context carries a correctly signed payload.
        with self.make_context():
            msg = message.ConferenceMessage()
            msg.verify_signature()

    def test_verify_signature_invalid(self):
        with self.make_context(data={'signature': 'fake'}):
            self.app.app.preprocess_request()
            msg = message.ConferenceMessage()
            with assert_raises(message.ConferenceError):
                msg.verify_signature()

    def test_is_spam_false_missing_headers(self):
        # A passing spam score alone (other headers absent) is not spam.
        ctx = self.make_context(
            method='POST',
            data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert not msg.is_spam

    def test_is_spam_false_all_headers(self):
        # All three Mailgun headers present with passing values -> not spam.
        ctx = self.make_context(
            method='POST',
            data={
                'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1,
                'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0],
                'X-Mailgun-Spf': message.SPF_PASS_VALUES[0],
            },
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert not msg.is_spam

    def test_is_spam_true_sscore(self):
        # Spam score above the maximum marks the message as spam.
        ctx = self.make_context(
            method='POST',
            data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert msg.is_spam

    def test_is_spam_true_dkim(self):
        # Reversing a passing DKIM value yields an invalid one.
        ctx = self.make_context(
            method='POST',
            data={'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0][::-1]},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert msg.is_spam

    def test_is_spam_true_spf(self):
        # Reversing a passing SPF value yields an invalid one.
        ctx = self.make_context(
            method='POST',
            data={'X-Mailgun-Spf': message.SPF_PASS_VALUES[0][::-1]},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert msg.is_spam

    def test_subject(self):
        # The 'RE:' reply prefix is stripped from the subject.
        ctx = self.make_context(
            method='POST',
            data={'subject': 'RE: Hip Hopera'},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert_equal(msg.subject, 'Hip Hopera')

    def test_recipient(self):
        address = 'test-conference@osf.io'
        ctx = self.make_context(
            method='POST',
            data={'recipient': address},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert_equal(msg.recipient, address)

    def test_text(self):
        text = 'welcome to my nuclear family'
        ctx = self.make_context(
            method='POST',
            data={'stripped-text': text},
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert_equal(msg.text, text)

    def test_sender_name(self):
        # Pairs of (raw "from" header, expected parsed display name).
        names = [
            (' Fred', 'Fred'),
            (u'Me䬟', u'Me䬟'),
            (u'Fred <fred@queen.com>', u'Fred'),
            (u'"Fred" <fred@queen.com>', u'Fred'),
        ]
        for name in names:
            with self.make_context(data={'from': name[0]}):
                msg = message.ConferenceMessage()
                assert_equal(msg.sender_name, name[1])

    def test_route_invalid_pattern(self):
        # A recipient that does not match the conference address pattern.
        with self.make_context(data={'recipient': 'spam@osf.io'}):
            self.app.app.preprocess_request()
            msg = message.ConferenceMessage()
            with assert_raises(message.ConferenceError):
                msg.route

    def test_route_invalid_test(self):
        # The "test-" prefix must match DEV_MODE; here it is deliberately wrong.
        recipient = '{0}conf-talk@osf.io'.format('' if settings.DEV_MODE else 'test-')
        with self.make_context(data={'recipient': recipient}):
            self.app.app.preprocess_request()
            msg = message.ConferenceMessage()
            with assert_raises(message.ConferenceError):
                msg.route

    def test_route_valid(self):
        # Correctly prefixed recipient parses into conference name + category.
        recipient = '{0}conf-talk@osf.io'.format('test-' if settings.DEV_MODE else '')
        with self.make_context(data={'recipient': recipient}):
            self.app.app.preprocess_request()
            msg = message.ConferenceMessage()
            assert_equal(msg.conference_name, 'conf')
            assert_equal(msg.conference_category, 'talk')

    def test_attachments_count_zero(self):
        with self.make_context(data={'attachment-count': '0'}):
            msg = message.ConferenceMessage()
            assert_equal(msg.attachments, [])

    def test_attachments_count_one(self):
        content = 'slightly mad'
        sio = StringIO(content)
        ctx = self.make_context(
            method='POST',
            data={
                'attachment-count': 1,
                'attachment-1': (sio, 'attachment-1'),
            },
        )
        with ctx:
            msg = message.ConferenceMessage()
            assert_equal(len(msg.attachments), 1)
            assert_equal(msg.attachments[0].read(), content)
class TestConferenceEmailViews(OsfTestCase):
    """Tests for the meeting/conference web and API views."""

    def test_redirect_to_meetings_url(self):
        # The legacy /presentations/ URL should 302-redirect to /meetings/.
        url = '/presentations/'
        res = self.app.get(url)
        assert_equal(res.status_code, 302)
        res = res.follow()
        assert_equal(res.request.path, '/meetings/')

    def test_conference_plain_returns_200(self):
        conference = ConferenceFactory()
        url = web_url_for('conference_results__plain', meeting=conference.endpoint)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)

    def test_conference_data(self):
        conference = ConferenceFactory()
        # Create conference nodes
        n_conference_nodes = 3
        create_fake_conference_nodes(
            n_conference_nodes,
            conference.endpoint,
        )
        # Create a non-conference node
        ProjectFactory()
        url = api_url_for('conference_data', meeting=conference.endpoint)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        # Only the conference nodes should be returned.
        assert_equal(len(res.json), n_conference_nodes)

    def test_conference_data_url_upper(self):
        # Meeting lookup should be case-insensitive in the request URL.
        conference = ConferenceFactory()
        # Create conference nodes
        n_conference_nodes = 3
        create_fake_conference_nodes(
            n_conference_nodes,
            conference.endpoint,
        )
        # Create a non-conference node
        ProjectFactory()
        url = api_url_for('conference_data', meeting=conference.endpoint.upper())
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), n_conference_nodes)

    def test_conference_data_tag_upper(self):
        # Nodes tagged with an upper-cased endpoint should still be found.
        conference = ConferenceFactory()
        # Create conference nodes
        n_conference_nodes = 3
        create_fake_conference_nodes(
            n_conference_nodes,
            conference.endpoint.upper(),
        )
        # Create a non-conference node
        ProjectFactory()
        url = api_url_for('conference_data', meeting=conference.endpoint)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(len(res.json), n_conference_nodes)

    def test_conference_results(self):
        conference = ConferenceFactory()
        url = web_url_for('conference_results', meeting=conference.endpoint)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
class TestConferenceModel(OsfTestCase):
    """Validation rules on the Conference model."""

    def test_endpoint_and_name_are_required(self):
        # Saving without an endpoint or without a name must fail validation.
        with assert_raises(ValidationError):
            ConferenceFactory(endpoint=None, name=fake.company()).save()
        with assert_raises(ValidationError):
            ConferenceFactory(endpoint='spsp2014', name=None).save()
class TestConferenceIntegration(ContextTestCase):
    """End-to-end tests of the Mailgun 'meeting_hook' submission endpoint."""

    @mock.patch('website.conferences.views.send_mail')
    @mock.patch('website.conferences.utils.upload_attachments')
    def test_integration(self, mock_upload, mock_send_mail):
        """A signed poster submission creates a user and a node, uploads the
        attachment, and emails the sender absolute links."""
        fullname = 'John Deacon'
        username = 'deacon@queen.com'
        title = 'good songs'
        conference = ConferenceFactory()
        body = 'dragon on my back'
        content = 'dragon attack'
        recipient = '{0}{1}-poster@osf.io'.format(
            'test-' if settings.DEV_MODE else '',
            conference.endpoint,
        )
        self.app.post(
            api_url_for('meeting_hook'),
            {
                # Fix: 'X-Mailgun-Sscore' was listed twice in this dict
                # literal; duplicate keys silently collapse, so keep one.
                'X-Mailgun-Sscore': 0,
                'timestamp': '123',
                'token': 'secret',
                # Signature is HMAC-SHA256 over timestamp+token with the
                # Mailgun API key, matching Mailgun's webhook scheme.
                'signature': hmac.new(
                    key=settings.MAILGUN_API_KEY,
                    msg='{}{}'.format('123', 'secret'),
                    digestmod=hashlib.sha256,
                ).hexdigest(),
                'attachment-count': '1',
                'from': '{0} <{1}>'.format(fullname, username),
                'recipient': recipient,
                'subject': title,
                'stripped-text': body,
            },
            upload_files=[
                ('attachment-1', 'attachment-1', content),
            ],
        )
        assert_true(mock_upload.called)
        # Exactly one user and one node created from the submission.
        users = User.find(Q('username', 'eq', username))
        assert_equal(users.count(), 1)
        nodes = Node.find(Q('title', 'eq', title))
        assert_equal(nodes.count(), 1)
        node = nodes[0]
        # The email body becomes the node's home wiki content.
        assert_equal(node.get_wiki_page('home').content, body)
        assert_true(mock_send_mail.called)
        call_args, call_kwargs = mock_send_mail.call_args
        # All links in the notification email must be absolute URLs.
        assert_absolute(call_kwargs['conf_view_url'])
        assert_absolute(call_kwargs['set_password_url'])
        assert_absolute(call_kwargs['profile_url'])
        assert_absolute(call_kwargs['file_url'])
        assert_absolute(call_kwargs['node_url'])

    @mock.patch('website.conferences.views.send_mail')
    def test_integration_inactive(self, mock_send_mail):
        """Submissions to an inactive conference are rejected with 406 and the
        sender is notified that the conference is inactive."""
        conference = ConferenceFactory(active=False)
        fullname = 'John Deacon'
        username = 'deacon@queen.com'
        title = 'good songs'
        body = 'dragon on my back'
        recipient = '{0}{1}-poster@osf.io'.format(
            'test-' if settings.DEV_MODE else '',
            conference.endpoint,
        )
        res = self.app.post(
            api_url_for('meeting_hook'),
            {
                # Fix: duplicate 'X-Mailgun-Sscore' key removed (see above).
                'X-Mailgun-Sscore': 0,
                'timestamp': '123',
                'token': 'secret',
                'signature': hmac.new(
                    key=settings.MAILGUN_API_KEY,
                    msg='{}{}'.format('123', 'secret'),
                    digestmod=hashlib.sha256,
                ).hexdigest(),
                'attachment-count': '1',
                'from': '{0} <{1}>'.format(fullname, username),
                'recipient': recipient,
                'subject': title,
                'stripped-text': body,
            },
            expect_errors=True,
        )
        assert_equal(res.status_code, 406)
        call_args, call_kwargs = mock_send_mail.call_args
        assert_equal(call_args, (username, views.CONFERENCE_INACTIVE))
        assert_equal(call_kwargs['fullname'], fullname)
        assert_equal_urls(
            call_kwargs['presentations_url'],
            web_url_for('conference_view', _absolute=True),
        )
| 35.081272 | 102 | 0.613719 |
e9ff84d3e4c98752452231e7dcc6ae3c0580d38a | 393 | py | Python | Sources/Codable/Protocol/CodableTypes.py | Z-JaDe/AppExtension | 9a05c0dcbfe8e43aadc8d186a4d2a9d59d153447 | [
"MIT"
] | 9 | 2019-05-09T09:40:55.000Z | 2021-11-29T02:12:17.000Z | Sources/Codable/Protocol/CodableTypes.py | Z-JaDe/AppExtension | 9a05c0dcbfe8e43aadc8d186a4d2a9d59d153447 | [
"MIT"
] | null | null | null | Sources/Codable/Protocol/CodableTypes.py | Z-JaDe/AppExtension | 9a05c0dcbfe8e43aadc8d186a4d2a9d59d153447 | [
"MIT"
] | 1 | 2020-03-07T06:55:40.000Z | 2020-03-07T06:55:40.000Z |
# Value types that can be converted directly by the code generator
# (presumably consumed by a Swift Codable template — confirm with callers).
transform_types = [
    'Bool',
    'String',
    'Double', 'Float', 'CGFloat',
    'Int', 'Int8', 'Int16', 'Int32', 'Int64',
    'UInt', 'UInt8', 'UInt16', 'UInt32', 'UInt64',
]

# Literal kinds recognized by the generator.
literal_type = ['Integer', 'Float', 'String', 'Boolean']
| 26.2 | 64 | 0.328244 |
588f0890d19f43ed8568be4752a6b7c78c7db786 | 16,438 | py | Python | Bio/motifs/pfm.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | 1 | 2022-01-04T21:38:03.000Z | 2022-01-04T21:38:03.000Z | Bio/motifs/pfm.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | null | null | null | Bio/motifs/pfm.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | 1 | 2021-07-06T08:02:11.000Z | 2021-07-06T08:02:11.000Z | # Copyright 2015 by Gert Hulselmans. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Parse various position frequency matrix format files."""
import re
from Bio import motifs
class Record(list):
    """A list subclass holding the motifs parsed from one PFM file.

    Converting the record to a string renders each contained motif's
    string form on its own line.
    """

    def __str__(self):
        motif_strings = [str(motif) for motif in self]
        return "\n".join(motif_strings)
def read(handle, pfm_format):
    """Read motif(s) from a position frequency matrix file.

    Dispatches on *pfm_format* ("pfm-four-columns" or "pfm-four-rows",
    underscores accepted for backward compatibility) and returns a Record
    of the parsed PFM(s). Raises ValueError for an unknown format name.
    """
    normalized = pfm_format.lower().replace("_", "-")
    if normalized == "pfm-four-columns":
        return _read_pfm_four_columns(handle)
    if normalized == "pfm-four-rows":
        return _read_pfm_four_rows(handle)
    raise ValueError("Unknown Position Frequency matrix format '%s'" % pfm_format)
def _read_pfm_four_columns(handle):
    """Read motifs in position frequency matrix format (4 columns) from a file handle.

    # cisbp
    Pos A   C   G   T
    1   0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
    2   0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
    3   0.971153846153846   0.00961538461538462 0.00961538461538462 0.00961538461538462
    4   0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
    5   0.00961538461538462 0.971153846153846   0.00961538461538462 0.00961538461538462
    6   0.971153846153846   0.00961538461538462 0.00961538461538462 0.00961538461538462
    7   0.00961538461538462 0.971153846153846   0.00961538461538462 0.00961538461538462
    8   0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846

    # c2h2 zfs
    Gene    ENSG00000197372
    Pos A   C   G   T
    1   0.341303    0.132427    0.117054    0.409215
    2   0.283785    0.077066    0.364552    0.274597
    3   0.491055    0.078208    0.310520    0.120217
    4   0.492621    0.076117    0.131007    0.300256
    5   0.250645    0.361464    0.176504    0.211387
    6   0.276694    0.498070    0.197793    0.027444
    7   0.056317    0.014631    0.926202    0.002850
    8   0.004470    0.007769    0.983797    0.003964
    9   0.936213    0.058787    0.002387    0.002613
    10  0.004352    0.004030    0.002418    0.989200
    11  0.013277    0.008165    0.001991    0.976567
    12  0.968132    0.002263    0.002868    0.026737
    13  0.397623    0.052017    0.350783    0.199577
    14  0.000000    0.000000    1.000000    0.000000
    15  1.000000    0.000000    0.000000    0.000000
    16  0.000000    0.000000    1.000000    0.000000
    17  0.000000    0.000000    1.000000    0.000000
    18  1.000000    0.000000    0.000000    0.000000
    19  0.000000    1.000000    0.000000    0.000000
    20  1.000000    0.000000    0.000000    0.000000

    # c2h2 zfs
    Gene    FBgn0000210
    Motif   M1734_0.90
    Pos A   C   G   T
    1   0.25    0.0833333   0.0833333   0.583333
    2   0.75    0.166667    0.0833333   0
    3   0.833333    0   0   0.166667
    4   1   0   0   0
    5   0   0.833333    0.0833333   0.0833333
    6   0.333333    0   0   0.666667
    7   0.833333    0   0   0.166667
    8   0.5 0   0.333333    0.166667
    9   0.5 0.0833333   0.166667    0.25
    10  0.333333    0.25    0.166667    0.25
    11  0.166667    0.25    0.416667    0.166667

    # flyfactorsurvey (cluster buster)
    >AbdA_Cell_FBgn0000014
    1   3   0   14
    0   0   0   18
    16  0   0   2
    18  0   0   0
    1   0   0   17
    0   0   6   12
    15  1   2   0

    # homer
    >ATGACTCATC AP-1(bZIP)/ThioMac-PU.1-ChIP-Seq(GSE21512)/Homer    6.049537    -1.782996e+03   0   9805.3,5781.0,3085.1,2715.0,0.00e+00
    0.419   0.275   0.277   0.028
    0.001   0.001   0.001   0.997
    0.010   0.002   0.965   0.023
    0.984   0.003   0.001   0.012
    0.062   0.579   0.305   0.054
    0.026   0.001   0.001   0.972
    0.043   0.943   0.001   0.012
    0.980   0.005   0.001   0.014
    0.050   0.172   0.307   0.471
    0.149   0.444   0.211   0.195

    # hocomoco
    > AHR_si
    40.51343240527031  18.259112547756697  56.41253757072521  38.77363485291994
    10.877470982533044  11.870876719950774  34.66312982331297  96.54723985087516
    21.7165707818416  43.883079837598544  20.706746561638717  67.6523201955933
    2.5465132509466635  1.3171620263517245  145.8637051322628  4.231336967110781
    0.0  150.35847450464382  1.4927836298652875  2.1074592421627525
    3.441039751299748  0.7902972158110341  149.37613720253387  0.3512432070271259
    0.0  3.441039751299748  0.7024864140542533  149.81519121131782
    0.0  0.0  153.95871737667187  0.0
    43.07922333291745  66.87558226865211  16.159862546986584  27.844049228115868

    # neph
    UW.Motif.0001   atgactca
    0.772949    0.089579    0.098612    0.038860
    0.026652    0.004653    0.025056    0.943639
    0.017663    0.023344    0.918728    0.040264
    0.919596    0.025414    0.029759    0.025231
    0.060312    0.772259    0.104968    0.062462
    0.037406    0.020643    0.006667    0.935284
    0.047316    0.899024    0.026928    0.026732
    0.948639    0.019497    0.005737    0.026128

    # tiffin
    T   A   G   C
    30  0   28  40
    0   0   0   99
    0   55  14  29
    0   99  0   0
    20  78  0   0
    0   52  7   39
    19  46  11  22
    0   60  38  0
    0   33  0   66
    73  0   25  0
    99  0   0   0
    """
    record = Record()
    motif_name = None
    motif_nbr = 0
    motif_nbr_added = 0
    default_nucleotide_order = ["A", "C", "G", "T"]
    nucleotide_order = default_nucleotide_order
    nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
    for line in handle:
        line = line.strip()
        if line:
            columns = line.split()
            nbr_columns = len(columns)
            if line.startswith("#"):
                # Skip comment lines.
                continue
            elif line.startswith(">"):
                # Parse ">AbdA_Cell_FBgn0000014" and "> AHR_si" like lines and put the part after ">" as motif name.
                if motif_nbr != 0 and motif_nbr_added != motif_nbr:
                    # Add the previous motif to the record.
                    motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
                    motif.name = motif_name
                    record.append(motif)
                    motif_nbr_added = motif_nbr
                # Reinitialize variables for the new motif.
                motif_name = line[1:].strip()
                nucleotide_order = default_nucleotide_order
            elif columns[0] == "Gene":
                # Parse "Gene ENSG00000197372" like lines and put the gene name as motif name.
                if motif_nbr != 0 and motif_nbr_added != motif_nbr:
                    # Add the previous motif to the record.
                    motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
                    motif.name = motif_name
                    record.append(motif)
                    motif_nbr_added = motif_nbr
                # Reinitialize variables for the new motif.
                motif_name = columns[1]
                nucleotide_order = default_nucleotide_order
            elif columns[0] == "Motif":
                # Parse "Motif M1734_0.90" like lines.
                if motif_nbr != 0 and motif_nbr_added != motif_nbr:
                    # Add the previous motif to the record.
                    motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
                    motif.name = motif_name
                    record.append(motif)
                    motif_nbr_added = motif_nbr
                # Reinitialize variables for the new motif.
                motif_name = columns[1]
                nucleotide_order = default_nucleotide_order
            elif columns[0] == "Pos":
                # Parse "Pos A C G T" like lines and change nucleotide order if necessary.
                if nbr_columns == 5:
                    # If the previous line was not a "Gene ENSG00000197372" like line, a new motif starts here.
                    if motif_nbr != 0 and motif_nbr_added != motif_nbr:
                        # Add the previous motif to the record.
                        motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
                        motif.name = motif_name
                        record.append(motif)
                        motif_nbr_added = motif_nbr
                    nucleotide_order = default_nucleotide_order
                    if set(columns[1:]) == set(default_nucleotide_order):
                        nucleotide_order = columns[1:]
            elif columns[0] in default_nucleotide_order:
                # Parse "A C G T" like lines and change nucleotide order if necessary.
                if nbr_columns == 4:
                    nucleotide_order = default_nucleotide_order
                    if set(columns) == set(default_nucleotide_order):
                        nucleotide_order = columns
            else:
                # Parse matrix columns lines and use the correct nucleotide order.
                if nbr_columns == 4:
                    matrix_columns = columns
                elif nbr_columns == 5:
                    matrix_columns = columns[1:]
                else:
                    continue
                if motif_nbr == motif_nbr_added:
                    # A new motif matrix starts here, so reinitialize variables for the new motif.
                    nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
                    motif_nbr += 1
                # Append this position's count to each nucleotide's list
                # (was a list comprehension used only for its side effect).
                for nucleotide, nucleotide_count in zip(
                    nucleotide_order, matrix_columns
                ):
                    nucleotide_counts[nucleotide].append(float(nucleotide_count))
        else:
            # Empty lines can be separators between motifs.
            if motif_nbr != 0 and motif_nbr_added != motif_nbr:
                # Add the previous motif to the record.
                motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
                motif.name = motif_name
                record.append(motif)
                motif_nbr_added = motif_nbr
            # Reinitialize variables for the new motif.
            motif_name = None
            nucleotide_order = default_nucleotide_order
            # nucleotide_counts = {'A': [], 'C': [], 'G': [], 'T': []}
    # Flush the last motif if it has not been added yet.
    if motif_nbr != 0 and motif_nbr_added != motif_nbr:
        motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
        motif.name = motif_name
        record.append(motif)
    return record
def _read_pfm_four_rows(handle):
    """Read motifs in position frequency matrix format (4 rows) from a file handle.

    # hdpi
    A   0   5   6   5   1   0
    C   1   1   0   0   0   4
    G   5   0   0   0   3   0
    T   0   0   0   1   2   2

    # yetfasco
    A   0.5 0.0 0.0 0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.5 0.0 0.0833333334583333
    T   0.0 0.0 0.0 0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.0 0.0 0.0833333334583333
    G   0.0 1.0 0.0 0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.0 1.0 0.249999999875
    C   0.5 0.0 1.0 0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.25    0.5 0.0 0.583333333208333

    # flyfactorsurvey ZFP finger
    A | 92  106 231 135 0   1   780 28  0   700 739 94  60  127 130
    C | 138 82  129 81  774 1   3   1   0   6   17  49  193 122 148
    G | 270 398 54  164 7   659 1   750 755 65  1   41  202 234 205
    T | 290 204 375 411 9   127 6   11  36  20  31  605 335 307 308

    # scertf pcm
    A | 9 1 1 97 1 94
    T | 80 1 97 1 1 2
    C | 9 97 1 1 1 2
    G | 2 1 1 1 97 2

    # scertf pfm
    A | 0.090 0.010 0.010 0.970 0.010 0.940
    C | 0.090 0.970 0.010 0.010 0.010 0.020
    G | 0.020 0.010 0.010 0.010 0.970 0.020
    T | 0.800 0.010 0.970 0.010 0.010 0.020

    # idmmpmm
    > abd-A
    0.218451749734889 0.0230646871686108 0.656680805938494 0.898197242841994 0.040694591728526 0.132953340402969 0.74907211028632 0.628313891834571
    0.0896076352067868 0.317338282078473 0.321580063626723 0.0461293743372216 0.0502386002120891 0.040694591728526 0.0284994697773065 0.0339342523860021
    0.455991516436904 0.0691940615058324 0.0108695652173913 0.0217391304347826 0.0284994697773065 0.0284994697773065 0.016304347826087 0.160127253446448
    0.235949098621421 0.590402969247084 0.0108695652173913 0.0339342523860021 0.880567338282079 0.797852598091198 0.206124072110286 0.17762460233298

    # JASPAR
    >MA0001.1 AGL3
    A  [ 0  3 79 40 66 48 65 11 65  0 ]
    C  [94 75  4  3  1  2  5  2  3  3 ]
    G  [ 1  0  3  4  1  0  5  3 28 88 ]
    T  [ 2 19 11 50 29 47 22 81  1  6 ]

    or::

    >MA0001.1 AGL3
    0  3 79 40 66 48 65 11 65  0
    94 75  4  3  1  2  5  2  3  3
    1  0  3  4  1  0  5  3 28 88
    2 19 11 50 29 47 22 81  1  6
    """
    record = Record()
    name_pattern = re.compile(r"^>\s*(.+)\s*")
    row_pattern_with_nucleotide_letter = re.compile(
        r"\s*([ACGT])\s*[\[|]*\s*([0-9.\-eE\s]+)\s*\]*\s*"
    )
    row_pattern_without_nucleotide_letter = re.compile(r"\s*([0-9.\-eE\s]+)\s*")
    motif_name = None
    nucleotide_counts = {}
    row_count = 0
    # Row order assumed when rows carry no nucleotide letter.
    nucleotides = ["A", "C", "G", "T"]

    def _flush_motif():
        # Build a motif from the four accumulated rows, append it to the
        # record, and reset the per-motif state. (This logic used to be
        # duplicated verbatim in both parsing branches below.)
        nonlocal motif_name, nucleotide_counts, row_count
        motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
        if motif_name:
            motif.name = motif_name
        record.append(motif)
        motif_name = None
        nucleotide_counts = {}
        row_count = 0

    for line in handle:
        line = line.strip()
        name_match = name_pattern.match(line)
        row_match_with_nucleotide_letter = row_pattern_with_nucleotide_letter.match(
            line
        )
        row_match_without_nucleotide_letter = row_pattern_without_nucleotide_letter.match(
            line
        )
        if name_match:
            # ">MA0001.1 AGL3" like header: remember the motif name.
            motif_name = name_match.group(1)
        elif row_match_with_nucleotide_letter:
            # "A | 9 1 1 ..." or "A [ 0 3 ... ]" like row.
            (nucleotide, counts_str) = row_match_with_nucleotide_letter.group(1, 2)
            current_nucleotide_counts = counts_str.split()
            nucleotide_counts[nucleotide] = [
                float(current_nucleotide_count)
                for current_nucleotide_count in current_nucleotide_counts
            ]
            row_count += 1
            if row_count == 4:
                _flush_motif()
        elif row_match_without_nucleotide_letter:
            # Bare numeric row: assign counts in the fixed A, C, G, T order.
            current_nucleotide_counts = row_match_without_nucleotide_letter.group(
                1
            ).split()
            nucleotide_counts[nucleotides[row_count]] = [
                float(current_nucleotide_count)
                for current_nucleotide_count in current_nucleotide_counts
            ]
            row_count += 1
            if row_count == 4:
                _flush_motif()
    return record
def write(motifs):
    """Return the motifs serialized in Cluster Buster PFM format.

    Each motif becomes a ``>name`` header line followed by one
    tab-separated A/C/G/T count row per matrix position.
    """
    chunks = []
    for motif in motifs:
        chunks.append(">%s\n" % motif.name)
        base_columns = (motif.counts[base] for base in "ACGT")
        for position_counts in zip(*base_columns):
            row = "\t".join("%0.0f" % count for count in position_counts)
            chunks.append(row + "\n")
    return "".join(chunks)
| 39.705314 | 152 | 0.5685 |
fbe446e527025a589590a56276564868c5639218 | 4,458 | py | Python | options.py | Junlin-Yin/audio2video-v1 | 43a6a0ace3c27ed51e73e64c796ed38fa05e9f61 | [
"MIT"
] | 2 | 2020-05-24T21:16:19.000Z | 2020-05-24T21:16:21.000Z | options.py | Junlin-Yin/audio2video-v1 | 43a6a0ace3c27ed51e73e64c796ed38fa05e9f61 | [
"MIT"
] | null | null | null | options.py | Junlin-Yin/audio2video-v1 | 43a6a0ace3c27ed51e73e64c796ed38fa05e9f61 | [
"MIT"
] | null | null | null | from audio2video.__init__ import inp_dir, tar_dir
import argparse
import glob
import os
def init_parser():
    """Build the command-line argument parser for the audio2video pipeline.

    Fixes the classic ``type=bool`` argparse bug: ``bool('False')`` is True,
    so flags such as ``--train False`` were silently parsed as True. Boolean
    options now go through a string-to-bool converter; passing ``True`` /
    ``False`` on the command line keeps working and now parses correctly.
    """
    def _str2bool(value):
        # argparse calls type() with the raw string; map common spellings.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser()
    parser.add_argument('--steps',      type=int, nargs='+',    help='what steps you want to run (0-4)')
    parser.add_argument('--inp_id',     type=str, default=None, help='input audio file name')
    parser.add_argument('--tar_id',     type=str, default=None, help='target video file name')
    parser.add_argument('--pass_id',    type=str, default=None, help='LSTM pass (run) id')

    parser.add_argument('--nthreads',   type=int, default=10,   help='number of threads when preprocessing')

    parser.add_argument('--train',      type=_str2bool, default=True,  help='need to train the network or not')
    parser.add_argument('--predict',    type=_str2bool, default=True,  help='need to predict (infer) sparse mouth shape or not')
    parser.add_argument('--outp_norm',  type=_str2bool, default=False, help='normalize sparse mouth shape or not')
    parser.add_argument('--preprocess', type=_str2bool, default=False, help='need to re-preprocess data or not')
    parser.add_argument('--vr',         type=float, default=0.2,  help='validation data ratio')
    parser.add_argument('--step_delay', type=int,   default=20,   help='delay step of LSTM')
    parser.add_argument('--dim_hidden', type=int,   default=60,   help='dimension of hidden state')
    parser.add_argument('--nlayers',    type=int,   default=1,    help='number of layers of LSTM')
    parser.add_argument('--keep_prob',  type=float, default=0.8,  help='keep probability')
    parser.add_argument('--seq_len',    type=int,   default=100,  help='length of every training sequence')
    parser.add_argument('--batch_size', type=int,   default=100,  help='training batch size')
    parser.add_argument('--nepochs',    type=int,   default=300,  help='number of epochs')
    parser.add_argument('--grad_clip',  type=int,   default=10,   help='parameter to clip the gradients')
    parser.add_argument('--lr',         type=float, default=1e-3, help='learning rate')
    parser.add_argument('--dr',         type=float, default=0.99, help='decay rate of learning rate')
    parser.add_argument('--b_savef',    type=int,   default=50,   help='batch report save frequency')
    parser.add_argument('--e_savef',    type=int,   default=5,    help='checkpoint save frequency')
    parser.add_argument('--argspath',   type=str,   default=None, help='user-specified args file path')
    parser.add_argument('--showGraph',  type=_str2bool, default=False, help='generate tensorboard report or not')

    parser.add_argument('--lineU',      type=int, default=-1,   help='height of the line seperating upper & lower teeth in upper teeth proxy')
    parser.add_argument('--lineL',      type=int, default=-1,   help='height of the line seperating upper & lower teeth in lower teeth proxy')

    parser.add_argument('--retiming',   type=_str2bool, default=True, help='use the target video after re-timing or the original one')
    return parser
def check_input(opt):
    """Ensure an input audio id was given and its mp3 file exists."""
    assert opt.inp_id is not None
    mp3_path = '%s/mp3/%s.mp3' % (inp_dir, opt.inp_id)
    exists = os.path.exists(mp3_path)
    assert exists is True
def check_target(opt):
    """Ensure a target video id was given and its mp4 file exists."""
    assert opt.tar_id is not None
    mp4_path = '%s/mp4/%s.mp4' % (tar_dir, opt.tar_id)
    exists = os.path.exists(mp4_path)
    assert exists is True
def check_proxy(opt):
    """Ensure the teeth split lines are set and exactly one proxy image
    exists for each of the lower and upper jaws."""
    assert opt.lineU > 0
    assert opt.lineL > 0
    proxy_files = {}
    for jaw in ('lower', 'upper'):
        pattern = '%s/proxy/%s_%s*.png' % (tar_dir, opt.tar_id, jaw)
        proxy_files[jaw] = glob.glob(pattern)
    assert len(proxy_files['lower']) == 1 and len(proxy_files['upper']) == 1
def parse(parser):
    """Parse command-line options and validate per-step prerequisites."""
    opt = parser.parse_args()
    opt.steps = sorted(opt.steps)
    # Every requested step must lie in the pipeline range 0-4.
    assert min(opt.steps) >= 0 and max(opt.steps) <= 4
    if 0 in opt.steps:
        # Preprocessing needs worker threads.
        assert opt.nthreads > 0
    if 1 in opt.steps:
        # LSTM training/inference needs a pass id.
        assert opt.pass_id is not None
        if opt.predict is True:
            check_input(opt)
    if 2 in opt.steps:
        assert opt.pass_id is not None
        check_input(opt)
        check_target(opt)
        check_proxy(opt)
    # Steps 3 and 4 both require the input audio and the target video.
    for rendering_step in (3, 4):
        if rendering_step in opt.steps:
            check_input(opt)
            check_target(opt)
    return opt
def ret_opt():
    """Build the argument parser and return the validated options."""
    parser = init_parser()
    return parse(parser)
| 50.089888 | 147 | 0.637506 |
02e4d33565a7c9c14f326b4d50d44ba6fabf6462 | 9,430 | py | Python | BWSI_Sensor.py | armedturret/print-oops-AUV | 6810ea0a3eb48ce437026851560b932006e1670e | [
"MIT"
] | 1 | 2021-10-02T20:15:31.000Z | 2021-10-02T20:15:31.000Z | BWSI_Sensor.py | eshikohli/Autnomous-Underwater-Vehicles | 8ddc18b5705a391173f97ba11f8a70de683564d4 | [
"MIT"
] | null | null | null | BWSI_Sensor.py | eshikohli/Autnomous-Underwater-Vehicles | 8ddc18b5705a391173f97ba11f8a70de683564d4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 19:49:45 2021
@author: BWSI AUV Challenge Instructional Staff
"""
import sys
import numpy as np
import cv2
import BWSI_BuoyField
import matplotlib.pyplot as plt
class BWSI_Camera(object):
def __init__(self, max_angle=90, visibility=100):
self.__MAX_RANGE = visibility # maximum range camera can see
self.__MAX_ANGLE = max_angle # field of view of camera (+/- MAX_ANGLE degrees)
self.__SENSOR_TYPE = 'ANGLE'
# Parameters relevant for simulating camera images
self.__Wpix = 640
self.__Hpix = 480
self.__max_angle_W = 31.1
self.__max_angle_H = 24.4
# calculate the horizontal and vertical angles to the pixels
deg_per_pix_W = 2*self.__max_angle_W / self.__Wpix
deg_per_pix_H = 2*self.__max_angle_H / self.__Hpix
self.__angles_W = (np.arange(0, self.__Wpix) - (self.__Wpix-1)/2) * deg_per_pix_W
self.__angles_H = np.flip((np.arange(0, self.__Hpix) - (self.__Hpix-1)/2) * deg_per_pix_H)
self.__image_mat = np.zeros((self.__Hpix, self.__Wpix, 3), dtype=np.uint8)
background = (1, 125, 224) # measured in the pool
#background = (184, 233, 238) # blue-green
# blue-green background base image
self.__image_mat[:,:,0].fill(background[0])
self.__image_mat[:,:,1].fill(background[1])
self.__image_mat[:,:,2].fill(background[2])
self.image_snap = None
self.__buoy_image_green = None
self.__buoy_image_red = None
def get_visible_buoys(self, pos, hdg, buoy_field):
angle_left = np.mod(hdg-self.__MAX_ANGLE+360, 360)
angle_right = np.mod(hdg+self.__MAX_ANGLE, 360)
G, R = buoy_field.detectable_buoys(pos,
self.__MAX_RANGE,
angle_left,
angle_right,
self.__SENSOR_TYPE)
for i in range(len(G)):
G[i] = np.mod(G[i] - hdg + 360, 360)
if G[i]>self.__MAX_ANGLE:
G[i] = G[i] - 360.0
if G[i]<-self.__MAX_ANGLE:
G[i] = G[i] + 360.0
for i in range(len(R)):
R[i] = np.mod(R[i] - hdg + 360, 360)
if R[i]>self.__MAX_ANGLE:
R[i] = R[i] - 360.0
if R[i]<-self.__MAX_ANGLE:
R[i] = R[i] + 360.0
return G, R
def get_frame(self, pos, hdg, buoy_field):
G, R = self.get_visible_buoys_with_range(pos, hdg, buoy_field)
print(f"{len(G)}, {len(R)}")
image_snap = self.__image_mat.copy()
for g in G:
buoy_range, true_heading = g
relative_heading = true_heading - hdg
# JRE: hard-coding 1-m depth separation for now!
elev = np.degrees(np.tan(1/buoy_range))
# find the region of the image that this buoy spans
# print(f"buoy_range = {buoy_range}")
image_snap = self.add_buoy_image_to_image(image_snap, buoy_range, relative_heading, elev, 'green')
for r in R:
buoy_range, true_heading = r
relative_heading = true_heading-hdg
# JRE: hard-coding 1-m depth separation for now!
elev = np.degrees(np.tan(1/buoy_range))
# find the region of the image that this buoy spans
# print(f"buoy_range = {buoy_range}")
image_snap = self.add_buoy_image_to_image(image_snap, buoy_range, relative_heading, elev, 'red')
image_snap = image_snap + np.random.normal(0, 20, (self.__Hpix, self.__Wpix, 3)).astype(int)
#image_snap[:,:,0] = image_snap[:,:,0] + np.random.normal(0, np.sqrt(3.6), (self.__Hpix, self.__Wpix)).astype(int)
#image_snap[:,:,1] = image_snap[:,:,1] + np.random.normal(0, np.sqrt(1247.9), (self.__Hpix, self.__Wpix)).astype(int)
#image_snap[:,:,2] = image_snap[:,:,2] + np.random.normal(0, np.sqrt(137.0), (self.__Hpix, self.__Wpix)).astype(int)
image_snap[image_snap>255] = 255
image_snap[image_snap<0] = 0
# make it BGR since we're working with cv2
image_snap = np.flip(image_snap, axis=2)
return image_snap
# take an image of a buoy an add it to the simulated background
def add_buoy_image_to_image(self, image_snap, R, hdg, elev, color, buoy_length=0.28):
if color.lower() == 'red':
if self.__buoy_image_red is None:
self.__buoy_image_red = np.flip(cv2.imread('data/red_buoy_pool_img.jpg'), axis=2)
img = self.__buoy_image_red
elif color.lower() == 'green':
if self.__buoy_image_green is None:
self.__buoy_image_green = np.flip(cv2.imread('data/green_buoy_pool_img.jpg'), axis=2)
img = self.__buoy_image_green
else:
print(f"Unknown color: {color}")
sys.exit()
H = R * np.tan(np.radians(elev))
center_y = np.degrees(np.arctan( H/R ) )
center_pix_y = np.where(np.abs(self.__angles_H-center_y) == np.min(np.abs(self.__angles_H-center_y)) )
# image size is 14 cm across
H = R * np.tan(np.radians(hdg))
max_x = np.degrees(np.arctan( (H + buoy_length/2)/R ) )
min_x = np.degrees(np.arctan( (H - buoy_length/2)/R ) )
# find the pixels that fit here
xrng = np.where(np.logical_and(self.__angles_W<=max_x, self.__angles_W>=min_x))
pix_y, pix_x, nchan = img.shape
pixout_x = xrng[0][-1] - xrng[0][0] + 1
mult = pixout_x / pix_x
pixout_y = int(mult * pix_y)
yoffset = int((pixout_y+1)/2) - 1
if pixout_x>0 and pixout_y>0:
#print(f"req: {pixout_x}, {pixout_y}")
img_scaled = cv2.resize(img, (pixout_x, pixout_y))
#print(f"got {img_scaled.shape}")
# should never be > 1, but just in case...
frac = np.min((R/self.__MAX_RANGE, 1))
for ycnt in range(pixout_y):
#print(f"cpy = {center_pix_y[0]}")
y = ycnt + center_pix_y[0][0] - yoffset
if y>=0 and y<self.__Hpix:
for x in xrng[0]:
xcnt = x - xrng[0][0]
#print(f"{ycnt}, {xcnt} -> {y}, {x}")
image_snap[y, x, :] = (frac*image_snap[y,x,:] + (1-frac)*img_scaled[ycnt, xcnt, :]).astype(np.uint8)
return image_snap
def add_buoy_to_image(self, image_snap, R, hdg, elev, color, buoy_size=0.25):
    """Paint a flat-colored square marker for a buoy onto the simulated frame.

    R: range to the buoy; hdg/elev: bearing and elevation relative to the
    camera axis, in degrees. The marker color is faded toward the background
    (sampled at pixel [0, 0]) in proportion to R/MAX_RANGE. Returns the
    modified image_snap array. Exits the process on an unknown color.
    """
    palette = {
        'red': np.array([220, 30, 45], dtype=np.uint8),
        'green': np.array([30, 220, 45], dtype=np.uint8),
    }
    key = color.lower()
    if key not in palette:
        print(f"Unknown color: {color}")
        sys.exit()
    buoy_color = palette[key]
    # Rows covered by the buoy: elevation extent of +/- buoy_size/2 at range R.
    vert_offset = R * np.tan(np.radians(elev))
    top_angle = np.degrees(np.arctan((vert_offset + buoy_size / 2) / R))
    bot_angle = np.degrees(np.arctan((vert_offset - buoy_size / 2) / R))
    rows = np.where(np.logical_and(self.__angles_H <= top_angle, self.__angles_H >= bot_angle))
    # Columns covered by the buoy: same construction along the heading axis.
    horiz_offset = R * np.tan(np.radians(hdg))
    right_angle = np.degrees(np.arctan((horiz_offset + buoy_size / 2) / R))
    left_angle = np.degrees(np.arctan((horiz_offset - buoy_size / 2) / R))
    cols = np.where(np.logical_and(self.__angles_W <= right_angle, self.__angles_W >= left_angle))
    # Distance fade: should never exceed 1, but clamp just in case.
    frac = np.min((R / self.__MAX_RANGE, 1))
    faded_color = (frac * image_snap[0, 0, :] + (1 - frac) * buoy_color).astype(np.uint8)
    for row in rows[0]:
        for col in cols[0]:
            image_snap[row, col, :] = faded_color
    return image_snap
def get_visible_buoys_with_range(self, pos, hdg, buoy_field):
    """Return (green, red) buoys of buoy_field detectable from pos.

    The field of view spans hdg +/- MAX_ANGLE degrees, with both edges
    normalized to compass range [0, 360); detection uses the sensor's
    maximum range and the 'RANGE_ANGLE' sensor type.
    """
    half_fov = self.__MAX_ANGLE
    left_edge = np.mod(hdg - half_fov + 360, 360)
    right_edge = np.mod(hdg + half_fov, 360)
    return buoy_field.detectable_buoys(pos,
                                       self.__MAX_RANGE,
                                       left_edge,
                                       right_edge,
                                       'RANGE_ANGLE')
class BWSI_Laser(object):
    """Simulated laser sensor reporting buoys within range and field of view."""

    def __init__(self, visibility):
        self.__MAX_RANGE = visibility   # maximum range laser can see
        self.__MAX_ANGLE = 85.0         # field of view of laser (+/- MAX_ANGLE degrees)
        self.__SENSOR_TYPE = 'RANGE_ANGLE'

    def get_visible_buoys(self, pos, hdg, buoy_field):
        """Return (green, red) buoys of buoy_field detectable from pos.

        hdg is in degrees; the field of view spans hdg +/- MAX_ANGLE, with
        both edges normalized to compass range [0, 360).
        """
        angle_left = np.mod(hdg - self.__MAX_ANGLE + 360, 360)
        angle_right = np.mod(hdg + self.__MAX_ANGLE, 360)
        G, R = buoy_field.detectable_buoys(pos,
                                           self.__MAX_RANGE,
                                           angle_left,
                                           angle_right,
                                           self.__SENSOR_TYPE)
        # NOTE: original final line carried fused dataset-statistics text
        # ("| 40.472103 | 125 | 0.53404 |") that broke the syntax; removed.
        return G, R
70e9c4065b3099a6acf8d68120e4330ca33e908e | 2,125 | py | Python | mstools/builder/utils.py | Xiangyan93/mstools | 7143dbfc2eb4e82e6631652a0c1b38a793dcc678 | [
"MIT"
] | null | null | null | mstools/builder/utils.py | Xiangyan93/mstools | 7143dbfc2eb4e82e6631652a0c1b38a793dcc678 | [
"MIT"
] | null | null | null | mstools/builder/utils.py | Xiangyan93/mstools | 7143dbfc2eb4e82e6631652a0c1b38a793dcc678 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Dict, Iterator, List, Optional, Union, Literal, Tuple
import os
import shutil
from .packmol import Packmol
from ..molecule import Molecule
def build_cubic_box(packmol_exe: str, work_dir: str, file_type: Literal['pdb', 'xyz'],
                    smiles_list: List[str], n_mol_list: List[int], density: float,
                    save_tmp_file: bool = False):
    """Build a cubic simulation box filled with the requested molecules.

    Parameters
    ----------
    packmol_exe: str
        Executable file of packmol.
    work_dir: str
        Scratch directory used for the intermediate per-molecule files.
    file_type: Literal['pdb', 'xyz']
        Output file type.
    smiles_list: List[str]
        A list of SMILES strings, one per molecular species.
    n_mol_list: List[int]
        How many copies of each species to pack; parallel to smiles_list.
    density: float
        Target density of the box (presumably g/cm^3 -- confirm against callers).
    save_tmp_file: bool
        If True, keep work_dir and its intermediate files after packing.

    Returns
    -------
    None. The packed box is written to 'packmol.<file_type>' in the caller's
    current directory.
    """
    if not os.path.exists(work_dir):
        os.mkdir(work_dir)
    cwd = os.getcwd()
    os.chdir(work_dir)
    try:
        pdb_files = []
        n_components = len(smiles_list)
        molwt_list = []  # molecular weight of each species
        # Generate a 3D-structure file for each single molecule.
        for i, smiles in enumerate(smiles_list):
            pdb = 'mol-%i.%s' % (i, file_type)
            mol3d = Molecule(smiles)
            mol3d.write(pdb, filetype=file_type)
            pdb_files.append(pdb)
            molwt_list.append(mol3d.molwt)
        # Box edge from total mass and target density.
        # 10 / 6.022 is 1e24 / Avogadro: converts g/mol and g/cm^3 to A^3.
        mass = sum(molwt_list[i] * n_mol_list[i] for i in range(n_components))
        vol = 10 / 6.022 * mass / density
        length = vol ** (1 / 3)
        box_size = (length, length, length)
        Packmol(exe=packmol_exe).build_box(
            pdb_files=pdb_files,
            n_mol_list=n_mol_list,
            output_file='packmol.%s' % file_type,
            box_size=box_size,
            silent=True)
    finally:
        # Always restore the caller's working directory, even if packing fails.
        os.chdir(cwd)
    shutil.copy(os.path.join(work_dir, 'packmol.%s' % file_type), cwd)
    if not save_tmp_file:
        shutil.rmtree(work_dir)
| 32.19697 | 86 | 0.628235 |
5e235040f470362ee5cc238932ab4738a6e1f7c6 | 392 | py | Python | api/views.py | miladgharibi/pollarita | b5d8154652d2b26a28f05b1745cd13e383cec40e | [
"MIT"
] | 5 | 2022-02-04T19:23:26.000Z | 2022-02-26T10:15:25.000Z | api/views.py | miladgharibi/pollarita | b5d8154652d2b26a28f05b1745cd13e383cec40e | [
"MIT"
] | null | null | null | api/views.py | miladgharibi/pollarita | b5d8154652d2b26a28f05b1745cd13e383cec40e | [
"MIT"
] | null | null | null | from polls.models import Poll
from rest_framework import viewsets
from django.contrib.auth.models import User
from api import serializers
class UserViewset(viewsets.ModelViewSet):
    """Full CRUD API endpoints (list/retrieve/create/update/delete) for User accounts."""
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer
class PollViewset(viewsets.ModelViewSet):
    """Full CRUD API endpoints (list/retrieve/create/update/delete) for Poll objects."""
    # NOTE: original final line carried fused dataset-statistics text
    # ("| 30.153846 | 49 | 0.806122 |") that broke the syntax; removed.
    queryset = Poll.objects.all()
    serializer_class = serializers.PollSerializer
b44967c682fe3a87a7a7423096bb421a9279c1ec | 6,156 | py | Python | src/DRSlib/debug.py | DavidRodriguezSoaresCUI/DRSlib | 34058b907df2eeea2c8b0fa0a746389930d22fe6 | [
"CC0-1.0"
] | null | null | null | src/DRSlib/debug.py | DavidRodriguezSoaresCUI/DRSlib | 34058b907df2eeea2c8b0fa0a746389930d22fe6 | [
"CC0-1.0"
] | null | null | null | src/DRSlib/debug.py | DavidRodriguezSoaresCUI/DRSlib | 34058b907df2eeea2c8b0fa0a746389930d22fe6 | [
"CC0-1.0"
] | null | null | null | # module-level docstring
__doc__='''
Debugging utils
===============
Debugging is made easier with these convenience functions.
'''
from typing import Callable, Any, Union
import functools
import re
import shutil
import traceback
# Dunder attributes considered informative when inspecting a function object.
FUNC_RELEVANT_ATTR = {
    '__annotations__', '__class__', '__defaults__',
    '__dict__', '__doc__', '__kwdefaults__',
    '__module__', '__name__', '__qualname__',
}
def func_attribute_printout( user_funtion: Callable ) -> None:
    ''' Prints relevant attributes of `user_funtion`, with attribute name, value and type.
    Relevant attributes are the ones listed in `FUNC_RELEVANT_ATTR`.

    Note: the parameter name `user_funtion` (sic) is kept unchanged for
    backward compatibility with keyword callers.

    Usage example: reviewing an unfamiliar imported function::

        from some.module import foo
        func_attribute_printout( foo )

    Output::

        foo.__annotations__ = ..., type=<class 'dict'>
        [...]
        foo.__qualname__ = foo, type=<class 'str'>
    '''
    f_name = user_funtion.__name__
    for attr in dir(user_funtion):
        if attr in FUNC_RELEVANT_ATTR:
            # getattr is the idiomatic spelling of a __getattribute__ lookup
            attr_val = getattr(user_funtion, attr)
            print(f"{f_name}.{attr} = {attr_val}, type={type(attr_val)}")
# Matches the argument list of a call such as "debug_var( x, ... )" or
# "debug_var( var=x )", capturing the first argument's source text (the
# variable name) in group 1; the optional "var=" keyword prefix is skipped.
regex_var_name_from_call = re.compile(r'\(\s*(?:var\s*=\s*)?([^,\ ]+).*\).*$')
def debug_var( var: Any, var_name: Union[str, None] = None ) -> None:
    ''' Prints the name, value and type of a variable; typically used during
    development for quick and easy type/value sanity checks.

    Warning: if debug_var is decorated, automatic variable name retrieval
    doesn't work! You will need to specify var_name.

    Automatic variable name retrieval: when `var_name` is not given, the call
    site's source line is recovered with the `traceback` module and the first
    argument's text is extracted via `regex_var_name_from_call` (inspired by
    https://stackoverflow.com/questions/2749796). If extraction fails, an
    explanatory placeholder is printed instead of crashing.

    Usage example: adding a sanity check on a received value::

        res = do_something()
        debug_var( res )

    Output::

        DEBUG VAR: res=[1999, 2011] (<class 'list'>)
    '''
    if var_name is None:
        # Recover the caller's source line, e.g. "debug_var( res )".
        raw_call_traceback = traceback.extract_stack( limit=2 )[0]
        call_code_line = raw_call_traceback.line
        # Extract the first argument's text; guard against a failed match
        # (the original crashed with AttributeError on None.groups()).
        match = regex_var_name_from_call.search(call_code_line) if call_code_line else None
        var_name = match.groups()[0] if match else None
        if not var_name:
            var_name = "error:Couldn't determine variable name automatically. Please try to pass variable name as argument."
    print( f"DEBUG VAR: {var_name}={var} ({type(var)})")
def call_progress( expected_argument: Union[Callable,str] ) -> Callable:
    ''' Decorator that announces when the wrapped function starts and finishes.

    The decorated function's execution is enclosed in a text-based box, with
    a message printed before and after the call; typically used to make the
    execution status of a function verbose when developing.

    `expected_argument`: either the function to decorate (bare `@call_progress`
    usage) or an optional string replacing the default message
    (`user_function.__name__`).

    Usage example: decorating debug_var (see warning in debug_var's docstring)::

        decorated_debug_var = call_progress("debug_var debugs a variable")(debug_var)
        hello='world'
        decorated_debug_var( var=hello, var_name='hello' )

    Output::

        ----------------------------------------
        debug_var debugs a variable ..
        DEBUG VAR: hello=world (<class 'str'>)
        debug_var debugs a variable done
        ----------------------------------------

    Raises ValueError when called with something that is neither a callable
    nor a str.
    '''
    def actual_decorator( user_function: Callable ) -> Callable:
        # Wrap `user_function` so each call prints the enclosing box.
        @functools.wraps( user_function )
        def wrapper( *args, **kwargs ) -> Any:
            # Box width adapts to the terminal, capped at 40 columns.
            # (Dropped the original's `nonlocal user_function`: wrapper never
            # rebinds it, so the declaration was dead code.)
            box_W = min( shutil.get_terminal_size().columns, 40 )
            print( '-' * box_W )
            print( printout_text + ' ..' )
            res = user_function( *args, **kwargs )
            print( printout_text + ' done' )
            print( '-' * box_W )
            return res
        return wrapper

    if callable( expected_argument ):
        # Bare usage (@call_progress with no argument): decorate directly,
        # using the function's own name as the message.
        user_function = expected_argument
        printout_text = user_function.__name__
        return actual_decorator(user_function)
    if isinstance(expected_argument, str):
        # Parameterized usage: @call_progress("custom message")
        printout_text = expected_argument
        return actual_decorator
    raise ValueError(f"call_progress: `expected_argument` of type {type(expected_argument)} is not in Union[Callable,str] !")
| 36.426036 | 126 | 0.632716 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.