hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79016d568d1d2d7d5bfde9336a443d48a6be49e7 | 1,683 | py | Python | configs/positional_encoding_in_gans/mspie-stylegan2_c2_config-d_ffhq_256-512_b3x8_1100k.py | plutoyuxie/mmgeneration | 0a7f5d16c970de1766ebf049d7a0264fe506504b | [
"Apache-2.0"
] | 718 | 2021-04-15T11:26:20.000Z | 2022-03-31T03:11:56.000Z | configs/positional_encoding_in_gans/mspie-stylegan2_c2_config-d_ffhq_256-512_b3x8_1100k.py | plutoyuxie/mmgeneration | 0a7f5d16c970de1766ebf049d7a0264fe506504b | [
"Apache-2.0"
] | 191 | 2021-04-15T12:13:34.000Z | 2022-03-31T16:04:36.000Z | configs/positional_encoding_in_gans/mspie-stylegan2_c2_config-d_ffhq_256-512_b3x8_1100k.py | plutoyuxie/mmgeneration | 0a7f5d16c970de1766ebf049d7a0264fe506504b | [
"Apache-2.0"
] | 107 | 2021-04-15T12:38:41.000Z | 2022-03-27T02:47:16.000Z | _base_ = [
    '../_base_/datasets/ffhq_flip.py',
    '../_base_/models/stylegan/stylegan2_base.py',
    '../_base_/default_runtime.py'
]
# MS-PIE StyleGAN2: generator uses a CSG positional encoding at the 4x4 head,
# replaces deconvolutions with upsample+conv (bilinear), and outputs 256px;
# the discriminator pools adaptively so it can score multiple input scales.
model = dict(
    type='MSPIEStyleGAN2',
    generator=dict(
        type='MSStyleGANv2Generator',
        head_pos_encoding=dict(type='CSG'),
        deconv2conv=True,
        up_after_conv=True,
        head_pos_size=(4, 4),
        up_config=dict(scale_factor=2, mode='bilinear', align_corners=True),
        out_size=256),
    discriminator=dict(
        type='MSStyleGAN2Discriminator', in_size=256, with_adaptive_pool=True))
# Multi-scale training: input scales 0/2/4 are sampled with
# probability 0.5/0.25/0.25 respectively.
train_cfg = dict(
    num_upblocks=6,
    multi_input_scales=[0, 2, 4],
    multi_scale_probability=[0.5, 0.25, 0.25])
data = dict(
    samples_per_gpu=3,
    train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_512')))
# Feeds the EMA momentum formula below -- presumably a half-life in
# thousands of images/iterations (TODO confirm against the EMA hook docs).
ema_half_life = 10.
custom_hooks = [
    # Periodically dump generated samples for visual inspection.
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    # Maintain an exponential moving average copy of the generator weights.
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=1,
        interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
        priority='VERY_HIGH')
]
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=40)
lr_config = None
log_config = dict(
    interval=100,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
cudnn_benchmark = False
total_iters = 1100002
# Evaluation: FID over 50k samples against a precomputed Inception pickle,
# plus precision/recall (k=3) over 10k samples.
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
        bgr2rgb=True),
    pr10k3=dict(type='PR', num_images=10000, k=3))
| 26.714286 | 79 | 0.649436 | _base_ = [
'../_base_/datasets/ffhq_flip.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(
type='MSPIEStyleGAN2',
generator=dict(
type='MSStyleGANv2Generator',
head_pos_encoding=dict(type='CSG'),
deconv2conv=True,
up_after_conv=True,
head_pos_size=(4, 4),
up_config=dict(scale_factor=2, mode='bilinear', align_corners=True),
out_size=256),
discriminator=dict(
type='MSStyleGAN2Discriminator', in_size=256, with_adaptive_pool=True))
train_cfg = dict(
num_upblocks=6,
multi_input_scales=[0, 2, 4],
multi_scale_probability=[0.5, 0.25, 0.25])
data = dict(
samples_per_gpu=3,
train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_512')))
ema_half_life = 10.
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=40)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
])
cudnn_benchmark = False
total_iters = 1100002
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
bgr2rgb=True),
pr10k3=dict(type='PR', num_images=10000, k=3))
| true | true |
79016e655840dcf25f5b90b8bffc280f87a56c79 | 4,446 | py | Python | pandas_ta/overlap/hilo.py | MyBourse/pandas-ta | 5998e92e39b71cd79a6e75d7c599492181af5f65 | [
"MIT"
] | 2 | 2021-03-30T01:23:14.000Z | 2021-04-02T18:04:51.000Z | pandas_ta/overlap/hilo.py | lukaszbinden/pandas-ta | 98478f8bf049a4c8748d6f3c795f4f335ced05ca | [
"MIT"
] | null | null | null | pandas_ta/overlap/hilo.py | lukaszbinden/pandas-ta | 98478f8bf049a4c8748d6f3c795f4f335ced05ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from numpy import NaN as npNaN
from pandas import DataFrame, Series
# from pandas_ta.overlap.ma import ma
from .ma import ma
from pandas_ta.utils import get_offset, verify_series
def hilo(high, low, close, high_length=None, low_length=None, mamode=None, offset=None, **kwargs):
    """Indicator: Gann HiLo (HiLo).

    Tracks a moving average of the highs and one of the lows; the close of
    each bar decides which curve is plotted.  Returns a DataFrame with the
    combined line plus the long-only and short-only components.
    """
    # --- validate arguments -------------------------------------------------
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    high_length = int(high_length) if high_length and high_length > 0 else 13
    low_length = int(low_length) if low_length and low_length > 0 else 21
    mamode = mamode.lower() if isinstance(mamode, str) else "sma"
    offset = get_offset(offset)

    # --- calculate ----------------------------------------------------------
    index = close.index
    line = Series(npNaN, index=index)
    long_line = Series(npNaN, index=index)
    short_line = Series(npNaN, index=index)

    avg_high = ma(mamode, high, length=high_length)
    avg_low = ma(mamode, low, length=low_length)

    for bar in range(1, close.size):
        price = close.iloc[bar]
        if price > avg_high.iloc[bar - 1]:
            # Close broke above the high MA: plot the low MA (long regime).
            line.iloc[bar] = long_line.iloc[bar] = avg_low.iloc[bar]
        elif price < avg_low.iloc[bar - 1]:
            # Close broke below the low MA: plot the high MA (short regime).
            line.iloc[bar] = short_line.iloc[bar] = avg_high.iloc[bar]
        else:
            # No breakout: carry the previous value forward.
            line.iloc[bar] = line.iloc[bar - 1]
            long_line.iloc[bar] = short_line.iloc[bar] = line.iloc[bar - 1]

    # --- offset -------------------------------------------------------------
    if offset != 0:
        line = line.shift(offset)
        long_line = long_line.shift(offset)
        short_line = short_line.shift(offset)

    # --- handle fills -------------------------------------------------------
    if "fillna" in kwargs:
        for series in (line, long_line, short_line):
            series.fillna(kwargs["fillna"], inplace=True)
    if "fill_method" in kwargs:
        for series in (line, long_line, short_line):
            series.fillna(method=kwargs["fill_method"], inplace=True)

    # --- name & category ----------------------------------------------------
    _props = f"_{high_length}_{low_length}"
    df = DataFrame(
        {
            f"HILO{_props}": line,
            f"HILOl{_props}": long_line,
            f"HILOs{_props}": short_line,
        },
        index=index,
    )
    df.name = f"HILO{_props}"
    df.category = "overlap"

    return df
# Attach the indicator documentation.  Fix: the Args section previously
# listed only 'sma' and 'ema' for ``mamode`` although the Calculation
# section (and the ``ma(mamode, ...)`` dispatch) also supports 'hma'.
hilo.__doc__ = \
"""Gann HiLo Activator(HiLo)
The Gann High Low Activator Indicator was created by Robert Krausz in a 1998
issue of Stocks & Commodities Magazine. It is a moving average based trend
indicator consisting of two different simple moving averages.
The indicator tracks both curves (of the highs and the lows). The close of the
bar defines which of the two gets plotted.
Increasing high_length and decreasing low_length better for short trades,
vice versa for long positions.
Sources:
    https://www.sierrachart.com/index.php?page=doc/StudiesReference.php&ID=447&Name=Gann_HiLo_Activator
    https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/simple-moving-average-sma/
    https://www.tradingview.com/script/XNQSLIYb-Gann-High-Low/
Calculation:
    Default Inputs:
        high_length=13, low_length=21, mamode="sma"
    EMA = Exponential Moving Average
    HMA = Hull Moving Average
    SMA = Simple Moving Average # Default
    if "ema":
        high_ma = EMA(high, high_length)
        low_ma = EMA(low, low_length)
    elif "hma":
        high_ma = HMA(high, high_length)
        low_ma = HMA(low, low_length)
    else: # "sma"
        high_ma = SMA(high, high_length)
        low_ma = SMA(low, low_length)
    # Similar to Supertrend MA selection
    hilo = Series(npNaN, index=close.index)
    for i in range(1, m):
        if close.iloc[i] > high_ma.iloc[i - 1]:
            hilo.iloc[i] = low_ma.iloc[i]
        elif close.iloc[i] < low_ma.iloc[i - 1]:
            hilo.iloc[i] = high_ma.iloc[i]
        else:
            hilo.iloc[i] = hilo.iloc[i - 1]
Args:
    high (pd.Series): Series of 'high's
    low (pd.Series): Series of 'low's
    close (pd.Series): Series of 'close's
    high_length (int): It's period. Default: 13
    low_length (int): It's period. Default: 21
    mamode (str): Options: 'sma', 'ema', 'hma'. Default: 'sma'
    offset (int): How many periods to offset the result. Default: 0
Kwargs:
    adjust (bool): Default: True
    presma (bool, optional): If True, uses SMA for initial value.
    fillna (value, optional): pd.DataFrame.fillna(value)
    fill_method (value, optional): Type of fill method
Returns:
    pd.DataFrame: HILO (line), HILOl (long), HILOs (short) columns.
"""
| 34.734375 | 111 | 0.65857 |
from numpy import NaN as npNaN
from pandas import DataFrame, Series
from .ma import ma
from pandas_ta.utils import get_offset, verify_series
def hilo(high, low, close, high_length=None, low_length=None, mamode=None, offset=None, **kwargs):
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
high_length = int(high_length) if high_length and high_length > 0 else 13
low_length = int(low_length) if low_length and low_length > 0 else 21
mamode = mamode.lower() if isinstance(mamode, str) else "sma"
offset = get_offset(offset)
m = close.size
hilo = Series(npNaN, index=close.index)
long = Series(npNaN, index=close.index)
short = Series(npNaN, index=close.index)
high_ma = ma(mamode, high, length=high_length)
low_ma = ma(mamode, low, length=low_length)
for i in range(1, m):
if close.iloc[i] > high_ma.iloc[i - 1]:
hilo.iloc[i] = long.iloc[i] = low_ma.iloc[i]
elif close.iloc[i] < low_ma.iloc[i - 1]:
hilo.iloc[i] = short.iloc[i] = high_ma.iloc[i]
else:
hilo.iloc[i] = hilo.iloc[i - 1]
long.iloc[i] = short.iloc[i] = hilo.iloc[i - 1]
if offset != 0:
hilo = hilo.shift(offset)
long = long.shift(offset)
short = short.shift(offset)
if "fillna" in kwargs:
hilo.fillna(kwargs["fillna"], inplace=True)
long.fillna(kwargs["fillna"], inplace=True)
short.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
hilo.fillna(method=kwargs["fill_method"], inplace=True)
long.fillna(method=kwargs["fill_method"], inplace=True)
short.fillna(method=kwargs["fill_method"], inplace=True)
_props = f"_{high_length}_{low_length}"
data = {f"HILO{_props}": hilo, f"HILOl{_props}": long, f"HILOs{_props}": short}
df = DataFrame(data, index=close.index)
df.name = f"HILO{_props}"
df.category = "overlap"
return df
hilo.__doc__ = \
"""Gann HiLo Activator(HiLo)
The Gann High Low Activator Indicator was created by Robert Krausz in a 1998
issue of Stocks & Commodities Magazine. It is a moving average based trend
indicator consisting of two different simple moving averages.
The indicator tracks both curves (of the highs and the lows). The close of the
bar defines which of the two gets plotted.
Increasing high_length and decreasing low_length better for short trades,
vice versa for long positions.
Sources:
https://www.sierrachart.com/index.php?page=doc/StudiesReference.php&ID=447&Name=Gann_HiLo_Activator
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/simple-moving-average-sma/
https://www.tradingview.com/script/XNQSLIYb-Gann-High-Low/
Calculation:
Default Inputs:
high_length=13, low_length=21, mamode="sma"
EMA = Exponential Moving Average
HMA = Hull Moving Average
SMA = Simple Moving Average # Default
if "ema":
high_ma = EMA(high, high_length)
low_ma = EMA(low, low_length)
elif "hma":
high_ma = HMA(high, high_length)
low_ma = HMA(low, low_length)
else: # "sma"
high_ma = SMA(high, high_length)
low_ma = SMA(low, low_length)
# Similar to Supertrend MA selection
hilo = Series(npNaN, index=close.index)
for i in range(1, m):
if close.iloc[i] > high_ma.iloc[i - 1]:
hilo.iloc[i] = low_ma.iloc[i]
elif close.iloc[i] < low_ma.iloc[i - 1]:
hilo.iloc[i] = high_ma.iloc[i]
else:
hilo.iloc[i] = hilo.iloc[i - 1]
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
high_length (int): It's period. Default: 13
low_length (int): It's period. Default: 21
mamode (str): Options: 'sma' or 'ema'. Default: 'sma'
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
presma (bool, optional): If True, uses SMA for initial value.
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: HILO (line), HILOl (long), HILOs (short) columns.
"""
| true | true |
79016f66e59c16e4a1773919bca295e44f37e80c | 4,367 | py | Python | rasa/nlu/tokenizers/tokenizer.py | Performek/rasa | d4a88c3b97ca4cf81d011834bfbb63abbf39d697 | [
"Apache-2.0"
] | 1 | 2020-02-18T03:48:44.000Z | 2020-02-18T03:48:44.000Z | rasa/nlu/tokenizers/tokenizer.py | Doometnick/rasa | 969dc83a83f989a7774b2ff3ba186272b18bc73a | [
"Apache-2.0"
] | 4 | 2020-09-25T18:31:22.000Z | 2022-02-09T23:27:20.000Z | rasa/nlu/tokenizers/tokenizer.py | Doometnick/rasa | 969dc83a83f989a7774b2ff3ba186272b18bc73a | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Text, List, Optional, Dict, Any
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import TrainingData, Message
from rasa.nlu.components import Component
from rasa.nlu.constants import (
RESPONSE_ATTRIBUTE,
TEXT_ATTRIBUTE,
CLS_TOKEN,
TOKENS_NAMES,
MESSAGE_ATTRIBUTES,
INTENT_ATTRIBUTE,
)
logger = logging.getLogger(__name__)
class Token(object):
    """A single token produced by a tokenizer.

    Attributes:
        text: the token's surface string.
        start: character offset of the token in the original text.
        end: character offset one past the token's last character; defaults
            to ``start + len(text)`` when not given explicitly.
        data: arbitrary per-token annotations, accessed via ``set``/``get``.
        lemma: base form of the token; defaults to ``text``.

    NOTE(review): ``__eq__`` is defined without ``__hash__``, so instances
    are unhashable -- confirm no caller puts tokens in sets/dict keys.
    """

    def __init__(
        self,
        text: Text,
        start: int,
        data: Optional[Dict[Text, Any]] = None,
        lemma: Optional[Text] = None,
        end: Optional[int] = None,
    ) -> None:
        self.start = start
        self.text = text
        self.data = data if data else {}
        self.lemma = lemma or text
        # Fix: ``self.end`` used to be assigned twice -- the first assignment
        # ignored the ``end`` argument and was immediately overwritten.
        # Keep only the assignment that honors an explicit ``end``.
        self.end = end if end else start + len(text)

    def set(self, prop: Text, info: Any) -> None:
        """Store an annotation under key ``prop``."""
        self.data[prop] = info

    def get(self, prop: Text, default: Optional[Any] = None) -> Any:
        """Return the annotation stored under ``prop`` (or ``default``)."""
        return self.data.get(prop, default)

    def __eq__(self, other):
        if not isinstance(other, Token):
            return NotImplemented
        return (self.start, self.end, self.text, self.lemma) == (
            other.start,
            other.end,
            other.text,
            other.lemma,
        )

    def __lt__(self, other):
        # Orders tokens primarily by their position in the text.
        if not isinstance(other, Token):
            return NotImplemented
        return (self.start, self.end, self.text, self.lemma) < (
            other.start,
            other.end,
            other.text,
            other.lemma,
        )
class Tokenizer(Component):
    """Base component for tokenizers: splits message attributes into Tokens."""

    def __init__(self, component_config: Dict[Text, Any] = None) -> None:
        """Construct a new tokenizer using the WhitespaceTokenizer framework."""
        super().__init__(component_config)

        # Whether intent labels should themselves be split into tokens.
        self.intent_tokenization_flag = self.component_config.get(
            "intent_tokenization_flag", False
        )
        # Delimiter used when splitting intent labels.
        self.intent_split_symbol = self.component_config.get("intent_split_symbol", "_")

    def tokenize(self, message: Message, attribute: Text) -> List[Token]:
        """Tokenizes the text of the provided attribute of the incoming message."""
        raise NotImplementedError

    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        """Tokenize every present attribute of every training example."""
        for example in training_data.training_examples:
            for attribute in MESSAGE_ATTRIBUTES:
                if example.get(attribute) is None:
                    continue
                if attribute == INTENT_ATTRIBUTE:
                    # Intents are split on the configured symbol, no CLS token.
                    tokens = self._split_intent(example)
                else:
                    tokens = self.add_cls_token(
                        self.tokenize(example, attribute), attribute
                    )
                example.set(TOKENS_NAMES[attribute], tokens)

    def process(self, message: Message, **kwargs: Any) -> None:
        """Tokenize the text of an incoming message."""
        text_tokens = self.tokenize(message, TEXT_ATTRIBUTE)
        message.set(
            TOKENS_NAMES[TEXT_ATTRIBUTE],
            self.add_cls_token(text_tokens, TEXT_ATTRIBUTE),
        )

    def _split_intent(self, message: Message):
        """Turn the intent label into tokens, honoring the split flag."""
        text = message.get(INTENT_ATTRIBUTE)
        if self.intent_tokenization_flag:
            words = text.split(self.intent_split_symbol)
        else:
            words = [text]
        return self._convert_words_to_tokens(words, text)

    @staticmethod
    def _convert_words_to_tokens(words: List[Text], text: Text) -> List[Token]:
        """Locate each word inside ``text`` and wrap it in a Token."""
        tokens = []
        search_from = 0
        for word in words:
            position = text.index(word, search_from)
            search_from = position + len(word)
            tokens.append(Token(word, position))
        return tokens

    @staticmethod
    def add_cls_token(tokens: List[Token], attribute: Text) -> List[Token]:
        """Append the ``__CLS__`` token for text/response attributes."""
        if tokens and attribute in (RESPONSE_ATTRIBUTE, TEXT_ATTRIBUTE):
            # +1 leaves a space between the last token and the CLS token.
            tokens.append(Token(CLS_TOKEN, tokens[-1].end + 1))
        return tokens
| 31.644928 | 88 | 0.608198 | import logging
from typing import Text, List, Optional, Dict, Any
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import TrainingData, Message
from rasa.nlu.components import Component
from rasa.nlu.constants import (
RESPONSE_ATTRIBUTE,
TEXT_ATTRIBUTE,
CLS_TOKEN,
TOKENS_NAMES,
MESSAGE_ATTRIBUTES,
INTENT_ATTRIBUTE,
)
logger = logging.getLogger(__name__)
class Token(object):
def __init__(
self,
text: Text,
start: int,
data: Optional[Dict[Text, Any]] = None,
lemma: Optional[Text] = None,
end: Optional[int] = None,
) -> None:
self.start = start
self.text = text
self.end = start + len(text)
self.data = data if data else {}
self.lemma = lemma or text
self.end = end if end else start + len(text)
def set(self, prop: Text, info: Any) -> None:
self.data[prop] = info
def get(self, prop: Text, default: Optional[Any] = None) -> Any:
return self.data.get(prop, default)
def __eq__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) == (
other.start,
other.end,
other.text,
other.lemma,
)
def __lt__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) < (
other.start,
other.end,
other.text,
other.lemma,
)
class Tokenizer(Component):
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
super().__init__(component_config)
self.intent_tokenization_flag = self.component_config.get(
"intent_tokenization_flag", False
)
self.intent_split_symbol = self.component_config.get("intent_split_symbol", "_")
def tokenize(self, message: Message, attribute: Text) -> List[Token]:
raise NotImplementedError
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
for example in training_data.training_examples:
for attribute in MESSAGE_ATTRIBUTES:
if example.get(attribute) is not None:
if attribute == INTENT_ATTRIBUTE:
tokens = self._split_intent(example)
else:
tokens = self.tokenize(example, attribute)
tokens = self.add_cls_token(tokens, attribute)
example.set(TOKENS_NAMES[attribute], tokens)
def process(self, message: Message, **kwargs: Any) -> None:
tokens = self.tokenize(message, TEXT_ATTRIBUTE)
tokens = self.add_cls_token(tokens, TEXT_ATTRIBUTE)
message.set(TOKENS_NAMES[TEXT_ATTRIBUTE], tokens)
def _split_intent(self, message: Message):
text = message.get(INTENT_ATTRIBUTE)
words = (
text.split(self.intent_split_symbol)
if self.intent_tokenization_flag
else [text]
)
return self._convert_words_to_tokens(words, text)
@staticmethod
def _convert_words_to_tokens(words: List[Text], text: Text) -> List[Token]:
running_offset = 0
tokens = []
for word in words:
word_offset = text.index(word, running_offset)
word_len = len(word)
running_offset = word_offset + word_len
tokens.append(Token(word, word_offset))
return tokens
@staticmethod
def add_cls_token(tokens: List[Token], attribute: Text) -> List[Token]:
if attribute in [RESPONSE_ATTRIBUTE, TEXT_ATTRIBUTE] and tokens:
idx = tokens[-1].end + 1
tokens.append(Token(CLS_TOKEN, idx))
return tokens
| true | true |
79016fa6c6014fafe5eec382b6be4da591309ba9 | 32 | py | Python | edureka.py | jatin06/learning-git | 4e531193a95f6dc8daeb5c4c7a15a2fe786583ae | [
"MIT"
] | null | null | null | edureka.py | jatin06/learning-git | 4e531193a95f6dc8daeb5c4c7a15a2fe786583ae | [
"MIT"
] | null | null | null | edureka.py | jatin06/learning-git | 4e531193a95f6dc8daeb5c4c7a15a2fe786583ae | [
"MIT"
] | null | null | null | print ("welcome to edureka!! ")
| 16 | 31 | 0.65625 | print ("welcome to edureka!! ")
| true | true |
790171254393847e77c757edc13e944620e1e566 | 6,472 | py | Python | dask/array/wrap.py | BlueOwlDev/dask | a1187b13321d69565b9c21359d739c239bd04c65 | [
"BSD-3-Clause"
] | null | null | null | dask/array/wrap.py | BlueOwlDev/dask | a1187b13321d69565b9c21359d739c239bd04c65 | [
"BSD-3-Clause"
] | null | null | null | dask/array/wrap.py | BlueOwlDev/dask | a1187b13321d69565b9c21359d739c239bd04c65 | [
"BSD-3-Clause"
] | null | null | null | from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..utils import funcname
from .blockwise import BlockwiseCreateArray
from .core import Array, normalize_chunks
from .utils import (
meta_from_array,
empty_like_safe,
full_like_safe,
ones_like_safe,
zeros_like_safe,
)
def _parse_wrap_args(func, args, kwargs, shape):
    """Normalize the arguments shared by the blocked creation functions.

    Pops ``name``, ``chunks`` and ``dtype`` out of ``kwargs`` and returns a
    dict holding the normalized ``shape``, ``dtype``, ``chunks``, ``name``
    and the remaining ``kwargs``.
    """
    # Accept scalars, lists, tuples, and 1-D numpy arrays as the shape.
    if isinstance(shape, np.ndarray):
        shape = shape.tolist()
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)

    name = kwargs.pop("name", None)
    chunks = kwargs.pop("chunks", "auto")
    dtype = kwargs.pop("dtype", None)
    if dtype is None:
        # Infer the dtype by calling the creation function once.
        dtype = func(shape, *args, **kwargs).dtype
    dtype = np.dtype(dtype)

    chunks = normalize_chunks(chunks, shape, dtype=dtype)

    if not name:
        token = tokenize(func, shape, chunks, dtype, args, kwargs)
        name = funcname(func) + "-" + token

    return {
        "shape": shape,
        "dtype": dtype,
        "kwargs": kwargs,
        "chunks": chunks,
        "name": name,
    }
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
    """Transform an np creation function (shape-first signature) into a
    blocked dask version.

    The shape may come positionally (first element of ``args``) or as the
    ``shape`` keyword; dask Arrays are rejected as shapes.
    """
    if "shape" not in kwargs:
        shape, args = args[0], args[1:]
    else:
        shape = kwargs.pop("shape")

    if isinstance(shape, Array):
        raise TypeError(
            "Dask array input not supported. "
            "Please use tuple, list, or a 1D numpy array instead."
        )

    # Normalize shape/chunks/dtype/name and strip the consumed kwargs.
    parsed = _parse_wrap_args(func, args, kwargs, shape)
    shape = parsed["shape"]
    dtype = parsed["dtype"]
    chunks = parsed["chunks"]
    name = parsed["name"]
    kwargs = parsed["kwargs"]
    # Bind dtype and the remaining kwargs so each chunk only needs its shape.
    func = partial(func, dtype=dtype, **kwargs)

    graph = BlockwiseCreateArray(
        name,
        func,
        shape,
        chunks,
    )
    # ``meta`` (if provided) is both forwarded to ``func`` via the partial
    # above and used for the resulting Array's meta.
    return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
    """Transform an np creation function of the ``*_like`` family into a
    blocked dask version.

    ``args[0]`` is the array to mimic: its meta seeds the result's meta and
    its shape is the default output shape.
    """
    x = args[0]
    meta = meta_from_array(x)
    shape = kwargs.get("shape", x.shape)

    parsed = _parse_wrap_args(func, args, kwargs, shape)
    shape = parsed["shape"]
    dtype = parsed["dtype"]
    chunks = parsed["chunks"]
    name = parsed["name"]
    kwargs = parsed["kwargs"]

    keys = product([name], *[range(len(bd)) for bd in chunks])
    shapes = list(product(*chunks))
    # BUG FIX: the previous code built ``kw = [kwargs for _ in shapes]`` -- a
    # list of references to a *single* dict -- then assigned each chunk shape
    # into it.  That loop finished before the lazy generator below created
    # the ``partial`` objects, so every chunk ended up with the last chunk's
    # shape.  Build an independent kwargs dict per chunk instead.
    vals = (
        (partial(func, dtype=dtype, **dict(kwargs, shape=chunk_shape)),) + args
        for chunk_shape in shapes
    )
    dsk = dict(zip(keys, vals))
    return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
    """Prefer ``func_like`` but degrade gracefully to ``func``.

    Older numpy versions reject the ``shape`` keyword on the ``*_like``
    creation functions; when ``func_like`` raises TypeError the plain
    creation function is used instead.
    """
    try:
        result = func_like(*args, **kwargs)
    except TypeError:
        result = func(*args, **kwargs)
    return result
@curry
def wrap(wrap_func, func, **kwargs):
    # Factory: binds ``func`` (or an optional ``func_like`` variant) into the
    # given wrapping strategy and dresses the result up with a derived name
    # and docstring so it reads like the blocked version of ``func``.
    func_like = kwargs.pop("func_like", None)
    if func_like is None:
        f = partial(wrap_func, func, **kwargs)
    else:
        f = partial(wrap_func, func_like, **kwargs)
    template = """
    Blocked variant of %(name)s
    Follows the signature of %(name)s exactly except that it also features
    optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
    Original signature follows below.
    """
    # Only prepend the template when the wrapped function has a docstring
    # (e.g. not running under ``python -OO``).
    if func.__doc__ is not None:
        f.__doc__ = template % {"name": func.__name__} + func.__doc__
    f.__name__ = "blocked_" + func.__name__
    return f
# Standard wrapper: blocked creation functions taking the shape first.
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
    """Materialize one tiny array with ``func`` and broadcast it to ``shape``.

    A 0-d request is created with shape ``()``; anything else is created as
    a single element and broadcast out, so all elements share one value.
    """
    seed_shape = () if shape == () else 1
    return np.broadcast_to(func(meta, shape=seed_shape, *args, **kwargs), shape)
def broadcast_trick(func):
    """Wrap a numpy creation function so uniform arrays are stored once.

    Dask arrays are currently immutable, so a uniform chunk can be backed by
    a single value broadcast to the full shape -- e.g.
    ``np.broadcast_to(1, (100, 100, 100)).base.nbytes == 8``.  Serialization
    is aware of the real size of such arrays, so they travel cheaply and
    schedule accordingly.  The resulting arrays are read-only; numpy refuses
    assignment into them, which is safe here.
    """
    wrapped = _broadcast_trick_inner(func)
    if func.__doc__ is not None:
        wrapped.__doc__ = func.__doc__
    wrapped.__name__ = func.__name__
    # Strip the "_like_safe" suffix so e.g. ``ones_like_safe`` reads ``ones``.
    if wrapped.__name__.endswith("_like_safe"):
        wrapped.__name__ = wrapped.__name__[:-10]
    return wrapped
# Public blocked creation functions, all defaulting to float64 and using the
# broadcast trick so uniform chunks are stored as a single value.
ones = w(broadcast_trick(ones_like_safe), dtype="f8")
zeros = w(broadcast_trick(zeros_like_safe), dtype="f8")
empty = w(broadcast_trick(empty_like_safe), dtype="f8")
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = w(broadcast_trick(full_like_safe))
_full_like = w_like(np.full, func_like=np.full_like)
# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472
# NOTE(review): as written, both arguments to ``replace`` are identical, so
# this call is a no-op -- upstream the first string differs (double spaces
# from older numpy reprs); verify against the referenced numpy PR.
_full.__doc__ = _full.__doc__.replace(
    "array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
    "array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
)
def full(shape, fill_value, *args, **kwargs):
    """Blocked analogue of ``numpy.full``; rejects non-scalar fill values."""
    # ``np.isscalar`` behaves oddly for some inputs, so scalars are detected
    # via dimensionality instead:
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
    if np.ndim(fill_value) == 0:
        return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
    raise ValueError(
        f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
    )
def full_like(a, fill_value, *args, **kwargs):
    """Blocked analogue of ``numpy.full_like``; rejects non-scalar fill values."""
    # Same scalar check as ``full``: dimensionality, not ``np.isscalar``.
    if np.ndim(fill_value) == 0:
        return _full_like(
            a=a,
            fill_value=fill_value,
            *args,
            **kwargs,
        )
    raise ValueError(
        f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
    )
# Re-expose the generated wrappers' docstrings on the public entry points.
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
| 27.896552 | 87 | 0.645705 | from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..utils import funcname
from .blockwise import BlockwiseCreateArray
from .core import Array, normalize_chunks
from .utils import (
meta_from_array,
empty_like_safe,
full_like_safe,
ones_like_safe,
zeros_like_safe,
)
def _parse_wrap_args(func, args, kwargs, shape):
if isinstance(shape, np.ndarray):
shape = shape.tolist()
if not isinstance(shape, (tuple, list)):
shape = (shape,)
name = kwargs.pop("name", None)
chunks = kwargs.pop("chunks", "auto")
dtype = kwargs.pop("dtype", None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
dtype = np.dtype(dtype)
chunks = normalize_chunks(chunks, shape, dtype=dtype)
name = name or funcname(func) + "-" + tokenize(
func, shape, chunks, dtype, args, kwargs
)
return {
"shape": shape,
"dtype": dtype,
"kwargs": kwargs,
"chunks": chunks,
"name": name,
}
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
if "shape" not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop("shape")
if isinstance(shape, Array):
raise TypeError(
"Dask array input not supported. "
"Please use tuple, list, or a 1D numpy array instead."
)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(
name,
func,
shape,
chunks,
)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get("shape", x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for i, s in enumerate(list(shapes)):
kw[i]["shape"] = s
vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs)
@curry
def wrap(wrap_func, func, **kwargs):
func_like = kwargs.pop("func_like", None)
if func_like is None:
f = partial(wrap_func, func, **kwargs)
else:
f = partial(wrap_func, func_like, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also features
optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {"name": func.__name__} + func.__doc__
f.__name__ = "blocked_" + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
if shape == ():
return np.broadcast_to(func(meta, shape=(), *args, **kwargs), shape)
else:
return np.broadcast_to(func(meta, shape=1, *args, **kwargs), shape)
def broadcast_trick(func):
inner = _broadcast_trick_inner(func)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith("_like_safe"):
inner.__name__ = inner.__name__[:-10]
return inner
ones = w(broadcast_trick(ones_like_safe), dtype="f8")
zeros = w(broadcast_trick(zeros_like_safe), dtype="f8")
empty = w(broadcast_trick(empty_like_safe), dtype="f8")
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
_full = w(broadcast_trick(full_like_safe))
_full_like = w_like(np.full, func_like=np.full_like)
_full.__doc__ = _full.__doc__.replace(
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
)
def full(shape, fill_value, *args, **kwargs):
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full_like(
a=a,
fill_value=fill_value,
*args,
**kwargs,
)
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
| true | true |
79017191ffb25ad0bc2d39bcd4d208278628a040 | 924 | py | Python | Scripts/GridSearch/ModelBuilderRLF.py | bio-hpc/sibila | 337ea84692d6ea4f4d3e4de9da51f5ee53cff6d7 | [
"Apache-2.0"
] | 1 | 2022-03-07T11:05:31.000Z | 2022-03-07T11:05:31.000Z | Scripts/GridSearch/ModelBuilderRLF.py | bio-hpc/sibila | 337ea84692d6ea4f4d3e4de9da51f5ee53cff6d7 | [
"Apache-2.0"
] | null | null | null | Scripts/GridSearch/ModelBuilderRLF.py | bio-hpc/sibila | 337ea84692d6ea4f4d3e4de9da51f5ee53cff6d7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ModelBuilderRLF.py:
"""
# Module authorship metadata.
__author__ = "Antonio Jesús Banegas-Luna"
__version__ = "1.0"
__maintainer__ = "Antonio"
__email__ = "ajbanegas@ucam.edu"
__status__ = "Development"
from BaseModelBuilder import BaseModelBuilder
class ModelBuilderRLF(BaseModelBuilder):
    """Builder returning the default configuration for the RLF model
    (the hyperparameter names suggest a RuleFit-style estimator)."""

    def get_default_model(self):
        """Return the default, no-grid-search classification config."""
        hyper_params = {
            'tree_size': 4,
            'sample_fract': 'default',
            'max_rules': 2000,
            'memory_par': 0.01,
            'rfmode': 'classify',
            'lin_trim_quantile': 0.025,
            'lin_standardise': True,
            'exp_rand_tree_size': True,
        }
        return {
            'model': self.model_name,
            'train_grid': 'NONE',
            'type_ml': 'classification',
            'n_jobs': 8,
            'params': hyper_params,
            'params_grid': {},
        }
| 24.972973 | 48 | 0.580087 |
__author__ = "Antonio Jesús Banegas-Luna"
__version__ = "1.0"
__maintainer__ = "Antonio"
__email__ = "ajbanegas@ucam.edu"
__status__ = "Development"
from BaseModelBuilder import BaseModelBuilder
class ModelBuilderRLF(BaseModelBuilder):
    """Builder returning the default configuration for the RLF model
    (the hyperparameter names suggest a RuleFit-style estimator)."""

    def get_default_model(self):
        """Return the default, no-grid-search classification config."""
        p = {}
        p['model'] = self.model_name
        p['train_grid'] = 'NONE'
        p['type_ml'] = 'classification'
        p['n_jobs'] = 8
        # estimator hyperparameters
        p['params'] = {}
        p['params']['tree_size'] = 4
        p['params']['sample_fract'] = 'default'
        p['params']['max_rules'] = 2000
        p['params']['memory_par'] = 0.01
        p['params']['rfmode'] = 'classify'
        p['params']['lin_trim_quantile'] = 0.025
        p['params']['lin_standardise'] = True
        p['params']['exp_rand_tree_size'] = True
        # empty: no grid search by default
        p['params_grid'] = {}
        return p
| true | true |
79017262dd9a268afd502f5cc32e22c15e723371 | 154 | py | Python | analysis/tools/count_histories.py | beykyle/omp-uq | 7d9b720d874b634f3a56878ce34f29553441194e | [
"MIT"
] | null | null | null | analysis/tools/count_histories.py | beykyle/omp-uq | 7d9b720d874b634f3a56878ce34f29553441194e | [
"MIT"
] | null | null | null | analysis/tools/count_histories.py | beykyle/omp-uq | 7d9b720d874b634f3a56878ce34f29553441194e | [
"MIT"
] | null | null | null | import sys
from CGMFtk import histories as fh
if __name__ == "__main__":
    # Usage: count_histories.py <history-file>
    # Loads a CGMFtk history file and prints how many fission
    # histories it contains.
    hist = fh.Histories(sys.argv[1])
    print(len(hist.getFissionHistories()))
| 22 | 42 | 0.714286 | import sys
from CGMFtk import histories as fh
if __name__ == "__main__":
    # Usage: count_histories.py <history-file>
    # Loads a CGMFtk history file and prints how many fission
    # histories it contains.
    hist = fh.Histories(sys.argv[1])
    print(len(hist.getFissionHistories()))
| true | true |
790173969efb945f4bb3a24471e09eea76be1304 | 299 | py | Python | nsls2_catalogs/tes/__init__.py | NSLS-II/nsls2-catalogs | 8dab7db65335ccd20eacf7f2e999d64ec7325620 | [
"BSD-3-Clause"
] | null | null | null | nsls2_catalogs/tes/__init__.py | NSLS-II/nsls2-catalogs | 8dab7db65335ccd20eacf7f2e999d64ec7325620 | [
"BSD-3-Clause"
] | null | null | null | nsls2_catalogs/tes/__init__.py | NSLS-II/nsls2-catalogs | 8dab7db65335ccd20eacf7f2e999d64ec7325620 | [
"BSD-3-Clause"
] | null | null | null | from databroker.v1 import from_config
from databroker.v0 import Broker
from .. import load_config
name = 'tes'
# Three views of the same catalog, all built from the 'tes/tes.yml'
# config: the legacy v0 Broker, the v1 Broker, and the v2 accessor.
v0_catalog = Broker.from_config(load_config(f'{name}/{name}.yml'))
v1_catalog = from_config(load_config(f'{name}/{name}.yml'))
catalog = from_config(load_config(f'{name}/{name}.yml')).v2
| 33.222222 | 66 | 0.755853 | from databroker.v1 import from_config
from databroker.v0 import Broker
from .. import load_config
name = 'tes'
# Three views of the same catalog, all built from the 'tes/tes.yml'
# config: the legacy v0 Broker, the v1 Broker, and the v2 accessor.
v0_catalog = Broker.from_config(load_config(f'{name}/{name}.yml'))
v1_catalog = from_config(load_config(f'{name}/{name}.yml'))
catalog = from_config(load_config(f'{name}/{name}.yml')).v2
| true | true |
790173bd5e7c35d6cbb74a0b6289f9a3f37db36b | 3,356 | py | Python | airsenal/framework/player_model.py | JPKFin/AIrsenal | 7824ae4c07c9f21336f2986c0439549e9c346433 | [
"MIT"
] | null | null | null | airsenal/framework/player_model.py | JPKFin/AIrsenal | 7824ae4c07c9f21336f2986c0439549e9c346433 | [
"MIT"
] | null | null | null | airsenal/framework/player_model.py | JPKFin/AIrsenal | 7824ae4c07c9f21336f2986c0439549e9c346433 | [
"MIT"
] | null | null | null | import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from typing import Any, Dict, Optional
class PlayerModel(object):
    """
    numpyro implementation of the AIrsenal player model.

    A per-player Dirichlet prior over the outcome triplet
    (score, assist, neither) is combined with a Multinomial likelihood
    whose probabilities are scaled by the minutes each player played.
    """

    def __init__(self):
        # both attributes are populated by fit()
        self.player_ids = None
        self.samples = None

    @staticmethod
    def _model(
        nplayer: int, nmatch: int, minutes: jnp.array, y: jnp.array, alpha: jnp.array
    ):
        """numpyro model definition (used by fit() via NUTS)."""
        theta = dist.Dirichlet(concentration=alpha)
        # one sample from the prior per player
        with numpyro.plate("nplayer", nplayer):
            dprobs = numpyro.sample("probs", theta)
        # Scale outcome probabilities by the fraction of the match
        # played; "neither" also absorbs the minutes not played.
        prob_score = numpyro.deterministic(
            "prob_score", dprobs[:, 0, None] * (minutes / 90.0)
        )
        prob_assist = numpyro.deterministic(
            "prob_assist", dprobs[:, 1, None] * (minutes / 90.0)
        )
        prob_neither = numpyro.deterministic(
            "prob_neither", dprobs[:, 2, None] * (minutes / 90.0) + (90.0 - minutes)
        )
        theta_mins = dist.Multinomial(
            probs=jnp.moveaxis(jnp.array([prob_score, prob_assist, prob_neither]), 0, 2)
        )
        return numpyro.sample("obs", theta_mins, obs=y)

    def fit(
        self,
        data,
        random_state: int = 42,
        num_warmup: int = 500,
        num_samples: int = 2000,
        mcmc_kwargs: Optional[Dict[str, Any]] = None,
        run_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Run NUTS MCMC on *data* and store the posterior samples.

        *data* is a dict with keys "player_ids", "nplayer", "nmatch",
        "minutes", "y" and "alpha". Returns self so calls can chain.
        """
        self.player_ids = data["player_ids"]
        kernel = NUTS(self._model)
        mcmc = MCMC(
            kernel,
            num_warmup=num_warmup,
            num_samples=num_samples,
            num_chains=1,
            progress_bar=True,
            **(mcmc_kwargs or {}),
        )
        # Bug fix: the seed used to be hard-coded to PRNGKey(44),
        # silently ignoring the random_state argument.
        rng_key, rng_key_predict = random.split(random.PRNGKey(random_state))
        mcmc.run(
            rng_key,
            data["nplayer"],
            data["nmatch"],
            data["minutes"],
            data["y"],
            data["alpha"],
            **(run_kwargs or {}),
        )
        self.samples = mcmc.get_samples()
        return self

    def get_probs(self):
        """Posterior-mean probabilities for every fitted player, as a
        dict of four parallel lists."""
        prob_dict = {
            "player_id": [],
            "prob_score": [],
            "prob_assist": [],
            "prob_neither": [],
        }
        for i, pid in enumerate(self.player_ids):
            prob_dict["player_id"].append(pid)
            prob_dict["prob_score"].append(float(self.samples["probs"][:, i, 0].mean()))
            prob_dict["prob_assist"].append(
                float(self.samples["probs"][:, i, 1].mean())
            )
            prob_dict["prob_neither"].append(
                float(self.samples["probs"][:, i, 2].mean())
            )
        return prob_dict

    def get_probs_for_player(self, player_id):
        """Posterior-mean (score, assist, neither) triple for one player.

        Raises RuntimeError if player_id was not part of the fit.
        """
        try:
            index = list(self.player_ids).index(player_id)
        except ValueError:
            raise RuntimeError(f"Unknown player_id {player_id}")
        prob_score = float(self.samples["probs"][:, index, 0].mean())
        prob_assist = float(self.samples["probs"][:, index, 1].mean())
        prob_neither = float(self.samples["probs"][:, index, 2].mean())
        return (prob_score, prob_assist, prob_neither)
| 33.227723 | 88 | 0.554827 | import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from typing import Any, Dict, Optional
class PlayerModel(object):
    """
    numpyro implementation of the AIrsenal player model
    (comment-stripped duplicate copy in this dump).
    """
    def __init__(self):
        # both attributes are populated by fit()
        self.player_ids = None
        self.samples = None
    @staticmethod
    def _model(
        nplayer: int, nmatch: int, minutes: jnp.array, y: jnp.array, alpha: jnp.array
    ):
        """numpyro model: per-player Dirichlet prior over
        (score, assist, neither), Multinomial likelihood scaled by
        minutes played."""
        theta = dist.Dirichlet(concentration=alpha)
        # one sample from the prior per player
        with numpyro.plate("nplayer", nplayer):
            dprobs = numpyro.sample("probs", theta)
        prob_score = numpyro.deterministic(
            "prob_score", dprobs[:, 0, None] * (minutes / 90.0)
        )
        prob_assist = numpyro.deterministic(
            "prob_assist", dprobs[:, 1, None] * (minutes / 90.0)
        )
        # "neither" also absorbs the minutes not played
        prob_neither = numpyro.deterministic(
            "prob_neither", dprobs[:, 2, None] * (minutes / 90.0) + (90.0 - minutes)
        )
        theta_mins = dist.Multinomial(
            probs=jnp.moveaxis(jnp.array([prob_score, prob_assist, prob_neither]), 0, 2)
        )
        return numpyro.sample("obs", theta_mins, obs=y)
    def fit(
        self,
        data,
        random_state: int = 42,
        num_warmup: int = 500,
        num_samples: int = 2000,
        mcmc_kwargs: Optional[Dict[str, Any]] = None,
        run_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Run NUTS MCMC on *data* and store posterior samples.
        Returns self."""
        self.player_ids = data["player_ids"]
        kernel = NUTS(self._model)
        mcmc = MCMC(
            kernel,
            num_warmup=num_warmup,
            num_samples=num_samples,
            num_chains=1,
            progress_bar=True,
            **(mcmc_kwargs or {}),
        )
        # NOTE(review): random_state is accepted but the seed is
        # hard-coded to 44 here, so the argument has no effect.
        rng_key, rng_key_predict = random.split(random.PRNGKey(44))
        mcmc.run(
            rng_key,
            data["nplayer"],
            data["nmatch"],
            data["minutes"],
            data["y"],
            data["alpha"],
            **(run_kwargs or {}),
        )
        self.samples = mcmc.get_samples()
        return self
    def get_probs(self):
        """Posterior-mean probabilities for every fitted player, as a
        dict of four parallel lists."""
        prob_dict = {
            "player_id": [],
            "prob_score": [],
            "prob_assist": [],
            "prob_neither": [],
        }
        for i, pid in enumerate(self.player_ids):
            prob_dict["player_id"].append(pid)
            prob_dict["prob_score"].append(float(self.samples["probs"][:, i, 0].mean()))
            prob_dict["prob_assist"].append(
                float(self.samples["probs"][:, i, 1].mean())
            )
            prob_dict["prob_neither"].append(
                float(self.samples["probs"][:, i, 2].mean())
            )
        return prob_dict
    def get_probs_for_player(self, player_id):
        """Posterior-mean (score, assist, neither) for one player.
        Raises RuntimeError for an unknown player_id."""
        try:
            index = list(self.player_ids).index(player_id)
        except (ValueError):
            raise RuntimeError(f"Unknown player_id {player_id}")
        prob_score = float(self.samples["probs"][:, index, 0].mean())
        prob_assist = float(self.samples["probs"][:, index, 1].mean())
        prob_neither = float(self.samples["probs"][:, index, 2].mean())
        return (prob_score, prob_assist, prob_neither)
| true | true |
7901746461fddaa02bec11dd3bbf9afc1a3b1382 | 49,985 | py | Python | ty_lib/test_pattern_generator2.py | colour-science/sample_code | 8bda35b674d770da5a0e6c210634a77691527fce | [
"BSD-3-Clause"
] | 1 | 2021-01-23T03:06:53.000Z | 2021-01-23T03:06:53.000Z | ty_lib/test_pattern_generator2.py | colour-science/sample_code | 8bda35b674d770da5a0e6c210634a77691527fce | [
"BSD-3-Clause"
] | null | null | null | ty_lib/test_pattern_generator2.py | colour-science/sample_code | 8bda35b674d770da5a0e6c210634a77691527fce | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
評価用のテストパターン作成ツール集
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
# Name of the color matching functions used throughout this module.
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
# D65 white point (CIE1931 xy) for the observer above.
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
# Marker color value; presumably for the YCbCr checker -- not
# referenced in this chunk.
YCBCR_CHECK_MARKER = [0, 0, 0]
# Color-universal-design palette used for gamut plots.
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
                        "#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
    """Show *img* in an OpenCV window and wait for a key press.

    order: 'rgb' (channels flipped to BGR for cv2), 'bgr' or 'mono'.
    over_disp: intended to resize the preview window (see NOTE below).
    """
    if order == 'rgb':
        cv2.imshow('preview', img[:, :, ::-1])
    elif order == 'bgr':
        cv2.imshow('preview', img)
    elif order == 'mono':
        cv2.imshow('preview', img)
    else:
        raise ValueError("order parameter is invalid")

    if over_disp:
        # NOTE(review): resizeWindow is called without width/height and
        # will raise at runtime -- looks unfinished, confirm intent.
        cv2.resizeWindow('preview', )
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def equal_devision(length, div_num):
    """Split *length* into *div_num* integer parts.

    Fractional remainders are spread over the parts with an
    error-diffusion scheme so the parts always sum to *length*.
    """
    base = length / div_num
    parts = [base] * div_num

    # Error diffusion: accumulate fractional parts and emit one extra
    # unit each time the accumulator reaches 1.
    carry = 0
    for idx in range(div_num):
        carry += math.modf(parts[idx])[0]
        if carry >= 1.0:
            carry -= 1.0
            parts[idx] = int(math.floor(parts[idx]) + 1)
        else:
            parts[idx] = int(math.floor(parts[idx]))

    # Floating point error can leave the total short; pad the last part.
    shortfall = length - sum(parts)
    if shortfall != 0:
        parts[-1] += shortfall

    # Final consistency check.
    if length != sum(parts):
        raise ValueError("the output of equal_division() is abnormal.")

    return parts
def do_matrix(img, mtx):
    """Apply the 3x3 color matrix *mtx* to *img* (last axis = RGB)."""
    original_shape = img.shape
    # One output channel per matrix row, keeping the original
    # left-to-right summation order.
    channels = [
        img[..., 0] * row[0] + img[..., 1] * row[1] + img[..., 2] * row[2]
        for row in mtx
    ]
    return np.dstack(channels).reshape(original_shape)
def _get_cmfs_xy():
    """Return the xy chromaticity coordinates of the spectral locus
    (the horseshoe outline of the CIE1931 chromaticity diagram).

    Returns
    -------
    array_like
        xy coordinates for the chromaticity diagram.
    """
    cmf = CMFS.get(CMFS_NAME)
    return XYZ_to_xy(cmf.values, D65_WHITE)
def get_primaries(name='ITU-R BT.2020'):
    """Return the primary chromaticities of colorspace *name*.

    Parameters
    ----------
    name : str
        a name of the color space.

    Returns
    -------
    array_like
        primaries as a closed loop
        [[rx, ry], [gx, gy], [bx, by], [rx, ry]] plus the pure RGB
        values of the three primaries.
    """
    gamut = RGB_COLOURSPACES[name].primaries
    # repeat the red primary so the polygon closes when plotted
    gamut = np.append(gamut, [gamut[0, :]], axis=0)
    pure_rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    return gamut, pure_rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
    """Convert xy chromaticity values to RGB, with normalization.

    Parameters
    ----------
    xy : array_like
        xy value.
    name : string
        color space name.
    normalize : string
        'maximum' (per-pixel maximize), 'specific' (pin luminance to
        *specific* before conversion) or None (clip with warnings).

    Returns
    -------
    array_like
        normalized rgb value.
    """
    if normalize == 'specific':
        # pin luminance Y to *specific* before converting back to XYZ
        xyY = xy_to_xyY(xy)
        xyY[..., 2] = specific
        large_xyz = xyY_to_XYZ(xyY)
    else:
        large_xyz = xy_to_XYZ(xy)

    rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
                     get_xyz_to_rgb_matrix(name), 'CAT02')

    if normalize == 'maximum':
        # per-pixel normalize & maximize so the video level isn't low
        rgb = normalise_maximum(rgb, axis=-1)
    else:
        if np.sum(rgb > 1.0) > 0:
            print("warning: over flow has occured at xy_to_rgb")
        if np.sum(rgb < 0.0) > 0:
            print("warning: under flow has occured at xy_to_rgb")
        rgb = np.clip(rgb, 0.0, 1.0)

    return rgb
def get_white_point(name):
    """Return the CIE1931 white point of colorspace *name*.

    DCI-P3 is deliberately mapped to D65 instead of the illuminant
    registered for the colorspace.
    """
    if name == "DCI-P3":
        return ILLUMINANTS[CMFS_NAME]["D65"]
    illuminant = RGB_COLOURSPACES[name].illuminant
    return ILLUMINANTS[CMFS_NAME][illuminant]
def get_secondaries(name='ITU-R BT.2020'):
    """Return xy coordinates and RGB values of the secondary colors.

    Parameters
    ----------
    name : str
        a name of the color space.

    Returns
    -------
    array_like
        secondaries in the order magenta, yellow, cyan.
    """
    secondary_rgb = np.array([[1.0, 0.0, 1.0],
                              [1.0, 1.0, 0.0],
                              [0.0, 1.0, 1.0]])
    large_xyz = RGB_to_XYZ(secondary_rgb, D65_WHITE, D65_WHITE,
                           get_rgb_to_xyz_matrix(name), 'CAT02')
    return XYZ_to_xy(large_xyz, D65_WHITE), secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # キーワード引数の初期値設定
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # プロット用データ準備
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
                           xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
    """
    Render the horseshoe region of the xy chromaticity diagram.

    Parameters
    ----------
    samples : int
        resolution of the square output image.
    antialiasing : bool
        smooth the mask edge with a small blur kernel.
    bg_color : float
        gray level outside the horseshoe.
    xmin, xmax, ymin, ymax : float
        xy range covered by the image.

    Returns
    -------
    ndarray
        rgb image (gamma 1/2.2 encoded).
    """
    # ACEScg is used for rendering: sRGB was too narrow and looked odd;
    # the slight desaturation is a known trade-off (original author's note).
    color_space = models.ACES_CG_COLOURSPACE

    # Spectral locus (horseshoe outline) in xy.
    cmf_xy = _get_cmfs_xy()

    # Triangulate the locus so inside/outside can be decided with
    # find_simplex(); it returns -1 for points outside the hull, so
    # "< 0" yields the outside mask.
    triangulation = Delaunay(cmf_xy)

    # Bug fix: np.float was removed in NumPy 1.20+; use np.float64
    # (the same dtype np.float always aliased).
    xx, yy\
        = np.meshgrid(np.linspace(xmin, xmax, samples),
                      np.linspace(ymax, ymin, samples))
    xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)

    # Soften the mask edge (3x3 blur) for antialiasing.
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(np.float64)
        kernel /= np.sum(kernel)
        mask = convolve(mask, kernel)

    # Invert: 1 inside the horseshoe, 0 outside.
    mask = 1 - mask[:, :, np.newaxis]

    # Recover colors from the xy mesh.
    illuminant_XYZ = D65_WHITE
    illuminant_RGB = color_space.whitepoint
    chromatic_adaptation_transform = 'XYZ Scaling'
    large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    xy[xy == 0.0] = 1.0  # avoid division by zero
    large_xyz = xy_to_XYZ(xy)
    rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
                     large_xyz_to_rgb_matrix,
                     chromatic_adaptation_transform)

    # Per-pixel normalize & maximize so the video level isn't too low.
    rgb[rgb == 0] = 1.0  # avoid division by zero
    rgb = normalise_maximum(rgb, axis=-1)

    # Apply the mask.
    mask_rgb = np.dstack((mask, mask, mask))
    rgb *= mask_rgb

    # Fill the outside with the gray background color.
    bg_rgb = np.ones_like(rgb)
    bg_rgb *= (1 - mask_rgb) * bg_color
    rgb += bg_rgb

    rgb = rgb ** (1/2.2)

    return rgb
def get_csf_color_image(width=640, height=480, lv1=None, lv2=None,
                        stripe_num=18):
    """
    Build a CSF-like pattern by stacking progressively smaller
    rectangles of alternating level, shifted toward the center.
    Levels are 16-bit code values.

    Parameters
    ----------
    width : numeric.
        width of the pattern image.
    height : numeric.
        height of the pattern image.
    lv1 : ndarray or None
        first 16-bit level; defaults to 10-bit 1023 scaled by 0x40.
    lv2 : ndarray or None
        second 16-bit level; defaults to 10-bit 512 scaled by 0x40.
    stripe_num : numeric
        number of the stripes.

    Returns
    -------
    array_like
        a csf pattern image (uint16).
    """
    # Bug fix: lv1/lv2 used to be mutable np.ndarray default arguments
    # (shared across calls); use None sentinels instead.
    if lv1 is None:
        lv1 = np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40)
    if lv2 is None:
        lv2 = np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40)

    # per-stripe sizes and per-step offsets toward the image center
    width_list = equal_devision(width, stripe_num)
    height_list = equal_devision(height, stripe_num)
    h_pos_list = equal_devision(width // 2, stripe_num)
    v_pos_list = equal_devision(height // 2, stripe_num)

    img = np.zeros((height, width, 3), dtype=np.uint16)
    width_temp = width
    height_temp = height
    h_pos_temp = 0
    v_pos_temp = 0
    for idx in range(stripe_num):
        # alternate between the two levels
        lv = lv1 if (idx % 2) == 0 else lv2
        temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
        temp_img[:, :] = lv
        ed_pos_h = h_pos_temp + width_temp
        ed_pos_v = v_pos_temp + height_temp
        img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
        # shrink the rectangle and move its origin toward the center
        width_temp -= width_list[stripe_num - 1 - idx]
        height_temp -= height_list[stripe_num - 1 - idx]
        h_pos_temp += h_pos_list[idx]
        v_pos_temp += v_pos_list[idx]

    return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
                         antialiasing=True):
    """
    Plot an xyY volume figure, similar to the one found in SONY's HDR
    explanation material.

    Parameters
    ----------
    name : str
        name of the target color space.
    samples : int
        mesh resolution.
    antialiasing : bool
        smooth the gamut mask edge.

    Returns
    -------
    None
    """
    # Triangulate the gamut triangle so inside/outside can be decided
    # with find_simplex() (-1 means outside the gamut).
    primary_xy, _ = get_primaries(name=name)
    triangulation = Delaunay(primary_xy)

    # Bug fix: np.float was removed in NumPy 1.20+; use np.float64
    # (the same dtype np.float always aliased).
    xx, yy\
        = np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
    xy = np.dstack((xx, yy))
    mask = (triangulation.find_simplex(xy) < 0).astype(np.float64)

    # Soften the mask edge (3x3 blur) for antialiasing.
    if antialiasing:
        kernel = np.array([
            [0, 1, 0],
            [1, 2, 1],
            [0, 1, 0],
        ]).astype(np.float64)
        kernel /= np.sum(kernel)
        mask = convolve(mask, kernel)

    # Invert: 1 inside the gamut, 0 outside.
    mask = 1 - mask[:, :, np.newaxis]

    # Recover colors from the xy mesh.
    illuminant_XYZ = D65_WHITE
    illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
    chromatic_adaptation_transform = 'CAT02'
    large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
    rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
    large_xyz = xy_to_XYZ(xy)
    rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
                     large_xyz_to_rgb_matrix,
                     chromatic_adaptation_transform)

    # Per-pixel normalize & maximize so the video level isn't too low.
    rgb_org = normalise_maximum(rgb, axis=-1)

    # Apply the mask.
    mask_rgb = np.dstack((mask, mask, mask))
    rgb = rgb_org * mask_rgb

    # Back to XYZ once more to obtain Y for the height axis.
    large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
                            rgb_to_large_xyz_matrix,
                            chromatic_adaptation_transform)

    # Prepare the log-scale Y axis (clamp so log10 stays finite).
    large_y = large_xyz2[..., 1] * 1000
    large_y[large_y < 1] = 1.0

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
                    rcount=64, ccount=64, facecolors=rgb_org)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("Y")
    ax.set_zticks([0, 1, 2, 3])
    ax.set_zticklabels([1, 10, 100, 1000])

    # Paste the chromaticity diagram image at z=0, transparent where
    # the rendered image is black.
    cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
    alpha = np.zeros_like(cie1931_rgb[..., 0])
    rgb_sum = np.sum(cie1931_rgb, axis=-1)
    alpha[rgb_sum > 0.00001] = 1
    cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
                             cie1931_rgb[..., 2], alpha))
    zz = np.zeros_like(xy[..., 0])
    ax.plot_surface(xy[..., 0], xy[..., 1], zz,
                    facecolors=cie1931_rgb)

    plt.show()
def log_tick_formatter(val, pos=None):
    """Format a log-axis tick: *val* is the exponent, the label shows
    10**val in scientific notation (pos is the matplotlib formatter
    signature's position argument, unused)."""
    return f"{10 ** val:.0e}"
def get_3d_grid_cube_format(grid_num=4):
    """Return a CUBE-format 3DLUT grid of shape (1, grid_num**3, 3).

    The red axis varies fastest, blue slowest:
    (0,0,0), (1,0,0), (0,1,0), (1,1,0), (0,0,1), ...
    """
    axis = np.linspace(0, 1, grid_num)
    # indexing='ij' gives b[i,j,k]=axis[i], g[i,j,k]=axis[j],
    # r[i,j,k]=axis[k], so flattening makes red the fastest axis.
    b_grid, g_grid, r_grid = np.meshgrid(axis, axis, axis, indexing='ij')
    return np.dstack((r_grid.ravel(), g_grid.ravel(), b_grid.ravel()))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
    """Evaluate a quadratic Bezier curve at parameter value(s) *t*.

    Parameters
    ----------
    t : array_like
        curve parameter(s) in [0, 1].
    p0, p1, p2 : sequence of two floats
        start point, control point and end point as (x, y).
    samples : int
        kept for interface compatibility; not used in the evaluation.

    Returns
    -------
    tuple
        (x, y) coordinates of the curve at *t*.
        (Bug fix: the original computed these values and discarded
        them, always returning None.)
    """
    s = 1 - t
    x = (s ** 2) * p0[0] + 2 * s * t * p1[0] + (t ** 2) * p2[0]
    y = (s ** 2) * p0[1] + 2 * s * t * p1[1] + (t ** 2) * p2[1]
    return x, y
def gen_step_gradation(width=1024, height=128, step_num=17,
                       bit_depth=10, color=(1.0, 1.0, 1.0),
                       direction='h', debug=False):
    """
    Generate a step-gradation pattern.

    With ``step_num = (2 ** bit_depth) + 1`` the pattern advances
    exactly one code value per step (verified at runtime); any other
    step_num produces evenly spaced levels.

    Example
    -------
    ```
    grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
                                step_num=257, bit_depth=8,
                                color=(1.0, 1.0, 1.0), direction='h')
    grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
                                 step_num=1025, bit_depth=10,
                                 color=(1.0, 1.0, 1.0), direction='h')
    ```

    direction is 'h' (horizontal) or 'v' (vertical); color scales the
    three channels; debug shows a preview window.
    """
    max_val = 2 ** bit_depth

    # For a vertical gradient, build with swapped dimensions and use
    # the vstack path below.
    if direction != 'h':
        width, height = height, width

    if max_val + 1 != step_num:
        # Generic case: the last linspace value would be 2**bit_depth
        # (e.g. 1024), which is out of range, so pull it back by one.
        val_list = np.linspace(0, max_val, step_num)
        val_list[-1] -= 1
    else:
        # Exact one-code-value-per-step case: drop the out-of-range
        # final value instead.
        val_list = np.linspace(0, max_val, step_num)[0:-1]
        step_num -= 1  # the caller passed step_num one higher on purpose

        # Sanity check that steps really advance by one code value.
        # (Bug fix: this check used to run unconditionally and raised
        # ValueError for every non-exact configuration, including the
        # default arguments, whose step size is 64.)
        diff = val_list[1:] - val_list[0:-1]
        if not (diff == 1).all():
            raise ValueError("calculated value is invalid.")

    # Build a single line of steps first.
    step_length_list = equal_devision(width, step_num)
    step_bar_list = []
    for step_idx, length in enumerate(step_length_list):
        step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
                for c_idx in range(3)]
        if direction == 'h':
            step = np.dstack(step)
            step_bar_list.append(step)
            step_bar = np.hstack(step_bar_list)
        else:
            step = np.dstack(step).reshape((length, 1, 3))
            step_bar_list.append(step)
            step_bar = np.vstack(step_bar_list)

    # Expand the line to 2D via broadcasting.
    if direction == 'h':
        img = step_bar * np.ones((height, 1, 3))
    else:
        img = step_bar * np.ones((1, height, 3))

    if debug:
        preview_image(img, 'rgb')

    return img
def merge(img_a, img_b, pos=(0, 0)):
    """Overwrite *img_a* with *img_b* in place.

    pos is (horizontal_st, vertical_st): the top-left corner of the
    pasted region inside img_a.
    """
    h_st, v_st = pos
    v_ed = v_st + img_b.shape[0]
    h_ed = h_st + img_b.shape[1]
    img_a[v_st:v_ed, h_st:h_ed] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
    """Alpha-blend *fg_img* onto *bg_img* in linear light, in place.

    Parameters
    ----------
    bg_img : array_like(float, 3-channel)
        background image data.
    fg_img : array_like(float, 4-channel)
        foreground image data with an alpha channel.
    tf_str : strings
        transfer function used to linearize/delinearize.
    pos : list(int)
        (pos_h, pos_v) top-left corner of the blend region.
    """
    h_st, v_st = pos
    fg_h = fg_img.shape[0]
    fg_w = fg_img.shape[1]
    region = bg_img[v_st:fg_h + v_st, h_st:fg_w + h_st]

    # Blend in linear luminance, alpha normalized by the peak.
    bg_linear = tf.eotf_to_luminance(region, tf_str)
    fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
    alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
    blended = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]

    bg_img[v_st:fg_h + v_st, h_st:fg_w + h_st] = \
        tf.oetf_from_luminance(blended, tf_str)
    return bg_img
def dot_pattern(dot_size=4, repeat=4, color=None):
    """
    Make a checkered dot pattern.

    Parameters
    ----------
    dot_size : integer
        dot size in pixels.
    repeat : integer
        The number of high-low pairs.
    color : array_like or None
        color value of the lit dots; defaults to white.

    Returns
    -------
    array_like
        dot pattern image of shape
        (dot_size*2*repeat, dot_size*2*repeat, 3).
    """
    # Bug fix: color used to be a mutable np.ndarray default argument
    # (shared across calls); use a None sentinel instead.
    if color is None:
        color = np.array([1.0, 1.0, 1.0])

    # total width/height of the square pattern
    pixel_num = dot_size * 2 * repeat

    # boolean mask: True on the first half of each (dot_size*2) period
    even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
    even_logic = np.dstack((even_logic, even_logic, even_logic))
    odd_logic = np.logical_not(even_logic)

    # colorize (asarray also accepts plain sequences, a small
    # backward-compatible generalization)
    color = np.asarray(color).reshape((1, 1, 3))
    even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
    odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color

    # replicate vertically and stack even/odd row blocks
    even_block = np.repeat(even_line, dot_size, axis=0)
    odd_block = np.repeat(odd_line, dot_size, axis=0)
    pair_block = np.vstack((even_block, odd_block))
    img = np.vstack([pair_block for x in range(repeat)])

    return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
                        fg_color=None, bg_color=None):
    """
    Make a composite dot pattern mixing several dot sizes.

    Parameters
    ----------
    kind_num : integer
        number of dot sizes; e.g. kind_num=3 produces 1, 2 and 4
        pixel dot blocks.
    whole_repeat : integer
        how many times the whole set of dot sizes is repeated.
    fg_color : array_like or None
        foreground color value; defaults to white.
    bg_color : array_like or None
        background color value; defaults to 0.15 gray.

    Returns
    -------
    array_like
        dot pattern image.
    """
    # Bug fix: fg_color/bg_color used to be mutable np.ndarray default
    # arguments (shared across calls); use None sentinels instead.
    if fg_color is None:
        fg_color = np.array([1.0, 1.0, 1.0])
    if bg_color is None:
        bg_color = np.array([0.15, 0.15, 0.15])

    max_dot_width = 2 ** kind_num
    img_list = []
    # largest dots first, each block followed by a background spacer
    for size_idx in reversed(range(kind_num)):
        dot_size = 2 ** size_idx
        repeat = max_dot_width // dot_size
        dot_img = dot_pattern(dot_size, repeat, fg_color)
        img_list.append(dot_img)
        img_list.append(np.ones_like(dot_img) * bg_color)

    line_upper_img = np.hstack(img_list)
    line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
    # lower line is the upper one mirrored horizontally
    line_lower_img = line_upper_img.copy()[:, ::-1, :]
    h_unit_img = np.vstack((line_upper_img, line_lower_img))

    img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
    return img
def make_csf_color_image(width=640, height=640, lv1=None, lv2=None,
                         stripe_num=6):
    """
    Build a CSF-like pattern by stacking progressively smaller
    rectangles of alternating level, shifted toward the center.
    Levels are 10-bit code values.

    Parameters
    ----------
    width : numeric.
        width of the pattern image.
    height : numeric.
        height of the pattern image.
    lv1 : ndarray or None
        first 10-bit level; defaults to (940, 940, 940).
    lv2 : ndarray or None
        second 10-bit level; defaults to (1023, 1023, 1023).
    stripe_num : numeric
        number of the stripes.

    Returns
    -------
    array_like
        a csf pattern image (uint16).
    """
    # Bug fix: lv1/lv2 used to be mutable np.ndarray default arguments
    # (shared across calls); use None sentinels instead.
    if lv1 is None:
        lv1 = np.array([940, 940, 940], dtype=np.uint16)
    if lv2 is None:
        lv2 = np.array([1023, 1023, 1023], dtype=np.uint16)

    # per-stripe sizes and per-step offsets toward the image center
    width_list = equal_devision(width, stripe_num)
    height_list = equal_devision(height, stripe_num)
    h_pos_list = equal_devision(width // 2, stripe_num)
    v_pos_list = equal_devision(height // 2, stripe_num)

    img = np.zeros((height, width, 3), dtype=np.uint16)
    width_temp = width
    height_temp = height
    h_pos_temp = 0
    v_pos_temp = 0
    for idx in range(stripe_num):
        # alternate between the two levels
        lv = lv1 if (idx % 2) == 0 else lv2
        temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
        temp_img = temp_img * lv.reshape((1, 1, 3))
        ed_pos_h = h_pos_temp + width_temp
        ed_pos_v = v_pos_temp + height_temp
        img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
        # shrink the rectangle and move its origin toward the center
        width_temp -= width_list[stripe_num - 1 - idx]
        height_temp -= height_list[stripe_num - 1 - idx]
        h_pos_temp += h_pos_list[idx]
        v_pos_temp += v_pos_list[idx]

    return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
                      v_tile_num=4, low_level=(940, 940, 940),
                      high_level=(1023, 1023, 1023)):
    """Make a checkerboard tile pattern (uint16).

    The top-left tile gets high_level; tiles alternate from there.
    """
    tile_widths = equal_devision(width, h_tile_num)
    tile_heights = equal_devision(height, v_tile_num)
    high = np.array(high_level, dtype=np.uint16)
    low = np.array(low_level, dtype=np.uint16)
    rows = []
    for v_idx, tile_h in enumerate(tile_heights):
        cells = []
        for h_idx, tile_w in enumerate(tile_widths):
            # even (row+col) parity -> high level
            use_high = (h_idx + v_idx) % 2 == 0
            cell = np.zeros((tile_h, tile_w, 3), dtype=np.uint16)
            cell[:, :] = high if use_high else low
            cells.append(cell)
        rows.append(np.hstack(cells))
    return np.vstack(rows)
def get_marker_idx(img, marker_value):
    """Return a boolean mask that is True where every channel of *img* equals *marker_value*."""
    return (img == marker_value).all(axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
    """
    Make a test pattern for spotting YCbCr coefficient errors.

    Two tile patterns (a cyan-ish one and a magenta-ish one) are placed
    side by side.

    Parameters
    ----------
    height : numeric.
        height of the pattern image.
    v_tile_num : numeric
        number of the tile in the vertical direction.

    Note
    ----
    The output is landscape; the following holds:
    ``h_tile_num = v_tile_num * 2`` and ``width = height * 2``.

    Returns
    -------
    array_like
        ycbcr checker image
    """
    # both halves share the same geometry, only the colors differ
    tile_kwargs = dict(width=height, height=height,
                       h_tile_num=v_tile_num, v_tile_num=v_tile_num)
    cyan_img = make_tile_pattern(
        low_level=[0, 990, 990], high_level=[0, 1023, 1023], **tile_kwargs)
    magenta_img = make_tile_pattern(
        low_level=[990, 0, 312], high_level=[1023, 0, 312], **tile_kwargs)
    return np.hstack([cyan_img, magenta_img])
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
                             block_size=1/4.5, padding=0.01):
    """
    Plot a 6x4 ColorChecker image, centered on the canvas.

    Parameters
    ----------
    rgb : array_like
        RGB value of the ColorChecker.
        RGB's shape must be (24, 3).
    rgb2 : array_like
        It's a optional parameter.
        If You want to draw two different ColorCheckers,
        set the RGB value to this variable.
        The second value is drawn as the lower-right triangle of
        each patch.
    size : tuple
        canvas size.
    block_size : float
        A each block's size.
        This value is ratio to height of the canvas.
    padding : float
        A padding to the block.
        This value is ratio to height of the canvas.

    Returns
    -------
    array_like
        A ColorChecker image.
    """
    img_width, img_height = size
    h_num = 6
    v_num = 4
    # BUGFIX: ``padding`` used to be ignored — a hard-coded
    # COLOR_CHECKER_PADDING = 0.01 was always applied instead.
    # (The duplicate H_NUM/V_NUM constant definitions were removed too.)

    # start position so that the 6x4 grid is centered
    patch_st_h = int(img_width / 2.0
                     - (img_height * block_size * h_num / 2.0
                        + (img_height * padding
                           * (h_num / 2.0 - 0.5)) / 2.0))
    patch_st_v = int(img_height / 2.0
                     - (img_height * block_size * v_num / 2.0
                        + (img_height * padding
                           * (v_num / 2.0 - 0.5)) / 2.0))
    patch_width = int(img_height * block_size)
    patch_height = patch_width
    patch_space = int(img_height * padding)

    # draw the 24 patches onto one canvas
    img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
    for idx in range(h_num * v_num):
        v_idx = idx // h_num
        h_idx = idx % h_num
        patch = np.ones((patch_height, patch_width, 3))
        patch[:, :] = rgb[idx]
        st_h = patch_st_h + (patch_width + patch_space) * h_idx
        st_v = patch_st_v + (patch_height + patch_space) * v_idx
        img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch

        # overwrite the lower-right triangle with the secondary color
        pt2 = (st_h + patch_width, st_v)                  # upper right
        pt3 = (st_h, st_v + patch_height)                 # lower left
        pt4 = (st_h + patch_width, st_v + patch_height)   # lower right
        pts = np.array((pt2, pt3, pt4))
        sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
        cv2.fillPoly(img_all_patch, [pts], sub_color)
    preview_image(img_all_patch)

    return img_all_patch
def get_log10_x_scale(
        sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
    """
    Make x-axis data sampled evenly on a log10 scale.

    Examples
    --------
    >>> get_log10_x_scale(
    ...     sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
    array([ 1.0000e-01  1.0000e+00  1.0000e+01  1.0000e+02
            1.0000e+03  1.0000e+04  1.0000e+05  1.0000e+06])
    """
    exponents = np.linspace(
        np.log10(ref_val * (10 ** min_exposure)),
        np.log10(ref_val * (10 ** max_exposure)),
        sample_num)
    return np.power(10.0, exponents)
def get_log2_x_scale(
        sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
    """
    Make x-axis data sampled evenly on a log2 scale.

    Examples
    --------
    >>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
    array([[  0.0625       0.11573434   0.214311     0.39685026   0.73486725
              1.36079      2.5198421    4.66611616   8.64047791  16.        ]])
    """
    exponents = np.linspace(
        np.log2(ref_val * (2 ** min_exposure)),
        np.log2(ref_val * (2 ** max_exposure)),
        sample_num)
    return np.power(2.0, exponents)
def shaper_func_linear_to_log2(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
    """
    Based on ACESutil.Lin_to_Log2_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl

    Parameters
    ----------
    x : scalar or array_like
        linear data.
    mid_gray : float
        18% gray value on linear scale.
    min_exposure : float
        minimum value on log scale.
    max_exposure : float
        maximum value on log scale.

    Returns
    -------
    scalar or array_like
        log2 value that is transformed from linear x value.

    Examples
    --------
    >>> shaper_func_linear_to_log2(
    ...     x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    0.5
    >>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
    ...     mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    array([  1.58232402e-13   1.00000000e+00])
    """
    # transform to log2 space, shifted so that mid_gray maps to 0.0
    y = np.log2(x / mid_gray)
    # normalize with the [min_exposure, max_exposure] range
    y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
    # BUGFIX: the previous boolean-mask assignment
    # ``y_normalized[y_normalized < 0] = 0`` raised TypeError for scalar
    # input (the first docstring example crashed). np.maximum clamps
    # scalars and arrays alike, with identical results for arrays.
    return np.maximum(y_normalized, 0)
def shaper_func_log2_to_linear(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
    """
    Based on ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    Inverse of shaper_func_linear_to_log2(); see it for details of
    the log2 space.

    Examples
    --------
    >>> x = np.array([0.0, 1.0])
    >>> shaper_func_log2_to_linear(
    ...     x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
    array([0.00198873782209, 16.2917402385])
    """
    # undo the [0, 1] normalization back to the exposure range
    exposure = min_exposure + x * (max_exposure - min_exposure)
    return mid_gray * (2.0 ** exposure)
def draw_straight_line(img, pt1, pt2, color, thickness):
    """
    Draw an axis-aligned straight line.

    Self-made because OpenCV lines appear to support only 8bit images.

    Parameters
    ----------
    img : array_like
        image data.
    pt1 : list(pos_h, pos_v)
        start point.
    pt2 : list(pos_h, pos_v)
        end point.
    color : array_like
        color
    thickness : int
        thickness.

    Returns
    -------
    array_like
        image data with line.

    Notes
    -----
    ``thickness`` grows toward the lower-right of ``pt1``; the line is
    NOT centered on ``pt1``.

    Examples
    --------
    >>> pt1 = (0, 0)
    >>> pt2 = (1920, 0)
    >>> color = (940, 940, 940)
    >>> thickness = 4
    >>> draw_straight_line(img, pt1, pt2, color, thickness)
    """
    # only horizontal or vertical lines are supported
    if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
        raise ValueError("invalid pt1, pt2 parameters")

    if pt1[0] == pt2[0]:
        # vertical line: thickness extends to the right of pt1
        for offset in range(thickness):
            img[pt1[1]:pt2[1], pt1[0] + offset, :] = color
    else:
        # horizontal line: thickness extends below pt1
        for offset in range(thickness):
            img[pt1[1] + offset, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
    """
    Draw an outline along the four borders of *img* (in place).

    Parameters
    ----------
    img : array_like
        image data.
    fg_color : array_like
        color
    outline_width : int
        thickness.

    Returns
    -------
    array_like
        image data with line.

    Examples
    --------
    >>> img = np.zeros((1080, 1920, 3))
    >>> color = (940, 940, 940)
    >>> thickness = 2
    >>> draw_outline(img, color, thickness)
    """
    width = img.shape[1]
    height = img.shape[0]
    # top, left, right and bottom edges
    edges = [
        ((0, 0), (width, 0)),
        ((0, 0), (0, height)),
        ((width - outline_width, 0), (width - outline_width, height)),
        ((0, height - outline_width), (width, height - outline_width)),
    ]
    for pt1, pt2 in edges:
        draw_straight_line(img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
    """
    Convert a luminance in [cd/m2] to an achromatic RGB code value.

    Examples
    --------
    >>> convert_luminance_to_color_value(100, tf.GAMMA24)
    >>> [ 1.0  1.0  1.0 ]
    >>> convert_luminance_to_color_value(100, tf.ST2084)
    >>> [ 0.50807842  0.50807842  0.50807842 ]
    """
    value = convert_luminance_to_code_value(luminance, transfer_function)
    # achromatic: the same code value on all three channels
    return np.array([value] * 3)
def convert_luminance_to_code_value(luminance, transfer_function):
    """
    Convert a luminance in [cd/m2] to a non-linear code value using the
    OETF named by *transfer_function*.
    """
    code_value = tf.oetf_from_luminance(luminance, transfer_function)
    return code_value
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
    """
    Build the index-conversion table used to reorder the radial CIELAB
    a*b*-plane color-patch data (gray at the center) so it can be laid
    out on an ``outmost_num`` x ``outmost_num`` grid.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The raw patch data is generated in angular order; this function maps
    that order onto the ring of the grid occupied by ``current_num``.
    """
    base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
    offset = (outmost_num - current_num) // 2
    ring = base[offset:offset + current_num, offset:offset + current_num]
    half = current_num // 2
    # walk the ring: right edge upward, top edge leftward,
    # left edge downward, bottom edge rightward, back up the right edge
    order = [(current_num ** 2) // 2 + half - current_num * i
             for i in range(half)]
    order.extend(range(current_num)[::-1])
    order.extend(i * current_num for i in range(1, current_num - 1))
    order.extend((current_num ** 2) - current_num + i
                 for i in range(current_num))
    order.extend((current_num ** 2) - 1 - i * current_num
                 for i in range(1, half))
    return ring.flatten()[order]
def _calc_rgb_from_same_lstar_radial_data(
        lstar, temp_chroma, current_num, color_space):
    """
    Get the (unordered) RGB values of color patches sampled on a circle
    of radius ``temp_chroma`` in the CIELAB a*b* plane at constant L*.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The result still needs reordering with the table from
    calc_rad_patch_idx2().
    """
    patch_num = (current_num - 1) * 4 if current_num > 1 else 1
    hue = np.linspace(0, 2 * np.pi, patch_num, endpoint=False)
    # constant L*, a*/b* on a circle of radius temp_chroma
    lab = np.dstack((np.ones(patch_num) * lstar,
                     np.cos(hue) * temp_chroma,
                     np.sin(hue) * temp_chroma))
    rgb = XYZ_to_RGB(Lab_to_XYZ(lab), D65_WHITE, D65_WHITE,
                     color_space.XYZ_to_RGB_matrix)
    return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
        lstar=58, chroma=32.5, outmost_num=9,
        color_space=BT709_COLOURSPACE,
        transfer_function=tf.GAMMA24):
    """
    Get the RGB values of color patches on the CIELAB a*b* plane
    at constant L*, gray at the center.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned list is already **reordered** so that the first entry is
    the greenish patch at the upper-left of the image and the last entry
    is the purplish patch at the lower-right; patches can be plotted from
    the head of the list toward the lower-right as-is.

    Returns
    -------
    array_like
        (outmost_num ** 2, 3) RGB values with the OETF applied.
    """
    patch_num = outmost_num ** 2
    # BUGFIX: ``transfer_function`` used to be unconditionally
    # overwritten with tf.GAMMA24 here, silently ignoring the
    # caller's argument.
    rgb_list = np.ones((patch_num, 3))
    # concentric rings: 1, 3, 5, ... patches per side, chroma grows outward
    current_num_list = range(1, outmost_num + 1, 2)
    chroma_list = np.linspace(0, chroma, len(current_num_list))
    for temp_chroma, current_num in zip(chroma_list, current_num_list):
        current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
        rgb = _calc_rgb_from_same_lstar_radial_data(
            lstar, temp_chroma, current_num, color_space)
        rgb = np.reshape(rgb, (current_patch_num, 3))
        rgb = tf.oetf(rgb, transfer_function)
        conv_idx = calc_rad_patch_idx2(
            outmost_num=outmost_num, current_num=current_num)
        # scatter this ring's patches into their grid positions
        for idx in range(current_patch_num):
            rgb_list[conv_idx[idx]] = rgb[idx]
    return rgb_list
def _plot_same_lstar_radial_color_patch_data(
        lstar=58, chroma=32.5, outmost_num=9,
        color_space=BT709_COLOURSPACE,
        transfer_function=tf.GAMMA24):
    """Render the radial color-patch data to "hoge2.tiff" for inspection."""
    patch_size = 1080 // outmost_num
    canvas = np.zeros((1080, 1080, 3))
    rgb = calc_same_lstar_radial_color_patch_data(
        lstar=lstar, chroma=chroma, outmost_num=outmost_num,
        color_space=color_space, transfer_function=transfer_function)
    for idx in range(outmost_num ** 2):
        # grid position of this patch
        pos = ((idx % outmost_num) * patch_size,
               (idx // outmost_num) * patch_size)
        patch = np.ones((patch_size, patch_size, 3)) * rgb[idx]
        merge(canvas, patch, pos)
    # BGR order and 16bit scaling for OpenCV
    cv2.imwrite("hoge2.tiff",
                np.uint16(np.round(canvas[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
    """
    Make an x whose rate of change accelerates then decelerates
    (0 -> 1 -> 0) instead of growing monotonically.

    Parameters
    ----------
    sample_num : int
        the number of the sample.

    Returns
    -------
    array_like
        accelerated value list

    Examples
    --------
    >>> x0 = np.linspace(0, 1, 8)
    >>> x1 = get_accelerated_x_1x(8)
    >>> print(x0)
    >>> [ 0.     0.142  0.285  0.428  0.571  0.714  0.857  1.   ]
    >>> print(x1)
    >>> [ 0.     0.049  0.188  0.388  0.611  0.811  0.950  1.   ]
    """
    # a half sine period mapped from [-1, 1] to [0, 1]
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    return 0.5 * (np.sin(rad) + 1)
def get_accelerated_x_2x(sample_num=64):
    """
    Make an x whose rate of change accelerates then decelerates
    (0 -> 1 -> 0). Twice the acceleration of `get_accelerated_x_1x`!!

    Parameters
    ----------
    sample_num : int
        the number of the sample.

    Returns
    -------
    array_like
        accelerated value list

    Examples
    --------
    >>> x0 = np.linspace(0, 1, 8)
    >>> x2 = get_accelerated_x_2x(8)
    >>> print(x0)
    >>> [ 0.     0.142  0.285  0.428  0.571  0.714  0.857  1.   ]
    >>> print(x2)
    >>> [ 0.     0.006  0.084  0.328  0.671  0.915  0.993  1.   ]
    """
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    # apply the sine warp once more for the extra acceleration
    rad = 0.5 * np.pi * np.sin(rad)
    return 0.5 * (np.sin(rad) + 1)
def get_accelerated_x_4x(sample_num=64):
    """
    Make an x whose rate of change accelerates then decelerates
    (0 -> 1 -> 0). Four times the acceleration of
    `get_accelerated_x_1x`!!

    Parameters
    ----------
    sample_num : int
        the number of the sample.

    Returns
    -------
    array_like
        accelerated value list
    """
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    # apply the sine warp twice more for the extra acceleration
    for _ in range(2):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2
def get_accelerated_x_8x(sample_num=64):
    """
    Make an x whose rate of change accelerates then decelerates
    (0 -> 1 -> 0). Eight times the acceleration of
    `get_accelerated_x_1x`!!
    (The previous docstring incorrectly said "4x".)

    Parameters
    ----------
    sample_num : int
        the number of the sample.

    Returns
    -------
    array_like
        accelerated value list
    """
    rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    # apply the sine warp three more times for the extra acceleration
    for _ in range(3):
        rad = np.sin(rad) * 0.5 * np.pi
    return (np.sin(rad) + 1) / 2
def generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
    """
    Generate the 24 RGB values of the color checker.

    Parameters
    ----------
    color_space : color space
        color space object in `colour` module.
    target_white : array_like
        the xy values of the white point of target color space.

    Returns
    -------
    array_like
        24 RGB values. This is linear. OETF is not applied.

    Examples
    --------
    >>> generate_color_checker_rgb_value(
    ...     color_space=colour.models.BT709_COLOURSPACE,
    ...     target_white=[0.3127, 0.3290])
    >>> [[ 0.17289286  0.08205728  0.05714562]
    >>>  ...
    >>>  [ 0.03038879  0.03118623  0.03279615]]
    """
    colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # only the xyY data and the whitepoint are needed here
    _name, data, whitepoint = colour_checker_param
    temp_xyY = np.array(list(data.values()))
    large_xyz = xyY_to_XYZ(temp_xyY)
    rgb = XYZ_to_RGB(
        large_xyz,
        whitepoint,   # white point of the original ColorChecker data
        D65_WHITE,    # white point after the XYZ -> RGB conversion
        color_space.XYZ_to_RGB_matrix,
        'CAT02')
    return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
    """
    Make a 6x4 color-checker image.

    The height is derived automatically from *width*;
    *padding_rate* slightly changes the result.
    """
    h_patch_num = 6
    v_patch_num = 4

    # derived layout parameters
    each_padding = int(width * padding_rate + 0.5)
    h_padding_total = each_padding * (h_patch_num + 1)
    h_patch_width_total = width - h_padding_total
    patch_height = h_patch_width_total // h_patch_num
    height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
    patch_width_list = equal_devision(h_patch_width_total, h_patch_num)

    # lay out the patches row by row
    img = np.zeros((height, width, 3))
    for v_idx in range(v_patch_num):
        h_pos = each_padding
        v_pos = each_padding + v_idx * (patch_height + each_padding)
        for h_idx in range(h_patch_num):
            patch = np.ones((patch_height, patch_width_list[h_idx], 3)) \
                * rgb[v_idx * h_patch_num + h_idx]
            merge(img, patch, (h_pos, v_pos))
            h_pos += patch_width_list[h_idx] + each_padding
    return img
def calc_st_pos_for_centering(bg_size, fg_size):
    """
    Calculate start postion for centering.

    Parameters
    ----------
    bg_size : touple(int)
        (width, height) of the background image.
    fg_size : touple(int)
        (width, height) of the foreground image.

    Returns
    -------
    touple (int)
        (st_pos_h, st_pos_v)

    Examples
    --------
    >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
    >>> (640, 300)
    """
    bg_width, bg_height = bg_size
    fg_width, fg_height = fg_size
    return (bg_width // 2 - fg_width // 2,
            bg_height // 2 - fg_height // 2)
def get_size_from_image(img):
    """
    Return (width, height) of *img* — convenience helper for the
    arguments of calc_st_pos_for_centering().
    """
    height, width = img.shape[0], img.shape[1]
    return (width, height)
if __name__ == '__main__':
    # Run from the directory this file lives in so that relative output
    # paths (e.g. "hoge2.tiff") land next to the script.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # The commented-out calls below are ad-hoc experiments kept for reference.
    # print(calc_rad_patch_idx(outmost_num=9, current_num=1))
    # _plot_same_lstar_radial_color_patch_data(
    #     lstar=58, chroma=32.5, outmost_num=7,
    #     color_space=BT709_COLOURSPACE,
    #     transfer_function=tf.GAMMA24)
    # calc_rad_patch_idx2(outmost_num=9, current_num=7)
    # print(convert_luminance_to_color_value(100, tf.ST2084))
    # print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
    print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
| 29.806202 | 105 | 0.573112 |
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
base = length / div_num
ret_array = [base for x in range(div_num)]
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
def do_matrix(img, mtx):
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
def _get_cmfs_xy():
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
def get_white_point(name):
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
color_space = models.ACES_CG_COLOURSPACE
cmf_xy = _get_cmfs_xy()
triangulation = Delaunay(cmf_xy)
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
mask = 1 - mask[:, :, np.newaxis]
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
rgb[rgb == 0] = 1.0
rgb = normalise_maximum(rgb, axis=-1)
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
mask = 1 - mask[:, :, np.newaxis]
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
rgb_org = normalise_maximum(rgb, axis=-1)
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
max = 2 ** bit_depth
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
正確に1階調ずつ変化するパターン。
末尾のデータが 256 や 1024 になるため除外する。
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
if debug:
preview_image(img, 'rgb')
return img
def merge(img_a, img_b, pos=(0, 0)):
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
pixel_num = dot_size * 2 * repeat
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
return img
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
                             block_size=1/4.5, padding=0.01):
    """Render a 6x4 color-checker layout centered on a black canvas.

    Each patch is filled with ``rgb[idx]``; the lower-right triangle of
    every patch is then overdrawn with ``rgb2[idx]`` when *rgb2* is
    given (with the same color otherwise, leaving the patch uniform).

    Args:
        rgb: sequence of 24 RGB triplets (8-bit range ndarrays).
        rgb2: optional second set of 24 RGB triplets for the triangles.
        size: (width, height) of the output image in pixels.
        block_size: patch edge length as a fraction of the image height.
        padding: unused here; the grid gap is the fixed constant below.

    Returns:
        uint8 ndarray of shape (height, width, 3).
    """
    IMG_HEIGHT = size[1]
    IMG_WIDTH = size[0]
    COLOR_CHECKER_SIZE = block_size
    COLOR_CHECKER_PADDING = 0.01
    # NOTE: the original defined H_NUM/V_NUM twice; once is enough.
    COLOR_CHECKER_H_NUM = 6
    COLOR_CHECKER_V_NUM = 4
    img_height = IMG_HEIGHT
    img_width = IMG_WIDTH
    # Top-left corner of the patch grid, chosen so the grid is centered.
    patch_st_h = int(IMG_WIDTH / 2.0
                     - (IMG_HEIGHT * COLOR_CHECKER_SIZE
                        * COLOR_CHECKER_H_NUM / 2.0
                        + (IMG_HEIGHT * COLOR_CHECKER_PADDING
                           * (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
    patch_st_v = int(IMG_HEIGHT / 2.0
                     - (IMG_HEIGHT * COLOR_CHECKER_SIZE
                        * COLOR_CHECKER_V_NUM / 2.0
                        + (IMG_HEIGHT * COLOR_CHECKER_PADDING
                           * (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
    patch_width = int(img_height * COLOR_CHECKER_SIZE)
    patch_height = patch_width
    patch_space = int(img_height * COLOR_CHECKER_PADDING)
    img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
    for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
        v_idx = idx // COLOR_CHECKER_H_NUM
        h_idx = (idx % COLOR_CHECKER_H_NUM)
        patch = np.ones((patch_height, patch_width, 3))
        patch[:, :] = rgb[idx]
        st_h = patch_st_h + (patch_width + patch_space) * h_idx
        st_v = patch_st_v + (patch_height + patch_space) * v_idx
        img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
        # BUGFIX: the assignment target ``pt2`` was missing below, which
        # made this statement a syntax error and left ``pts`` undefined.
        pt2 = (st_h + patch_width, st_v)
        pt3 = (st_h, st_v + patch_height)
        pt4 = (st_h + patch_width, st_v + patch_height)
        pts = np.array((pt2, pt3, pt4))
        sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
        cv2.fillPoly(img_all_patch, [pts], sub_color)
    preview_image(img_all_patch)
    return img_all_patch
def get_log10_x_scale(
        sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
    """Return *sample_num* values spaced evenly in log10 between
    ``ref_val * 10**min_exposure`` and ``ref_val * 10**max_exposure``."""
    low = np.log10(ref_val * (10 ** min_exposure))
    high = np.log10(ref_val * (10 ** max_exposure))
    exponents = np.linspace(low, high, sample_num)
    return np.power(10.0, exponents)
def get_log2_x_scale(
        sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
    """Return *sample_num* values spaced evenly in log2 between
    ``ref_val * 2**min_exposure`` and ``ref_val * 2**max_exposure``."""
    low = np.log2(ref_val * (2 ** min_exposure))
    high = np.log2(ref_val * (2 ** max_exposure))
    exponents = np.linspace(low, high, sample_num)
    return np.power(2.0, exponents)
def shaper_func_linear_to_log2(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
    """Map linear values to a normalized log2 shaper space.

    ``mid_gray`` maps to the midpoint of the exposure range; values
    *min_exposure* stops below mid gray map to 0 (negative results are
    clamped) while values above the top of the range are NOT clamped.
    *x* must be an ndarray (in-place masking is used for the clamp).
    """
    stops = np.log2(x / mid_gray)
    normalized = (stops - min_exposure) / (max_exposure - min_exposure)
    normalized[normalized < 0] = 0
    return normalized
def shaper_func_log2_to_linear(
        x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
    """Inverse of ``shaper_func_linear_to_log2`` (without its clamp):
    map normalized shaper values back to linear light."""
    exposure = min_exposure + x * (max_exposure - min_exposure)
    return mid_gray * (2.0 ** exposure)
def draw_straight_line(img, pt1, pt2, color, thickness):
    """Draw an axis-aligned line segment on *img* in place.

    *pt1*/*pt2* are (x, y) with the end point exclusive. Vertical lines
    thicken to the right; horizontal lines thicken downward. Raises
    ValueError if the segment is not axis-aligned.
    """
    x1, y1 = pt1[0], pt1[1]
    x2, y2 = pt2[0], pt2[1]
    if x1 != x2 and y1 != y2:
        raise ValueError("invalid pt1, pt2 parameters")
    if x1 == x2:
        # Vertical segment: paint one column per unit of thickness.
        for offset in range(thickness):
            img[y1:y2, x1 + offset, :] = color
    else:
        # Horizontal segment: paint one row per unit of thickness.
        for offset in range(thickness):
            img[y1 + offset, x1:x2, :] = color
def draw_outline(img, fg_color, outline_width):
    """Draw a frame of ``outline_width`` pixels along all four edges of
    *img* in place, using ``draw_straight_line`` for each edge."""
    width = img.shape[1]
    height = img.shape[0]
    edges = [
        ((0, 0), (width, 0)),                                            # top
        ((0, 0), (0, height)),                                           # left
        ((width - outline_width, 0), (width - outline_width, height)),   # right
        ((0, height - outline_width), (width, height - outline_width)),  # bottom
    ]
    for start, end in edges:
        draw_straight_line(img, start, end, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
    """Encode *luminance* with *transfer_function* and replicate the
    resulting code value into an RGB triplet."""
    cv = convert_luminance_to_code_value(luminance, transfer_function)
    return np.array([cv, cv, cv])
def convert_luminance_to_code_value(luminance, transfer_function):
    """Encode absolute *luminance* into a non-linear code value.

    Thin wrapper delegating to the ``tf`` transfer-function helper.
    """
    return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
    """Return flat indices (into an ``outmost_num`` x ``outmost_num``
    grid) of the cells on the perimeter of the centered
    ``current_num`` x ``current_num`` sub-square.

    The perimeter is ordered counter-clockwise, starting at the middle
    of the right edge. With ``current_num == 1`` this degenerates to
    the single center cell.
    """
    # Flat index map of the full outmost grid.
    base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
    # Trim out the centered current_num x current_num window.
    t_idx = (outmost_num - current_num) // 2
    trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
    half_num = current_num // 2
    conv_idx = []
    # Right edge: from the middle row upward (local indices in trimmed).
    for idx in range(half_num):
        val = (current_num ** 2) // 2 + half_num - current_num * idx
        conv_idx.append(val)
    # Top row, right to left.
    for idx in range(current_num)[::-1]:
        conv_idx.append(idx)
    # Left column, top to bottom (corners already covered).
    for idx in range(1, current_num - 1):
        conv_idx.append(idx * current_num)
    # Bottom row, left to right.
    for idx in range(current_num):
        val = (current_num ** 2) - current_num + idx
        conv_idx.append(val)
    # Right edge again: from just above the bottom-right corner back up
    # toward the starting middle row.
    for idx in range(1, half_num):
        val = (current_num ** 2) - 1 - idx * current_num
        conv_idx.append(val)
    # Translate the local ring order into flat outmost-grid indices.
    conv_idx = trimmed.flatten()[conv_idx]
    return conv_idx
def _calc_rgb_from_same_lstar_radial_data(
        lstar, temp_chroma, current_num, color_space):
    """Compute the RGB colors for one ring of constant-CIELAB-lightness
    patches.

    A ring of a ``current_num`` x ``current_num`` square has
    ``(current_num - 1) * 4`` cells (1 for the degenerate center). All
    cells share lightness *lstar* and chroma *temp_chroma*; hue angles
    are spread evenly around the a*/b* plane.
    """
    current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
    # Evenly spaced hue angles in radians (endpoint excluded).
    rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
    ll = np.ones((current_patch_num)) * lstar
    aa = np.cos(rad) * temp_chroma
    bb = np.sin(rad) * temp_chroma
    lab = np.dstack((ll, aa, bb))
    large_xyz = Lab_to_XYZ(lab)
    # D65 as both source and destination white: no chromatic adaptation.
    rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
                     color_space.XYZ_to_RGB_matrix)
    # Clamp out-of-gamut values into [0, 1].
    return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
        lstar=58, chroma=32.5, outmost_num=9,
        color_space=BT709_COLOURSPACE,
        transfer_function=tf.GAMMA24):
    """Compute per-patch RGB values for a radial constant-L* chart.

    The chart is an ``outmost_num`` x ``outmost_num`` grid whose
    concentric square rings share CIELAB lightness *lstar* while chroma
    grows linearly from 0 at the center to *chroma* at the outermost
    ring. Each ring's linear RGB values are encoded with
    *transfer_function* before being scattered into the flat patch list
    via ``calc_rad_patch_idx2``.

    Returns:
        ndarray of shape (outmost_num ** 2, 3) with encoded RGB values.
    """
    patch_num = outmost_num ** 2
    # BUGFIX: ``transfer_function`` used to be unconditionally
    # overwritten with tf.GAMMA24 here, silently ignoring the caller's
    # argument. The parameter is now honored (default unchanged).
    rgb_list = np.ones((patch_num, 3))
    current_num_list = range(1, outmost_num + 1, 2)
    chroma_list = np.linspace(0, chroma, len(current_num_list))
    for temp_chroma, current_num in zip(chroma_list, current_num_list):
        current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
        rgb = _calc_rgb_from_same_lstar_radial_data(
            lstar, temp_chroma, current_num, color_space)
        rgb = np.reshape(rgb, (current_patch_num, 3))
        rgb = tf.oetf(rgb, transfer_function)
        conv_idx = calc_rad_patch_idx2(
            outmost_num=outmost_num, current_num=current_num)
        for idx in range(current_patch_num):
            rgb_list[conv_idx[idx]] = rgb[idx]
    return rgb_list
def _plot_same_lstar_radial_color_patch_data(
        lstar=58, chroma=32.5, outmost_num=9,
        color_space=BT709_COLOURSPACE,
        transfer_function=tf.GAMMA24):
    """Render the constant-L* radial patch chart and write it to disk.

    Debug helper: lays out an ``outmost_num`` x ``outmost_num`` grid of
    square patches on a 1080x1080 black canvas and writes it to the
    hard-coded file ``hoge2.tiff`` as 16-bit (BGR for OpenCV).
    """
    patch_size = 1080 // outmost_num
    img = np.ones((1080, 1080, 3)) * 0.0
    rgb = calc_same_lstar_radial_color_patch_data(
        lstar=lstar, chroma=chroma, outmost_num=outmost_num,
        color_space=color_space, transfer_function=transfer_function)
    for idx in range(outmost_num ** 2):
        h_idx = idx % outmost_num
        v_idx = idx // outmost_num
        st_pos = (h_idx * patch_size, v_idx * patch_size)
        temp_img = np.ones((patch_size, patch_size, 3))\
            * rgb[idx][np.newaxis, np.newaxis, :]
        merge(img, temp_img, st_pos)
    # [:, :, ::-1] flips RGB -> BGR for cv2; scale to 16-bit full range.
    cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
    """Ease-in/ease-out sample positions in [0, 1]: half a sine period
    remapped so samples cluster near both endpoints."""
    theta = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    return (np.sin(theta) + 1) / 2
def get_accelerated_x_2x(sample_num=64):
    """Like ``get_accelerated_x_1x`` with the sine remap applied one
    extra time, giving stronger clustering at the endpoints."""
    theta = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(1):
        theta = np.sin(theta) * 0.5 * np.pi
    return (np.sin(theta) + 1) / 2
def get_accelerated_x_4x(sample_num=64):
    """Like ``get_accelerated_x_1x`` with the sine remap applied two
    extra times (stronger easing than the 2x variant)."""
    theta = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(2):
        theta = np.sin(theta) * 0.5 * np.pi
    return (np.sin(theta) + 1) / 2
def get_accelerated_x_8x(sample_num=64):
    """Like ``get_accelerated_x_1x`` with the sine remap applied three
    extra times (the strongest easing of the family)."""
    theta = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
    for _ in range(3):
        theta = np.sin(theta) * 0.5 * np.pi
    return (np.sin(theta) + 1) / 2
def generate_color_checker_rgb_value(
        color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
    """Compute linear RGB values of the 24 ColorChecker (2005) patches.

    Patch xyY data comes from colour-science's ``COLOURCHECKERS``
    dataset; patch order follows the dataset dict's iteration order.
    The XYZ values are adapted (CAT02) from the dataset white point to
    *target_white* and converted with *color_space*'s matrix.
    """
    colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # Unpack: dataset name, per-patch xyY data, dataset white point.
    _name, data, whitepoint = colour_checker_param
    temp_xyY = [data[key] for key in data.keys()]
    temp_xyY = np.array(temp_xyY)
    large_xyz = xyY_to_XYZ(temp_xyY)
    # BUGFIX: ``target_white`` was accepted but ignored; the RGB white
    # point was hard-coded to D65. Behavior is unchanged for the
    # default call (target_white=D65_WHITE).
    rgb_white_point = target_white
    illuminant_XYZ = whitepoint
    illuminant_RGB = rgb_white_point
    chromatic_adaptation_transform = 'CAT02'
    large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
    rgb = XYZ_to_RGB(
        large_xyz, illuminant_XYZ, illuminant_RGB,
        large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
    return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
    """Lay out 24 color patches (6 columns x 4 rows) on a black canvas.

    The padding between patches (and around the border) is
    ``round(width * padding_rate)``; patch widths are distributed with
    ``equal_devision`` so the row exactly fills the remaining width.
    Patch colors are taken row-major from *rgb*.
    """
    cols = 6
    row_count = 4
    pad = int(width * padding_rate + 0.5)
    usable_width = width - pad * (cols + 1)
    patch_h = usable_width // cols
    height = patch_h * row_count + pad * (row_count + 1)
    patch_widths = equal_devision(usable_width, cols)
    canvas = np.zeros((height, width, 3))
    for row in range(row_count):
        x = pad
        y = pad + row * (patch_h + pad)
        for col in range(cols):
            color = rgb[row * cols + col]
            patch = np.ones((patch_h, patch_widths[col], 3)) * color
            merge(canvas, patch, (x, y))
            x += patch_widths[col] + pad
    return canvas
def calc_st_pos_for_centering(bg_size, fg_size):
    """Return the (h, v) top-left position that centers a foreground of
    *fg_size* inside a background of *bg_size* (integer division)."""
    bg_width, bg_height = bg_size[0], bg_size[1]
    fg_width, fg_height = fg_size[0], fg_size[1]
    return (bg_width // 2 - fg_width // 2, bg_height // 2 - fg_height // 2)
def get_size_from_image(img):
    """Return (width, height) of an image array — note the reversed
    order relative to ``img.shape``."""
    height, width = img.shape[0], img.shape[1]
    return (width, height)
if __name__ == '__main__':
    # Run from the script's own directory so relative output paths resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Smoke check: centering 640x480 on 1920x1080 should print (640, 300).
    print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
| true | true |
790175423e938eeaeb79260c59952e39d2b5a8cf | 1,637 | py | Python | recognition/base.py | ReanGD/smart-home | 0d3ebe3213ad275f64490218ca3dbc0128c12339 | [
"Apache-2.0"
] | 1 | 2018-07-31T21:17:37.000Z | 2018-07-31T21:17:37.000Z | recognition/base.py | ReanGD/smart-home | 0d3ebe3213ad275f64490218ca3dbc0128c12339 | [
"Apache-2.0"
] | null | null | null | recognition/base.py | ReanGD/smart-home | 0d3ebe3213ad275f64490218ca3dbc0128c12339 | [
"Apache-2.0"
] | null | null | null | from audio import Stream, AudioSettings
class PhraseRecognizer(object):
    """Base class for speech-to-text (phrase) recognizers."""
    def __init__(self, config, audio_settings: AudioSettings):
        self._config = config
        self._audio_settings = audio_settings
    def get_config(self):
        """Return the recognizer configuration object."""
        return self._config
    def get_audio_settings(self) -> AudioSettings:
        """Return the audio settings this recognizer expects."""
        return self._audio_settings
    async def recognize(self, stream: Stream, recv_callback):
        """Recognize speech from *stream*, reporting via *recv_callback*.

        Abstract: subclasses must override.
        """
        # NotImplementedError (a RuntimeError subclass) is the idiomatic
        # abstract-method signal; callers catching Exception still work.
        raise NotImplementedError('Not implemented "recognize"')
class HotwordRecognizer(object):
    """Base class for wake-word ("hotword") detectors."""
    def __init__(self, config):
        self._config = config
    def get_audio_settings(self) -> AudioSettings:
        """Return the audio settings the detector expects. Abstract."""
        # NotImplementedError instead of bare Exception: idiomatic and
        # backward compatible for callers catching Exception.
        raise NotImplementedError('Not implemented "get_audio_settings"')
    def start(self):
        """Prepare the detector for use; the default is a no-op."""
        pass
    def is_hotword(self, raw_frames) -> bool:
        """Return True if *raw_frames* contains the hotword. Abstract."""
        raise NotImplementedError('Not implemented "is_hotword"')
class VADRecognizer(object):
    """Base class for voice-activity detectors."""
    def __init__(self, config):
        self._config = config
    def get_audio_settings(self) -> AudioSettings:
        """Return the audio settings the detector expects. Abstract."""
        # NotImplementedError instead of bare Exception: idiomatic and
        # backward compatible for callers catching Exception.
        raise NotImplementedError('Not implemented "get_audio_settings"')
    def is_speech(self, raw_frames) -> bool:
        """Return True if *raw_frames* contains speech. Abstract."""
        raise NotImplementedError('Not implemented "is_speech"')
class PhraseRecognizerConfig(object):
    """Factory interface: builds a PhraseRecognizer from this config."""
    def create_phrase_recognizer(self) -> PhraseRecognizer:
        """Create the recognizer described by this config. Abstract."""
        # NotImplementedError: idiomatic abstract-method signal.
        raise NotImplementedError('Not implemented "create_phrase_recognizer"')
class HotwordRecognizerConfig(object):
    """Factory interface: builds a HotwordRecognizer from this config."""
    def create_hotword_recognizer(self) -> HotwordRecognizer:
        """Create the detector described by this config. Abstract."""
        # NotImplementedError: idiomatic abstract-method signal.
        raise NotImplementedError('Not implemented "create_hotword_recognizer"')
class VADRecognizerConfig(object):
    """Factory interface: builds a VADRecognizer from this config."""
    def create_vad_recognizer(self) -> VADRecognizer:
        """Create the detector described by this config. Abstract."""
        # NotImplementedError: idiomatic abstract-method signal.
        raise NotImplementedError('Not implemented "create_vad_recognizer"')
| 28.719298 | 70 | 0.717776 | from audio import Stream, AudioSettings
class PhraseRecognizer(object):
def __init__(self, config, audio_settings: AudioSettings):
self._config = config
self._audio_settings = audio_settings
def get_config(self):
return self._config
def get_audio_settings(self) -> AudioSettings:
return self._audio_settings
async def recognize(self, stream: Stream, recv_callback):
raise Exception('Not implemented "recognize"')
class HotwordRecognizer(object):
def __init__(self, config):
self._config = config
def get_audio_settings(self) -> AudioSettings:
raise Exception('Not implemented "get_audio_settings"')
def start(self):
pass
def is_hotword(self, raw_frames) -> bool:
raise Exception('Not implemented "is_hotword"')
class VADRecognizer(object):
def __init__(self, config):
self._config = config
def get_audio_settings(self) -> AudioSettings:
raise Exception('Not implemented "get_audio_settings"')
def is_speech(self, raw_frames) -> bool:
raise Exception('Not implemented "is_speech"')
class PhraseRecognizerConfig(object):
def create_phrase_recognizer(self) -> PhraseRecognizer:
raise Exception('Not implemented "create_phrase_recognizer"')
class HotwordRecognizerConfig(object):
def create_hotword_recognizer(self) -> HotwordRecognizer:
raise Exception('Not implemented "create_hotword_recognizer"')
class VADRecognizerConfig(object):
def create_vad_recognizer(self) -> VADRecognizer:
raise Exception('Not implemented "create_vad_recognizer"')
| true | true |
79017579a201dcf5f57280058af9232110541daf | 6,839 | py | Python | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetExpressRouteGatewayResult',
'AwaitableGetExpressRouteGatewayResult',
'get_express_route_gateway',
]
@pulumi.output_type
class GetExpressRouteGatewayResult:
"""
ExpressRoute gateway resource.
"""
def __init__(__self__, auto_scale_configuration=None, etag=None, express_route_connections=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None, virtual_hub=None):
if auto_scale_configuration and not isinstance(auto_scale_configuration, dict):
raise TypeError("Expected argument 'auto_scale_configuration' to be a dict")
pulumi.set(__self__, "auto_scale_configuration", auto_scale_configuration)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if express_route_connections and not isinstance(express_route_connections, list):
raise TypeError("Expected argument 'express_route_connections' to be a list")
pulumi.set(__self__, "express_route_connections", express_route_connections)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="autoScaleConfiguration")
def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']:
"""
Configuration for auto scaling.
"""
return pulumi.get(self, "auto_scale_configuration")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteConnections")
def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']:
"""
List of ExpressRoute connections to the ExpressRoute gateway.
"""
return pulumi.get(self, "express_route_connections")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the express route gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> 'outputs.VirtualHubIdResponse':
"""
The Virtual Hub where the ExpressRoute gateway is or will be deployed.
"""
return pulumi.get(self, "virtual_hub")
class AwaitableGetExpressRouteGatewayResult(GetExpressRouteGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetExpressRouteGatewayResult(
auto_scale_configuration=self.auto_scale_configuration,
etag=self.etag,
express_route_connections=self.express_route_connections,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_hub=self.virtual_hub)
def get_express_route_gateway(express_route_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteGatewayResult:
"""
ExpressRoute gateway resource.
API Version: 2020-08-01.
:param str express_route_gateway_name: The name of the ExpressRoute gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expressRouteGatewayName'] = express_route_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value
return AwaitableGetExpressRouteGatewayResult(
auto_scale_configuration=__ret__.auto_scale_configuration,
etag=__ret__.etag,
express_route_connections=__ret__.express_route_connections,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_hub=__ret__.virtual_hub)
| 36.967568 | 201 | 0.663109 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetExpressRouteGatewayResult',
'AwaitableGetExpressRouteGatewayResult',
'get_express_route_gateway',
]
@pulumi.output_type
class GetExpressRouteGatewayResult:
def __init__(__self__, auto_scale_configuration=None, etag=None, express_route_connections=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None, virtual_hub=None):
if auto_scale_configuration and not isinstance(auto_scale_configuration, dict):
raise TypeError("Expected argument 'auto_scale_configuration' to be a dict")
pulumi.set(__self__, "auto_scale_configuration", auto_scale_configuration)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if express_route_connections and not isinstance(express_route_connections, list):
raise TypeError("Expected argument 'express_route_connections' to be a list")
pulumi.set(__self__, "express_route_connections", express_route_connections)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="autoScaleConfiguration")
def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']:
return pulumi.get(self, "auto_scale_configuration")
@property
@pulumi.getter
def etag(self) -> str:
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteConnections")
def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']:
return pulumi.get(self, "express_route_connections")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> 'outputs.VirtualHubIdResponse':
return pulumi.get(self, "virtual_hub")
class AwaitableGetExpressRouteGatewayResult(GetExpressRouteGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetExpressRouteGatewayResult(
auto_scale_configuration=self.auto_scale_configuration,
etag=self.etag,
express_route_connections=self.express_route_connections,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_hub=self.virtual_hub)
def get_express_route_gateway(express_route_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteGatewayResult:
__args__ = dict()
__args__['expressRouteGatewayName'] = express_route_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value
return AwaitableGetExpressRouteGatewayResult(
auto_scale_configuration=__ret__.auto_scale_configuration,
etag=__ret__.etag,
express_route_connections=__ret__.express_route_connections,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_hub=__ret__.virtual_hub)
| true | true |
790175df71984046c36d0e478a37d48f8b80d535 | 236 | py | Python | tests/sample_sdk_https.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | 3 | 2016-12-15T15:54:37.000Z | 2021-08-10T03:16:18.000Z | tests/sample_sdk_https.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | 44 | 2016-04-13T08:19:45.000Z | 2022-01-14T12:58:44.000Z | tests/sample_sdk_https.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | 5 | 2016-11-22T11:23:28.000Z | 2020-01-28T12:26:10.000Z | from sdklib.http import HttpSdk
class SampleHttpsHttpSdk(HttpSdk):
    """Sample HTTPS SDK client (demo/test fixture) targeting Google."""
    DEFAULT_HOST = "https://www.google.com"
    API_IVANPRJCTS_PATH = "/ivanprjcts"
    def get_ivanprjcts(self):
        """Issue a GET to the ivanprjcts path (delegates to ``HttpSdk.get``)."""
        return self.get(self.API_IVANPRJCTS_PATH)
| 19.666667 | 49 | 0.728814 | from sdklib.http import HttpSdk
class SampleHttpsHttpSdk(HttpSdk):
DEFAULT_HOST = "https://www.google.com"
API_IVANPRJCTS_PATH = "/ivanprjcts"
def get_ivanprjcts(self):
return self.get(self.API_IVANPRJCTS_PATH)
| true | true |
79017678f78ba6aa7a00b756d1da6a0797025124 | 3,270 | py | Python | src/unittest/python/plugins/python/test_plugin_helper_tests.py | igordertigor/pybuilder | 772cf66a6fea86c59bd76f22388b0ce964b2fc1a | [
"Apache-2.0"
] | 1 | 2019-01-17T03:35:32.000Z | 2019-01-17T03:35:32.000Z | src/unittest/python/plugins/python/test_plugin_helper_tests.py | igordertigor/pybuilder | 772cf66a6fea86c59bd76f22388b0ce964b2fc1a | [
"Apache-2.0"
] | 1 | 2022-03-10T13:19:18.000Z | 2022-03-10T13:19:18.000Z | src/unittest/python/plugins/python/test_plugin_helper_tests.py | igordertigor/pybuilder | 772cf66a6fea86c59bd76f22388b0ce964b2fc1a | [
"Apache-2.0"
] | 1 | 2020-11-02T10:06:11.000Z | 2020-11-02T10:06:11.000Z | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.test_plugin_helper import ReportsProcessor
from test_utils import Mock, patch
class ReportsProcessorTests(unittest.TestCase):
    """Unit tests for pybuilder's ReportsProcessor helper."""
    def setUp(self):
        # Fresh processor with mocked project/logger and a 42 ms run time.
        self.reports_processor = ReportsProcessor(Mock(), Mock())
        total_time = Mock()
        total_time.get_millis.return_value = 42
        self.reports_processor.process_reports([], total_time)
    def test_should_raise_exception_when_not_all_tests_pass(self):
        self.reports_processor.tests_failed = 1
        self.assertRaises(BuildFailedException, self.reports_processor.write_report_and_ensure_all_tests_passed)
    def test_should_not_raise_exception_when_all_tests_pass(self):
        self.reports_processor.tests_failed = 0
        self.reports_processor.write_report_and_ensure_all_tests_passed()
    @patch("pybuilder.plugins.python.test_plugin_helper.render_report", return_value='rendered-report')
    def test_should_write_report(self, render_report):
        # The rendered report must land under the fixed file name.
        self.reports_processor.write_report_and_ensure_all_tests_passed()
        self.reports_processor.project.write_report.assert_called_with("integrationtest.json", 'rendered-report')
    def test_should_parse_reports(self):
        # Two failing + two passing reports -> both counters updated.
        reports = [
            {'test': 'name1', 'test_file':
                'file1', 'success': False, 'time': 1},
            {'test': 'name2', 'test_file':
                'file2', 'success': False, 'time': 2},
            {'test': 'name3', 'test_file':
                'file3', 'success': True, 'time': 3},
            {'test': 'name4', 'test_file': 'file4', 'success': True, 'time': 4}
        ]
        self.reports_processor.process_reports(reports, Mock())
        self.assertEqual(self.reports_processor.tests_failed, 2)
        self.assertEqual(self.reports_processor.tests_executed, 4)
    def test_should_create_test_report_with_attributes(self):
        # The aggregate report exposes counts, status, reports and time.
        mock_time = Mock()
        mock_time.get_millis.return_value = 42
        self.reports_processor.process_reports([], mock_time)
        self.reports_processor.tests_failed = 4
        self.reports_processor.tests_executed = 42
        self.reports_processor.reports = ['a', 'b', 'c']
        self.assertEqual(self.reports_processor.test_report,
                         {
                             'num_of_tests': 42,
                             'success': False,
                             'tests': ['a', 'b', 'c'],
                             'tests_failed': 4,
                             'time': 42
                         }
                         )
| 39.878049 | 113 | 0.654434 |
import unittest
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.test_plugin_helper import ReportsProcessor
from test_utils import Mock, patch
class ReportsProcessorTests(unittest.TestCase):
def setUp(self):
self.reports_processor = ReportsProcessor(Mock(), Mock())
total_time = Mock()
total_time.get_millis.return_value = 42
self.reports_processor.process_reports([], total_time)
def test_should_raise_exception_when_not_all_tests_pass(self):
self.reports_processor.tests_failed = 1
self.assertRaises(BuildFailedException, self.reports_processor.write_report_and_ensure_all_tests_passed)
def test_should_not_raise_exception_when_all_tests_pass(self):
self.reports_processor.tests_failed = 0
self.reports_processor.write_report_and_ensure_all_tests_passed()
@patch("pybuilder.plugins.python.test_plugin_helper.render_report", return_value='rendered-report')
def test_should_write_report(self, render_report):
self.reports_processor.write_report_and_ensure_all_tests_passed()
self.reports_processor.project.write_report.assert_called_with("integrationtest.json", 'rendered-report')
def test_should_parse_reports(self):
reports = [
{'test': 'name1', 'test_file':
'file1', 'success': False, 'time': 1},
{'test': 'name2', 'test_file':
'file2', 'success': False, 'time': 2},
{'test': 'name3', 'test_file':
'file3', 'success': True, 'time': 3},
{'test': 'name4', 'test_file': 'file4', 'success': True, 'time': 4}
]
self.reports_processor.process_reports(reports, Mock())
self.assertEqual(self.reports_processor.tests_failed, 2)
self.assertEqual(self.reports_processor.tests_executed, 4)
def test_should_create_test_report_with_attributes(self):
mock_time = Mock()
mock_time.get_millis.return_value = 42
self.reports_processor.process_reports([], mock_time)
self.reports_processor.tests_failed = 4
self.reports_processor.tests_executed = 42
self.reports_processor.reports = ['a', 'b', 'c']
self.assertEqual(self.reports_processor.test_report,
{
'num_of_tests': 42,
'success': False,
'tests': ['a', 'b', 'c'],
'tests_failed': 4,
'time': 42
}
)
| true | true |
790177febcf816c121cc0c602b65cc695b8cd1ec | 692 | py | Python | nltk_utils.py | Serkanbezek/Chatbot-NLP-PyTorch | 680dfa788fa3e3162470a79e7bbd4aa02088a24d | [
"MIT"
] | 1 | 2022-03-03T18:27:23.000Z | 2022-03-03T18:27:23.000Z | nltk_utils.py | Serkanbezek/Chatbot-NLP-PyTorch | 680dfa788fa3e3162470a79e7bbd4aa02088a24d | [
"MIT"
] | null | null | null | nltk_utils.py | Serkanbezek/Chatbot-NLP-PyTorch | 680dfa788fa3e3162470a79e7bbd4aa02088a24d | [
"MIT"
] | null | null | null | import nltk
import numpy as np
#nltk.download('punkt') #downloading a package with a pretrained tokenizer
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def tokenize(sentence): # split a string into word/punctuation tokens
    """Tokenize *sentence* with NLTK's word tokenizer (punkt models)."""
    return nltk.word_tokenize(sentence)
def stem(word): # reduce a word to its root form
    """Lowercase *word* and return its Porter stem."""
    return stemmer.stem(word.lower())
def bag_of_words(tokenized_sentence, all_words):
    """Binary bag-of-words vector over the vocabulary *all_words*.

    Entry ``i`` is 1.0 when ``all_words[i]`` appears among the stemmed
    tokens of *tokenized_sentence*, else 0.0 (float32).
    """
    token_stems = {stem(token) for token in tokenized_sentence}
    return np.array(
        [1.0 if word in token_stems else 0.0 for word in all_words],
        dtype=np.float32)
| 26.615385 | 80 | 0.703757 | import nltk
import numpy as np
r = PorterStemmer()
def tokenize(sentence):
return nltk.word_tokenize(sentence)
def stem(word):
return stemmer.stem(word.lower())
def bag_of_words(tokenized_sentence, all_words):
tokenized_sentence = [stem(w) for w in tokenized_sentence]
bag = np.zeros(len(all_words), dtype = np.float32)
for idx, w in enumerate(all_words):
if w in tokenized_sentence:
bag[idx] = 1.0
return bag
| true | true |
790178b67adb1c4d32e83058746158b4273e9c8f | 2,349 | py | Python | simiki/config.py | timgates42/simiki | 22e544254577477c3f624c9d201f644580f36231 | [
"MIT"
] | 1,034 | 2015-01-04T05:50:05.000Z | 2022-03-23T03:08:25.000Z | simiki/config.py | timgates42/simiki | 22e544254577477c3f624c9d201f644580f36231 | [
"MIT"
] | 102 | 2015-01-12T01:20:10.000Z | 2020-12-31T01:47:25.000Z | simiki/config.py | timgates42/simiki | 22e544254577477c3f624c9d201f644580f36231 | [
"MIT"
] | 215 | 2015-01-25T13:49:49.000Z | 2022-03-22T09:14:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import os.path
import sys
import io
import logging
import datetime
from pprint import pprint
import yaml
import tzlocal
class ConfigFileNotFound(Exception):
    """Raised by :func:`parse_config` when the given config path does not exist."""
    pass
def _set_default_config():
    """Build the baseline site configuration that user settings are merged onto."""
    # Record a timezone-aware build timestamp up front.
    build_time = datetime.datetime.now(tzlocal.get_localzone())
    defaults = {
        "url": "",
        "title": "",
        "keywords": "",
        "description": "",
        "author": "",
        "root": "/",
        "source": "content",
        "destination": "output",
        "attach": "attach",
        "themes_dir": "themes",
        "theme": "simple2",
        "default_ext": "md",
        "pygments": True,
        "debug": False,
        "time": build_time,
    }
    return defaults
def _post_process(config):
for k, v in config.items():
if v is None:
config[k] = ""
if config["url"].endswith("/"):
config["url"] = config["url"][:-1]
return config
def get_default_config():
    """Return the normalised default configuration (defaults + post-processing)."""
    return _post_process(_set_default_config())
def parse_config(config_file):
    """Parse a YAML config file, overlaying its settings on the defaults.

    :param config_file: path to a ``_config.yml``-style file
    :return: the merged, post-processed configuration dict
    :raises ConfigFileNotFound: if ``config_file`` does not exist
    """
    if not os.path.exists(config_file):
        raise ConfigFileNotFound("{0} not exists".format(config_file))

    default_config = _set_default_config()

    with io.open(config_file, "rt", encoding="utf-8") as fd:
        config = yaml.load(fd, Loader=yaml.FullLoader)

    # An empty YAML file parses to None; treat that as "no overrides"
    # instead of crashing in dict.update(None).
    if config:
        default_config.update(config)

    return _post_process(default_config)
if __name__ == "__main__":
    # pylint: disable=pointless-string-statement
    """
    Usage:
        python -m simiki.config : to test config template
        python -m simiki.config _config.yml : to test _config.yml file in \
current dir
    """
    if len(sys.argv) == 1:
        # No argument: parse the bundled template config shipped with simiki.
        base_dir = os.path.dirname(__file__)
        _config_file = os.path.join(base_dir, "conf_templates",
                                    "_config.yml.in")
    elif len(sys.argv) == 2:
        # One argument: parse the named config file relative to the current dir.
        base_dir = os.getcwd()
        _config_file = os.path.join(base_dir, sys.argv[1])
    else:
        logging.error("Use the template config file by default, "
                      "you can specify the config file to parse. \n"
                      "Usage: `python -m simiki.config [_config.yml]'")
        sys.exit(1)
    pprint(parse_config(_config_file))
| 24.989362 | 75 | 0.585355 |
from __future__ import absolute_import, unicode_literals
import os
import os.path
import sys
import io
import logging
import datetime
from pprint import pprint
import yaml
import tzlocal
class ConfigFileNotFound(Exception):
pass
def _set_default_config():
config = {
"url": "",
"title": "",
"keywords": "",
"description": "",
"author": "",
"root": "/",
"source": "content",
"destination": "output",
"attach": "attach",
"themes_dir": "themes",
"theme": "simple2",
"default_ext": "md",
"pygments": True,
"debug": False,
"time": datetime.datetime.now(tzlocal.get_localzone()),
}
return config
def _post_process(config):
for k, v in config.items():
if v is None:
config[k] = ""
if config["url"].endswith("/"):
config["url"] = config["url"][:-1]
return config
def get_default_config():
return _post_process(_set_default_config())
def parse_config(config_file):
if not os.path.exists(config_file):
raise ConfigFileNotFound("{0} not exists".format(config_file))
default_config = _set_default_config()
with io.open(config_file, "rt", encoding="utf-8") as fd:
config = yaml.load(fd, Loader=yaml.FullLoader)
default_config.update(config)
config = _post_process(default_config)
return config
if __name__ == "__main__":
if len(sys.argv) == 1:
base_dir = os.path.dirname(__file__)
_config_file = os.path.join(base_dir, "conf_templates",
"_config.yml.in")
elif len(sys.argv) == 2:
base_dir = os.getcwd()
_config_file = os.path.join(base_dir, sys.argv[1])
else:
logging.error("Use the template config file by default, "
"you can specify the config file to parse. \n"
"Usage: `python -m simiki.config [_config.yml]'")
sys.exit(1)
pprint(parse_config(_config_file))
| true | true |
790178e3ed0bcfa7aa502d7406e2a2ec79590747 | 3,678 | py | Python | ckanext/stats/stats.py | mabah-mst/ckan | 105f613272c2e31daa0081ead24c678bf1b55c22 | [
"Apache-2.0"
] | 6 | 2015-11-09T00:44:51.000Z | 2019-11-21T14:56:01.000Z | ckanext/stats/stats.py | syats/ckan | 599ff35f9c289bab674f544367d5acdb1d2c9423 | [
"Apache-2.0"
] | 39 | 2015-02-18T17:32:23.000Z | 2022-03-11T18:03:36.000Z | ckanext/stats/stats.py | cascaoSDC/ckan | 75a08caa7c688ce70229dfea7070cc667a15c5e8 | [
"BSD-3-Clause"
] | 17 | 2015-03-13T18:05:05.000Z | 2020-11-06T13:55:32.000Z | # encoding: utf-8
import datetime
import logging
from ckan.common import config
from six import text_type
from sqlalchemy import Table, select, join, func, and_
import ckan.plugins as p
import ckan.model as model
log = logging.getLogger(__name__)
# Whether the (unsupported) stats cache was requested via CKAN config.
cache_enabled = p.toolkit.asbool(
    config.get('ckanext.stats.cache_enabled', False)
)

if cache_enabled:
    # Logger.warn is a deprecated alias of Logger.warning -- use the real name.
    log.warning(
        'ckanext.stats does not support caching in current implementations'
    )

# Date format used when rendering stats dates.
DATE_FORMAT = '%Y-%m-%d'
def table(name):
    """Reflect and return the SQLAlchemy ``Table`` for *name* from CKAN's metadata."""
    return Table(name, model.meta.metadata, autoload=True)
def datetime2date(datetime_):
    """Truncate a datetime-like value to its calendar date."""
    year, month, day = datetime_.year, datetime_.month, datetime_.day
    return datetime.date(year, month, day)
class Stats(object):
    """Read-only statistics queries over CKAN's database (groups, tags, users)."""

    @classmethod
    def largest_groups(cls, limit=10):
        """Return up to *limit* ``(Group, package_count)`` pairs, largest first.

        Only counts active, public packages.
        """
        member = table('member')
        package = table('package')
        # Join membership rows to their packages so we can filter on package state.
        j = join(member, package, member.c.table_id == package.c.id)
        s = select(
            [member.c.group_id,
             func.count(member.c.table_id)]
        ).select_from(j).group_by(member.c.group_id).where(
            and_(
                member.c.group_id != None, member.c.table_name == 'package',
                package.c.private == False, package.c.state == 'active'
            )
        ).order_by(func.count(member.c.table_id).desc()).limit(limit)

        res_ids = model.Session.execute(s).fetchall()
        # Rehydrate the raw ids into Group model objects.
        res_groups = [
            (model.Session.query(model.Group).get(text_type(group_id)), val)
            for group_id, val in res_ids
        ]
        return res_groups

    @classmethod
    def top_tags(cls, limit=10, returned_tag_info='object'):  # by package
        """Return the most-used tags, counted by (active, public) package.

        ``returned_tag_info`` selects the shape of each result row:
        'name' -> (tag_name, count), 'id' -> (tag_id, count),
        'object' -> (Tag model object, count).
        """
        assert returned_tag_info in ('name', 'id', 'object')
        tag = table('tag')
        package_tag = table('package_tag')
        package = table('package')
        if returned_tag_info == 'name':
            # Need the tag table joined in to select the human-readable name.
            from_obj = [package_tag.join(tag)]
            tag_column = tag.c.name
        else:
            from_obj = None
            tag_column = package_tag.c.tag_id
        j = join(
            package_tag, package, package_tag.c.package_id == package.c.id
        )
        s = select([tag_column,
                    func.count(package_tag.c.package_id)],
                   from_obj=from_obj).select_from(j).where(
                       and_(
                           package_tag.c.state == 'active',
                           package.c.private == False,
                           package.c.state == 'active'
                       )
                   )
        s = s.group_by(tag_column).order_by(
            func.count(package_tag.c.package_id).desc()
        ).limit(limit)
        res_col = model.Session.execute(s).fetchall()
        if returned_tag_info in ('id', 'name'):
            return res_col
        elif returned_tag_info == 'object':
            # Rehydrate tag ids into Tag model objects.
            res_tags = [
                (model.Session.query(model.Tag).get(text_type(tag_id)), val)
                for tag_id, val in res_col
            ]
            return res_tags

    @classmethod
    def top_package_creators(cls, limit=10):
        """Return up to *limit* ``(User, package_count)`` pairs for the users who
        created the most active, public packages (most prolific first).
        """
        userid_count = model.Session.query(
            model.Package.creator_user_id,
            func.count(model.Package.creator_user_id)
        ).filter(model.Package.state == 'active'
                 ).filter(model.Package.private == False).group_by(
            model.Package.creator_user_id
        ).order_by(func.count(model.Package.creator_user_id).desc()
                   ).limit(limit).all()
        # Skip rows with no creator recorded (user_id is None/empty).
        user_count = [
            (model.Session.query(model.User).get(text_type(user_id)), count)
            for user_id, count in userid_count
            if user_id
        ]
        return user_count
| 32.839286 | 76 | 0.576672 |
import datetime
import logging
from ckan.common import config
from six import text_type
from sqlalchemy import Table, select, join, func, and_
import ckan.plugins as p
import ckan.model as model
log = logging.getLogger(__name__)
cache_enabled = p.toolkit.asbool(
config.get('ckanext.stats.cache_enabled', False)
)
if cache_enabled:
log.warn(
'ckanext.stats does not support caching in current implementations'
)
DATE_FORMAT = '%Y-%m-%d'
def table(name):
return Table(name, model.meta.metadata, autoload=True)
def datetime2date(datetime_):
return datetime.date(datetime_.year, datetime_.month, datetime_.day)
class Stats(object):
@classmethod
def largest_groups(cls, limit=10):
member = table('member')
package = table('package')
j = join(member, package, member.c.table_id == package.c.id)
s = select(
[member.c.group_id,
func.count(member.c.table_id)]
).select_from(j).group_by(member.c.group_id).where(
and_(
member.c.group_id != None, member.c.table_name == 'package',
package.c.private == False, package.c.state == 'active'
)
).order_by(func.count(member.c.table_id).desc()).limit(limit)
res_ids = model.Session.execute(s).fetchall()
res_groups = [
(model.Session.query(model.Group).get(text_type(group_id)), val)
for group_id, val in res_ids
]
return res_groups
@classmethod
def top_tags(cls, limit=10, returned_tag_info='object'):
assert returned_tag_info in ('name', 'id', 'object')
tag = table('tag')
package_tag = table('package_tag')
package = table('package')
if returned_tag_info == 'name':
from_obj = [package_tag.join(tag)]
tag_column = tag.c.name
else:
from_obj = None
tag_column = package_tag.c.tag_id
j = join(
package_tag, package, package_tag.c.package_id == package.c.id
)
s = select([tag_column,
func.count(package_tag.c.package_id)],
from_obj=from_obj).select_from(j).where(
and_(
package_tag.c.state == 'active',
package.c.private == False,
package.c.state == 'active'
)
)
s = s.group_by(tag_column).order_by(
func.count(package_tag.c.package_id).desc()
).limit(limit)
res_col = model.Session.execute(s).fetchall()
if returned_tag_info in ('id', 'name'):
return res_col
elif returned_tag_info == 'object':
res_tags = [
(model.Session.query(model.Tag).get(text_type(tag_id)), val)
for tag_id, val in res_col
]
return res_tags
@classmethod
def top_package_creators(cls, limit=10):
userid_count = model.Session.query(
model.Package.creator_user_id,
func.count(model.Package.creator_user_id)
).filter(model.Package.state == 'active'
).filter(model.Package.private == False).group_by(
model.Package.creator_user_id
).order_by(func.count(model.Package.creator_user_id).desc()
).limit(limit).all()
user_count = [
(model.Session.query(model.User).get(text_type(user_id)), count)
for user_id, count in userid_count
if user_id
]
return user_count
| true | true |
7901792d7e756f8286ce38810accc3742ee607a0 | 1,299 | py | Python | blockchain/Transaction.py | kaifkhan1040/voting | 272f7eaed7793d86e35a6c10001ee852432cf6ee | [
"MIT"
] | 12 | 2019-05-11T11:28:37.000Z | 2021-02-25T19:40:58.000Z | blockchain/Transaction.py | kaifkhan1040/voting | 272f7eaed7793d86e35a6c10001ee852432cf6ee | [
"MIT"
] | 8 | 2019-05-07T18:50:45.000Z | 2020-10-21T11:23:16.000Z | blockchain/Transaction.py | kaifkhan1040/voting | 272f7eaed7793d86e35a6c10001ee852432cf6ee | [
"MIT"
] | 7 | 2019-05-11T08:16:03.000Z | 2020-11-30T08:34:15.000Z | import hashlib
from fastecdsa import keys, curve, ecdsa
from hashlib import sha256
from uuid import uuid4
class Transaction:
    """A transfer of ``amount`` from one address to another, ECDSA-signable."""

    def __init__(self, from_address, to_address, amount):
        self.from_address = from_address
        self.to_address = to_address
        self.amount = amount
        # Random unique id; included in the hash so otherwise-identical
        # transfers produce different hashes/signatures.
        self.id = str(uuid4()).replace('-', '')
        self.signature = None

    def calculate_hash(self):
        """Return the SHA-256 hex digest over (from, to, amount, id)."""
        return sha256((str(self.from_address) + str(self.to_address) + str(self.amount) + self.id).encode()).hexdigest()

    def sign_tx(self, priv_key):
        """Sign this transaction's hash with the given ECDSA private key."""
        hash_tx = self.calculate_hash()
        self.signature = ecdsa.sign(hash_tx, priv_key, hashfunc=sha256)

    def is_valid(self):
        """Verify the stored signature against the transaction hash.

        Unsigned transactions (signature is None) are treated as valid --
        presumably system/reward transactions; TODO confirm against the
        chain's reward logic.
        """
        if self.signature is None:
            return True
        if len(self.signature) == 0 and self.to_address is None:
            return False
        hash_tx = self.calculate_hash()
        # Recover the candidate public keys from the signature, then verify
        # with the first one returned.
        pubkey = keys.get_public_keys_from_sig(self.signature, hash_tx, curve=curve.P256, hashfunc=sha256)
        valid = ecdsa.verify(self.signature, hash_tx, pubkey[0], hashfunc=sha256)
        return valid

    def serialize(self):
        """Return a JSON-serialisable dict view of the transaction."""
        return {
            'id': self.id,
            'from_address': self.from_address,
            'to_address': self.to_address,
            'amount': self.amount
        }
| 30.928571 | 120 | 0.635874 | import hashlib
from fastecdsa import keys, curve, ecdsa
from hashlib import sha256
from uuid import uuid4
class Transaction:
def __init__(self, from_address, to_address, amount):
self.from_address = from_address
self.to_address = to_address
self.amount = amount
self.id = str(uuid4()).replace('-', '')
self.signature = None
def calculate_hash(self):
return sha256((str(self.from_address) + str(self.to_address) + str(self.amount) + self.id).encode()).hexdigest()
def sign_tx(self, priv_key):
hash_tx = self.calculate_hash()
self.signature = ecdsa.sign(hash_tx, priv_key, hashfunc=sha256)
def is_valid(self):
if self.signature is None:
return True
if len(self.signature) == 0 and self.to_address is None:
return False
hash_tx = self.calculate_hash()
pubkey = keys.get_public_keys_from_sig(self.signature, hash_tx, curve=curve.P256, hashfunc=sha256)
valid = ecdsa.verify(self.signature, hash_tx, pubkey[0], hashfunc=sha256)
return valid
def serialize(self):
return {
'id': self.id,
'from_address': self.from_address,
'to_address': self.to_address,
'amount': self.amount
}
| true | true |
790179b1396fdd73fefa5d3753233d7ab0acfc5c | 1,185 | py | Python | api/src/opentrons/hardware_control/g_code_parsing/g_code_functionality_defs/tempdeck/get_temp_g_code_functionality_def.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/hardware_control/g_code_parsing/g_code_functionality_defs/tempdeck/get_temp_g_code_functionality_def.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/hardware_control/g_code_parsing/g_code_functionality_defs/tempdeck/get_temp_g_code_functionality_def.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | [
"Apache-2.0"
] | null | null | null | import re
from typing import Dict
from opentrons.hardware_control.g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import ( # noqa: E501
GCodeFunctionalityDefBase,
)
class GetTempGCodeFunctionalityDef(GCodeFunctionalityDefBase):
    """Explains the temp-deck "get temperature" G-Code and its response."""

    # Response looks like "T:<set temp or 'none'> C:<current temp>".
    # The dot in the current-temp pattern is escaped: the original r"\d+.\d+"
    # would also have matched strings like "12X34".
    RESPONSE_RE = re.compile(r"T:(?P<set_temp>.*?)C:(?P<current_temp>\d+\.\d+)")

    @classmethod
    def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
        """The command takes no arguments, so the explanation is fixed."""
        return "Getting temperature"

    @classmethod
    def _generate_response_explanation(cls, response: str) -> str:
        """Render a human-readable sentence from the raw temperature response.

        Returns an empty string when the response does not match the
        expected "T:... C:..." format.
        """
        match = cls.RESPONSE_RE.match(response)
        message = ""
        if match is not None:
            current_temp = match.groupdict()["current_temp"].strip()
            set_temp = match.groupdict()["set_temp"].strip()
            if set_temp == "none":
                # A target of "none" means the module is not holding a temperature.
                message = (
                    f"Temp deck is disengaged. "
                    f"Current temperature is {current_temp}C"
                )
            else:
                message = (
                    f"Set temperature is {set_temp}C. "
                    f"Current temperature is {current_temp}C"
                )
        return message
| 34.852941 | 125 | 0.605063 | import re
from typing import Dict
from opentrons.hardware_control.g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
class GetTempGCodeFunctionalityDef(GCodeFunctionalityDefBase):
RESPONSE_RE = re.compile(r"T:(?P<set_temp>.*?)C:(?P<current_temp>\d+.\d+)")
@classmethod
def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
return "Getting temperature"
@classmethod
def _generate_response_explanation(cls, response: str) -> str:
match = cls.RESPONSE_RE.match(response)
message = ""
if match is not None:
current_temp = match.groupdict()["current_temp"].strip()
set_temp = match.groupdict()["set_temp"].strip()
if set_temp == "none":
message = (
f"Temp deck is disengaged. "
f"Current temperature is {current_temp}C"
)
else:
message = (
f"Set temperature is {set_temp}C. "
f"Current temperature is {current_temp}C"
)
return message
| true | true |
79017a40dd5a88091830cc6c4a40fd29b48a50dc | 1,105 | py | Python | Python3/Tornado/apps/pg/PG_Wallet/src/lib/sql.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 24 | 2018-11-01T03:36:43.000Z | 2022-03-28T08:20:30.000Z | Python3/Tornado/apps/pg/PG_Wallet/src/lib/sql.py | songning4/QBlockChainNotes | d65ede073f5a20f728f41cc6850409693820cdb1 | [
"MIT"
] | 57 | 2019-12-04T08:26:47.000Z | 2022-03-08T07:35:15.000Z | Python3/Tornado/apps/pg/PG_Wallet/src/lib/sql.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 11 | 2019-01-04T08:41:57.000Z | 2022-03-16T03:51:36.000Z | #!coding:utf8
#author:yqq
#date:2020/4/30 0030 17:11
#description:
import os
import pymysql
SQL_PASSWD = os.environ.get('SQL_PWD')
def open(host : str,usr : str, passwd : str,db_name : str):
    """Open a pymysql connection (utf8 charset, dict-style cursors).

    NOTE(review): this function shadows the builtin ``open`` at module level.
    """
    conn = pymysql.connect(host=host, user=usr,
                           password=passwd, db=db_name,
                           charset='utf8', cursorclass=pymysql.cursors.DictCursor)
    return conn
def close(conn):
    """Close a connection returned by :func:`open`."""
    conn.close()
def execute(conn,cmd):
    """Execute ``cmd`` on ``conn``, commit, and return all fetched rows."""
    cur = conn.cursor()
    cur.execute(cmd)
    conn.commit()  #fixed bug by yqq 2019-05-01: commit so writes persist
    return cur.fetchall()
def run(cmd):
    """Open a connection, execute ``cmd``, close, and return the rows.

    NOTE(review): ``open()`` requires host/usr/passwd/db_name, so calling it
    with no arguments here raises TypeError -- this function appears broken
    as written; confirm intended connection defaults.
    """
    conn = open()
    result = execute(conn,cmd)
    close(conn)
    return result
def get_column_values(conn,table_name,column_name):
    """SELECT a single column from a table and return the rows.

    WARNING: the table/column names are interpolated directly into the SQL
    string -- only pass trusted identifiers (SQL injection risk otherwise).
    """
    cmd = "SELECT {0} FROM {1}".format(column_name,table_name)
    return execute(conn,cmd)
def main():
    """Local smoke test: connect with hard-coded dev credentials and dump a column."""
    # NOTE(review): hard-coded test credentials; acceptable only for a local demo.
    host = '192.168.10.29'
    usr = 'root'
    passwd = 'eWFuZ3FpbmdxaW5n'
    dbname = 'test_1'
    conn = open(host=host, usr=usr, passwd=passwd, db_name=dbname )
    print(get_column_values(conn,'t_test_student','name'))
    close(conn)
if __name__ == "__main__":
main()
| 21.666667 | 71 | 0.643439 |
import os
import pymysql
SQL_PASSWD = os.environ.get('SQL_PWD')
def open(host : str,usr : str, passwd : str,db_name : str):
conn = pymysql.connect(host=host, user=usr,
password=passwd, db=db_name,
charset='utf8', cursorclass=pymysql.cursors.DictCursor)
return conn
def close(conn):
conn.close()
def execute(conn,cmd):
cur = conn.cursor()
cur.execute(cmd)
conn.commit()
return cur.fetchall()
def run(cmd):
conn = open()
result = execute(conn,cmd)
close(conn)
return result
def get_column_values(conn,table_name,column_name):
cmd = "SELECT {0} FROM {1}".format(column_name,table_name)
return execute(conn,cmd)
def main():
host = '192.168.10.29'
usr = 'root'
passwd = 'eWFuZ3FpbmdxaW5n'
dbname = 'test_1'
conn = open(host=host, usr=usr, passwd=passwd, db_name=dbname )
print(get_column_values(conn,'t_test_student','name'))
close(conn)
if __name__ == "__main__":
main()
| true | true |
79017c0607032cf5f29a9166dd2136111da20517 | 3,223 | py | Python | demo.py | ijinmao/CAM-Localization | dfa214be984f77d577dba1065e2c63e0c1b0b82b | [
"MIT"
] | 4 | 2017-09-07T05:55:58.000Z | 2019-09-05T04:02:41.000Z | demo.py | ijinmao/CAM-Localization | dfa214be984f77d577dba1065e2c63e0c1b0b82b | [
"MIT"
] | null | null | null | demo.py | ijinmao/CAM-Localization | dfa214be984f77d577dba1065e2c63e0c1b0b82b | [
"MIT"
] | 1 | 2019-04-02T05:03:25.000Z | 2019-04-02T05:03:25.000Z |
import numpy as np
import cv2
import matplotlib.pylab as plt
from keras.preprocessing.image import load_img
from keras.models import model_from_json
from models import (
create_cam_model, preprocess_image,
get_cam_img
)
# Define CAM conv layer name
CAM_CONV_LAYER = 'cam_conv_layer'
def read_model(model_path, weigths_path):
    """Load a pretrained Keras model from its JSON architecture + weight files.

    :param model_path: path to the model-architecture JSON file
    :param weigths_path: path to the weights file (as saved by save_weights)
    :return: the reconstructed Keras model with weights loaded
    """
    # Use a context manager so the architecture file is closed promptly;
    # the original `open(path).read()` leaked the file handle.
    with open(model_path) as arch_file:
        model = model_from_json(arch_file.read())
    model.load_weights(weigths_path)
    return model
def train_cam_model(X_train, Y_train, X_test, Y_test,
                    batch_size, nb_epoch):
    """Train a CAM model built on top of a pretrained backbone.

    :param X_train: training inputs
    :param Y_train: training labels (categorical)
    :param X_test: validation inputs
    :param Y_test: validation labels (categorical)
    :param batch_size: mini-batch size for fitting
    :param nb_epoch: number of training epochs
    :return: the trained CAM model

    The pretrained backbone is loaded from the (currently empty -- fill in)
    paths below and converted via :func:`create_cam_model`.
    """
    # Use your already trained model (paths must be filled in before use)
    pretrained_model_path = ''
    pretrained_weights_path = ''
    # Your pretrained model name
    pretrained_model_name = 'VGG16'
    # Label class num
    num_classes = 10
    # CAM input spacial size
    gap_spacial_size = 14
    # The layer before CAM(GAP) layers.
    # CAM paper suggests to use the last convnet(VGG) or mergenet(Inception, or other architectures)
    # Change this name based on your model.
    if pretrained_model_name == 'VGG16':
        in_layer_name = 'block5_conv3'
    elif pretrained_model_name == 'InceptionV3':
        in_layer_name = 'batchnormalization_921'
    elif pretrained_model_name == 'ResNet50':
        in_layer_name = 'merge_13'
    else:
        in_layer_name = ''

    # Load your already trained model, transfer it to CAM model
    pretrained_model = read_model(pretrained_model_path,
                                  pretrained_weights_path)

    # Create CAM model based on trained model
    model = create_cam_model(pretrained_model,
                             gap_spacial_size,
                             num_classes,
                             in_layer_name,
                             CAM_CONV_LAYER)

    # Train your CAM model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              shuffle=True, verbose=1,
              validation_data=(X_test, Y_test))

    # Save model (NOTE(review): empty path -- fill in before running)
    model.save_weights('')

    return model
def cam_model():
    """
    Return your trained CAM model.

    Placeholder: returns None until a trained model loader is plugged in.
    """
    return
def plot_cam_map(img_path, img_size, batch_size, label_plot):
    """Plot the class activation map for one image over the original image.

    :param img_path: path to the input image
    :param img_size: square size the image is resized to before inference
    :param batch_size: batch size used for the prediction pass
    :param label_plot: class index whose activation map is visualised
    """
    # CAM input spacial size
    gap_spacial_size = 14

    # Use your trained CAM model (currently returns None -- see cam_model())
    model = cam_model()

    # Load and format data
    im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
    test_data = preprocess_image(img_path, img_size, expand_dims=True)

    # Get class map image, upscaled by the ratio between input and CAM sizes
    im_cam = get_cam_img(model,
                         test_data,
                         label_plot,
                         CAM_CONV_LAYER,
                         ratio=img_size / gap_spacial_size)

    # Resize if the shape of class map is not equal to original image
    if im_cam.shape != im_ori[:, :, 0].shape:
        im_cam = cv2.resize(im_cam, (img_size, img_size), cv2.INTER_LINEAR)

    # Show the predictions. You can analyze the class map with the predictions.
    prediction_labels = model.predict(test_data.astype('float32'), batch_size=batch_size, verbose=1)
    print('Info: Predictions:\n{}'.format(prediction_labels))

    # Plot original image with the class map blended on top
    plt.imshow(im_ori)
    plt.imshow(im_cam,
               cmap='jet',
               alpha=0.5,
               interpolation='bilinear')
    plt.show()
| 25.784 | 97 | 0.730996 |
import numpy as np
import cv2
import matplotlib.pylab as plt
from keras.preprocessing.image import load_img
from keras.models import model_from_json
from models import (
create_cam_model, preprocess_image,
get_cam_img
)
CAM_CONV_LAYER = 'cam_conv_layer'
def read_model(model_path, weigths_path):
model = model_from_json(open(model_path).read())
model.load_weights(weigths_path)
return model
def train_cam_model(X_train, Y_train, X_test, Y_test,
batch_size, nb_epoch):
pretrained_model_path = ''
pretrained_weights_path = ''
pretrained_model_name = 'VGG16'
num_classes = 10
gap_spacial_size = 14
if pretrained_model_name == 'VGG16':
in_layer_name = 'block5_conv3'
elif pretrained_model_name == 'InceptionV3':
in_layer_name = 'batchnormalization_921'
elif pretrained_model_name == 'ResNet50':
in_layer_name = 'merge_13'
else:
in_layer_name = ''
pretrained_model = read_model(pretrained_model_path,
pretrained_weights_path)
model = create_cam_model(pretrained_model,
gap_spacial_size,
num_classes,
in_layer_name,
CAM_CONV_LAYER)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True, verbose=1,
validation_data=(X_test, Y_test))
model.save_weights('')
return model
def cam_model():
return
def plot_cam_map(img_path, img_size, batch_size, label_plot):
gap_spacial_size = 14
model = cam_model()
im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
test_data = preprocess_image(img_path, img_size, expand_dims=True)
im_cam = get_cam_img(model,
test_data,
label_plot,
CAM_CONV_LAYER,
ratio=img_size / gap_spacial_size)
if im_cam.shape != im_ori[:, :, 0].shape:
im_cam = cv2.resize(im_cam, (img_size, img_size), cv2.INTER_LINEAR)
prediction_labels = model.predict(test_data.astype('float32'), batch_size=batch_size, verbose=1)
print('Info: Predictions:\n{}'.format(prediction_labels))
plt.imshow(im_ori)
plt.imshow(im_cam,
cmap='jet',
alpha=0.5,
interpolation='bilinear')
plt.show()
| true | true |
79017c62423711590bccd2042772a130938484bb | 25,242 | py | Python | python/pyspark/sql/session.py | YoufuLi/radlog | 669c2b28f7ef4f9f6d94d353a3b180c1ad3b99e5 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 2 | 2019-04-03T06:39:06.000Z | 2019-06-23T16:43:49.000Z | python/pyspark/sql/session.py | YoufuLi/radlog | 669c2b28f7ef4f9f6d94d353a3b180c1ad3b99e5 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 1 | 2021-08-31T03:40:28.000Z | 2021-08-31T06:38:38.000Z | python/pyspark/sql/session.py | YoufuLi/radlog | 669c2b28f7ef4f9f6d94d353a3b180c1ad3b99e5 | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 1 | 2019-04-03T05:00:51.000Z | 2019-04-03T05:00:51.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    """Attach a ``toDF`` convenience method to every RDD, bound to *sparkSession*."""
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`

        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``

        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param samplingRatio: the sample ratio of rows used for inferring
        :return: a DataFrame

        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)

    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
    class Builder(object):
        """Builder for :class:`SparkSession`.
        """

        _lock = RLock()
        # Class-level, so options are shared by every Builder instance
        # (including the shared ``SparkSession.builder`` singleton).
        _options = {}

        @since(2.0)
        def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

            For an existing SparkConf, use `conf` parameter.

            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...

            For a (key, value) pair, you can omit parameter names.

            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...

            :param key: a key name string for configuration property
            :param value: a value for configuration property
            :param conf: an instance of :class:`SparkConf`
            """
            with self._lock:
                if conf is None:
                    self._options[key] = str(value)
                else:
                    # A SparkConf was given: merge all of its entries.
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self

        @since(2.0)
        def master(self, master):
            """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
            to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
            cluster.

            :param master: a url for spark master
            """
            return self.config("spark.master", master)

        @since(2.0)
        def appName(self, name):
            """Sets a name for the application, which will be shown in the Spark web UI.

            If no application name is set, a randomly generated name will be used.

            :param name: an application name
            """
            return self.config("spark.app.name", name)

        @since(2.0)
        def enableHiveSupport(self):
            """Enables Hive support, including connectivity to a persistent Hive metastore, support
            for Hive serdes, and Hive user-defined functions.
            """
            return self.config("spark.sql.catalogImplementation", "hive")

        @since(2.0)
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                # A session is reusable only if its SparkContext is still alive.
                if session is None or session._sc._jsc is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # This SparkContext may be an existing one.
                    for key, value in self._options.items():
                        # we need to propagate the confs
                        # before we create the SparkSession. Otherwise, confs like
                        # warehouse path and metastore url will not be set correctly (
                        # these confs cannot be changed once the SparkSession is created).
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                # Apply builder options to the (new or reused) session.
                for key, value in self._options.items():
                    session.conf.set(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext`."""
        # Stored by __init__; exposed read-only through this property.
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running."""
        # Delegates to the JVM-side session.
        return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
    @property
    @since(2.0)
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.

        :return: :class:`UDFRegistration`
        """
        # Imported lazily, at first property access.
        from pyspark.sql.context import UDFRegistration
        return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.

        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.

        When ``schema`` is :class:`pyspark.sql.types.DataType` or
        :class:`pyspark.sql.types.StringType`, it must match the
        real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.

        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

        :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a
            :class:`pyspark.sql.types.StringType` or a list of
            column names, default is ``None``.  The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
            ``int`` as a short name for ``IntegerType``.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`

        .. versionchanged:: 2.0.1
           Added verifySchema.

        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        # A DDL-formatted string schema is parsed into a DataType first.
        if isinstance(schema, basestring):
            schema = _parse_datatype_string(schema)
        # pandas is an optional dependency; detect it without importing eagerly.
        try:
            import pandas
            has_pandas = True
        except Exception:
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            # Flatten the pandas frame to a list of row-value lists; reuse its
            # column labels as the schema when none was supplied.
            if schema is None:
                schema = [str(x) for x in data.columns]
            data = [r.tolist() for r in data.to_records(index=False)]
        # Per-row verification is skipped entirely when verifySchema is False.
        verify_func = _verify_type if verifySchema else lambda _, t: True
        if isinstance(schema, StructType):
            def prepare(obj):
                verify_func(obj, schema)
                return obj
        elif isinstance(schema, DataType):
            # A single (non-struct) type becomes the sole "value" column, so
            # each record is wrapped into a one-element tuple.
            dataType = schema
            schema = StructType().add("value", schema)
            def prepare(obj):
                verify_func(obj, dataType)
                return obj,
        else:
            if isinstance(schema, list):
                schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
            prepare = lambda obj: obj
        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        # Ship the rows to the JVM and apply the schema there.
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
    @property
    @since(2.0)
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.

        :return: :class:`DataFrameReader`
        """
        # A fresh reader bound to the compatibility SQLContext wrapper.
        return DataFrameReader(self._wrapped)
    @property
    @since(2.0)
    def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.

        .. note:: Experimental.

        :return: :class:`DataStreamReader`
        """
        # A fresh stream reader bound to the compatibility SQLContext wrapper.
        return DataStreamReader(self._wrapped)
    @property
    @since(2.0)
    def streams(self):
        """Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` StreamingQueries active on `this` context.

        .. note:: Experimental.

        :return: :class:`StreamingQueryManager`
        """
        # Imported lazily, at first property access.
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext` and clear the cached
        session so a later ``Builder.getOrCreate`` builds a fresh one.
        """
        self._sc.stop()
        # A session is unusable once its context is stopped; forget it.
        SparkSession._instantiatedSession = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        """
        # Nothing to acquire; the session itself is the context object.
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Specifically stop the SparkSession on exit of the with block.
        """
        # Exceptions are not suppressed (implicitly returns None).
        self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    # The doctests assume paths relative to the Spark installation root.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    # Fixtures referenced by name inside the doctests.
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
    _test()
| 38.833846 | 100 | 0.598605 |
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attach a ``toDF`` convenience method to every RDD, bound to the given
    # session, so ``rdd.toDF(...)`` works once a SparkSession exists.
    def toDF(self, schema=None, sampleRatio=None):
        # Delegates to SparkSession.createDataFrame with the same arguments.
        return sparkSession.createDataFrame(self, schema, sampleRatio)
    RDD.toDF = toDF
class SparkSession(object):
    """The entry point to programming Spark with the Dataset and DataFrame API.

    Use :attr:`SparkSession.builder` to build or fetch a session; the class
    caches the most recent usable instance in ``_instantiatedSession``.
    """
    class Builder(object):
        """Builder for :class:`SparkSession` instances."""
        # Guards _options and session construction across threads.
        _lock = RLock()
        # Accumulated config options, applied when the session is built.
        _options = {}
        @since(2.0)
        def config(self, key=None, value=None, conf=None):
            """Set one option, or copy every entry of a given SparkConf."""
            with self._lock:
                if conf is None:
                    self._options[key] = str(value)
                else:
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self
        @since(2.0)
        def master(self, master):
            """Set the Spark master URL to connect to."""
            return self.config("spark.master", master)
        @since(2.0)
        def appName(self, name):
            """Set the application name shown in the Spark UI."""
            return self.config("spark.app.name", name)
        @since(2.0)
        def enableHiveSupport(self):
            """Enable Hive support (Hive metastore, SerDes, UDFs)."""
            return self.config("spark.sql.catalogImplementation", "hive")
        @since(2.0)
        def getOrCreate(self):
            """Return the cached session if still valid, else build a new one;
            accumulated options are applied to the session either way."""
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                if session is None or session._sc._jsc is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # The SparkContext may be a pre-existing one: propagate
                    # the options onto it before creating the session.
                    for key, value in self._options.items():
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                for key, value in self._options.items():
                    session.conf.set(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session
    # Shared builder entry point and cached-session sentinel.
    builder = Builder()
    _instantiatedSession = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, jsparkSession=None):
        """Create a new SparkSession over ``sparkContext``; wraps the given
        JVM session or creates one when ``jsparkSession`` is ``None``."""
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        # SQLContext wrapper kept for compatibility with pre-2.0 call sites.
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        _monkey_patch_RDD(self)
        install_exception_handler()
        # Renew the cached session when the previous one's context is stopped.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
    @since(2.0)
    def newSession(self):
        """Return a new session sharing this SparkContext and table cache."""
        return self.__class__(self._sc, self._jsparkSession.newSession())
    @property
    @since(2.0)
    def sparkContext(self):
        """The underlying :class:`SparkContext`."""
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark this application runs on."""
        return self._jsparkSession.version()
    @property
    @since(2.0)
    def conf(self):
        """Runtime configuration interface for Spark (lazily memoized)."""
        if not hasattr(self, "_conf"):
            self._conf = RuntimeConfig(self._jsparkSession.conf())
        return self._conf
    @property
    @since(2.0)
    def catalog(self):
        """Catalog interface for databases, tables, functions (memoized)."""
        if not hasattr(self, "_catalog"):
            self._catalog = Catalog(self)
        return self._catalog
    @property
    @since(2.0)
    def udf(self):
        """A :class:`UDFRegistration` for UDF registration."""
        from pyspark.sql.context import UDFRegistration
        return UDFRegistration(self._wrapped)
    @since(2.0)
    def range(self, start, end=None, step=1, numPartitions=None):
        """DataFrame with a single LongType ``id`` column over a range;
        with one argument, ``start`` is the exclusive end and 0 the start."""
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        if end is None:
            jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
        else:
            jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
        return DataFrame(jdf, self._wrapped)
    def _inferSchemaFromList(self, data):
        """Infer a StructType from a non-empty list of Row/tuple."""
        if not data:
            raise ValueError("can not infer schema from empty dataset")
        first = data[0]
        if type(first) is dict:
            warnings.warn("inferring schema from dict is deprecated,"
                          "please use pyspark.sql.Row instead")
        schema = reduce(_merge_type, map(_infer_schema, data))
        if _has_nulltype(schema):
            raise ValueError("Some of types cannot be determined after inferring")
        return schema
    def _inferSchema(self, rdd, samplingRatio=None):
        """Infer a StructType from an RDD of Row/tuple, optionally sampling."""
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")
        if samplingRatio is None:
            # Scan up to 100 rows until every column type is resolved.
            schema = _infer_schema(first)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row))
                    if not _has_nulltype(schema):
                        break
                else:
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(_infer_schema).reduce(_merge_type)
        return schema
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """Prepare (rdd, schema) for DataFrame creation from an existing RDD."""
        if schema is None or isinstance(schema, (list, tuple)):
            struct = self._inferSchema(rdd, samplingRatio)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                # Apply caller-supplied column names to the inferred struct.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # Convert python objects to sql data.
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """Prepare (rdd, schema) for DataFrame creation from local data."""
        # Materialize so the data can be consumed more than once.
        if not isinstance(data, list):
            data = list(data)
        if schema is None or isinstance(schema, (list, tuple)):
            struct = self._inferSchemaFromList(data)
            converter = _create_converter(struct)
            data = map(converter, data)
            if isinstance(schema, (list, tuple)):
                # Apply caller-supplied column names to the inferred struct.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # Convert python objects to sql data.
        data = [schema.toInternal(row) for row in data]
        return self._sc.parallelize(data), schema
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """Create a :class:`DataFrame` from an RDD, a list or a pandas.DataFrame.

        ``schema`` may be None (inferred), a list of column names, a DDL
        string, a single DataType (wrapped as a sole "value" column) or a
        StructType; ``verifySchema`` toggles per-row type checking.
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        if isinstance(schema, basestring):
            schema = _parse_datatype_string(schema)
        # pandas is optional; detect availability without failing.
        try:
            import pandas
            has_pandas = True
        except Exception:
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            if schema is None:
                schema = [str(x) for x in data.columns]
            data = [r.tolist() for r in data.to_records(index=False)]
        verify_func = _verify_type if verifySchema else lambda _, t: True
        if isinstance(schema, StructType):
            def prepare(obj):
                verify_func(obj, schema)
                return obj
        elif isinstance(schema, DataType):
            # Single non-struct type: wrap each record into a 1-tuple.
            dataType = schema
            schema = StructType().add("value", schema)
            def prepare(obj):
                verify_func(obj, dataType)
                return obj,
        else:
            if isinstance(schema, list):
                schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
            prepare = lambda obj: obj
        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        # Ship rows to the JVM and apply the schema there.
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
    @ignore_unicode_prefix
    @since(2.0)
    def sql(self, sqlQuery):
        """Return a :class:`DataFrame` for the result of the given query."""
        return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
    @since(2.0)
    def table(self, tableName):
        """Return the specified table as a :class:`DataFrame`."""
        return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
    @property
    @since(2.0)
    def read(self):
        """A :class:`DataFrameReader` for reading data as a DataFrame."""
        return DataFrameReader(self._wrapped)
    @property
    @since(2.0)
    def readStream(self):
        """A :class:`DataStreamReader` for streaming DataFrames (experimental)."""
        return DataStreamReader(self._wrapped)
    @property
    @since(2.0)
    def streams(self):
        """A :class:`StreamingQueryManager` for this context (experimental)."""
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying SparkContext and forget the cached session."""
        self._sc.stop()
        SparkSession._instantiatedSession = None
    @since(2.0)
    def __enter__(self):
        """Support ``with SparkSession.builder...getOrCreate() as session:``."""
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop the session when the ``with`` block exits."""
        self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    # The doctests assume paths relative to the Spark installation root.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    # Fixtures referenced by name inside the doctests.
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
    _test()
| true | true |
79017db60e73d804537d0dcbeefc4a9df9259ba7 | 4,771 | py | Python | core/tests/test_helpers.py | uktrade/directory-ui-supplier | b91bb07dbcb1d69a032a0536b8eff6e0e96196ef | [
"MIT"
] | 2 | 2017-06-02T09:09:06.000Z | 2017-07-19T22:51:16.000Z | core/tests/test_helpers.py | uktrade/directory-ui-supplier | b91bb07dbcb1d69a032a0536b8eff6e0e96196ef | [
"MIT"
] | 409 | 2016-12-28T12:14:27.000Z | 2019-08-01T11:11:48.000Z | core/tests/test_helpers.py | uktrade/directory-ui-supplier | b91bb07dbcb1d69a032a0536b8eff6e0e96196ef | [
"MIT"
] | 5 | 2017-08-30T08:11:29.000Z | 2019-06-04T20:40:34.000Z | import pytest
import requests
from directory_constants import expertise, sectors
from django.shortcuts import Http404
from django.urls import reverse
from core import helpers
import core.tests.helpers
@pytest.mark.parametrize('status_code,exception', (
    (400, requests.exceptions.HTTPError),
    (404, Http404),
    (500, requests.exceptions.HTTPError),
))
def test_handle_cms_response_error(status_code, exception):
    # Non-2xx CMS responses must surface as the mapped exception type.
    failing_response = core.tests.helpers.create_response(
        status_code=status_code
    )
    with pytest.raises(exception):
        helpers.handle_cms_response(failing_response)
def test_handle_cms_response_ok():
    # A 2xx response is unwrapped straight to its JSON payload.
    payload = {'field': 'value'}
    ok_response = core.tests.helpers.create_response(
        status_code=200, json_payload=payload
    )
    assert helpers.handle_cms_response(ok_response) == payload
@pytest.mark.parametrize('path,expect_code', (
    ('/', None),
    ('?language=pt', 'pt'),
    ('/?language=ar', 'ar'),
    ('/industries?language=es', 'es'),
    ('/industries/?language=zh-hans', 'zh-hans'),
    ('/industries/aerospace?language=de', 'de'),
    ('/industries/automotive/?language=fr', 'fr'),
    ('?lang=fr', 'fr'),
    ('?language=de&lang=de', 'de'),
    ('?lang=pt&language=es', 'es')
))
def test_get_language_from_querystring(path, expect_code, rf):
    # Both ``?language=`` and the legacy ``?lang=`` are honoured, with
    # ``language=`` taking precedence when both appear.
    request = rf.get(reverse('index') + path)
    assert helpers.get_language_from_querystring(request) == expect_code
def test_company_parser_serialize_for_template(retrieve_profile_data):
    # CompanyParser flattens the raw profile API payload into the template
    # context: dates formatted, list fields joined to strings, derived
    # flags (has_expertise, is_in_companies_house, ...) added.
    company = helpers.CompanyParser(retrieve_profile_data)

    assert company.serialize_for_template() == {
        'address': '123 Fake Street, Fakeville, London, E14 6XK',
        'address_line_1': '123 Fake Street',
        'address_line_2': 'Fakeville',
        'country': 'GB',
        'date_of_creation': '02 March 2015',
        'description': 'Ecommerce website',
        'email_address': 'test@example.com',
        'email_full_name': 'Jeremy',
        'employees': '501-1,000',
        'expertise_countries': '',
        'expertise_industries': '',
        'expertise_languages': '',
        'expertise_products_services': {},
        'expertise_regions': '',
        'facebook_url': 'http://www.facebook.com',
        'has_expertise': False,
        'keywords': 'word1, word2',
        'linkedin_url': 'http://www.linkedin.com',
        'locality': 'London',
        'logo': 'nice.jpg',
        'mobile_number': '07506043448',
        'modified': '2016-11-23T11:21:10.977518Z',
        'name': 'Great company',
        'number': '01234567',
        'po_box': 'abc',
        'postal_code': 'E14 6XK',
        'postal_full_name': 'Jeremy',
        'sectors': 'Security',
        'slug': 'great-company',
        'summary': 'this is a short summary',
        'supplier_case_studies': [],
        'twitter_url': 'http://www.twitter.com',
        'verified_with_code': True,
        'website': 'http://example.com',
        'company_type': 'COMPANIES_HOUSE',
        'is_published_investment_support_directory': True,
        'is_published_find_a_supplier': True,
        'is_in_companies_house': True
    }
def test_company_parser_serialize_for_template_empty():
    # An empty profile payload serializes to an empty template context.
    parser = helpers.CompanyParser({})
    assert parser.serialize_for_template() == {}
def test_get_results_from_search_response_xss(retrieve_profile_data):
    # Highlight fragments come back from search unescaped; formatting must
    # join them with "..." without mangling the embedded markup.
    hit = {
        '_source': retrieve_profile_data,
        'highlight': {
            'description': [
                '<a onmouseover=javascript:func()>stuff</a>',
                'to the max <em>wolf</em>.'
            ]
        }
    }
    search_response = core.tests.helpers.create_response(
        json_payload={'hits': {'total': 1, 'hits': [hit]}}
    )

    parsed = helpers.get_results_from_search_response(search_response)

    assert parsed['results'][0]['highlight'] == (
        '<a onmouseover=javascript:func()>stuff</a>...to the max '
        '<em>wolf</em>.'
    )
def test_get_filters_labels():
    # Non-filter keys such as ``q`` and ``page`` are ignored; the remaining
    # filter values are translated into human-readable labels.
    search_filters = {
        'expertise_languages': ['aa'],
        'q': 'foo',
        'page': 5,
        'expertise_regions': ['NORTH_EAST'],
        'expertise_products_services_financial': [expertise.FINANCIAL[1]],
        'industries': [sectors.AEROSPACE, sectors.ADVANCED_MANUFACTURING],
        'expertise_products_services_human_resources': [
            'Employment and talent research'
        ],
    }
    expected_labels = [
        'Afar',
        'North East',
        'Insurance',
        'Aerospace',
        'Advanced manufacturing',
        'Employment and talent research',
    ]
    assert helpers.get_filters_labels(search_filters) == expected_labels
| 31.388158 | 78 | 0.607001 | import pytest
import requests
from directory_constants import expertise, sectors
from django.shortcuts import Http404
from django.urls import reverse
from core import helpers
import core.tests.helpers
@pytest.mark.parametrize('status_code,exception', (
    (400, requests.exceptions.HTTPError),
    (404, Http404),
    (500, requests.exceptions.HTTPError),
))
def test_handle_cms_response_error(status_code, exception):
    # Non-2xx CMS responses must surface as the mapped exception type.
    failing_response = core.tests.helpers.create_response(
        status_code=status_code
    )
    with pytest.raises(exception):
        helpers.handle_cms_response(failing_response)
def test_handle_cms_response_ok():
    # A 2xx response is unwrapped straight to its JSON payload.
    payload = {'field': 'value'}
    ok_response = core.tests.helpers.create_response(
        status_code=200, json_payload=payload
    )
    assert helpers.handle_cms_response(ok_response) == payload
@pytest.mark.parametrize('path,expect_code', (
    ('/', None),
    ('?language=pt', 'pt'),
    ('/?language=ar', 'ar'),
    ('/industries?language=es', 'es'),
    ('/industries/?language=zh-hans', 'zh-hans'),
    ('/industries/aerospace?language=de', 'de'),
    ('/industries/automotive/?language=fr', 'fr'),
    ('?lang=fr', 'fr'),
    ('?language=de&lang=de', 'de'),
    ('?lang=pt&language=es', 'es')
))
def test_get_language_from_querystring(path, expect_code, rf):
    # Both ``?language=`` and the legacy ``?lang=`` are honoured, with
    # ``language=`` taking precedence when both appear.
    request = rf.get(reverse('index') + path)
    assert helpers.get_language_from_querystring(request) == expect_code
def test_company_parser_serialize_for_template(retrieve_profile_data):
    # CompanyParser flattens the raw profile API payload into the template
    # context: dates formatted, list fields joined to strings, derived
    # flags (has_expertise, is_in_companies_house, ...) added.
    company = helpers.CompanyParser(retrieve_profile_data)

    assert company.serialize_for_template() == {
        'address': '123 Fake Street, Fakeville, London, E14 6XK',
        'address_line_1': '123 Fake Street',
        'address_line_2': 'Fakeville',
        'country': 'GB',
        'date_of_creation': '02 March 2015',
        'description': 'Ecommerce website',
        'email_address': 'test@example.com',
        'email_full_name': 'Jeremy',
        'employees': '501-1,000',
        'expertise_countries': '',
        'expertise_industries': '',
        'expertise_languages': '',
        'expertise_products_services': {},
        'expertise_regions': '',
        'facebook_url': 'http://www.facebook.com',
        'has_expertise': False,
        'keywords': 'word1, word2',
        'linkedin_url': 'http://www.linkedin.com',
        'locality': 'London',
        'logo': 'nice.jpg',
        'mobile_number': '07506043448',
        'modified': '2016-11-23T11:21:10.977518Z',
        'name': 'Great company',
        'number': '01234567',
        'po_box': 'abc',
        'postal_code': 'E14 6XK',
        'postal_full_name': 'Jeremy',
        'sectors': 'Security',
        'slug': 'great-company',
        'summary': 'this is a short summary',
        'supplier_case_studies': [],
        'twitter_url': 'http://www.twitter.com',
        'verified_with_code': True,
        'website': 'http://example.com',
        'company_type': 'COMPANIES_HOUSE',
        'is_published_investment_support_directory': True,
        'is_published_find_a_supplier': True,
        'is_in_companies_house': True
    }
def test_company_parser_serialize_for_template_empty():
    # An empty profile payload serializes to an empty template context.
    parser = helpers.CompanyParser({})
    assert parser.serialize_for_template() == {}
def test_get_results_from_search_response_xss(retrieve_profile_data):
    # Highlight fragments come back from search unescaped; formatting must
    # join them with "..." without mangling the embedded markup.
    hit = {
        '_source': retrieve_profile_data,
        'highlight': {
            'description': [
                '<a onmouseover=javascript:func()>stuff</a>',
                'to the max <em>wolf</em>.'
            ]
        }
    }
    search_response = core.tests.helpers.create_response(
        json_payload={'hits': {'total': 1, 'hits': [hit]}}
    )

    parsed = helpers.get_results_from_search_response(search_response)

    assert parsed['results'][0]['highlight'] == (
        '<a onmouseover=javascript:func()>stuff</a>...to the max '
        '<em>wolf</em>.'
    )
def test_get_filters_labels():
    # Non-filter keys such as ``q`` and ``page`` are ignored; the remaining
    # filter values are translated into human-readable labels.
    search_filters = {
        'expertise_languages': ['aa'],
        'q': 'foo',
        'page': 5,
        'expertise_regions': ['NORTH_EAST'],
        'expertise_products_services_financial': [expertise.FINANCIAL[1]],
        'industries': [sectors.AEROSPACE, sectors.ADVANCED_MANUFACTURING],
        'expertise_products_services_human_resources': [
            'Employment and talent research'
        ],
    }
    expected_labels = [
        'Afar',
        'North East',
        'Insurance',
        'Aerospace',
        'Advanced manufacturing',
        'Employment and talent research',
    ]
    assert helpers.get_filters_labels(search_filters) == expected_labels
| true | true |
79017dde051b610290823593c239642b85ff636f | 6,965 | py | Python | auction/models/bases.py | JohnRomanski/django-auction | bc6982c8f34a9a6914badb203424eca7f3219685 | [
"MIT"
] | 1 | 2021-02-04T21:48:53.000Z | 2021-02-04T21:48:53.000Z | auction/models/bases.py | JohnRomanski/django-auction | bc6982c8f34a9a6914badb203424eca7f3219685 | [
"MIT"
] | null | null | null | auction/models/bases.py | JohnRomanski/django-auction | bc6982c8f34a9a6914badb203424eca7f3219685 | [
"MIT"
] | null | null | null | from decimal import Decimal
from django.db import models
from polymorphic.models import PolymorphicModel
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from auction.utils.loader import get_model_string
from django.conf import settings
class CurrencyField(models.DecimalField):
    """DecimalField that normalizes non-null values to two decimal places."""

    def to_python(self, value):
        """Convert ``value`` to a ``Decimal`` quantized to 0.01.

        Returns ``None`` when the parent field converts the value to
        ``None`` (null/blank input), matching Django's
        ``DecimalField.to_python`` contract.  The original implementation
        relied on ``None.quantize`` raising ``AttributeError``, which also
        silently swallowed unrelated attribute errors; an explicit check
        is used instead.
        """
        converted = super(CurrencyField, self).to_python(value=value)
        if converted is None:
            return None
        return converted.quantize(Decimal("0.01"))
class BaseAuction(PolymorphicModel):
    """
    Abstract polymorphic base for an auction: a named, sluggable event
    that runs between ``start_date`` and ``end_date``.
    """
    name = models.CharField(max_length=255, verbose_name=_('Auction name'))
    slug = models.SlugField(unique=True, verbose_name=_('Slug'))
    start_date = models.DateTimeField(verbose_name=_('Start date'))
    end_date = models.DateTimeField(verbose_name=_('End date'))
    active = models.BooleanField(default=False, verbose_name=_('Active'))
    # Denormalised bid counter; nothing in this module updates it, so it
    # is presumably maintained by bidding views/signals -- TODO confirm.
    total_bids = models.IntegerField(default=0, verbose_name=_('Total bids'))
    date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
    last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Auction')
        verbose_name_plural = _('Auctions')

    def __unicode__(self):
        # Python 2 string representation.
        return self.name
class BaseBidBasket(models.Model):
    """
    Collection of a user's bids, similar to a shopping cart except it
    expects a logged in user (one basket per user).
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('User'))
    date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
    last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Bid basket')
        verbose_name_plural = _('Bid baskets')

    @staticmethod
    def _clean_amount(amount):
        """Coerce ``amount`` to a Decimal, falling back to 0 on bad input."""
        try:
            return Decimal(amount)
        except (TypeError, ValueError, ArithmeticError):
            # Decimal raises InvalidOperation (an ArithmeticError
            # subclass) for unparseable strings; treat any bad value as
            # a zero bid rather than propagating the error.
            return Decimal('0')

    def add_bid(self, lot, amount):
        """
        Create or update this basket's bid on ``lot`` with ``amount``.

        Returns the ``BidItem`` on success, or ``False`` when the lot
        is not biddable.
        """
        from auction.models import BidItem

        # Ensure the basket has a primary key for the FK below.
        self.save()

        if not lot.is_biddable:
            return False

        amount = self._clean_amount(amount)

        from auction.models.lot import Lot
        item, _created = BidItem.objects.get_or_create(
            bid_basket=self,
            content_type=ContentType.objects.get_for_model(Lot),
            lot_id=lot.pk)
        item.amount = amount
        item.save()
        return item

    def update_bid(self, bid_basket_item_id, amount):
        """
        Update amount of bid. Delete bid if amount is 0. Locked bids
        (whose lot has ended) are returned unchanged.
        """
        amount = self._clean_amount(amount)
        bid_basket_item = self.bids.get(pk=bid_basket_item_id)
        if not bid_basket_item.is_locked():
            if amount == 0:
                bid_basket_item.delete()
            else:
                bid_basket_item.amount = amount
                bid_basket_item.save()
            # Bump the basket's last_modified timestamp.
            self.save()
        return bid_basket_item

    def delete_bid(self, bid_basket_item_id):
        """
        Delete a single unlocked item from bid basket; returns the item.
        """
        bid_basket_item = self.bids.get(pk=bid_basket_item_id)
        if not bid_basket_item.is_locked():
            bid_basket_item.delete()
        return bid_basket_item

    def empty(self):
        """
        Remove all unlocked bids from bid basket.
        """
        if self.pk:
            for bid in self.bids.all():
                if not bid.is_locked():
                    bid.delete()

    @property
    def bids(self):
        """
        Used as accessor for abstract related (BaseBidItem.bid_items).

        If you override BaseBidItem and use a label other than "auction"
        you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME.

        Example: foo_biditem_related
        (where your label is "foo" and your model is "BidItem")
        """
        related_name = getattr(settings, 'AUCTION_BIDBASKET_BIDS_RELATED_NAME',
                               'auction_biditem_related')
        return getattr(self, related_name)

    @property
    def total_bids(self):
        """
        Returns total bids in basket.
        """
        # count() issues a SQL COUNT instead of loading every row just
        # to take len() of the list.
        return self.bids.count()
class BaseAuctionLot(PolymorphicModel):
    """
    Abstract base for a single auction lot, pointing at an arbitrary
    model instance through a generic foreign key.
    """
    name = models.CharField(max_length=255, verbose_name=_('Lot name'))
    slug = models.SlugField(auto_created=True, verbose_name=_('Slug'))
    active = models.BooleanField(default=False, verbose_name=_('Active'))
    is_biddable = models.BooleanField(default=False, verbose_name=_('Is biddable?'))
    # Generic relation to the object this lot belongs to; is_locked
    # below reads content_object.end_date, so this presumably targets
    # the parent auction -- confirm against concrete subclasses.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_lots",
                                     verbose_name=_('Content type'))
    object_id = models.PositiveIntegerField(verbose_name=_('Object ID'))
    content_object = GenericForeignKey('content_type', 'object_id')
    date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
    last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Auction lot')
        verbose_name_plural = _('Auction lots')

    def __unicode__(self):
        # Python 2 string representation.
        return self.name

    @property
    def is_locked(self):
        """
        This property is meant to be overwritten with your own logic. Bid baskets
        check this method to find out if a bid can be manipulated.
        """
        import auction.utils.generic
        now = auction.utils.generic.get_current_time()
        # Locked once the related object's end date has passed.
        return self.content_object.end_date <= now
class BaseBidItem(models.Model):
    """
    This is a holder for total number of bids and a pointer to
    item being bid on.
    """
    bid_basket = models.ForeignKey(get_model_string("BidBasket"), on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Bid basket'))
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Content type'))
    # Generic FK target id: the lot being bid on (paired with
    # content_type via lot_object below).
    lot_id = models.PositiveIntegerField(verbose_name=_('Lot ID'))
    lot_object = GenericForeignKey('content_type', 'lot_id')
    amount = CurrencyField(max_digits=10, decimal_places=2, null=True, blank=True, verbose_name=_('Amount'))

    class Meta:
        abstract = True
        app_label = 'auction'
        verbose_name = _('Bid item')
        verbose_name_plural = _('Bid items')

    def is_locked(self):
        # Delegates to the lot's is_locked property (True once the
        # underlying object's end date has passed).
        return self.lot.is_locked

    @property
    def lot(self):
        # Convenience alias for the generic relation.
        return self.lot_object
from django.db import models
from polymorphic.models import PolymorphicModel
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from auction.utils.loader import get_model_string
from django.conf import settings
class CurrencyField(models.DecimalField):
def to_python(self, value):
try:
return super(CurrencyField, self).to_python(value=value).quantize(Decimal("0.01"))
except AttributeError:
return None
class BaseAuction(PolymorphicModel):
name = models.CharField(max_length=255, verbose_name=_('Auction name'))
slug = models.SlugField(unique=True, verbose_name=_('Slug'))
start_date = models.DateTimeField(verbose_name=_('Start date'))
end_date = models.DateTimeField(verbose_name=_('End date'))
active = models.BooleanField(default=False, verbose_name=_('Active'))
total_bids = models.IntegerField(default=0, verbose_name=_('Total bids'))
date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Auction')
verbose_name_plural = _('Auctions')
def __unicode__(self):
return self.name
class BaseBidBasket(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('User'))
date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Bid basket')
verbose_name_plural = _('Bid baskets')
def add_bid(self, lot, amount):
from auction.models import BidItem
self.save()
if not lot.is_biddable:
return False
try:
amount = Decimal(amount)
except Exception as e:
amount = Decimal('0')
from auction.models.lot import Lot
item,created = BidItem.objects.get_or_create(bid_basket=self,
content_type=ContentType.objects.get_for_model(Lot),
lot_id=lot.pk)
if item:
item.amount=amount
item.save()
return item
def update_bid(self, bid_basket_item_id, amount):
try:
amount = Decimal(amount)
except Exception as e:
amount = Decimal('0')
bid_basket_item = self.bids.get(pk=bid_basket_item_id)
if not bid_basket_item.is_locked():
if amount == 0:
bid_basket_item.delete()
else:
bid_basket_item.amount = amount
bid_basket_item.save()
self.save()
return bid_basket_item
def delete_bid(self, bid_basket_item_id):
bid_basket_item = self.bids.get(pk=bid_basket_item_id)
if not bid_basket_item.is_locked():
bid_basket_item.delete()
return bid_basket_item
def empty(self):
if self.pk:
bids = self.bids.all()
for bid in bids:
if not bid.is_locked():
bid.delete()
@property
def bids(self):
bids = getattr(settings, 'AUCTION_BIDBASKET_BIDS_RELATED_NAME',
'auction_biditem_related')
return getattr(self, bids)
@property
def total_bids(self):
return len(self.bids.all())
class BaseAuctionLot(PolymorphicModel):
name = models.CharField(max_length=255, verbose_name=_('Lot name'))
slug = models.SlugField(auto_created=True, verbose_name=_('Slug'))
active = models.BooleanField(default=False, verbose_name=_('Active'))
is_biddable = models.BooleanField(default=False, verbose_name=_('Is biddable?'))
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_lots",
verbose_name=_('Content type'))
object_id = models.PositiveIntegerField(verbose_name=_('Object ID'))
content_object = GenericForeignKey('content_type', 'object_id')
date_added = models.DateTimeField(auto_now_add=True, verbose_name=_('Date added'))
last_modified = models.DateTimeField(auto_now=True, verbose_name=_('Last modified'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Auction lot')
verbose_name_plural = _('Auction lots')
def __unicode__(self):
return self.name
@property
def is_locked(self):
import auction.utils.generic
now = auction.utils.generic.get_current_time()
return self.content_object.end_date <= now
class BaseBidItem(models.Model):
bid_basket = models.ForeignKey(get_model_string("BidBasket"), on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Bid basket'))
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s_related", verbose_name=_('Content type'))
lot_id = models.PositiveIntegerField(verbose_name=_('Lot ID'))
lot_object = GenericForeignKey('content_type', 'lot_id')
amount = CurrencyField(max_digits=10, decimal_places=2, null=True, blank=True, verbose_name=_('Amount'))
class Meta:
abstract = True
app_label = 'auction'
verbose_name = _('Bid item')
verbose_name_plural = _('Bid items')
def is_locked(self):
return self.lot.is_locked
@property
def lot(self):
return self.lot_object | true | true |
79017ec0b14fe9afce886b1930ce41463eeb6e58 | 2,871 | py | Python | cvpods/engine/predictor.py | reinforcementdriving/cvpods | 32d98b74745020be035a0e20337ad934201615c4 | [
"Apache-2.0"
] | null | null | null | cvpods/engine/predictor.py | reinforcementdriving/cvpods | 32d98b74745020be035a0e20337ad934201615c4 | [
"Apache-2.0"
] | null | null | null | cvpods/engine/predictor.py | reinforcementdriving/cvpods | 32d98b74745020be035a0e20337ad934201615c4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from copy import deepcopy
import torch
from cvpods.checkpoint import DefaultCheckpointer
from cvpods.data import build_transform_gens
__all__ = ["DefaultPredictor"]
class DefaultPredictor:
    """
    Simple end-to-end predictor running a model built from ``cfg`` on a
    single device, one image at a time.

    Compared to using the model directly, this class additionally:

    1. Loads the checkpoint named by ``cfg.MODEL.WEIGHTS``.
    2. Takes a BGR image and converts it per ``cfg.INPUT.FORMAT``.
    3. Applies the test-time transforms from
       ``cfg.INPUT.AUG.TEST_PIPELINES``.
    4. Takes one input image and produces a single output dict.

    Attributes:
        metadata: dataset metadata supplied by the caller.

    Examples:

    .. code-block:: python

        pred = DefaultPredictor(cfg, meta)
        outputs = pred(cv2.imread("input.jpg"))
    """

    def __init__(self, cfg, meta):
        self.cfg = deepcopy(cfg)
        if self.cfg.MODEL.DEVICE.startswith("cuda:"):
            # Pin the process to the requested GPU, then use the
            # generic "cuda" device string for model placement.
            torch.cuda.set_device(self.cfg.MODEL.DEVICE)
            self.cfg.MODEL.DEVICE = "cuda"

        self.model = cfg.build_model(self.cfg)
        self.model.eval()
        self.metadata = meta

        DefaultCheckpointer(self.model).load(cfg.MODEL.WEIGHTS)

        self.transform_gen = build_transform_gens(cfg.INPUT.AUG.TEST_PIPELINES)
        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ("RGB", "BGR"), self.input_format

    def __call__(self, original_image):
        """
        Args:
            original_image (np.ndarray): an image of shape (H, W, C)
                in BGR order.

        Returns:
            dict: the model's output for this single image.
        """
        with torch.no_grad():
            if self.input_format == "RGB":
                # The model was configured for RGB inputs; flip the
                # incoming BGR channel order.
                original_image = original_image[:, :, ::-1]
            height, width = original_image.shape[:2]
            image = original_image
            for tfm_gen in self.transform_gen:
                image = tfm_gen.get_transform(image).apply_image(image)
            # HWC -> CHW float32 tensor.
            tensor = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
            inputs = {"image": tensor, "height": height, "width": width}
            return self.model([inputs])[0]
| 35.012195 | 93 | 0.634622 |
from copy import deepcopy
import torch
from cvpods.checkpoint import DefaultCheckpointer
from cvpods.data import build_transform_gens
__all__ = ["DefaultPredictor"]
class DefaultPredictor:
def __init__(self, cfg, meta):
self.cfg = deepcopy(cfg)
if self.cfg.MODEL.DEVICE.startswith("cuda:"):
torch.cuda.set_device(self.cfg.MODEL.DEVICE)
self.cfg.MODEL.DEVICE = "cuda"
self.model = cfg.build_model(self.cfg)
self.model.eval()
self.metadata = meta
checkpointer = DefaultCheckpointer(self.model)
checkpointer.load(cfg.MODEL.WEIGHTS)
self.transform_gen = build_transform_gens(cfg.INPUT.AUG.TEST_PIPELINES)
self.input_format = cfg.INPUT.FORMAT
assert self.input_format in ["RGB", "BGR"], self.input_format
def __call__(self, original_image):
with torch.no_grad(
):
if self.input_format == "RGB":
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = original_image
for tfm_gen in self.transform_gen:
image = tfm_gen.get_transform(image).apply_image(image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
| true | true |
7901805fc8cdf86e45d1ce43b59c72b49765ded2 | 16,435 | py | Python | mezzanine/core/managers.py | abendig/mezzanine | 3219ac9ba2d6d94ce63e8b2a747c3b264b13beec | [
"BSD-2-Clause"
] | null | null | null | mezzanine/core/managers.py | abendig/mezzanine | 3219ac9ba2d6d94ce63e8b2a747c3b264b13beec | [
"BSD-2-Clause"
] | null | null | null | mezzanine/core/managers.py | abendig/mezzanine | 3219ac9ba2d6d94ce63e8b2a747c3b264b13beec | [
"BSD-2-Clause"
] | null | null | null | from __future__ import unicode_literals
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.loading import get_models
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.models import get_model
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
class PublishedManager(Manager):
    """
    Provides filter for restricting items returned by status and
    publish date when the given user is not a staff member.
    """

    def published(self, for_user=None):
        """
        For non-staff users, return items with a published status and
        whose publish and expiry dates fall before and after the
        current date when specified.
        """
        from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
        # Staff see everything, including drafts and expired items.
        if for_user is not None and for_user.is_staff:
            return self.all()
        already_published = (Q(publish_date__lte=now()) |
                             Q(publish_date__isnull=True))
        not_expired = (Q(expiry_date__gte=now()) |
                       Q(expiry_date__isnull=True))
        return self.filter(already_published, not_expired,
                           Q(status=CONTENT_STATUS_PUBLISHED))

    def get_by_natural_key(self, slug):
        """Look items up by their slug for natural-key serialization."""
        return self.get(slug=slug)
def search_fields_to_dict(fields):
    """
    In ``SearchableQuerySet`` and ``SearchableManager``, search fields
    can either be a sequence, or a dict of fields mapped to weights.
    This function converts sequences to a dict mapped to even weights,
    so that we're consistently dealing with a dict of fields mapped to
    weights, eg: ("title", "content") -> {"title": 1, "content": 1}

    Always returns a dict: previously a list of (field, weight) pairs
    was returned unchanged, which broke callers that expect ``.keys()``.
    """
    if not fields:
        return {}
    try:
        # Probe whether ``fields`` is already a mapping (or a sequence
        # of (field, weight) pairs) with numeric weights.
        as_dict = dict(fields)
        int(list(as_dict.values())[0])
    except (TypeError, ValueError):
        # Plain sequence of field names - assign an even weight of 1.
        return dict(zip(fields, [1] * len(fields)))
    return as_dict
class SearchableQuerySet(QuerySet):
    """
    QuerySet providing main search functionality for
    ``SearchableManager``.
    """

    def __init__(self, *args, **kwargs):
        # True once order_by() is called after a search - relevance
        # sorting in iterator() is then skipped.
        self._search_ordered = False
        # Positive search terms collected by search(), used by
        # iterator() to score results by term occurrences.
        self._search_terms = set()
        # Dict of field name -> weight, supplied by SearchableManager.
        self._search_fields = kwargs.pop("search_fields", {})
        super(SearchableQuerySet, self).__init__(*args, **kwargs)

    def search(self, query, search_fields=None):
        """
        Build a queryset matching words in the given search query,
        treating quoted terms as exact phrases and taking into
        account + and - symbols as modifiers controlling which terms
        to require and exclude.
        """

        # ### DETERMINE FIELDS TO SEARCH ###

        # Use search_fields arg if given, otherwise use search_fields
        # initially configured by the manager class.
        if search_fields:
            self._search_fields = search_fields_to_dict(search_fields)
        if not self._search_fields:
            return self.none()

        # ### BUILD LIST OF TERMS TO SEARCH FOR ###

        # Remove extra spaces, put modifiers inside quoted terms.
        terms = " ".join(query.split()).replace("+ ", "+") \
                                       .replace('+"', '"+') \
                                       .replace("- ", "-") \
                                       .replace('-"', '"-') \
                                       .split('"')
        # Strip punctuation other than modifiers from terms and create
        # terms list, first from quoted terms and then remaining words.
        # (After split('"'), terms[1::2] are the quoted phrases and
        # terms[::2] is the unquoted remainder.)
        terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
            for t in terms[1::2] + "".join(terms[::2]).split()]
        # Remove stop words from terms that aren't quoted or use
        # modifiers, since words with these are an explicit part of
        # the search query. If doing so ends up with an empty term
        # list, then keep the stop words.
        terms_no_stopwords = [t for t in terms if t.lower() not in
                              settings.STOP_WORDS]
        get_positive_terms = lambda terms: [t.lower().strip(punctuation)
                                            for t in terms if t[0:1] != "-"]
        positive_terms = get_positive_terms(terms_no_stopwords)
        if positive_terms:
            terms = terms_no_stopwords
        else:
            positive_terms = get_positive_terms(terms)
        # Append positive terms (those without the negative modifier)
        # to the internal list for sorting when results are iterated.
        if not positive_terms:
            return self.none()
        else:
            self._search_terms.update(positive_terms)

        # ### BUILD QUERYSET FILTER ###

        # Create the queryset combining each set of terms.
        # "-" terms become AND-ed exclusions, "+" terms AND-ed
        # requirements, unmodified terms OR-ed options.
        excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
            self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
        required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
            self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
        optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
            self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
        queryset = self
        if excluded:
            queryset = queryset.filter(reduce(iand, excluded))
        if required:
            queryset = queryset.filter(reduce(iand, required))
        # Optional terms aren't relevant to the filter if there are
        # terms that are explicitly required.
        elif optional:
            queryset = queryset.filter(reduce(ior, optional))
        return queryset.distinct()

    def _clone(self, *args, **kwargs):
        """
        Ensure attributes are copied to subsequent queries.
        """
        for attr in ("_search_terms", "_search_fields", "_search_ordered"):
            kwargs[attr] = getattr(self, attr)
        return super(SearchableQuerySet, self)._clone(*args, **kwargs)

    def order_by(self, *field_names):
        """
        Mark the filter as being ordered if search has occurred.
        """
        if not self._search_ordered:
            self._search_ordered = len(self._search_terms) > 0
        return super(SearchableQuerySet, self).order_by(*field_names)

    def iterator(self):
        """
        If search has occurred and no ordering has occurred, decorate
        each result with the number of search terms so that it can be
        sorted by the number of occurrence of terms.

        In the case of search fields that span model relationships, we
        cannot accurately match occurrences without some very
        complicated traversal code, which we won't attempt. So in this
        case, namely when there are no matches for a result (count=0),
        and search fields contain relationships (double underscores),
        we assume one match for one of the fields, and use the average
        weight of all search fields with relationships.
        """
        results = super(SearchableQuerySet, self).iterator()
        if self._search_terms and not self._search_ordered:
            results = list(results)
            for i, result in enumerate(results):
                count = 0
                related_weights = []
                for (field, weight) in self._search_fields.items():
                    if "__" in field:
                        related_weights.append(weight)
                    for term in self._search_terms:
                        field_value = getattr(result, field, None)
                        if field_value:
                            # Weighted number of occurrences of this
                            # term in the field's value.
                            count += field_value.lower().count(term) * weight
                if not count and related_weights:
                    # No direct matches but related search fields exist:
                    # assume one match at the average related weight.
                    count = int(sum(related_weights) / len(related_weights))
                results[i].result_count = count
            return iter(results)
        return results
class SearchableManager(Manager):
    """
    Manager providing a chainable queryset.
    Adapted from http://www.djangosnippets.org/snippets/562/
    search method supports spanning across models that subclass the
    model being used to search.
    """

    def __init__(self, *args, **kwargs):
        # Optional explicit field -> weight mapping; normally empty and
        # resolved lazily from the model in get_search_fields().
        self._search_fields = kwargs.pop("search_fields", {})
        super(SearchableManager, self).__init__(*args, **kwargs)

    def get_search_fields(self):
        """
        Returns the search field names mapped to weights as a dict.
        Used in ``get_queryset`` below to tell ``SearchableQuerySet``
        which search fields to use. Also used by ``DisplayableAdmin``
        to populate Django admin's ``search_fields`` attribute.

        Search fields can be populated via
        ``SearchableManager.__init__``, which then get stored in
        ``SearchableManager._search_fields``, which serves as an
        approach for defining an explicit set of fields to be used.

        Alternatively and more commonly, ``search_fields`` can be
        defined on models themselves. In this case, we look at the
        model and all its base classes, and build up the search
        fields from all of those, so the search fields are implicitly
        built up from the inheritence chain.

        Finally if no search fields have been defined at all, we
        fall back to any fields that are ``CharField`` or ``TextField``
        instances.
        """
        search_fields = self._search_fields.copy()
        if not search_fields:
            # Walk the MRO from base to leaf so subclasses' weights
            # override their parents'.
            for cls in reversed(self.model.__mro__):
                super_fields = getattr(cls, "search_fields", {})
                search_fields.update(search_fields_to_dict(super_fields))
        if not search_fields:
            search_fields = []
            for f in self.model._meta.fields:
                if isinstance(f, (CharField, TextField)):
                    search_fields.append(f.name)
            search_fields = search_fields_to_dict(search_fields)
        return search_fields

    def get_queryset(self):
        search_fields = self.get_search_fields()
        return SearchableQuerySet(self.model, search_fields=search_fields)

    def contribute_to_class(self, model, name):
        """
        Django 1.5 explicitly prevents managers being accessed from
        abstract classes, which is behaviour the search API has relied
        on for years. Here we reinstate it.
        """
        super(SearchableManager, self).contribute_to_class(model, name)
        setattr(model, name, ManagerDescriptor(self))

    def search(self, *args, **kwargs):
        """
        Proxy to queryset's search method for the manager's model and
        any models that subclass from this manager's model if the
        model is abstract.
        """
        if not settings.SEARCH_MODEL_CHOICES:
            # No choices defined - build a list of leaf models (those
            # without subclasses) that inherit from Displayable.
            models = [m for m in get_models() if issubclass(m, self.model)]
            parents = reduce(ior, [m._meta.get_parent_list() for m in models])
            models = [m for m in models if m not in parents]
        elif getattr(self.model._meta, "abstract", False):
            # When we're combining model subclasses for an abstract
            # model (eg Displayable), we only want to use models that
            # are represented by the ``SEARCH_MODEL_CHOICES`` setting.
            # Now this setting won't contain an exact list of models
            # we should use, since it can define superclass models such
            # as ``Page``, so we check the parent class list of each
            # model when determining whether a model falls within the
            # ``SEARCH_MODEL_CHOICES`` setting.
            search_choices = set()
            models = set()
            parents = set()
            errors = []
            for name in settings.SEARCH_MODEL_CHOICES:
                try:
                    model = get_model(*name.split(".", 1))
                except LookupError:
                    errors.append(name)
                else:
                    search_choices.add(model)
            if errors:
                raise ImproperlyConfigured("Could not load the model(s) "
                        "%s defined in the 'SEARCH_MODEL_CHOICES' setting."
                        % ", ".join(errors))
            for model in get_models():
                # Model is actually a subclasses of what we're
                # searching (eg Displayabale)
                is_subclass = issubclass(model, self.model)
                # Model satisfies the search choices list - either
                # there are no search choices, model is directly in
                # search choices, or its parent is.
                this_parents = set(model._meta.get_parent_list())
                in_choices = not search_choices or model in search_choices
                in_choices = in_choices or this_parents & search_choices
                if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll seach. Also maintain a parent
                    # set, used below for further refinement of models
                    # list to search.
                    models.add(model)
                    parents.update(this_parents)
            # Strip out any models that are superclasses of models,
            # specifically the Page model which will generally be the
            # superclass for all custom content types, since if we
            # query the Page model as well, we will get duplicate
            # results.
            models -= parents
        else:
            # Concrete model with explicit choices configured - search
            # only this manager's model.
            models = [self.model]
        all_results = []
        user = kwargs.pop("for_user", None)
        for model in models:
            try:
                # published() only exists on managers mixing in
                # PublishedManager; plain managers fall back to all.
                queryset = model.objects.published(for_user=user)
            except AttributeError:
                queryset = model.objects.get_queryset()
            all_results.extend(queryset.search(*args, **kwargs))
        # Sort combined results by the relevance score assigned in
        # SearchableQuerySet.iterator().
        return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
    """
    Extends Django's site manager to first look up site by ID stored in
    the request, the session, then domain for the current request
    (accessible via threadlocals in ``mezzanine.core.request``), the
    environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg, finally falling back
    to ``settings.SITE_ID`` if none of those match a site.
    """
    def __init__(self, field_name=None, *args, **kwargs):
        # Deliberately skip DjangoCSM.__init__ (calling Manager's
        # instead) so the field-name validation it performs is deferred
        # until get_queryset() runs.
        super(DjangoCSM, self).__init__(*args, **kwargs)
        self.__field_name = field_name
        # NOTE(review): never set True in this class, so the validation
        # branch below runs on every get_queryset() call -- confirm
        # whether the parent helpers flip it.
        self.__is_validated = False

    def get_queryset(self):
        if not self.__is_validated:
            try:
                # Django <= 1.6
                self._validate_field_name()
            except AttributeError:
                # Django >= 1.7: will populate "self.__field_name".
                self._get_field_name()
        # Filter to the site resolved for the current request/env.
        lookup = {self.__field_name + "__id__exact": current_site_id()}
        return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
                         SearchableManager):
    """
    Manually combines ``CurrentSiteManager``, ``PublishedManager``
    and ``SearchableManager`` for the ``Displayable`` model.
    """

    def url_map(self, for_user=None, **kwargs):
        """
        Returns a dictionary of urls mapped to Displayable subclass
        instances, including a fake homepage instance if none exists.
        Used in ``mezzanine.core.sitemaps``.
        """
        # Start with a stand-in homepage entry; a real homepage object
        # with the same URL will simply overwrite it below.
        fake_home = self.model(title=_("Home"))
        setattr(fake_home, "get_absolute_url", home_slug)
        url_mapping = {fake_home.get_absolute_url(): fake_home}
        for model in get_models():
            if not issubclass(model, self.model):
                continue
            # Published items only, skipping redirect-style slugs that
            # point at external URLs.
            published = (model.objects.published(for_user=for_user)
                                      .filter(**kwargs)
                                      .exclude(slug__startswith="http://")
                                      .exclude(slug__startswith="https://"))
            for obj in published:
                url_mapping[obj.get_absolute_url()] = obj
        return url_mapping
| 43.364116 | 78 | 0.61369 | from __future__ import unicode_literals
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.loading import get_models
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.models import get_model
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
class PublishedManager(Manager):
def published(self, for_user=None):
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
class SearchableQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
rch_fields)
if not self._search_fields:
return self.none()
"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def iterator(self):
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results
class SearchableManager(Manager):
    """
    Manager providing a ``search`` method that queries one or more models,
    delegating per-model querying to ``SearchableQuerySet.search`` and
    merging results ordered by relevancy (``result_count``).
    """
    def __init__(self, *args, **kwargs):
        # Optional ``search_fields`` kwarg maps field names to weights
        # (or is any shape accepted by ``search_fields_to_dict``).
        self._search_fields = kwargs.pop("search_fields", {})
        super(SearchableManager, self).__init__(*args, **kwargs)
    def get_search_fields(self):
        """
        Return the fields to search as a ``{field_name: weight}`` dict.
        Resolution order: fields given to ``__init__``, then ``search_fields``
        attributes collected over the model's MRO, then (as a last resort)
        every CharField/TextField on the model.
        """
        search_fields = self._search_fields.copy()
        if not search_fields:
            # Walk the MRO base-first so subclasses override ancestors.
            for cls in reversed(self.model.__mro__):
                super_fields = getattr(cls, "search_fields", {})
                search_fields.update(search_fields_to_dict(super_fields))
        if not search_fields:
            # Fallback: search all textual fields on the model.
            search_fields = []
            for f in self.model._meta.fields:
                if isinstance(f, (CharField, TextField)):
                    search_fields.append(f.name)
            search_fields = search_fields_to_dict(search_fields)
        return search_fields
    def get_queryset(self):
        search_fields = self.get_search_fields()
        return SearchableQuerySet(self.model, search_fields=search_fields)
    def contribute_to_class(self, model, name):
        # Expose the manager through a descriptor so it is reachable on the
        # model class in the usual ``Model.objects`` fashion.
        super(SearchableManager, self).contribute_to_class(model, name)
        setattr(model, name, ManagerDescriptor(self))
    def search(self, *args, **kwargs):
        """
        Search across one or more models and return all results sorted by
        relevancy. Which models are searched depends on the
        ``SEARCH_MODEL_CHOICES`` setting and whether this manager's model
        is abstract. Accepts ``for_user`` to restrict to published items.
        """
        if not settings.SEARCH_MODEL_CHOICES:
            # No choices defined - build a list of leaf models (those
            # without subclasses) that inherit from Displayable.
            models = [m for m in get_models() if issubclass(m, self.model)]
            # NOTE(review): assumes ``get_parent_list()`` returns sets so
            # ``ior`` (|) unions them - confirm against the Django version
            # in use; newer Django returns lists here.
            parents = reduce(ior, [m._meta.get_parent_list() for m in models])
            models = [m for m in models if m not in parents]
        elif getattr(self.model._meta, "abstract", False):
            # When we're combining model subclasses for an abstract
            # model (eg Displayable), we only want to use models that
            # are represented by the ``SEARCH_MODEL_CHOICES`` setting.
            # Now this setting won't contain an exact list of models
            # we should use, since it can define superclass models such
            # as ``Page``, so we check the parent class list of each
            # model when determining whether a model falls within the
            # ``SEARCH_MODEL_CHOICES`` setting.
            search_choices = set()
            models = set()
            parents = set()
            errors = []
            for name in settings.SEARCH_MODEL_CHOICES:
                try:
                    model = get_model(*name.split(".", 1))
                except LookupError:
                    errors.append(name)
                else:
                    search_choices.add(model)
            if errors:
                raise ImproperlyConfigured("Could not load the model(s) "
                    "%s defined in the 'SEARCH_MODEL_CHOICES' setting."
                    % ", ".join(errors))
            for model in get_models():
                # Model is actually a subclasses of what we're
                # searching (eg Displayabale)
                is_subclass = issubclass(model, self.model)
                # Model satisfies the search choices list - either
                # there are no search choices, model is directly in
                # search choices, or its parent is.
                this_parents = set(model._meta.get_parent_list())
                in_choices = not search_choices or model in search_choices
                in_choices = in_choices or this_parents & search_choices
                if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll seach. Also maintain a parent
                    # set, used below for further refinement of models
                    # list to search.
                    models.add(model)
                    parents.update(this_parents)
            # Strip out any models that are superclasses of models,
            # specifically the Page model which will generally be the
            # superclass for all custom content types, since if we
            # query the Page model as well, we will get duplicate
            # results.
            models -= parents
        else:
            models = [self.model]
        all_results = []
        user = kwargs.pop("for_user", None)
        for model in models:
            try:
                # Prefer published items when the manager supports it.
                queryset = model.objects.published(for_user=user)
            except AttributeError:
                queryset = model.objects.get_queryset()
            all_results.extend(queryset.search(*args, **kwargs))
        # ``result_count`` is assigned by SearchableQuerySet.iterator.
        return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
    """
    Extends Django's site manager to filter by the site id returned by
    ``current_site_id()`` instead of Django's own site resolution.
    """
    def __init__(self, field_name=None, *args, **kwargs):
        # Deliberately calls ``super(DjangoCSM, ...)`` - skipping
        # ``DjangoCSM.__init__`` - so the site-filtering setup is replaced
        # by the lookup built in ``get_queryset`` below.
        super(DjangoCSM, self).__init__(*args, **kwargs)
        # NOTE(review): the double-underscore attributes name-mangle to
        # ``_CurrentSiteManager__...``, which appears to line up with the
        # names Django's own ``CurrentSiteManager`` helpers populate -
        # confirm before renaming this class.
        self.__field_name = field_name
        self.__is_validated = False
    def get_queryset(self):
        # Resolve/validate the site field name once via whichever helper the
        # installed Django version provides.
        if not self.__is_validated:
            try:
                # Django <= 1.6
                self._validate_field_name()
            except AttributeError:
                # Django >= 1.7: will populate "self.__field_name".
                self._get_field_name()
        lookup = {self.__field_name + "__id__exact": current_site_id()}
        return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
                         SearchableManager):
    """
    Combines site filtering, published filtering and search behaviour
    for ``Displayable`` models.
    """
    def url_map(self, for_user=None, **kwargs):
        """
        Return a dict mapping absolute URLs to published instances of every
        model subclassing this manager's model. A fake "Home" instance is
        inserted first; a real item with the same URL replaces it.
        """
        fake_home = self.model(title=_("Home"))
        setattr(fake_home, "get_absolute_url", home_slug)
        url_to_item = {fake_home.get_absolute_url(): fake_home}
        for model in get_models():
            if not issubclass(model, self.model):
                continue
            # Skip items whose slug is an external link.
            published = (model.objects.published(for_user=for_user)
                                      .filter(**kwargs)
                                      .exclude(slug__startswith="http://")
                                      .exclude(slug__startswith="https://"))
            for item in published:
                url_to_item[item.get_absolute_url()] = item
        return url_to_item
| true | true |
790180b6fa6ecd539778f977414d0a204e12f6bf | 2,465 | py | Python | tests/dashboard/widgets/test_reg_error_normality_widget.py | Tapot/evidently | ab9b91425d622566b663565508dd1c43e741f515 | [
"Apache-2.0"
] | null | null | null | tests/dashboard/widgets/test_reg_error_normality_widget.py | Tapot/evidently | ab9b91425d622566b663565508dd1c43e741f515 | [
"Apache-2.0"
] | null | null | null | tests/dashboard/widgets/test_reg_error_normality_widget.py | Tapot/evidently | ab9b91425d622566b663565508dd1c43e741f515 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import pandas as pd
import pytest
from evidently.analyzers.regression_performance_analyzer import RegressionPerformanceAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.options import OptionsProvider
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.dashboard.widgets.reg_error_normality_widget import RegErrorNormalityWidget
@pytest.fixture
def widget() -> RegErrorNormalityWidget:
    """Return a widget wired up with a fresh options provider."""
    test_widget = RegErrorNormalityWidget("test_widget")
    test_widget.options_provider = OptionsProvider()
    return test_widget
def test_reg_error_normality_widget_analyzer_list(widget: RegErrorNormalityWidget) -> None:
    """The widget must request exactly the regression performance analyzer."""
    expected_analyzers = [RegressionPerformanceAnalyzer]
    assert widget.analyzers() == expected_analyzers
@pytest.mark.parametrize(
    "reference_data, current_data, data_mapping, dataset, expected_result",
    (
        (
            pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
            None,
            ColumnMapping(),
            None,
            BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
        ),
        (
            pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
            pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
            ColumnMapping(),
            "reference",
            BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
        ),
    ),
)
def test_reg_error_normality_widget_simple_case(
    widget: RegErrorNormalityWidget,
    reference_data: pd.DataFrame,
    current_data: pd.DataFrame,
    data_mapping: ColumnMapping,
    dataset: Optional[str],
    expected_result: BaseWidgetInfo,
) -> None:
    """Run the analyzer + widget pipeline and compare the produced widget
    info against the expected one (or expect no widget at all)."""
    if dataset is not None:
        widget.dataset = dataset
    analyzer = RegressionPerformanceAnalyzer()
    analyzer.options_provider = widget.options_provider
    analysis = analyzer.calculate(reference_data, current_data, data_mapping)
    result = widget.calculate(
        reference_data, current_data, data_mapping, {RegressionPerformanceAnalyzer: analysis}
    )
    if expected_result is None:
        # No widget data, show nothing.
        assert result is None
    else:
        # Some widget for visualization was produced.
        for attribute in ("type", "title", "size"):
            assert getattr(result, attribute) == getattr(expected_result, attribute)
        assert result.params is not None
| 33.310811 | 101 | 0.696552 | from typing import Optional
import pandas as pd
import pytest
from evidently.analyzers.regression_performance_analyzer import RegressionPerformanceAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.options import OptionsProvider
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.dashboard.widgets.reg_error_normality_widget import RegErrorNormalityWidget
@pytest.fixture
def widget() -> RegErrorNormalityWidget:
options_provider = OptionsProvider()
widget = RegErrorNormalityWidget("test_widget")
widget.options_provider = options_provider
return widget
def test_reg_error_normality_widget_analyzer_list(widget: RegErrorNormalityWidget) -> None:
assert widget.analyzers() == [RegressionPerformanceAnalyzer]
@pytest.mark.parametrize(
"reference_data, current_data, data_mapping, dataset, expected_result",
(
(
pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
None,
ColumnMapping(),
None,
BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
),
(
pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
ColumnMapping(),
"reference",
BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
),
),
)
def test_reg_error_normality_widget_simple_case(
widget: RegErrorNormalityWidget,
reference_data: pd.DataFrame,
current_data: pd.DataFrame,
data_mapping: ColumnMapping,
dataset: Optional[str],
expected_result: BaseWidgetInfo,
) -> None:
if dataset is not None:
widget.dataset = dataset
analyzer = RegressionPerformanceAnalyzer()
analyzer.options_provider = widget.options_provider
analyzer_results = analyzer.calculate(reference_data, current_data, data_mapping)
result = widget.calculate(
reference_data, current_data, data_mapping, {RegressionPerformanceAnalyzer: analyzer_results}
)
if expected_result is not None:
assert result.type == expected_result.type
assert result.title == expected_result.title
assert result.size == expected_result.size
assert result.params is not None
else:
assert result is None
| true | true |
7901825d081d4734618c345f5991cc10b05e2e24 | 1,370 | py | Python | Chapter13/listing13_7.py | hohsieh/osgeopy-code | 932157c748c8fedb67d862b266a983fdd29ead56 | [
"MIT"
] | 160 | 2015-01-11T06:45:11.000Z | 2022-03-07T15:09:57.000Z | Chapter13/listing13_7.py | sthagen/osgeopy-code | bc85f4ec7a630b53502ee491e400057b67cdab22 | [
"MIT"
] | 3 | 2018-09-29T11:34:13.000Z | 2020-07-20T16:45:23.000Z | Chapter13/listing13_7.py | sthagen/osgeopy-code | bc85f4ec7a630b53502ee491e400057b67cdab22 | [
"MIT"
] | 108 | 2015-05-28T11:29:01.000Z | 2022-02-12T12:01:46.000Z | # Script that uses meshgrid to get map coordinates and then plots
# the DEM in 3d.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from osgeo import gdal
ds = gdal.Open(r'D:\osgeopy-data\Washington\dem\sthelens_utm.tif')
band = ds.GetRasterBand(1)
ov_band = band.GetOverview(band.GetOverviewCount() - 3)
data = ov_band.ReadAsArray()
# Calculate bounding coordinates.
geotransform = ds.GetGeoTransform()
minx = geotransform[0]
maxy = geotransform[3]
maxx = minx + ov_band.XSize * geotransform[1]
miny = maxy + ov_band.YSize * geotransform[5]
# Get the x and y arrays.
x = np.arange(minx, maxx, geotransform[1])
y = np.arange(maxy, miny, geotransform[5])
x, y = np.meshgrid(x[:ov_band.XSize], y[:ov_band.YSize])
# Make the 3D plot.
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(x, y, data, cmap='gist_earth', lw=0)
plt.axis('equal')
# # Change the viewpoint and turn the ticks off.
# ax.view_init(elev=55, azim=60)
# plt.axis('off')
# # Create an animation.
# import matplotlib.animation as animation
# def animate(i):
# ax.view_init(elev=65, azim=i)
# anim = animation.FuncAnimation(
# fig, animate, frames=range(0, 360, 10), interval=100)
# plt.axis('off')
# # If you have FFmpeg and it's in your path, you can save the
# # animation.
# anim.save('d:/temp/helens.mp4', 'ffmpeg')
plt.show()
| 27.4 | 66 | 0.713869 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from osgeo import gdal
ds = gdal.Open(r'D:\osgeopy-data\Washington\dem\sthelens_utm.tif')
band = ds.GetRasterBand(1)
ov_band = band.GetOverview(band.GetOverviewCount() - 3)
data = ov_band.ReadAsArray()
geotransform = ds.GetGeoTransform()
minx = geotransform[0]
maxy = geotransform[3]
maxx = minx + ov_band.XSize * geotransform[1]
miny = maxy + ov_band.YSize * geotransform[5]
x = np.arange(minx, maxx, geotransform[1])
y = np.arange(maxy, miny, geotransform[5])
x, y = np.meshgrid(x[:ov_band.XSize], y[:ov_band.YSize])
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(x, y, data, cmap='gist_earth', lw=0)
plt.axis('equal')
| true | true |
790182ffa231c5da4b7f2cb69babcff3bf2c1dc2 | 4,946 | py | Python | ebl/fragmentarium/application/annotations_service.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | null | null | null | ebl/fragmentarium/application/annotations_service.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | null | null | null | ebl/fragmentarium/application/annotations_service.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | null | null | null | from io import BytesIO
from typing import Tuple, Sequence
import attr
from PIL import Image
from ebl.changelog import Changelog
from ebl.ebl_ai_client import EblAiClient
from ebl.files.application.file_repository import FileRepository
from ebl.fragmentarium.application.annotations_repository import AnnotationsRepository
from ebl.fragmentarium.application.annotations_schema import AnnotationsSchema
from ebl.fragmentarium.application.cropped_sign_image import CroppedSign
from ebl.fragmentarium.application.cropped_sign_images_repository import (
CroppedSignImage,
CroppedSignImagesRepository,
)
from ebl.fragmentarium.application.fragment_repository import FragmentRepository
from ebl.fragmentarium.domain.annotation import (
Annotations,
AnnotationValueType,
)
from ebl.transliteration.domain.line_label import LineLabel
from ebl.transliteration.domain.museum_number import MuseumNumber
from ebl.users.domain.user import User
@attr.attrs(auto_attribs=True, frozen=True)
class AnnotationsService:
    """
    Application service for fragment sign annotations: generates them via
    the ebl-ai service, looks them up, and persists updates together with
    cropped sign images and a changelog entry.
    """
    # Client used to run the ebl-ai annotation model on fragment photos.
    _ebl_ai_client: EblAiClient
    _annotations_repository: AnnotationsRepository
    _photo_repository: FileRepository
    _changelog: Changelog
    _fragments_repository: FragmentRepository
    # NOTE(review): ``_photo_repository`` and ``_photos_repository`` are two
    # separately injected file repositories - confirm whether both are needed.
    _photos_repository: FileRepository
    _cropped_sign_images_repository: CroppedSignImagesRepository
    def generate_annotations(
        self, number: MuseumNumber, threshold: float = 0.3
    ) -> Annotations:
        """Run the ebl-ai model on the fragment's photo and return the
        generated annotations (detections below ``threshold`` excluded)."""
        fragment_image = self._photo_repository.query_by_file_name(f"{number}.jpg")
        return self._ebl_ai_client.generate_annotations(
            number, fragment_image, threshold
        )
    def find(self, number: MuseumNumber) -> Annotations:
        """Return the stored annotations for the given museum number."""
        return self._annotations_repository.query_by_museum_number(number)
    def _label_by_line_number(
        self, line_number_to_match: int, labels: Sequence[LineLabel]
    ) -> str:
        """Return the formatted label whose line number matches, or ""
        if none matches. The last matching label wins."""
        matching_label = None
        for label in labels:
            label_line_number = label.line_number
            if label_line_number and label_line_number.is_matching_number(
                line_number_to_match
            ):
                matching_label = label
        return matching_label.formatted_label if matching_label else ""
    def _cropped_image_from_annotations_helper(
        self,
        annotations: Annotations,
        image: Image.Image,
        script: str,
        labels: Sequence[LineLabel],
    ) -> Tuple[Annotations, Sequence[CroppedSignImage]]:
        """Crop every annotation out of ``image`` and attach a
        ``CroppedSign`` (image id, script, line label) to each annotation.
        Returns the updated annotations plus the cropped images to store."""
        cropped_sign_images = []
        updated_cropped_annotations = []
        for annotation in annotations.annotations:
            # Blank annotations carry no line reference, so no label.
            label = (
                self._label_by_line_number(annotation.data.path[0], labels)
                if annotation.data.type != AnnotationValueType.BLANK
                else ""
            )
            cropped_image = annotation.crop_image(image)
            cropped_sign_image = CroppedSignImage.create(cropped_image)
            cropped_sign_images.append(cropped_sign_image)
            updated_cropped_annotation = attr.evolve(
                annotation,
                cropped_sign=CroppedSign(
                    cropped_sign_image.image_id,
                    script,
                    label,
                ),
            )
            updated_cropped_annotations.append(updated_cropped_annotation)
        return (
            attr.evolve(annotations, annotations=updated_cropped_annotations),
            cropped_sign_images,
        )
    def _cropped_image_from_annotations(
        self, annotations: Annotations
    ) -> Tuple[Annotations, Sequence[CroppedSignImage]]:
        """Load the fragment and its photo, then delegate cropping to
        ``_cropped_image_from_annotations_helper``."""
        fragment = self._fragments_repository.query_by_museum_number(
            annotations.fragment_number
        )
        fragment_image = self._photos_repository.query_by_file_name(
            f"{annotations.fragment_number}.jpg"
        )
        image_bytes = fragment_image.read()
        image = Image.open(BytesIO(image_bytes), mode="r")
        return self._cropped_image_from_annotations_helper(
            annotations, image, fragment.script, fragment.text.labels
        )
    def update(self, annotations: Annotations, user: User) -> Annotations:
        """Persist new annotations: store cropped sign images, upsert the
        annotations, and record a changelog diff against the old state."""
        old_annotations = self._annotations_repository.query_by_museum_number(
            annotations.fragment_number
        )
        _id = str(annotations.fragment_number)
        schema = AnnotationsSchema()
        (
            annotations_with_image_ids,
            cropped_sign_images,
        ) = self._cropped_image_from_annotations(annotations)
        self._annotations_repository.create_or_update(annotations_with_image_ids)
        self._cropped_sign_images_repository.create_many(cropped_sign_images)
        self._changelog.create(
            "annotations",
            user.profile,
            {"_id": _id, **schema.dump(old_annotations)},
            {"_id": _id, **schema.dump(annotations_with_image_ids)},
        )
        return annotations_with_image_ids
| 37.755725 | 86 | 0.697736 | from io import BytesIO
from typing import Tuple, Sequence
import attr
from PIL import Image
from ebl.changelog import Changelog
from ebl.ebl_ai_client import EblAiClient
from ebl.files.application.file_repository import FileRepository
from ebl.fragmentarium.application.annotations_repository import AnnotationsRepository
from ebl.fragmentarium.application.annotations_schema import AnnotationsSchema
from ebl.fragmentarium.application.cropped_sign_image import CroppedSign
from ebl.fragmentarium.application.cropped_sign_images_repository import (
CroppedSignImage,
CroppedSignImagesRepository,
)
from ebl.fragmentarium.application.fragment_repository import FragmentRepository
from ebl.fragmentarium.domain.annotation import (
Annotations,
AnnotationValueType,
)
from ebl.transliteration.domain.line_label import LineLabel
from ebl.transliteration.domain.museum_number import MuseumNumber
from ebl.users.domain.user import User
@attr.attrs(auto_attribs=True, frozen=True)
class AnnotationsService:
_ebl_ai_client: EblAiClient
_annotations_repository: AnnotationsRepository
_photo_repository: FileRepository
_changelog: Changelog
_fragments_repository: FragmentRepository
_photos_repository: FileRepository
_cropped_sign_images_repository: CroppedSignImagesRepository
def generate_annotations(
self, number: MuseumNumber, threshold: float = 0.3
) -> Annotations:
fragment_image = self._photo_repository.query_by_file_name(f"{number}.jpg")
return self._ebl_ai_client.generate_annotations(
number, fragment_image, threshold
)
def find(self, number: MuseumNumber) -> Annotations:
return self._annotations_repository.query_by_museum_number(number)
def _label_by_line_number(
self, line_number_to_match: int, labels: Sequence[LineLabel]
) -> str:
matching_label = None
for label in labels:
label_line_number = label.line_number
if label_line_number and label_line_number.is_matching_number(
line_number_to_match
):
matching_label = label
return matching_label.formatted_label if matching_label else ""
def _cropped_image_from_annotations_helper(
self,
annotations: Annotations,
image: Image.Image,
script: str,
labels: Sequence[LineLabel],
) -> Tuple[Annotations, Sequence[CroppedSignImage]]:
cropped_sign_images = []
updated_cropped_annotations = []
for annotation in annotations.annotations:
label = (
self._label_by_line_number(annotation.data.path[0], labels)
if annotation.data.type != AnnotationValueType.BLANK
else ""
)
cropped_image = annotation.crop_image(image)
cropped_sign_image = CroppedSignImage.create(cropped_image)
cropped_sign_images.append(cropped_sign_image)
updated_cropped_annotation = attr.evolve(
annotation,
cropped_sign=CroppedSign(
cropped_sign_image.image_id,
script,
label,
),
)
updated_cropped_annotations.append(updated_cropped_annotation)
return (
attr.evolve(annotations, annotations=updated_cropped_annotations),
cropped_sign_images,
)
def _cropped_image_from_annotations(
self, annotations: Annotations
) -> Tuple[Annotations, Sequence[CroppedSignImage]]:
fragment = self._fragments_repository.query_by_museum_number(
annotations.fragment_number
)
fragment_image = self._photos_repository.query_by_file_name(
f"{annotations.fragment_number}.jpg"
)
image_bytes = fragment_image.read()
image = Image.open(BytesIO(image_bytes), mode="r")
return self._cropped_image_from_annotations_helper(
annotations, image, fragment.script, fragment.text.labels
)
def update(self, annotations: Annotations, user: User) -> Annotations:
old_annotations = self._annotations_repository.query_by_museum_number(
annotations.fragment_number
)
_id = str(annotations.fragment_number)
schema = AnnotationsSchema()
(
annotations_with_image_ids,
cropped_sign_images,
) = self._cropped_image_from_annotations(annotations)
self._annotations_repository.create_or_update(annotations_with_image_ids)
self._cropped_sign_images_repository.create_many(cropped_sign_images)
self._changelog.create(
"annotations",
user.profile,
{"_id": _id, **schema.dump(old_annotations)},
{"_id": _id, **schema.dump(annotations_with_image_ids)},
)
return annotations_with_image_ids
| true | true |
79018386380f0f5a1f9ccfd59456ae05b5b003cf | 1,562 | py | Python | procedures/points_B_ICG_Lozaano_Equation.py | k-cybulski/sigman-project | 1f51e04dddb375eb58182664296b7b3f1db71756 | [
"MIT"
] | 1 | 2017-11-10T10:42:07.000Z | 2017-11-10T10:42:07.000Z | procedures/points_B_ICG_Lozaano_Equation.py | k-cybulski/sigman-project | 1f51e04dddb375eb58182664296b7b3f1db71756 | [
"MIT"
] | 21 | 2017-12-28T13:39:55.000Z | 2018-07-16T14:34:29.000Z | procedures/points_B_ICG_Lozaano_Equation.py | k-cybulski/sigman-project | 1f51e04dddb375eb58182664296b7b3f1db71756 | [
"MIT"
] | 1 | 2018-02-25T13:57:50.000Z | 2018-02-25T13:57:50.000Z | import numpy as np
from sigman.analyzer import InvalidArgumentError
procedure_type = 'points'
description = (
"""Procedure calculate time of B point from equation:
RB = 1.233RZ-0.0032RZ^2-31.59
where RZ - time between R and dz/dt max [ms]
RB - time between R and B
Equation was proposed by D.L. Lozano in paper "Where to B in dZ/dt" (2007)
""")
author = 'mzylinski'
arguments = {
}
default_arguments = {
}
output_type = 'B'
required_waves = ['Signal']
required_points = [ 'R','dzdtmax']
def procedure(waves, points, begin_time, end_time, settings):
wave = waves['Signal']
R = points['R']
dzdtmax = points['dzdtmax']
r_x = []
r_y = []
for i in range(0,len(R)-1):
data = wave.data_slice(R.data_x[i], R.data_x[i+1])
RZ = (dzdtmax.data_x[i] - R.data_x[i])/wave.sample_length
RB = 1.233*RZ -0.0032*(RZ*RZ)-31.59
t = int(round(RB))
if (t<0):
t = 0
r_y.append(data[t])
r_x.append(R.data_x[i] + t*wave.sample_length)
return r_x, r_y
def interpret_arguments(waves, points, arguments):
output_arguments = {}
for key, item in arguments.items():
try:
output_arguments[key] = float(item)
except:
raise InvalidArgumentError("{} is invalid.".format(arguments[key]))
return output_arguments
def execute(waves, points, begin_time, end_time, arguments):
arguments = interpret_arguments(waves, points, arguments)
return procedure(waves, points, begin_time, end_time, arguments) | 26.931034 | 79 | 0.630602 | import numpy as np
from sigman.analyzer import InvalidArgumentError
procedure_type = 'points'
description = (
"""Procedure calculate time of B point from equation:
RB = 1.233RZ-0.0032RZ^2-31.59
where RZ - time between R and dz/dt max [ms]
RB - time between R and B
Equation was proposed by D.L. Lozano in paper "Where to B in dZ/dt" (2007)
""")
author = 'mzylinski'
arguments = {
}
default_arguments = {
}
output_type = 'B'
required_waves = ['Signal']
required_points = [ 'R','dzdtmax']
def procedure(waves, points, begin_time, end_time, settings):
wave = waves['Signal']
R = points['R']
dzdtmax = points['dzdtmax']
r_x = []
r_y = []
for i in range(0,len(R)-1):
data = wave.data_slice(R.data_x[i], R.data_x[i+1])
RZ = (dzdtmax.data_x[i] - R.data_x[i])/wave.sample_length
RB = 1.233*RZ -0.0032*(RZ*RZ)-31.59
t = int(round(RB))
if (t<0):
t = 0
r_y.append(data[t])
r_x.append(R.data_x[i] + t*wave.sample_length)
return r_x, r_y
def interpret_arguments(waves, points, arguments):
output_arguments = {}
for key, item in arguments.items():
try:
output_arguments[key] = float(item)
except:
raise InvalidArgumentError("{} is invalid.".format(arguments[key]))
return output_arguments
def execute(waves, points, begin_time, end_time, arguments):
arguments = interpret_arguments(waves, points, arguments)
return procedure(waves, points, begin_time, end_time, arguments) | true | true |
790183bb62764197eb48efd6c0be97946020eba2 | 396 | py | Python | kongoauth/wsgi.py | toast38coza/KongOAuth | 827d6f0cb47c67903f0a0236f56cd20c18bb84bb | [
"MIT"
] | null | null | null | kongoauth/wsgi.py | toast38coza/KongOAuth | 827d6f0cb47c67903f0a0236f56cd20c18bb84bb | [
"MIT"
] | 3 | 2020-02-11T23:09:10.000Z | 2021-06-10T18:21:30.000Z | kongoauth/wsgi.py | toast38coza/KongOAuth | 827d6f0cb47c67903f0a0236f56cd20c18bb84bb | [
"MIT"
] | null | null | null | """
WSGI config for kongoauth project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kongoauth.settings")
application = get_wsgi_application()
| 23.294118 | 78 | 0.787879 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kongoauth.settings")
application = get_wsgi_application()
| true | true |
7901840e1caccf4c39615aa05782447db4ea89d4 | 13,614 | py | Python | tabular/src/autogluon/tabular/models/knn/knn_model.py | taesup-aws/autogluon | 51b20c4a18de148b4f06b384e56b102c86727153 | [
"Apache-2.0"
] | null | null | null | tabular/src/autogluon/tabular/models/knn/knn_model.py | taesup-aws/autogluon | 51b20c4a18de148b4f06b384e56b102c86727153 | [
"Apache-2.0"
] | null | null | null | tabular/src/autogluon/tabular/models/knn/knn_model.py | taesup-aws/autogluon | 51b20c4a18de148b4f06b384e56b102c86727153 | [
"Apache-2.0"
] | null | null | null | import logging
import numpy as np
import math
import psutil
import time
from autogluon.common.features.types import R_BOOL, R_CATEGORY, R_OBJECT, S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT
from autogluon.core.constants import REGRESSION
from autogluon.core.utils.exceptions import NotEnoughMemoryError
from autogluon.core.models.abstract.model_trial import skip_hpo
from autogluon.core.models import AbstractModel
from autogluon.core.utils.utils import normalize_pred_probas
logger = logging.getLogger(__name__)
# TODO: Normalize data!
class KNNModel(AbstractModel):
"""
KNearestNeighbors model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
"""
    def __init__(self, **kwargs):
        """Initialize the model; see ``AbstractModel`` for accepted kwargs."""
        super().__init__(**kwargs)
        # Row indices of training data left out when fitting on a sample;
        # necessary to fill those rows during LOO OOF generation.
        self._X_unused_index = None  # Keeps track of unused training data indices, necessary for LOO OOF generation
def _get_model_type(self):
if self.params_aux.get('use_daal', True):
try:
# TODO: Add more granular switch, currently this affects all future KNN models even if they had `use_daal=False`
from sklearnex import patch_sklearn
patch_sklearn("knn_classifier")
patch_sklearn("knn_regressor")
# daal backend for KNN seems to be 20-40x+ faster than native sklearn with no downsides.
logger.log(15, '\tUsing daal4py KNN backend...')
except:
pass
try:
from ._knn_loo_variants import KNeighborsClassifier, KNeighborsRegressor
except:
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
logger.warning('WARNING: Leave-one-out variants of KNN failed to import. Falling back to standard KNN implementations.')
if self.problem_type == REGRESSION:
return KNeighborsRegressor
else:
return KNeighborsClassifier
def _preprocess(self, X, **kwargs):
X = super()._preprocess(X, **kwargs)
X = X.fillna(0).to_numpy(dtype=np.float32)
return X
def _set_default_params(self):
default_params = {
'weights': 'uniform',
'n_jobs': -1,
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
ignored_type_group_raw=[R_BOOL, R_CATEGORY, R_OBJECT], # TODO: Eventually use category features
ignored_type_group_special=[S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT],
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
@classmethod
def _get_default_ag_args(cls) -> dict:
default_ag_args = super()._get_default_ag_args()
extra_ag_args = {'valid_stacker': False}
default_ag_args.update(extra_ag_args)
return default_ag_args
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
extra_ag_args_ensemble = {'use_child_oof': True}
default_ag_args_ensemble.update(extra_ag_args_ensemble)
return default_ag_args_ensemble
# TODO: Enable HPO for KNN
def _get_default_searchspace(self):
spaces = {}
return spaces
def _fit(self,
X,
y,
time_limit=None,
sample_weight=None,
**kwargs):
time_start = time.time()
X = self.preprocess(X)
self._validate_fit_memory_usage(X=X) # TODO: Can incorporate this into samples, can fit on portion of data to satisfy memory instead of raising exception immediately
if sample_weight is not None: # TODO: support
logger.log(15, "sample_weight not yet supported for KNNModel, this model will ignore them in training.")
num_rows_max = len(X)
# FIXME: v0.1 Must store final num rows for refit_full or else will use everything! Worst case refit_full could train far longer than the original model.
if time_limit is None or num_rows_max <= 10000:
self.model = self._get_model_type()(**self._get_model_params()).fit(X, y)
else:
self.model = self._fit_with_samples(X=X, y=y, time_limit=time_limit - (time.time() - time_start))
def _validate_fit_memory_usage(self, X):
max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio']
model_size_bytes = 4 * X.shape[0] * X.shape[1] # Assuming float32 types
expected_final_model_size_bytes = model_size_bytes * 3.6 # Roughly what can be expected of the final KNN model in memory size
if expected_final_model_size_bytes > 10000000: # Only worth checking if expected model size is >10MB
available_mem = psutil.virtual_memory().available
model_memory_ratio = expected_final_model_size_bytes / available_mem
if model_memory_ratio > (0.15 * max_memory_usage_ratio):
logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory...')
if model_memory_ratio > (0.20 * max_memory_usage_ratio):
raise NotEnoughMemoryError # don't train full model to avoid OOM error
# TODO: Won't work for RAPIDS without modification
# TODO: Technically isn't OOF, but can be used inplace of OOF. Perhaps rename to something more accurate?
def get_oof_pred_proba(self, X, normalize=None, **kwargs):
"""X should be the same X passed to `.fit`"""
y_oof_pred_proba = self._get_oof_pred_proba(X=X, **kwargs)
if normalize is None:
normalize = self.normalize_pred_probas
if normalize:
y_oof_pred_proba = normalize_pred_probas(y_oof_pred_proba, self.problem_type)
y_oof_pred_proba = y_oof_pred_proba.astype(np.float32)
return y_oof_pred_proba
    def _get_oof_pred_proba(self, X, **kwargs):
        """Generate leave-one-out prediction (proba) values for the fit data.

        Rows that were excluded from training by `_fit_with_samples` (tracked
        in `self._X_unused_index`) are filled in with regular predictions so
        the output covers every row of `X` in original row order.
        """
        # Prefer the leave-one-out variants exposed by the custom KNN classes.
        if callable(getattr(self.model, "predict_proba_loo", None)):
            y_oof_pred_proba = self.model.predict_proba_loo()
        elif callable(getattr(self.model, "predict_loo", None)):
            y_oof_pred_proba = self.model.predict_loo()
        else:
            raise AssertionError(f'Model class {type(self.model)} does not support out-of-fold prediction generation.')
        y_oof_pred_proba = self._convert_proba_to_unified_form(y_oof_pred_proba)
        if X is not None and self._X_unused_index:
            # NOTE(review): `.iloc` implies X is expected to be a DataFrame here,
            # while `_fit` preprocesses X to a numpy array -- confirm with callers.
            X_unused = X.iloc[self._X_unused_index]
            y_pred_proba_new = self.predict_proba(X_unused)
            X_unused_index = set(self._X_unused_index)  # set for O(1) membership tests below
            num_rows = len(X)
            X_used_index = [i for i in range(num_rows) if i not in X_unused_index]
            oof_pred_shape = y_oof_pred_proba.shape
            # Stitch LOO predictions (trained rows) and fresh predictions
            # (unused rows) back together, preserving the original row order.
            if len(oof_pred_shape) == 1:
                # 1-D predictions.
                y_oof_tmp = np.zeros(num_rows, dtype=np.float32)
                y_oof_tmp[X_used_index] = y_oof_pred_proba
                y_oof_tmp[self._X_unused_index] = y_pred_proba_new
            else:
                # 2-D predictions: (num_rows, num_outputs).
                y_oof_tmp = np.zeros((num_rows, oof_pred_shape[1]), dtype=np.float32)
                y_oof_tmp[X_used_index, :] = y_oof_pred_proba
                y_oof_tmp[self._X_unused_index, :] = y_pred_proba_new
            y_oof_pred_proba = y_oof_tmp
        return y_oof_pred_proba
    # TODO: Consider making this fully generic and available to all models
    def _fit_with_samples(self,
                          X,
                          y,
                          time_limit,
                          start_samples=10000,
                          max_samples=None,
                          sample_growth_factor=2,
                          sample_time_growth_factor=8):
        """
        Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used.
        X and y must already be preprocessed.
        Parameters
        ----------
        X : np.ndarray
            The training data features (preprocessed).
        y : Series
            The training data ground truth labels.
        time_limit : float
            Time limit in seconds to adhere to when fitting model.
        start_samples : int, default = 10000
            Number of samples to start with. This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples.
            For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...]
        max_samples : int, default = None
            The maximum number of samples to use.
            If None or greater than the number of rows in X, then it is set equal to the number of rows in X.
        sample_growth_factor : float, default = 2
            The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit.
        sample_time_growth_factor : float, default = 8
            The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds.
            If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early.
        """
        time_start = time.time()
        num_rows_samples = []
        # Precompute the schedule of sample sizes: start_samples, grown by
        # sample_growth_factor each step, capped at num_rows_max.
        if max_samples is None:
            num_rows_max = len(X)
        else:
            num_rows_max = min(len(X), max_samples)
        num_rows_cur = start_samples
        while True:
            num_rows_cur = min(num_rows_cur, num_rows_max)
            num_rows_samples.append(num_rows_cur)
            if num_rows_cur == num_rows_max:
                break
            num_rows_cur *= sample_growth_factor
            num_rows_cur = math.ceil(num_rows_cur)
            # If the next step would already be within 1.5x of the full data,
            # jump straight to using all rows.
            if num_rows_cur * 1.5 >= num_rows_max:
                num_rows_cur = num_rows_max
        def sample_func(chunk, frac):
            # Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba)
            n = max(math.ceil(len(chunk) * frac), 1)
            return chunk.sample(n=n, replace=False, random_state=0)
        # For classification, sample stratified by label to preserve class balance.
        if self.problem_type != REGRESSION:
            y_df = y.to_frame(name='label').reset_index(drop=True)
        else:
            y_df = None
        time_start_sample_loop = time.time()
        time_limit_left = time_limit - (time_start_sample_loop - time_start)
        model_type = self._get_model_type()
        idx = None
        for i, samples in enumerate(num_rows_samples):
            if samples != num_rows_max:
                if self.problem_type == REGRESSION:
                    idx = np.random.choice(num_rows_max, size=samples, replace=False)
                else:
                    # Stratified sample: take `frac` of each label group.
                    idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=samples/num_rows_max).index
                X_samp = X[idx, :]
                y_samp = y.iloc[idx]
            else:
                X_samp = X
                y_samp = y
                idx = None
            # Each fit replaces the model trained on fewer rows.
            self.model = model_type(**self._get_model_params()).fit(X_samp, y_samp)
            time_limit_left_prior = time_limit_left
            time_fit_end_sample = time.time()
            time_limit_left = time_limit - (time_fit_end_sample - time_start)
            time_fit_sample = time_limit_left_prior - time_limit_left
            # Estimate the next (larger) fit's cost from the last fit's cost.
            time_required_for_next = time_fit_sample * sample_time_growth_factor
            logger.log(15, f'\t{round(time_fit_sample, 2)}s \t= Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)')
            if time_required_for_next > time_limit_left and i != len(num_rows_samples) - 1:
                logger.log(20, f'\tNot enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. (Training KNN model on {num_rows_samples[i+1]} rows is expected to take {round(time_required_for_next, 2)}s)')
                break
        if idx is not None:
            # Record rows that were never used for training so that
            # `_get_oof_pred_proba` can fill them with regular predictions.
            idx = set(idx)
            self._X_unused_index = [i for i in range(num_rows_max) if i not in idx]
        return self.model
    # TODO: Add HPO
    def _hyperparameter_tune(self, **kwargs):
        # Hyperparameter tuning is not implemented for this model; fall back
        # to a single fit with the default/specified parameters.
        return skip_hpo(self, **kwargs)
def _more_tags(self):
return {'valid_oof': True}
class FAISSModel(KNNModel):
    """KNN variant backed by FAISS nearest-neighbor indices."""

    def _get_model_type(self):
        from .knn_utils import FAISSNeighborsClassifier, FAISSNeighborsRegressor
        # Regression and classification use different FAISS wrappers.
        if self.problem_type == REGRESSION:
            return FAISSNeighborsRegressor
        return FAISSNeighborsClassifier

    def _set_default_params(self):
        # FAISS index factory specification; 'Flat' is the default index type.
        for param_name, param_val in {'index_factory_string': 'Flat'}.items():
            self._set_default_param_value(param_name, param_val)
        super()._set_default_params()

    @classmethod
    def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
        ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
        # OOF via leave-one-out is not available for this model (see _more_tags),
        # so child OOF usage is disabled.
        ag_args_ensemble.update({'use_child_oof': False})
        return ag_args_ensemble

    def _more_tags(self):
        return {'valid_oof': False}
import numpy as np
import math
import psutil
import time
from autogluon.common.features.types import R_BOOL, R_CATEGORY, R_OBJECT, S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT
from autogluon.core.constants import REGRESSION
from autogluon.core.utils.exceptions import NotEnoughMemoryError
from autogluon.core.models.abstract.model_trial import skip_hpo
from autogluon.core.models import AbstractModel
from autogluon.core.utils.utils import normalize_pred_probas
logger = logging.getLogger(__name__)
class KNNModel(AbstractModel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._X_unused_index = None
def _get_model_type(self):
if self.params_aux.get('use_daal', True):
try:
from sklearnex import patch_sklearn
patch_sklearn("knn_classifier")
patch_sklearn("knn_regressor")
logger.log(15, '\tUsing daal4py KNN backend...')
except:
pass
try:
from ._knn_loo_variants import KNeighborsClassifier, KNeighborsRegressor
except:
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
logger.warning('WARNING: Leave-one-out variants of KNN failed to import. Falling back to standard KNN implementations.')
if self.problem_type == REGRESSION:
return KNeighborsRegressor
else:
return KNeighborsClassifier
def _preprocess(self, X, **kwargs):
X = super()._preprocess(X, **kwargs)
X = X.fillna(0).to_numpy(dtype=np.float32)
return X
def _set_default_params(self):
default_params = {
'weights': 'uniform',
'n_jobs': -1,
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
ignored_type_group_raw=[R_BOOL, R_CATEGORY, R_OBJECT],
ignored_type_group_special=[S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT],
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
@classmethod
def _get_default_ag_args(cls) -> dict:
default_ag_args = super()._get_default_ag_args()
extra_ag_args = {'valid_stacker': False}
default_ag_args.update(extra_ag_args)
return default_ag_args
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
extra_ag_args_ensemble = {'use_child_oof': True}
default_ag_args_ensemble.update(extra_ag_args_ensemble)
return default_ag_args_ensemble
def _get_default_searchspace(self):
spaces = {}
return spaces
def _fit(self,
X,
y,
time_limit=None,
sample_weight=None,
**kwargs):
time_start = time.time()
X = self.preprocess(X)
self._validate_fit_memory_usage(X=X)
if sample_weight is not None:
logger.log(15, "sample_weight not yet supported for KNNModel, this model will ignore them in training.")
num_rows_max = len(X)
if time_limit is None or num_rows_max <= 10000:
self.model = self._get_model_type()(**self._get_model_params()).fit(X, y)
else:
self.model = self._fit_with_samples(X=X, y=y, time_limit=time_limit - (time.time() - time_start))
def _validate_fit_memory_usage(self, X):
max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio']
model_size_bytes = 4 * X.shape[0] * X.shape[1]
expected_final_model_size_bytes = model_size_bytes * 3.6
if expected_final_model_size_bytes > 10000000:
available_mem = psutil.virtual_memory().available
model_memory_ratio = expected_final_model_size_bytes / available_mem
if model_memory_ratio > (0.15 * max_memory_usage_ratio):
logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory...')
if model_memory_ratio > (0.20 * max_memory_usage_ratio):
raise NotEnoughMemoryError
# TODO: Won't work for RAPIDS without modification
def get_oof_pred_proba(self, X, normalize=None, **kwargs):
y_oof_pred_proba = self._get_oof_pred_proba(X=X, **kwargs)
if normalize is None:
normalize = self.normalize_pred_probas
if normalize:
y_oof_pred_proba = normalize_pred_probas(y_oof_pred_proba, self.problem_type)
y_oof_pred_proba = y_oof_pred_proba.astype(np.float32)
return y_oof_pred_proba
def _get_oof_pred_proba(self, X, **kwargs):
if callable(getattr(self.model, "predict_proba_loo", None)):
y_oof_pred_proba = self.model.predict_proba_loo()
elif callable(getattr(self.model, "predict_loo", None)):
y_oof_pred_proba = self.model.predict_loo()
else:
raise AssertionError(f'Model class {type(self.model)} does not support out-of-fold prediction generation.')
y_oof_pred_proba = self._convert_proba_to_unified_form(y_oof_pred_proba)
if X is not None and self._X_unused_index:
X_unused = X.iloc[self._X_unused_index]
y_pred_proba_new = self.predict_proba(X_unused)
X_unused_index = set(self._X_unused_index)
num_rows = len(X)
X_used_index = [i for i in range(num_rows) if i not in X_unused_index]
oof_pred_shape = y_oof_pred_proba.shape
if len(oof_pred_shape) == 1:
y_oof_tmp = np.zeros(num_rows, dtype=np.float32)
y_oof_tmp[X_used_index] = y_oof_pred_proba
y_oof_tmp[self._X_unused_index] = y_pred_proba_new
else:
y_oof_tmp = np.zeros((num_rows, oof_pred_shape[1]), dtype=np.float32)
y_oof_tmp[X_used_index, :] = y_oof_pred_proba
y_oof_tmp[self._X_unused_index, :] = y_pred_proba_new
y_oof_pred_proba = y_oof_tmp
return y_oof_pred_proba
# TODO: Consider making this fully generic and available to all models
def _fit_with_samples(self,
X,
y,
time_limit,
start_samples=10000,
max_samples=None,
sample_growth_factor=2,
sample_time_growth_factor=8):
time_start = time.time()
num_rows_samples = []
if max_samples is None:
num_rows_max = len(X)
else:
num_rows_max = min(len(X), max_samples)
num_rows_cur = start_samples
while True:
num_rows_cur = min(num_rows_cur, num_rows_max)
num_rows_samples.append(num_rows_cur)
if num_rows_cur == num_rows_max:
break
num_rows_cur *= sample_growth_factor
num_rows_cur = math.ceil(num_rows_cur)
if num_rows_cur * 1.5 >= num_rows_max:
num_rows_cur = num_rows_max
def sample_func(chunk, frac):
# Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba)
n = max(math.ceil(len(chunk) * frac), 1)
return chunk.sample(n=n, replace=False, random_state=0)
if self.problem_type != REGRESSION:
y_df = y.to_frame(name='label').reset_index(drop=True)
else:
y_df = None
time_start_sample_loop = time.time()
time_limit_left = time_limit - (time_start_sample_loop - time_start)
model_type = self._get_model_type()
idx = None
for i, samples in enumerate(num_rows_samples):
if samples != num_rows_max:
if self.problem_type == REGRESSION:
idx = np.random.choice(num_rows_max, size=samples, replace=False)
else:
idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=samples/num_rows_max).index
X_samp = X[idx, :]
y_samp = y.iloc[idx]
else:
X_samp = X
y_samp = y
idx = None
self.model = model_type(**self._get_model_params()).fit(X_samp, y_samp)
time_limit_left_prior = time_limit_left
time_fit_end_sample = time.time()
time_limit_left = time_limit - (time_fit_end_sample - time_start)
time_fit_sample = time_limit_left_prior - time_limit_left
time_required_for_next = time_fit_sample * sample_time_growth_factor
logger.log(15, f'\t{round(time_fit_sample, 2)}s \t= Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)')
if time_required_for_next > time_limit_left and i != len(num_rows_samples) - 1:
logger.log(20, f'\tNot enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. (Training KNN model on {num_rows_samples[i+1]} rows is expected to take {round(time_required_for_next, 2)}s)')
break
if idx is not None:
idx = set(idx)
self._X_unused_index = [i for i in range(num_rows_max) if i not in idx]
return self.model
# TODO: Add HPO
def _hyperparameter_tune(self, **kwargs):
return skip_hpo(self, **kwargs)
def _more_tags(self):
return {'valid_oof': True}
class FAISSModel(KNNModel):
def _get_model_type(self):
from .knn_utils import FAISSNeighborsClassifier, FAISSNeighborsRegressor
if self.problem_type == REGRESSION:
return FAISSNeighborsRegressor
else:
return FAISSNeighborsClassifier
def _set_default_params(self):
default_params = {
'index_factory_string': 'Flat',
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
super()._set_default_params()
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
extra_ag_args_ensemble = {'use_child_oof': False}
default_ag_args_ensemble.update(extra_ag_args_ensemble)
return default_ag_args_ensemble
def _more_tags(self):
return {'valid_oof': False} | true | true |
7901846bb4048b9e0097cb7fd952f59be6683ece | 1,750 | py | Python | tests/test_cache.py | gvigneron/webapp2_caffeine | 1c920e77b48555886dff5206cc5e83179f23c8f1 | [
"Apache-2.0"
] | null | null | null | tests/test_cache.py | gvigneron/webapp2_caffeine | 1c920e77b48555886dff5206cc5e83179f23c8f1 | [
"Apache-2.0"
] | null | null | null | tests/test_cache.py | gvigneron/webapp2_caffeine | 1c920e77b48555886dff5206cc5e83179f23c8f1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import datetime
import time
import unittest
from webapp2_caffeine.cache import CacheContainer
from webapp2_caffeine.cache import flush
class DummyCache(CacheContainer):
    """Concrete cache container for tests; its fresh value is 'now'."""

    key = 'dummy_cache'

    @property
    def fresh_value(self):
        # Every refresh yields a new timestamp, making staleness observable.
        return datetime.now()
class CacheContainerTest(unittest.TestCase):
    """Tests for CacheContainer get/set/delete/update/value semantics."""

    def setUp(self):
        flush()  # start each test with an empty cache

    def tearDown(self):
        flush()

    def test_fresh_value(self):
        # The base class does not define fresh_value; accessing it must raise.
        container = CacheContainer()
        with self.assertRaises(NotImplementedError):
            container.fresh_value

    def test_set(self):
        # A container without a key cannot be stored.
        container = CacheContainer()
        with self.assertRaises(ValueError):
            container.set('my value')
        container = DummyCache()
        value, expiration = container.set('my value')
        self.assertEqual(value, 'my value')
        # Default expiration is up to 21600s (6h) from now.
        self.assertTrue(21000 < expiration - time.time() < 21600)
        self.assertEqual(container.get(), 'my value')

    def test_get(self):
        container = DummyCache()
        self.assertIsNone(container.get())
        # An entry set with an already-elapsed expiration behaves as a miss.
        container.set('my value', 1000)
        self.assertIsNone(container.get())
        container.set('my value')
        self.assertEqual(container.get(), 'my value')

    def test_delete(self):
        container = DummyCache()
        container.set('my value')
        container.delete()
        self.assertIsNone(container.get())

    def test_update(self):
        container = DummyCache()
        container.update()
        self.assertTrue(container.get())

    def test_value(self):
        container = DummyCache()
        old_value = container.value
        self.assertTrue(old_value)
        # BUG FIX: the original used assertTrue(container.value, old_value),
        # which treats old_value as the failure *message* and never compares.
        # The intent is that a second access returns the cached value.
        self.assertEqual(container.value, old_value)
| 26.119403 | 65 | 0.641143 |
from datetime import datetime
import time
import unittest
from webapp2_caffeine.cache import CacheContainer
from webapp2_caffeine.cache import flush
class DummyCache(CacheContainer):
key = 'dummy_cache'
@property
def fresh_value(self):
return datetime.now()
class CacheContainerTest(unittest.TestCase):
def setUp(self):
flush()
def tearDown(self):
flush()
def test_fresh_value(self):
container = CacheContainer()
with self.assertRaises(NotImplementedError):
container.fresh_value
def test_set(self):
container = CacheContainer()
with self.assertRaises(ValueError):
container.set('my value')
container = DummyCache()
value, expiration = container.set('my value')
self.assertEqual(value, 'my value')
self.assertTrue(21000 < expiration - time.time() < 21600)
self.assertEqual(container.get(), 'my value')
def test_get(self):
container = DummyCache()
self.assertEqual(container.get(), None)
container.set('my value', 1000)
self.assertEqual(container.get(), None)
container.set('my value')
self.assertEqual(container.get(), 'my value')
def test_delete(self):
container = DummyCache()
container.set('my value')
container.delete()
self.assertEqual(container.get(), None)
def test_update(self):
container = DummyCache()
container.update()
self.assertTrue(container.get())
def test_value(self):
container = DummyCache()
old_value = container.value
self.assertTrue(old_value)
self.assertTrue(container.value, old_value)
| true | true |
790184f2ee82e1be0871186debe15bfcb841f23a | 3,103 | py | Python | app/service/socketservice.py | mohansd/cyx-xElec-server | bef67274ba85d6172ac1ef4dd3df8c8ce86c6c61 | [
"Apache-2.0"
] | null | null | null | app/service/socketservice.py | mohansd/cyx-xElec-server | bef67274ba85d6172ac1ef4dd3df8c8ce86c6c61 | [
"Apache-2.0"
] | null | null | null | app/service/socketservice.py | mohansd/cyx-xElec-server | bef67274ba85d6172ac1ef4dd3df8c8ce86c6c61 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from app.libs.utils import data_decode
import socket, socketserver, threading
import traceback
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Per-connection handler: receives raw device frames and forwards them to
    a Device parser. One instance is created per client connection and runs in
    its own thread (see ThreadedTCPServer)."""
    ip = ""  # client IP address, filled in setup()
    port = 0  # client port, filled in setup()
    timeOut = 100  # socket receive timeout in seconds
    def __init__(self, request, client_address, server):
        # Local import, presumably to avoid a circular import at module load -- TODO confirm
        from app.service.device import Device
        self.socket = None
        self.addr = None
        self.cloud_id = None  # set by the device protocol after identification
        self.device = Device()
        self.sign = None
        self.device_id = None
        self.timestamp = None
        # BaseRequestHandler.__init__ runs setup()/handle()/finish() immediately,
        # so all attributes must be initialized before this call.
        super().__init__(request, client_address, server)
    def setup(self):
        """Called before handle(): record the peer address and set the timeout."""
        self.ip = self.client_address[0].strip()
        self.port = self.client_address[1]
        self.request.settimeout(self.timeOut)
        self.addr = self.ip + str(self.port)  # ip and port concatenated without separator
        self.socket = self.request
        print(self.ip)
    def handle(self):
        """Receive loop: read frames until the peer closes, the socket times
        out, or parsing raises."""
        try:
            while True:
                try:
                    # time.sleep(1)
                    data = self.request.recv(1024)
                except socket.timeout:
                    print(self.ip + ":" + str(self.port) + "接收超时")  # message: receive timed out
                    break
                if data:
                    data = data_decode(data)
                    self.device.parse_data(data, self)
                else:
                    # An empty recv() means the client closed the connection.
                    break
        except Exception as e:
            # Best-effort error logging to a local file; the connection is dropped.
            with open("err_log.log", "a+") as f:
                f.write(traceback.format_exc()+'\r\r')
            print(self.client_address, "连接断开")  # message: connection dropped
        finally:
            self.request.close()
    def finish(self):
        """Called after handle(): deregister the client if it identified itself."""
        if self.cloud_id is None:
            print(self.ip + ":" + str(self.port) + "断开连接!")  # message: disconnected
        else:
            get_instance().remove_client(self.cloud_id)
            print(self.ip + ":" + str(self.port) + self.cloud_id + "断开连接!")  # message: disconnected
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that spawns a new thread for each client connection."""
    pass
class TCPServer:
    """Singleton wrapper that runs a ThreadedTCPServer on 0.0.0.0:5002 in a
    daemon thread and tracks connected client handlers by cloud id."""
    instance = None

    @staticmethod
    def get_instance():
        """Return the singleton, creating it (and starting the server) on first use."""
        print("start")
        if TCPServer.instance is None:
            TCPServer.instance = TCPServer()
        return TCPServer.instance

    def __init__(self):
        self.clients = {}  # cloud_id -> ThreadedTCPRequestHandler
        self.server = None
        try:
            self.server = ThreadedTCPServer(("0.0.0.0", 5002), ThreadedTCPRequestHandler)
            server_thread = threading.Thread(target=self.server.serve_forever)
            server_thread.daemon = True  # don't block interpreter shutdown
            server_thread.start()
        except (KeyboardInterrupt, SystemExit, Exception) as e:
            print(e)
            print("end")
            # BUG FIX: guard against self.server still being None when the
            # ThreadedTCPServer constructor itself raised.
            if self.server is not None:
                self.server.shutdown()
                # BUG FIX: socketserver.TCPServer has no close(); server_close()
                # is the method that releases the listening socket.
                self.server.server_close()

    def add_client(self, cloud, sock):
        """Register a connected client's handler under its cloud id."""
        self.clients[cloud] = sock
        print("this is clients", self.clients)

    def remove_client(self, cloud):
        """Remove a client by cloud id and fire the offline alarm for its device."""
        if cloud in self.clients:
            print("删除设备" + cloud)
            from app.service.device import Device
            Device.offline_alarm(self.clients[cloud])
            self.clients.pop(cloud)
def get_instance():
    """Module-level shortcut for the TCPServer singleton accessor."""
    return TCPServer.get_instance()
| 30.126214 | 89 | 0.565259 |
from app.libs.utils import data_decode
import socket, socketserver, threading
import traceback
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
ip = ""
port = 0
timeOut = 100
def __init__(self, request, client_address, server):
from app.service.device import Device
self.socket = None
self.addr = None
self.cloud_id = None
self.device = Device()
self.sign = None
self.device_id = None
self.timestamp = None
super().__init__(request, client_address, server)
def setup(self):
self.ip = self.client_address[0].strip()
self.port = self.client_address[1]
self.request.settimeout(self.timeOut)
self.addr = self.ip + str(self.port)
self.socket = self.request
print(self.ip)
def handle(self):
try:
while True:
try:
data = self.request.recv(1024)
except socket.timeout:
print(self.ip + ":" + str(self.port) + "接收超时")
break
if data:
data = data_decode(data)
self.device.parse_data(data, self)
else:
break
except Exception as e:
with open("err_log.log", "a+") as f:
f.write(traceback.format_exc()+'\r\r')
print(self.client_address, "连接断开")
finally:
self.request.close()
def finish(self):
if self.cloud_id is None:
print(self.ip + ":" + str(self.port) + "断开连接!")
else:
get_instance().remove_client(self.cloud_id)
print(self.ip + ":" + str(self.port) + self.cloud_id + "断开连接!")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class TCPServer:
instance = None
@staticmethod
def get_instance():
print("start")
if TCPServer.instance is None:
TCPServer.instance = TCPServer()
return TCPServer.instance
def __init__(self):
self.clients = {}
self.server = None
try:
self.server = ThreadedTCPServer(("0.0.0.0", 5002), ThreadedTCPRequestHandler)
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
except (KeyboardInterrupt, SystemExit, Exception) as e:
print(e)
print("end")
self.server.shutdown()
self.server.close()
def add_client(self, cloud, sock):
self.clients[cloud] = sock
print("this is clients", self.clients)
def remove_client(self, cloud):
if cloud in self.clients:
print("删除设备" + cloud)
from app.service.device import Device
Device.offline_alarm(self.clients[cloud])
self.clients.pop(cloud)
def get_instance():
return TCPServer.get_instance()
| true | true |
79018509c992165fc5ca9150cd98660c06939562 | 651 | py | Python | app/Meetup/Filter.py | benjifs/site_bot | 9d342d39e927e4f0b175ccb186c3f8c997bd8d35 | [
"MIT"
] | 1 | 2019-10-27T13:13:12.000Z | 2019-10-27T13:13:12.000Z | app/Meetup/Filter.py | benjifs/site_bot | 9d342d39e927e4f0b175ccb186c3f8c997bd8d35 | [
"MIT"
] | 10 | 2019-10-02T12:34:03.000Z | 2020-10-28T00:19:20.000Z | app/Meetup/Filter.py | OpenTwinCities/site_bot | a6e5d056462bed1559eed8232e4d1c0e6323e3c4 | [
"MIT"
] | 2 | 2019-10-06T23:12:49.000Z | 2019-10-22T23:22:09.000Z | # -*- coding: utf8 -*-
def filter_event(event, happening_before):
"""Check if the following keys are present. These
keys only show up when using the API. If fetching
from the iCal, JSON, or RSS feeds it will just compare
the dates
"""
status = True
visibility = True
actions = True
if 'status' in event:
status = event['status'] == 'upcoming'
if 'visibility' in event:
visibility = event['visibility'] == 'public'
if 'self' in event:
actions = 'announce' not in event['self']['actions']
return (status and visibility and actions and
event['time'] < happening_before)
| 28.304348 | 60 | 0.6298 |
def filter_event(event, happening_before):
status = True
visibility = True
actions = True
if 'status' in event:
status = event['status'] == 'upcoming'
if 'visibility' in event:
visibility = event['visibility'] == 'public'
if 'self' in event:
actions = 'announce' not in event['self']['actions']
return (status and visibility and actions and
event['time'] < happening_before)
| true | true |
79018541666340e37da7601f2619ebf06e3d9707 | 26,877 | py | Python | keras/initializers/initializers_v2.py | winnerineast/keras | 1e94c43d7ba0d7b6b629b2300e40470f495bdbe0 | [
"Apache-2.0"
] | null | null | null | keras/initializers/initializers_v2.py | winnerineast/keras | 1e94c43d7ba0d7b6b629b2300e40470f495bdbe0 | [
"Apache-2.0"
] | null | null | null | keras/initializers/initializers_v2.py | winnerineast/keras | 1e94c43d7ba0d7b6b629b2300e40470f495bdbe0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 2.
"""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.initializers.Initializer')
class Initializer(object):
  """Initializer base class: all Keras initializers inherit from this class.
  Initializers should implement a `__call__` method with the following
  signature:
  ```python
  def __call__(self, shape, dtype=None, **kwargs):
    # returns a tensor of shape `shape` and dtype `dtype`
    # containing values drawn from a distribution of your choice.
  ```
  Optionally, you can also implement the method `get_config` and the class
  method `from_config` in order to support serialization -- just like with
  any Keras object.
  Here's a simple example: a random normal initializer.
  ```python
  import tensorflow as tf
  class ExampleRandomNormal(tf.keras.initializers.Initializer):
    def __init__(self, mean, stddev):
      self.mean = mean
      self.stddev = stddev
    def __call__(self, shape, dtype=None, **kwargs):
      return tf.random.normal(
          shape, mean=self.mean, stddev=self.stddev, dtype=dtype)
    def get_config(self):  # To support serialization
      return {"mean": self.mean, "stddev": self.stddev}
  ```
  Note that we don't have to implement `from_config` in the example above since
  the constructor arguments of the class and the keys in the config returned by
  `get_config` are the same. In this case, the default `from_config`
  works fine.
  """
  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.
    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor.
      **kwargs: Additional keyword arguments.
    """
    # Subclasses must override; the base class has no distribution to draw from.
    raise NotImplementedError
  def get_config(self):
    """Returns the configuration of the initializer as a JSON-serializable dict.
    Returns:
      A JSON-serializable Python dict.
    """
    return {}
  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.
    Example:
    ```python
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```
    Args:
      config: A Python dictionary, the output of `get_config`.
    Returns:
      A `tf.keras.initializers.Initializer` instance.
    """
    # Drop 'dtype' if present; initializer constructors don't accept it.
    config.pop('dtype', None)
    return cls(**config)
@keras_export('keras.initializers.Zeros', 'keras.initializers.zeros', v1=[])
class Zeros(tf.zeros_initializer, Initializer):
  """Initializer that generates tensors initialized to 0.
  Also available via the shortcut function `tf.keras.initializers.zeros`.
  Examples:
  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Zeros()
  >>> values = initializer(shape=(2, 2))
  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Zeros()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  """
  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.
    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
        supported. If not specified, `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless you configured it otherwise
        (via `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    # `_get_dtype` (defined elsewhere in this module) resolves a None dtype to
    # the Keras default; the actual fill is delegated to `tf.zeros_initializer`.
    return super(Zeros, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Ones', 'keras.initializers.ones', v1=[])
class Ones(tf.ones_initializer, Initializer):
  """Initializer that generates tensors initialized to 1.
  Also available via the shortcut function `tf.keras.initializers.ones`.
  Examples:
  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Ones()
  >>> values = initializer(shape=(2, 2))
  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Ones()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  """
  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.
    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
        supported. If not specified, `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless you configured it otherwise
        (via `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    # `_get_dtype` (defined elsewhere in this module) resolves a None dtype to
    # the Keras default; the actual fill is delegated to `tf.ones_initializer`.
    return super(Ones, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Constant',
              'keras.initializers.constant',
              v1=[])
class Constant(Initializer):
  """Initializer that generates tensors with constant values.
  Also available via the shortcut function `tf.keras.initializers.constant`.
  Only scalar values are allowed.
  The constant value provided must be convertible to the dtype requested
  when calling the initializer.
  Examples:
  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Constant(3.)
  >>> values = initializer(shape=(2, 2))
  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Constant(3.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  Args:
    value: A Python scalar.
  """
  def __init__(self, value=0):
    self.value = value
  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized to `self.value`.
    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. If not specified,
        `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless you configured it otherwise
        (via `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    del kwargs  # unused; accepted for call-signature compatibility
    return tf.constant(
        self.value, dtype=_get_dtype(dtype), shape=shape)
  def get_config(self):
    # `value` is the only constructor argument, so it fully describes the config.
    return {'value': self.value}
@keras_export('keras.initializers.RandomUniform',
              'keras.initializers.random_uniform',
              v1=[])
class RandomUniform(tf.random_uniform_initializer, Initializer):
  """Initializer that generates tensors with a uniform distribution.
  Also available via the shortcut function
  `tf.keras.initializers.random_uniform`.
  Examples:
  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
  >>> values = initializer(shape=(2, 2))
  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range of
      random values to generate (inclusive).
    maxval: A python scalar or a scalar tensor. Upper bound of the range of
      random values to generate (exclusive).
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """
  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.
    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point and integer
        types are supported. If not specified,
        `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless you configured it otherwise
        (via `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    # Sampling is delegated to `tf.random_uniform_initializer`; only the
    # dtype default resolution (via `_get_dtype`) is Keras-specific.
    return super(RandomUniform, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.RandomNormal',
              'keras.initializers.random_normal',
              v1=[])
class RandomNormal(tf.random_normal_initializer, Initializer):
  """Initializer that samples values from a normal (Gaussian) distribution.

  Also reachable through the shortcut function
  `tf.keras.initializers.random_normal`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    mean: A Python scalar or a scalar tensor; mean of the distribution.
    stddev: A Python scalar or a scalar tensor; standard deviation of the
      distribution.
    seed: A Python integer. A fixed seed yields the same random tensor for a
      given shape and dtype on every call.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of `shape` filled with normally distributed values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype; only floating point types are supported.
        Defaults to `tf.keras.backend.floatx()`, which is `float32` unless
        reconfigured via `tf.keras.backend.set_floatx(float_dtype)`.
      **kwargs: Additional keyword arguments forwarded to the base class.
    """
    resolved = _get_dtype(dtype)
    return super(RandomNormal, self).__call__(shape, dtype=resolved, **kwargs)
@keras_export('keras.initializers.TruncatedNormal',
              'keras.initializers.truncated_normal',
              v1=[])
class TruncatedNormal(init_ops_v2.TruncatedNormal, Initializer):
  """Initializer sampling from a truncated normal distribution.

  Also reachable through the shortcut function
  `tf.keras.initializers.truncated_normal`.

  Behaves like `tf.keras.initializers.RandomNormal`, except that samples
  falling more than two standard deviations away from the mean are discarded
  and re-drawn.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    mean: A Python scalar or a scalar tensor; mean of the random values to
      generate.
    stddev: A Python scalar or a scalar tensor; standard deviation of the
      random values to generate (before truncation).
    seed: A Python integer. A fixed seed yields the same random tensor for a
      given shape and dtype on every call.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of `shape` with truncated-normal random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype; only floating point types are supported.
        Defaults to `tf.keras.backend.floatx()`, which is `float32` unless
        reconfigured via `tf.keras.backend.set_floatx(float_dtype)`.
      **kwargs: Additional keyword arguments forwarded to the base class.
    """
    resolved = _get_dtype(dtype)
    return super(TruncatedNormal, self).__call__(shape, dtype=resolved, **kwargs)
@keras_export('keras.initializers.VarianceScaling',
              'keras.initializers.variance_scaling',
              v1=[])
class VarianceScaling(init_ops_v2.VarianceScaling, Initializer):
  """Initializer whose scale adapts to the shape of the initialized weights.

  Also reachable through the shortcut function
  `tf.keras.initializers.variance_scaling`.

  With `distribution="truncated_normal"` or `"untruncated_normal"`, samples
  come from the corresponding normal distribution with zero mean and a
  standard deviation (after truncation, if any) of `stddev = sqrt(scale / n)`,
  where `n` is:

  - the number of input units of the weight tensor, if `mode="fan_in"`
  - the number of output units, if `mode="fan_out"`
  - the average of the input and output unit counts, if `mode="fan_avg"`

  With `distribution="uniform"`, samples come from a uniform distribution over
  `[-limit, limit]` with `limit = sqrt(3 * scale / n)`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.VarianceScaling(
  ...     scale=0.1, mode='fan_in', distribution='uniform')
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.VarianceScaling(
  ...     scale=0.1, mode='fan_in', distribution='uniform')
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    scale: Positive float scaling factor.
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: One of "truncated_normal", "untruncated_normal", "uniform".
    seed: A Python integer. A fixed seed yields the same random tensor for a
      given shape and dtype on every call.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of `shape` initialized per the configured scheme.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype; only floating point types are supported.
        Defaults to `tf.keras.backend.floatx()`, which is `float32` unless
        reconfigured via `tf.keras.backend.set_floatx(float_dtype)`.
      **kwargs: Additional keyword arguments forwarded to the base class.
    """
    resolved = _get_dtype(dtype)
    return super(VarianceScaling, self).__call__(shape, dtype=resolved, **kwargs)
@keras_export('keras.initializers.Orthogonal',
              'keras.initializers.orthogonal',
              v1=[])
class Orthogonal(init_ops_v2.Orthogonal, Initializer):
  """Initializer that produces an orthogonal matrix.

  Also reachable through the shortcut function
  `tf.keras.initializers.orthogonal`.

  For a two-dimensional target shape, the result is an orthogonal matrix
  obtained from the QR decomposition of a matrix of normally distributed
  random numbers. The output has orthogonal rows when there are fewer rows
  than columns, and orthogonal columns otherwise.

  For a target shape with more than two dimensions, a matrix of shape
  `(shape[0] * ... * shape[n - 2], shape[n - 1])` (with `n` the length of the
  shape vector) is initialized and then reshaped to the requested shape.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Orthogonal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Orthogonal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    gain: Multiplicative factor applied to the orthogonal matrix.
    seed: A Python integer. A fixed seed yields the same random tensor for a
      given shape and dtype on every call.

  References:
    [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
    ([pdf](https://arxiv.org/pdf/1312.6120.pdf))
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an orthogonally initialized tensor of `shape`.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype; only floating point types are supported.
        Defaults to `tf.keras.backend.floatx()`, which is `float32` unless
        reconfigured via `tf.keras.backend.set_floatx(float_dtype)`.
      **kwargs: Additional keyword arguments forwarded to the base class.
    """
    resolved = _get_dtype(dtype)
    return super(Orthogonal, self).__call__(shape, dtype=resolved, **kwargs)
@keras_export('keras.initializers.Identity',
              'keras.initializers.identity',
              v1=[])
class Identity(init_ops_v2.Identity, Initializer):
  """Initializer that produces the identity matrix.

  Also reachable through the shortcut function
  `tf.keras.initializers.identity`. Only valid when generating 2D matrices.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Identity()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Identity()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    gain: Multiplicative factor applied to the identity matrix.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a scaled 2D identity matrix.

    Args:
      shape: Shape of the tensor; must have exactly rank 2.
      dtype: Optional dtype; only floating point types are supported.
        Defaults to `tf.keras.backend.floatx()`, which is `float32` unless
        reconfigured via `tf.keras.backend.set_floatx(float_dtype)`.
      **kwargs: Additional keyword arguments forwarded to the base class.
    """
    resolved = _get_dtype(dtype)
    return super(Identity, self).__call__(shape, dtype=resolved, **kwargs)
@keras_export('keras.initializers.GlorotUniform',
              'keras.initializers.glorot_uniform',
              v1=[])
class GlorotUniform(VarianceScaling):
  """Glorot uniform initializer, also known as Xavier uniform.

  Also reachable through the shortcut function
  `tf.keras.initializers.glorot_uniform`.

  Samples come from a uniform distribution over `[-limit, limit]` with
  `limit = sqrt(6 / (fan_in + fan_out))`, where `fan_in` is the number of
  input units of the weight tensor and `fan_out` the number of output units.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.GlorotUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.GlorotUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. A fixed seed yields the same random tensor for a
      given shape and dtype on every call.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    # Glorot uniform is variance scaling with fan-average mode and a
    # uniform distribution at unit scale.
    super(GlorotUniform, self).__init__(
        mode='fan_avg',
        distribution='uniform',
        scale=1.0,
        seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.GlorotNormal',
              'keras.initializers.glorot_normal',
              v1=[])
class GlorotNormal(VarianceScaling):
  """Glorot normal initializer, also known as Xavier normal.

  Also reachable through the shortcut function
  `tf.keras.initializers.glorot_normal`.

  Samples come from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` is the number of
  input units of the weight tensor and `fan_out` the number of output units.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.GlorotNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.GlorotNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. A fixed seed yields the same random tensor for a
      given shape and dtype on every call.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    # Glorot normal is variance scaling with fan-average mode and a
    # truncated normal distribution at unit scale.
    super(GlorotNormal, self).__init__(
        mode='fan_avg',
        distribution='truncated_normal',
        scale=1.0,
        seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.LecunNormal',
              'keras.initializers.lecun_normal',
              v1=[])
class LecunNormal(VarianceScaling):
  """Lecun normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.lecun_normal`.

  Initializers allow you to pre-specify an initialization strategy, encoded in
  the Initializer object, without knowing the shape and dtype of the variable
  being initialized.

  Draws samples from a truncated normal distribution centered on 0 with `stddev
  = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight
  tensor.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.LecunNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.LecunNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. Used to seed the random generator.

  References:
    - Self-Normalizing Neural Networks,
    [Klambauer et al., 2017]
    (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
    ([pdf]
    (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
    - Efficient Backprop,
    [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """

  def __init__(self, seed=None):
    super(LecunNormal, self).__init__(
        scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.LecunUniform',
              'keras.initializers.lecun_uniform',
              v1=[])
class LecunUniform(VarianceScaling):
  """Lecun uniform initializer.

  Also available via the shortcut function
  `tf.keras.initializers.lecun_uniform`.

  Draws samples from a uniform distribution within `[-limit, limit]`,
  where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
  weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.LecunUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.LecunUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    - Self-Normalizing Neural Networks,
    [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
    ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
    - Efficient Backprop,
    [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """

  def __init__(self, seed=None):
    super(LecunUniform, self).__init__(
        scale=1., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.HeNormal',
              'keras.initializers.he_normal',
              v1=[])
class HeNormal(VarianceScaling):
  """He normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.he_normal`.

  It draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the
  weight tensor.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.HeNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.HeNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
    ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """

  def __init__(self, seed=None):
    super(HeNormal, self).__init__(
        scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.HeUniform',
              'keras.initializers.he_uniform',
              v1=[])
class HeUniform(VarianceScaling):
  """He uniform variance scaling initializer.

  Also available via the shortcut function
  `tf.keras.initializers.he_uniform`.

  Draws samples from a uniform distribution within `[-limit, limit]`, where
  `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
  weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.HeUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.HeUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
    [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
    ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """

  def __init__(self, seed=None):
    super(HeUniform, self).__init__(
        scale=2., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
def _get_dtype(dtype):
  """Coerces `dtype` to a `tf.DType`, defaulting to the Keras floatx."""
  if dtype is not None:
    return tf.as_dtype(dtype)
  return tf.as_dtype(backend.floatx())
| 35.041721 | 162 | 0.699111 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.initializers.Initializer')
class Initializer(object):
  """Abstract base class for Keras initializers.

  An initializer is a callable that, given a `shape` and optional `dtype`,
  returns a tensor of initial values; `get_config`/`from_config` support
  serialization.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of `shape` and `dtype`; subclasses must implement."""
    raise NotImplementedError

  def get_config(self):
    """Returns the initializer's configuration as a JSON-serializable dict."""
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a `get_config()` dictionary."""
    # Drop a possible 'dtype' key, which the constructors do not accept.
    config.pop('dtype', None)
    return cls(**config)
@keras_export('keras.initializers.Zeros', 'keras.initializers.zeros', v1=[])
class Zeros(tf.zeros_initializer, Initializer):
  """Initializer that generates tensors filled with zeros."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an all-zeros tensor of `shape` and resolved `dtype`."""
    return super(Zeros, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Ones', 'keras.initializers.ones', v1=[])
class Ones(tf.ones_initializer, Initializer):
  """Initializer that generates tensors filled with ones."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an all-ones tensor of `shape` and resolved `dtype`."""
    return super(Ones, self).__call__(shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Constant',
              'keras.initializers.constant',
              v1=[])
class Constant(Initializer):
  """Initializer that fills tensors with a single scalar `value`."""

  def __init__(self, value=0):
    self.value = value

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a constant tensor; `value` is converted to the resolved dtype."""
    del kwargs
    return tf.constant(
        self.value, dtype=_get_dtype(dtype), shape=shape)

  def get_config(self):
    """Returns the serialization config (`{'value': ...}`)."""
    return {'value': self.value}
@keras_export('keras.initializers.RandomUniform',
              'keras.initializers.random_uniform',
              v1=[])
class RandomUniform(tf.random_uniform_initializer, Initializer):
  """Initializer that generates tensors with a uniform distribution."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a uniformly random tensor of `shape` and resolved `dtype`."""
    return super(RandomUniform, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.RandomNormal',
              'keras.initializers.random_normal',
              v1=[])
class RandomNormal(tf.random_normal_initializer, Initializer):
  """Initializer that generates tensors with a normal distribution."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a normally distributed tensor of `shape` and resolved `dtype`."""
    return super(RandomNormal, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.TruncatedNormal',
              'keras.initializers.truncated_normal',
              v1=[])
class TruncatedNormal(init_ops_v2.TruncatedNormal, Initializer):
  """Initializer that generates a truncated normal distribution."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a truncated-normal tensor of `shape` and resolved `dtype`."""
    return super(TruncatedNormal, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.VarianceScaling',
              'keras.initializers.variance_scaling',
              v1=[])
class VarianceScaling(init_ops_v2.VarianceScaling, Initializer):
  """Initializer that adapts its scale to the shape of the weights tensor."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of `shape` initialized per the configured scheme."""
    return super(VarianceScaling, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Orthogonal',
              'keras.initializers.orthogonal',
              v1=[])
class Orthogonal(init_ops_v2.Orthogonal, Initializer):
  """Initializer that generates an orthogonal matrix."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an orthogonally initialized tensor of `shape`."""
    return super(Orthogonal, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.Identity',
              'keras.initializers.identity',
              v1=[])
class Identity(init_ops_v2.Identity, Initializer):
  """Initializer that generates the identity matrix (2D shapes only)."""

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a scaled 2D identity matrix of `shape` and resolved `dtype`."""
    return super(Identity, self).__call__(
        shape, dtype=_get_dtype(dtype), **kwargs)
@keras_export('keras.initializers.GlorotUniform',
              'keras.initializers.glorot_uniform',
              v1=[])
class GlorotUniform(VarianceScaling):
  """Glorot (Xavier) uniform initializer: fan-average variance scaling."""

  def __init__(self, seed=None):
    super(GlorotUniform, self).__init__(
        scale=1.0,
        mode='fan_avg',
        distribution='uniform',
        seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.GlorotNormal',
              'keras.initializers.glorot_normal',
              v1=[])
class GlorotNormal(VarianceScaling):
  """Glorot (Xavier) normal initializer: fan-average truncated normal."""

  def __init__(self, seed=None):
    super(GlorotNormal, self).__init__(
        scale=1.0,
        mode='fan_avg',
        distribution='truncated_normal',
        seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.LecunNormal',
              'keras.initializers.lecun_normal',
              v1=[])
class LecunNormal(VarianceScaling):
  """Lecun normal initializer: fan-in truncated normal at unit scale."""

  def __init__(self, seed=None):
    super(LecunNormal, self).__init__(
        scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.LecunUniform',
              'keras.initializers.lecun_uniform',
              v1=[])
class LecunUniform(VarianceScaling):
  """Lecun uniform initializer: fan-in uniform at unit scale."""

  def __init__(self, seed=None):
    super(LecunUniform, self).__init__(
        scale=1., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.HeNormal',
              'keras.initializers.he_normal',
              v1=[])
class HeNormal(VarianceScaling):
  """He normal initializer: fan-in truncated normal with scale 2."""

  def __init__(self, seed=None):
    super(HeNormal, self).__init__(
        scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
@keras_export('keras.initializers.HeUniform',
              'keras.initializers.he_uniform',
              v1=[])
class HeUniform(VarianceScaling):
  """He uniform initializer: fan-in uniform with scale 2."""

  def __init__(self, seed=None):
    super(HeUniform, self).__init__(
        scale=2., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the serialization config (`{'seed': ...}`)."""
    return {'seed': self.seed}
def _get_dtype(dtype):
  """Coerces `dtype` to a `tf.DType`, defaulting to the Keras floatx."""
  if dtype is None:
    dtype = backend.floatx()
  return tf.as_dtype(dtype)
| true | true |
790187d3fe7399dfc406b03a12c16e7f62d46c93 | 9,607 | py | Python | tools/c7n_mailer/c7n_mailer/cli.py | blade2005/cloud-custodian | 21ecdd60ae8a78887cf9d135367b283ce88b0fd9 | [
"Apache-2.0"
] | null | null | null | tools/c7n_mailer/c7n_mailer/cli.py | blade2005/cloud-custodian | 21ecdd60ae8a78887cf9d135367b283ce88b0fd9 | [
"Apache-2.0"
] | 79 | 2019-03-20T12:27:06.000Z | 2019-08-14T14:07:04.000Z | tools/c7n_mailer/c7n_mailer/cli.py | blade2005/cloud-custodian | 21ecdd60ae8a78887cf9d135367b283ce88b0fd9 | [
"Apache-2.0"
] | 2 | 2019-04-22T15:20:23.000Z | 2019-08-27T12:37:51.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import functools
import logging
from os import path
import boto3
import jsonschema
from c7n_mailer import deploy, utils
from c7n_mailer.azure_mailer.azure_queue_processor import MailerAzureQueueProcessor
from c7n_mailer.azure_mailer import deploy as azure_deploy
from c7n_mailer.sqs_queue_processor import MailerSqsQueueProcessor
from c7n_mailer.utils import get_provider, Providers
from ruamel import yaml
# Schema for a secret reference resolved from Azure Key Vault.
AZURE_KV_SECRET_SCHEMA = {
    'type': 'object',
    'properties': {
        'type': {'enum': ['azure.keyvault']},
        'secret': {'type': 'string'}
    },
    'required': ['type', 'secret'],
    'additionalProperties': False
}

# Either a plain string or an Azure Key Vault secret reference.
SECURED_STRING_SCHEMA = {
    'oneOf': [
        {'type': 'string'},
        AZURE_KV_SECRET_SCHEMA
    ]
}

# Schema applied to the mailer.yml file by get_and_validate_mailer_config().
CONFIG_SCHEMA = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['queue_url'],
    'properties': {
        'queue_url': {'type': 'string'},
        'from_address': {'type': 'string'},
        'contact_tags': {'type': 'array', 'items': {'type': 'string'}},
        'org_domain': {'type': 'string'},

        # Standard Lambda Function Config
        'region': {'type': 'string'},
        'role': {'type': 'string'},
        'runtime': {'type': 'string'},
        'memory': {'type': 'integer'},
        'timeout': {'type': 'integer'},
        'subnets': {'type': 'array', 'items': {'type': 'string'}},
        'security_groups': {'type': 'array', 'items': {'type': 'string'}},
        'dead_letter_config': {'type': 'object'},
        'lambda_name': {'type': 'string'},
        'lambda_description': {'type': 'string'},
        'lambda_tags': {'type': 'object'},
        'lambda_schedule': {'type': 'string'},

        # Azure Function Config
        'function_properties': {
            'type': 'object',
            'appInsights': {
                'type': 'object',
                'oneOf': [
                    {'type': 'string'},
                    {'type': 'object',
                     'properties': {
                         'name': 'string',
                         'location': 'string',
                         'resourceGroupName': 'string'}
                     }
                ]
            },
            'storageAccount': {
                'type': 'object',
                'oneOf': [
                    {'type': 'string'},
                    {'type': 'object',
                     'properties': {
                         'name': 'string',
                         'location': 'string',
                         'resourceGroupName': 'string'}
                     }
                ]
            },
            'servicePlan': {
                'type': 'object',
                'oneOf': [
                    {'type': 'string'},
                    {'type': 'object',
                     'properties': {
                         'name': 'string',
                         'location': 'string',
                         'resourceGroupName': 'string',
                         'skuTier': 'string',
                         'skuName': 'string'}
                     }
                ]
            },
        },
        'function_schedule': {'type': 'string'},
        'function_skuCode': {'type': 'string'},
        'function_sku': {'type': 'string'},

        # Mailer Infrastructure Config
        'cache_engine': {'type': 'string'},
        'smtp_server': {'type': 'string'},
        'smtp_port': {'type': 'integer'},
        'smtp_ssl': {'type': 'boolean'},
        'smtp_username': {'type': 'string'},
        'smtp_password': SECURED_STRING_SCHEMA,
        'ldap_email_key': {'type': 'string'},
        'ldap_uid_tags': {'type': 'array', 'items': {'type': 'string'}},
        'debug': {'type': 'boolean'},
        'ldap_uid_regex': {'type': 'string'},
        'ldap_uri': {'type': 'string'},
        'ldap_bind_dn': {'type': 'string'},
        'ldap_bind_user': {'type': 'string'},
        'ldap_uid_attribute': {'type': 'string'},
        'ldap_manager_attribute': {'type': 'string'},
        'ldap_email_attribute': {'type': 'string'},
        'ldap_bind_password_in_kms': {'type': 'boolean'},
        'ldap_bind_password': {'type': 'string'},
        'cross_accounts': {'type': 'object'},
        'ses_region': {'type': 'string'},
        'redis_host': {'type': 'string'},
        'redis_port': {'type': 'integer'},
        'datadog_api_key': {'type': 'string'},              # TODO: encrypt with KMS?
        'datadog_application_key': {'type': 'string'},      # TODO: encrypt with KMS?
        'slack_token': {'type': 'string'},
        'slack_webhook': {'type': 'string'},
        'sendgrid_api_key': SECURED_STRING_SCHEMA,
        'splunk_hec_url': {'type': 'string'},
        'splunk_hec_token': {'type': 'string'},
        'splunk_remove_paths': {
            'type': 'array',
            'items': {'type': 'string'}
        },
        'splunk_actions_list': {'type': 'boolean'},
        'splunk_max_attempts': {'type': 'integer'},
        'splunk_hec_max_length': {'type': 'integer'},

        # SDK Config
        'profile': {'type': 'string'},
        'http_proxy': {'type': 'string'},
        'https_proxy': {'type': 'string'},

        # Mapping account / emails
        'account_emails': {'type': 'object'}
    }
}
def session_factory(mailer_config):
    """Builds a boto3 session for the configured region and optional profile."""
    region = mailer_config['region']
    profile = mailer_config.get('profile', None)
    return boto3.Session(region_name=region, profile_name=profile)
def get_logger(debug=False):
    """Configures basic logging and returns the 'custodian-mailer' logger.

    Sets up root logging at INFO with a timestamped format and quiets
    botocore to WARNING. With `debug=True`, both botocore and the mailer
    logger are raised to DEBUG instead.

    Args:
        debug: When True, enable verbose (DEBUG) output.

    Returns:
        The `custodian-mailer` `logging.Logger`.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    botocore_logger = logging.getLogger('botocore')
    botocore_logger.setLevel(logging.WARNING)
    mailer_logger = logging.getLogger('custodian-mailer')
    if debug:
        botocore_logger.setLevel(logging.DEBUG)
        mailer_logger.setLevel(logging.DEBUG)
    return mailer_logger
def get_and_validate_mailer_config(args):
    """Loads the YAML config named by `args.config`, validates, and defaults it.

    Raises:
        jsonschema.ValidationError: If the config does not match CONFIG_SCHEMA.
    """
    with open(args.config) as fh:
        raw = fh.read()
    config = yaml.load(raw, Loader=yaml.SafeLoader)
    jsonschema.validate(config, CONFIG_SCHEMA)
    utils.setup_defaults(config)
    return config
def get_c7n_mailer_parser():
    """Builds the c7n-mailer argument parser.

    Requires -c/--config plus exactly one of --update-lambda or --run;
    --debug, --max-num-processes and -t/--templates tune the --run mode.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--config', required=True, help='mailer.yml config file')
    parser.add_argument(
        '--debug', action='store_true',
        help='sets c7n_mailer logger to debug, for maximum output (the default is INFO)')
    parser.add_argument(
        '--max-num-processes', type=int,
        help='will run the mailer in parallel, integer of max processes allowed')
    parser.add_argument(
        '-t', '--templates', help='message templates folder location')
    # Exactly one mode must be chosen.
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument(
        '--update-lambda', action='store_true',
        help='packages your c7n_mailer, uploads the zip to aws lambda as a function')
    mode_group.add_argument(
        '--run', action='store_true',
        help='run c7n-mailer locally, process sqs messages and send emails or sns messages')
    return parser
def run_mailer_in_parallel(processor, max_num_processes):
    """Runs the queue processor in parallel mode.

    Args:
        processor: A queue processor exposing a `run(parallel=...)` method and
            a `max_num_processes` attribute.
        max_num_processes: Maximum worker processes; anything `int()` accepts.

    Raises:
        ValueError: If `max_num_processes` is less than 1. (ValueError is a
            subclass of Exception, so existing broad handlers still apply.)
    """
    max_num_processes = int(max_num_processes)
    if max_num_processes < 1:
        # Previously a bare `raise Exception`; give callers a real message.
        raise ValueError(
            'max_num_processes must be a positive integer, got %d'
            % max_num_processes)
    processor.max_num_processes = max_num_processes
    processor.run(parallel=True)
def main():
    """CLI entry point: parse args, load config, then deploy or run the mailer."""
    parser = get_c7n_mailer_parser()
    args = parser.parse_args()
    mailer_config = get_and_validate_mailer_config(args)
    args_dict = vars(args)
    logger = get_logger(debug=args_dict.get('debug', False))
    # Template search path: the packaged msg-templates directories and the
    # current working directory, with an optional user-supplied folder
    # (env vars and ~ expanded) appended last.
    module_dir = path.dirname(path.abspath(__file__))
    default_templates = [path.abspath(path.join(module_dir, 'msg-templates')),
                         path.abspath(path.join(module_dir, '..', 'msg-templates')),
                         path.abspath('.')]
    templates = args_dict.get('templates', None)
    if templates:
        default_templates.append(path.abspath(path.expanduser(path.expandvars(templates))))
    mailer_config['templates_folders'] = default_templates

    provider = get_provider(mailer_config)
    if args_dict.get('update_lambda'):
        # --debug and --max-num-processes only make sense with --run.
        if args_dict.get('debug'):
            print('\n** --debug is only supported with --run, not --update-lambda **\n')
            return
        if args_dict.get('max_num_processes'):
            print('\n** --max-num-processes is only supported '
                  'with --run, not --update-lambda **\n')
            return
        # Deploy the mailer function to the configured cloud provider.
        if provider == Providers.Azure:
            azure_deploy.provision(mailer_config)
        elif provider == Providers.AWS:
            deploy.provision(mailer_config, functools.partial(session_factory, mailer_config))
    if args_dict.get('run'):
        max_num_processes = args_dict.get('max_num_processes')

        # Select correct processor
        if provider == Providers.Azure:
            processor = MailerAzureQueueProcessor(mailer_config, logger)
        elif provider == Providers.AWS:
            aws_session = session_factory(mailer_config)
            processor = MailerSqsQueueProcessor(mailer_config, aws_session, logger)

        # Execute
        if max_num_processes:
            run_mailer_in_parallel(processor, max_num_processes)
        else:
            processor.run()
if __name__ == '__main__':
    # Script entry point.
    main()
| 37.527344 | 100 | 0.575518 | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import functools
import logging
from os import path
import boto3
import jsonschema
from c7n_mailer import deploy, utils
from c7n_mailer.azure_mailer.azure_queue_processor import MailerAzureQueueProcessor
from c7n_mailer.azure_mailer import deploy as azure_deploy
from c7n_mailer.sqs_queue_processor import MailerSqsQueueProcessor
from c7n_mailer.utils import get_provider, Providers
from ruamel import yaml
# JSON schema for a secret stored in Azure Key Vault, referenced from the
# mailer config as ``{"type": "azure.keyvault", "secret": "<secret-id>"}``.
AZURE_KV_SECRET_SCHEMA = {
    'type': 'object',
    'properties': {
        'type': {'enum': ['azure.keyvault']},
        'secret': {'type': 'string'}
    },
    'required': ['type', 'secret'],
    'additionalProperties': False
}
# A config value that may be supplied either as a plain string or as an
# Azure Key Vault secret reference (see AZURE_KV_SECRET_SCHEMA above).
SECURED_STRING_SCHEMA = {
    'oneOf': [
        {'type': 'string'},
        AZURE_KV_SECRET_SCHEMA
    ]
}
# JSON schema for the mailer's YAML configuration file.  Validated in
# get_and_validate_mailer_config() before deployment or queue processing.
# Only 'queue_url' is mandatory; unknown keys are rejected.
CONFIG_SCHEMA = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['queue_url'],
    'properties': {
        'queue_url': {'type': 'string'},
        'from_address': {'type': 'string'},
        'contact_tags': {'type': 'array', 'items': {'type': 'string'}},
        'org_domain': {'type': 'string'},
        # AWS Lambda deployment settings
        'region': {'type': 'string'},
        'role': {'type': 'string'},
        'runtime': {'type': 'string'},
        'memory': {'type': 'integer'},
        'timeout': {'type': 'integer'},
        'subnets': {'type': 'array', 'items': {'type': 'string'}},
        'security_groups': {'type': 'array', 'items': {'type': 'string'}},
        'dead_letter_config': {'type': 'object'},
        'lambda_name': {'type': 'string'},
        'lambda_description': {'type': 'string'},
        'lambda_tags': {'type': 'object'},
        'lambda_schedule': {'type': 'string'},
        # Azure Function deployment settings; each sub-resource may be given
        # as a plain name or as an object with name/location/resource group.
        'function_properties': {
            'type': 'object',
            'appInsights': {
                'type': 'object',
                'oneOf': [
                    {'type': 'string'},
                    {'type': 'object',
                     'properties': {
                         'name': 'string',
                         'location': 'string',
                         'resourceGroupName': 'string'}
                     }
                ]
            },
            'storageAccount': {
                'type': 'object',
                'oneOf': [
                    {'type': 'string'},
                    {'type': 'object',
                     'properties': {
                         'name': 'string',
                         'location': 'string',
                         'resourceGroupName': 'string'}
                     }
                ]
            },
            'servicePlan': {
                'type': 'object',
                'oneOf': [
                    {'type': 'string'},
                    {'type': 'object',
                     'properties': {
                         'name': 'string',
                         'location': 'string',
                         'resourceGroupName': 'string',
                         'skuTier': 'string',
                         'skuName': 'string'}
                     }
                ]
            },
        },
        'function_schedule': {'type': 'string'},
        'function_skuCode': {'type': 'string'},
        'function_sku': {'type': 'string'},
        'cache_engine': {'type': 'string'},
        # SMTP delivery settings; the password may be a Key Vault reference.
        'smtp_server': {'type': 'string'},
        'smtp_port': {'type': 'integer'},
        'smtp_ssl': {'type': 'boolean'},
        'smtp_username': {'type': 'string'},
        'smtp_password': SECURED_STRING_SCHEMA,
        # LDAP lookup settings used to resolve user tags to email addresses.
        'ldap_email_key': {'type': 'string'},
        'ldap_uid_tags': {'type': 'array', 'items': {'type': 'string'}},
        'debug': {'type': 'boolean'},
        'ldap_uid_regex': {'type': 'string'},
        'ldap_uri': {'type': 'string'},
        'ldap_bind_dn': {'type': 'string'},
        'ldap_bind_user': {'type': 'string'},
        'ldap_uid_attribute': {'type': 'string'},
        'ldap_manager_attribute': {'type': 'string'},
        'ldap_email_attribute': {'type': 'string'},
        'ldap_bind_password_in_kms': {'type': 'boolean'},
        'ldap_bind_password': {'type': 'string'},
        'cross_accounts': {'type': 'object'},
        'ses_region': {'type': 'string'},
        'redis_host': {'type': 'string'},
        'redis_port': {'type': 'integer'},
        # Delivery integrations: Datadog, Slack, SendGrid, Splunk HEC.
        'datadog_api_key': {'type': 'string'},
        'datadog_application_key': {'type': 'string'},
        'slack_token': {'type': 'string'},
        'slack_webhook': {'type': 'string'},
        'sendgrid_api_key': SECURED_STRING_SCHEMA,
        'splunk_hec_url': {'type': 'string'},
        'splunk_hec_token': {'type': 'string'},
        'splunk_remove_paths': {
            'type': 'array',
            'items': {'type': 'string'}
        },
        'splunk_actions_list': {'type': 'boolean'},
        'splunk_max_attempts': {'type': 'integer'},
        'splunk_hec_max_length': {'type': 'integer'},
        'profile': {'type': 'string'},
        'http_proxy': {'type': 'string'},
        'https_proxy': {'type': 'string'},
        'account_emails': {'type': 'object'}
    }
}
def session_factory(mailer_config):
    """Build a boto3 Session for the region (and optional profile) in *mailer_config*."""
    region = mailer_config['region']
    profile = mailer_config.get('profile', None)
    return boto3.Session(region_name=region, profile_name=profile)
def get_logger(debug=False):
    """Configure root logging and return the 'custodian-mailer' logger.

    Root logging is initialised at INFO with a timestamped format.  The
    chatty 'botocore' logger is quieted to WARNING unless *debug* is true,
    in which case both 'botocore' and the returned logger run at DEBUG.
    """
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=fmt)
    botocore_level = logging.DEBUG if debug else logging.WARNING
    logging.getLogger('botocore').setLevel(botocore_level)
    logger = logging.getLogger('custodian-mailer')
    if debug:
        logger.setLevel(logging.DEBUG)
    return logger
def get_and_validate_mailer_config(args):
    """Load the YAML config named by ``args.config``, validate it against
    CONFIG_SCHEMA, fill in defaults, and return the resulting dict.

    Raises jsonschema.ValidationError when the file does not conform.
    """
    with open(args.config) as fh:
        raw = fh.read()
    config = yaml.load(raw, Loader=yaml.SafeLoader)
    jsonschema.validate(config, CONFIG_SCHEMA)
    utils.setup_defaults(config)
    return config
def get_c7n_mailer_parser():
    """Build the argparse parser for the c7n-mailer command line.

    The mandatory ``-c/--config`` names the mailer YAML file, and exactly
    one of ``--update-lambda`` or ``--run`` must be chosen.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--config', required=True, help='mailer.yml config file')
    parser.add_argument(
        '--debug', action='store_true',
        help='sets c7n_mailer logger to debug, for maximum output (the default is INFO)')
    parser.add_argument(
        '--max-num-processes', type=int,
        help='will run the mailer in parallel, integer of max processes allowed')
    parser.add_argument(
        '-t', '--templates', help='message templates folder location')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument(
        '--update-lambda', action='store_true',
        help='packages your c7n_mailer, uploads the zip to aws lambda as a function')
    mode.add_argument(
        '--run', action='store_true',
        help='run c7n-mailer locally, process sqs messages and send emails or sns messages')
    return parser
def run_mailer_in_parallel(processor, max_num_processes):
    """Run *processor* in parallel mode with ``max_num_processes`` workers.

    Args:
        processor: a queue processor exposing a ``max_num_processes``
            attribute and a ``run(parallel=...)`` method.
        max_num_processes: desired worker count; coerced with ``int()``.

    Raises:
        ValueError: if the coerced worker count is less than 1 (or the
            value cannot be converted to an integer).
    """
    max_num_processes = int(max_num_processes)
    if max_num_processes < 1:
        # Was a bare ``raise Exception`` with no message; ValueError is a
        # subclass of Exception, so existing broad handlers still match.
        raise ValueError(
            'max_num_processes must be at least 1, got %d' % max_num_processes)
    processor.max_num_processes = max_num_processes
    processor.run(parallel=True)
def main():
    """CLI entry point: parse arguments, then deploy the mailer function
    (--update-lambda) and/or process the queue locally (--run)."""
    parser = get_c7n_mailer_parser()
    args = parser.parse_args()
    mailer_config = get_and_validate_mailer_config(args)
    args_dict = vars(args)
    logger = get_logger(debug=args_dict.get('debug', False))
    # Template search path: the bundled msg-templates dirs and the CWD,
    # with an optional user-supplied folder (env vars / ~ expanded) last.
    module_dir = path.dirname(path.abspath(__file__))
    default_templates = [path.abspath(path.join(module_dir, 'msg-templates')),
                         path.abspath(path.join(module_dir, '..', 'msg-templates')),
                         path.abspath('.')]
    templates = args_dict.get('templates', None)
    if templates:
        default_templates.append(path.abspath(path.expanduser(path.expandvars(templates))))
    mailer_config['templates_folders'] = default_templates
    provider = get_provider(mailer_config)
    if args_dict.get('update_lambda'):
        # --debug and --max-num-processes only apply to --run; reject early.
        if args_dict.get('debug'):
            print('\n** --debug is only supported with --run, not --update-lambda **\n')
            return
        if args_dict.get('max_num_processes'):
            print('\n** --max-num-processes is only supported '
                  'with --run, not --update-lambda **\n')
            return
        # Provision the cloud-side function for the configured provider.
        if provider == Providers.Azure:
            azure_deploy.provision(mailer_config)
        elif provider == Providers.AWS:
            deploy.provision(mailer_config, functools.partial(session_factory, mailer_config))
    if args_dict.get('run'):
        max_num_processes = args_dict.get('max_num_processes')
        # Select the queue processor matching the provider.
        if provider == Providers.Azure:
            processor = MailerAzureQueueProcessor(mailer_config, logger)
        elif provider == Providers.AWS:
            aws_session = session_factory(mailer_config)
            processor = MailerSqsQueueProcessor(mailer_config, aws_session, logger)
        # Execute, optionally fanning out across worker processes.
        if max_num_processes:
            run_mailer_in_parallel(processor, max_num_processes)
        else:
            processor.run()
# Entry point when invoked directly as a script (e.g. ``python cli.py``).
if __name__ == '__main__':
    main()
| true | true |
790188ae02814f0707a255110003d194b42b7e35 | 17,512 | py | Python | sdk/python/pulumi_azure_native/recoveryservices/v20181220/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20181220/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20181220/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'WorkloadCrrAccessTokenResponse',
]
@pulumi.output_type
class WorkloadCrrAccessTokenResponse(dict):
    """Workload-specific CRR access token (Azure Recovery Services backup).

    All fields except ``object_type`` are optional; only values supplied at
    construction time are stored on the instance (via ``pulumi.set``).
    """

    def __init__(__self__, *,
                 object_type: str,
                 access_token_string: Optional[str] = None,
                 b_ms_active_region: Optional[str] = None,
                 backup_management_type: Optional[str] = None,
                 container_id: Optional[str] = None,
                 container_name: Optional[str] = None,
                 container_type: Optional[str] = None,
                 coordinator_service_stamp_id: Optional[str] = None,
                 coordinator_service_stamp_uri: Optional[str] = None,
                 datasource_container_name: Optional[str] = None,
                 datasource_id: Optional[str] = None,
                 datasource_name: Optional[str] = None,
                 datasource_type: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 policy_name: Optional[str] = None,
                 protectable_object_container_host_os_name: Optional[str] = None,
                 protectable_object_friendly_name: Optional[str] = None,
                 protectable_object_parent_logical_container_name: Optional[str] = None,
                 protectable_object_protection_state: Optional[str] = None,
                 protectable_object_unique_name: Optional[str] = None,
                 protectable_object_workload_type: Optional[str] = None,
                 protection_container_id: Optional[float] = None,
                 protection_service_stamp_id: Optional[str] = None,
                 protection_service_stamp_uri: Optional[str] = None,
                 recovery_point_id: Optional[str] = None,
                 recovery_point_time: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 resource_id: Optional[str] = None,
                 resource_name: Optional[str] = None,
                 rp_is_managed_virtual_machine: Optional[bool] = None,
                 rp_original_sa_option: Optional[bool] = None,
                 rp_tier_information: Optional[Mapping[str, str]] = None,
                 rp_vm_size_description: Optional[str] = None,
                 subscription_id: Optional[str] = None,
                 token_extended_information: Optional[str] = None):
        """
        :param str object_type: Type of the specific object - used for deserializing
               Expected value is 'WorkloadCrrAccessToken'.
        :param str access_token_string: Access token used for authentication
        :param str b_ms_active_region: Active region name of BMS Stamp
        :param str backup_management_type: Backup Management Type
        :param str container_id: Container Id
        :param str container_name: Container Unique name
        :param str container_type: Container Type
        :param str coordinator_service_stamp_id: CoordinatorServiceStampId to be used by BCM in restore call
        :param str coordinator_service_stamp_uri: CoordinatorServiceStampUri to be used by BCM in restore call
        :param str datasource_container_name: Datasource Container Unique Name
        :param str datasource_id: Datasource Id
        :param str datasource_name: Datasource Friendly Name
        :param str datasource_type: Datasource Type
        :param str policy_id: Policy Id
        :param str policy_name: Policy Name
        :param str protectable_object_container_host_os_name: Protectable object container host OS name
        :param str protectable_object_friendly_name: Protectable object friendly name
        :param str protectable_object_parent_logical_container_name: Protectable object parent logical container name
        :param str protectable_object_protection_state: Protectable object protection state
        :param str protectable_object_unique_name: Protectable object unique name
        :param str protectable_object_workload_type: Protectable object workload type
        :param float protection_container_id: Protected item container id
        :param str protection_service_stamp_id: ProtectionServiceStampId to be used by BCM in restore call
        :param str protection_service_stamp_uri: ProtectionServiceStampUri to be used by BCM in restore call
        :param str recovery_point_id: Recovery Point Id
        :param str recovery_point_time: Recovery Point Time
        :param str resource_group_name: Resource Group name of the source vault
        :param str resource_id: Resource Id of the source vault
        :param str resource_name: Resource Name of the source vault
        :param bool rp_is_managed_virtual_machine: Recovery point information: Managed virtual machine
        :param bool rp_original_sa_option: Recovery point information: Original SA option
        :param Mapping[str, str] rp_tier_information: Recovery point Tier Information
        :param str rp_vm_size_description: Recovery point information: VM size description
        :param str subscription_id: Subscription Id of the source vault
        :param str token_extended_information: Extended Information about the token like FileSpec etc.
        """
        pulumi.set(__self__, "object_type", 'WorkloadCrrAccessToken')
        if access_token_string is not None:
            pulumi.set(__self__, "access_token_string", access_token_string)
        if b_ms_active_region is not None:
            pulumi.set(__self__, "b_ms_active_region", b_ms_active_region)
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if container_id is not None:
            pulumi.set(__self__, "container_id", container_id)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if container_type is not None:
            pulumi.set(__self__, "container_type", container_type)
        if coordinator_service_stamp_id is not None:
            pulumi.set(__self__, "coordinator_service_stamp_id", coordinator_service_stamp_id)
        if coordinator_service_stamp_uri is not None:
            pulumi.set(__self__, "coordinator_service_stamp_uri", coordinator_service_stamp_uri)
        if datasource_container_name is not None:
            pulumi.set(__self__, "datasource_container_name", datasource_container_name)
        if datasource_id is not None:
            pulumi.set(__self__, "datasource_id", datasource_id)
        if datasource_name is not None:
            pulumi.set(__self__, "datasource_name", datasource_name)
        if datasource_type is not None:
            pulumi.set(__self__, "datasource_type", datasource_type)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if policy_name is not None:
            pulumi.set(__self__, "policy_name", policy_name)
        if protectable_object_container_host_os_name is not None:
            pulumi.set(__self__, "protectable_object_container_host_os_name", protectable_object_container_host_os_name)
        if protectable_object_friendly_name is not None:
            pulumi.set(__self__, "protectable_object_friendly_name", protectable_object_friendly_name)
        if protectable_object_parent_logical_container_name is not None:
            pulumi.set(__self__, "protectable_object_parent_logical_container_name", protectable_object_parent_logical_container_name)
        if protectable_object_protection_state is not None:
            pulumi.set(__self__, "protectable_object_protection_state", protectable_object_protection_state)
        if protectable_object_unique_name is not None:
            pulumi.set(__self__, "protectable_object_unique_name", protectable_object_unique_name)
        if protectable_object_workload_type is not None:
            pulumi.set(__self__, "protectable_object_workload_type", protectable_object_workload_type)
        if protection_container_id is not None:
            pulumi.set(__self__, "protection_container_id", protection_container_id)
        if protection_service_stamp_id is not None:
            pulumi.set(__self__, "protection_service_stamp_id", protection_service_stamp_id)
        if protection_service_stamp_uri is not None:
            pulumi.set(__self__, "protection_service_stamp_uri", protection_service_stamp_uri)
        if recovery_point_id is not None:
            pulumi.set(__self__, "recovery_point_id", recovery_point_id)
        if recovery_point_time is not None:
            pulumi.set(__self__, "recovery_point_time", recovery_point_time)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if resource_name is not None:
            pulumi.set(__self__, "resource_name", resource_name)
        if rp_is_managed_virtual_machine is not None:
            pulumi.set(__self__, "rp_is_managed_virtual_machine", rp_is_managed_virtual_machine)
        if rp_original_sa_option is not None:
            pulumi.set(__self__, "rp_original_sa_option", rp_original_sa_option)
        if rp_tier_information is not None:
            pulumi.set(__self__, "rp_tier_information", rp_tier_information)
        if rp_vm_size_description is not None:
            pulumi.set(__self__, "rp_vm_size_description", rp_vm_size_description)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)
        if token_extended_information is not None:
            pulumi.set(__self__, "token_extended_information", token_extended_information)

    @property
    @pulumi.getter(name="objectType")
    def object_type(self) -> str:
        """
        Type of the specific object - used for deserializing
        Expected value is 'WorkloadCrrAccessToken'.
        """
        return pulumi.get(self, "object_type")

    @property
    @pulumi.getter(name="accessTokenString")
    def access_token_string(self) -> Optional[str]:
        """
        Access token used for authentication
        """
        return pulumi.get(self, "access_token_string")

    @property
    @pulumi.getter(name="bMSActiveRegion")
    def b_ms_active_region(self) -> Optional[str]:
        """
        Active region name of BMS Stamp
        """
        return pulumi.get(self, "b_ms_active_region")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Backup Management Type
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="containerId")
    def container_id(self) -> Optional[str]:
        """
        Container Id
        """
        return pulumi.get(self, "container_id")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Container Unique name
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Container Type
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="coordinatorServiceStampId")
    def coordinator_service_stamp_id(self) -> Optional[str]:
        """
        CoordinatorServiceStampId to be used by BCM in restore call
        """
        return pulumi.get(self, "coordinator_service_stamp_id")

    @property
    @pulumi.getter(name="coordinatorServiceStampUri")
    def coordinator_service_stamp_uri(self) -> Optional[str]:
        """
        CoordinatorServiceStampUri to be used by BCM in restore call
        """
        return pulumi.get(self, "coordinator_service_stamp_uri")

    @property
    @pulumi.getter(name="datasourceContainerName")
    def datasource_container_name(self) -> Optional[str]:
        """
        Datasource Container Unique Name
        """
        return pulumi.get(self, "datasource_container_name")

    @property
    @pulumi.getter(name="datasourceId")
    def datasource_id(self) -> Optional[str]:
        """
        Datasource Id
        """
        return pulumi.get(self, "datasource_id")

    @property
    @pulumi.getter(name="datasourceName")
    def datasource_name(self) -> Optional[str]:
        """
        Datasource Friendly Name
        """
        return pulumi.get(self, "datasource_name")

    @property
    @pulumi.getter(name="datasourceType")
    def datasource_type(self) -> Optional[str]:
        """
        Datasource Type
        """
        return pulumi.get(self, "datasource_type")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """
        Policy Id
        """
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> Optional[str]:
        """
        Policy Name
        """
        return pulumi.get(self, "policy_name")

    @property
    @pulumi.getter(name="protectableObjectContainerHostOsName")
    def protectable_object_container_host_os_name(self) -> Optional[str]:
        """
        Protectable object container host OS name
        """
        return pulumi.get(self, "protectable_object_container_host_os_name")

    @property
    @pulumi.getter(name="protectableObjectFriendlyName")
    def protectable_object_friendly_name(self) -> Optional[str]:
        """
        Protectable object friendly name
        """
        return pulumi.get(self, "protectable_object_friendly_name")

    @property
    @pulumi.getter(name="protectableObjectParentLogicalContainerName")
    def protectable_object_parent_logical_container_name(self) -> Optional[str]:
        """
        Protectable object parent logical container name
        """
        return pulumi.get(self, "protectable_object_parent_logical_container_name")

    @property
    @pulumi.getter(name="protectableObjectProtectionState")
    def protectable_object_protection_state(self) -> Optional[str]:
        """
        Protectable object protection state
        """
        return pulumi.get(self, "protectable_object_protection_state")

    @property
    @pulumi.getter(name="protectableObjectUniqueName")
    def protectable_object_unique_name(self) -> Optional[str]:
        """
        Protectable object unique name
        """
        return pulumi.get(self, "protectable_object_unique_name")

    @property
    @pulumi.getter(name="protectableObjectWorkloadType")
    def protectable_object_workload_type(self) -> Optional[str]:
        """
        Protectable object workload type
        """
        return pulumi.get(self, "protectable_object_workload_type")

    @property
    @pulumi.getter(name="protectionContainerId")
    def protection_container_id(self) -> Optional[float]:
        """
        Protected item container id
        """
        return pulumi.get(self, "protection_container_id")

    @property
    @pulumi.getter(name="protectionServiceStampId")
    def protection_service_stamp_id(self) -> Optional[str]:
        """
        ProtectionServiceStampId to be used by BCM in restore call
        """
        return pulumi.get(self, "protection_service_stamp_id")

    @property
    @pulumi.getter(name="protectionServiceStampUri")
    def protection_service_stamp_uri(self) -> Optional[str]:
        """
        ProtectionServiceStampUri to be used by BCM in restore call
        """
        return pulumi.get(self, "protection_service_stamp_uri")

    @property
    @pulumi.getter(name="recoveryPointId")
    def recovery_point_id(self) -> Optional[str]:
        """
        Recovery Point Id
        """
        return pulumi.get(self, "recovery_point_id")

    @property
    @pulumi.getter(name="recoveryPointTime")
    def recovery_point_time(self) -> Optional[str]:
        """
        Recovery Point Time
        """
        return pulumi.get(self, "recovery_point_time")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[str]:
        """
        Resource Group name of the source vault
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """
        Resource Id of the source vault
        """
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="resourceName")
    def resource_name(self) -> Optional[str]:
        """
        Resource Name of the source vault
        """
        return pulumi.get(self, "resource_name")

    @property
    @pulumi.getter(name="rpIsManagedVirtualMachine")
    def rp_is_managed_virtual_machine(self) -> Optional[bool]:
        """
        Recovery point information: Managed virtual machine
        """
        return pulumi.get(self, "rp_is_managed_virtual_machine")

    @property
    @pulumi.getter(name="rpOriginalSAOption")
    def rp_original_sa_option(self) -> Optional[bool]:
        """
        Recovery point information: Original SA option
        """
        return pulumi.get(self, "rp_original_sa_option")

    @property
    @pulumi.getter(name="rpTierInformation")
    def rp_tier_information(self) -> Optional[Mapping[str, str]]:
        """
        Recovery point Tier Information
        """
        return pulumi.get(self, "rp_tier_information")

    @property
    @pulumi.getter(name="rpVMSizeDescription")
    def rp_vm_size_description(self) -> Optional[str]:
        """
        Recovery point information: VM size description
        """
        return pulumi.get(self, "rp_vm_size_description")

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[str]:
        """
        Subscription Id of the source vault
        """
        return pulumi.get(self, "subscription_id")

    @property
    @pulumi.getter(name="tokenExtendedInformation")
    def token_extended_information(self) -> Optional[str]:
        """
        Extended Information about the token like FileSpec etc.
        """
        return pulumi.get(self, "token_extended_information")
| 41.794749 | 134 | 0.672624 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
# Public API of this generated module.
__all__ = [
    'WorkloadCrrAccessTokenResponse',
]
@pulumi.output_type
class WorkloadCrrAccessTokenResponse(dict):
def __init__(__self__, *,
object_type: str,
access_token_string: Optional[str] = None,
b_ms_active_region: Optional[str] = None,
backup_management_type: Optional[str] = None,
container_id: Optional[str] = None,
container_name: Optional[str] = None,
container_type: Optional[str] = None,
coordinator_service_stamp_id: Optional[str] = None,
coordinator_service_stamp_uri: Optional[str] = None,
datasource_container_name: Optional[str] = None,
datasource_id: Optional[str] = None,
datasource_name: Optional[str] = None,
datasource_type: Optional[str] = None,
policy_id: Optional[str] = None,
policy_name: Optional[str] = None,
protectable_object_container_host_os_name: Optional[str] = None,
protectable_object_friendly_name: Optional[str] = None,
protectable_object_parent_logical_container_name: Optional[str] = None,
protectable_object_protection_state: Optional[str] = None,
protectable_object_unique_name: Optional[str] = None,
protectable_object_workload_type: Optional[str] = None,
protection_container_id: Optional[float] = None,
protection_service_stamp_id: Optional[str] = None,
protection_service_stamp_uri: Optional[str] = None,
recovery_point_id: Optional[str] = None,
recovery_point_time: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_id: Optional[str] = None,
resource_name: Optional[str] = None,
rp_is_managed_virtual_machine: Optional[bool] = None,
rp_original_sa_option: Optional[bool] = None,
rp_tier_information: Optional[Mapping[str, str]] = None,
rp_vm_size_description: Optional[str] = None,
subscription_id: Optional[str] = None,
token_extended_information: Optional[str] = None):
pulumi.set(__self__, "object_type", 'WorkloadCrrAccessToken')
if access_token_string is not None:
pulumi.set(__self__, "access_token_string", access_token_string)
if b_ms_active_region is not None:
pulumi.set(__self__, "b_ms_active_region", b_ms_active_region)
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if container_id is not None:
pulumi.set(__self__, "container_id", container_id)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if container_type is not None:
pulumi.set(__self__, "container_type", container_type)
if coordinator_service_stamp_id is not None:
pulumi.set(__self__, "coordinator_service_stamp_id", coordinator_service_stamp_id)
if coordinator_service_stamp_uri is not None:
pulumi.set(__self__, "coordinator_service_stamp_uri", coordinator_service_stamp_uri)
if datasource_container_name is not None:
pulumi.set(__self__, "datasource_container_name", datasource_container_name)
if datasource_id is not None:
pulumi.set(__self__, "datasource_id", datasource_id)
if datasource_name is not None:
pulumi.set(__self__, "datasource_name", datasource_name)
if datasource_type is not None:
pulumi.set(__self__, "datasource_type", datasource_type)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if policy_name is not None:
pulumi.set(__self__, "policy_name", policy_name)
if protectable_object_container_host_os_name is not None:
pulumi.set(__self__, "protectable_object_container_host_os_name", protectable_object_container_host_os_name)
if protectable_object_friendly_name is not None:
pulumi.set(__self__, "protectable_object_friendly_name", protectable_object_friendly_name)
if protectable_object_parent_logical_container_name is not None:
pulumi.set(__self__, "protectable_object_parent_logical_container_name", protectable_object_parent_logical_container_name)
if protectable_object_protection_state is not None:
pulumi.set(__self__, "protectable_object_protection_state", protectable_object_protection_state)
if protectable_object_unique_name is not None:
pulumi.set(__self__, "protectable_object_unique_name", protectable_object_unique_name)
if protectable_object_workload_type is not None:
pulumi.set(__self__, "protectable_object_workload_type", protectable_object_workload_type)
if protection_container_id is not None:
pulumi.set(__self__, "protection_container_id", protection_container_id)
if protection_service_stamp_id is not None:
pulumi.set(__self__, "protection_service_stamp_id", protection_service_stamp_id)
if protection_service_stamp_uri is not None:
pulumi.set(__self__, "protection_service_stamp_uri", protection_service_stamp_uri)
if recovery_point_id is not None:
pulumi.set(__self__, "recovery_point_id", recovery_point_id)
if recovery_point_time is not None:
pulumi.set(__self__, "recovery_point_time", recovery_point_time)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if rp_is_managed_virtual_machine is not None:
pulumi.set(__self__, "rp_is_managed_virtual_machine", rp_is_managed_virtual_machine)
if rp_original_sa_option is not None:
pulumi.set(__self__, "rp_original_sa_option", rp_original_sa_option)
if rp_tier_information is not None:
pulumi.set(__self__, "rp_tier_information", rp_tier_information)
if rp_vm_size_description is not None:
pulumi.set(__self__, "rp_vm_size_description", rp_vm_size_description)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if token_extended_information is not None:
pulumi.set(__self__, "token_extended_information", token_extended_information)
@property
@pulumi.getter(name="objectType")
def object_type(self) -> str:
return pulumi.get(self, "object_type")
@property
@pulumi.getter(name="accessTokenString")
def access_token_string(self) -> Optional[str]:
return pulumi.get(self, "access_token_string")
@property
@pulumi.getter(name="bMSActiveRegion")
def b_ms_active_region(self) -> Optional[str]:
return pulumi.get(self, "b_ms_active_region")
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[str]:
return pulumi.get(self, "backup_management_type")
@property
@pulumi.getter(name="containerId")
def container_id(self) -> Optional[str]:
return pulumi.get(self, "container_id")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[str]:
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="containerType")
def container_type(self) -> Optional[str]:
return pulumi.get(self, "container_type")
@property
@pulumi.getter(name="coordinatorServiceStampId")
def coordinator_service_stamp_id(self) -> Optional[str]:
return pulumi.get(self, "coordinator_service_stamp_id")
@property
@pulumi.getter(name="coordinatorServiceStampUri")
def coordinator_service_stamp_uri(self) -> Optional[str]:
return pulumi.get(self, "coordinator_service_stamp_uri")
@property
@pulumi.getter(name="datasourceContainerName")
def datasource_container_name(self) -> Optional[str]:
return pulumi.get(self, "datasource_container_name")
@property
@pulumi.getter(name="datasourceId")
def datasource_id(self) -> Optional[str]:
return pulumi.get(self, "datasource_id")
@property
@pulumi.getter(name="datasourceName")
def datasource_name(self) -> Optional[str]:
return pulumi.get(self, "datasource_name")
@property
@pulumi.getter(name="datasourceType")
def datasource_type(self) -> Optional[str]:
return pulumi.get(self, "datasource_type")
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[str]:
return pulumi.get(self, "policy_id")
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> Optional[str]:
return pulumi.get(self, "policy_name")
@property
@pulumi.getter(name="protectableObjectContainerHostOsName")
def protectable_object_container_host_os_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_container_host_os_name")
@property
@pulumi.getter(name="protectableObjectFriendlyName")
def protectable_object_friendly_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_friendly_name")
@property
@pulumi.getter(name="protectableObjectParentLogicalContainerName")
def protectable_object_parent_logical_container_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_parent_logical_container_name")
@property
@pulumi.getter(name="protectableObjectProtectionState")
def protectable_object_protection_state(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_protection_state")
@property
@pulumi.getter(name="protectableObjectUniqueName")
def protectable_object_unique_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_unique_name")
@property
@pulumi.getter(name="protectableObjectWorkloadType")
def protectable_object_workload_type(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_workload_type")
@property
@pulumi.getter(name="protectionContainerId")
def protection_container_id(self) -> Optional[float]:
return pulumi.get(self, "protection_container_id")
@property
@pulumi.getter(name="protectionServiceStampId")
def protection_service_stamp_id(self) -> Optional[str]:
return pulumi.get(self, "protection_service_stamp_id")
@property
@pulumi.getter(name="protectionServiceStampUri")
def protection_service_stamp_uri(self) -> Optional[str]:
return pulumi.get(self, "protection_service_stamp_uri")
@property
@pulumi.getter(name="recoveryPointId")
def recovery_point_id(self) -> Optional[str]:
return pulumi.get(self, "recovery_point_id")
@property
@pulumi.getter(name="recoveryPointTime")
def recovery_point_time(self) -> Optional[str]:
return pulumi.get(self, "recovery_point_time")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[str]:
return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """Id of the resource, if set."""
        return pulumi.get(self, "resource_id")
    @property
    @pulumi.getter(name="resourceName")
    def resource_name(self) -> Optional[str]:
        """Name of the resource, if set."""
        return pulumi.get(self, "resource_name")
    @property
    @pulumi.getter(name="rpIsManagedVirtualMachine")
    def rp_is_managed_virtual_machine(self) -> Optional[bool]:
        """Whether the recovery point refers to a managed virtual machine."""
        return pulumi.get(self, "rp_is_managed_virtual_machine")
    @property
    @pulumi.getter(name="rpOriginalSAOption")
    def rp_original_sa_option(self) -> Optional[bool]:
        """The recovery point's original storage-account option flag, if set."""
        return pulumi.get(self, "rp_original_sa_option")
    @property
    @pulumi.getter(name="rpTierInformation")
    def rp_tier_information(self) -> Optional[Mapping[str, str]]:
        """Recovery point tier information as a string-to-string mapping."""
        return pulumi.get(self, "rp_tier_information")
    @property
    @pulumi.getter(name="rpVMSizeDescription")
    def rp_vm_size_description(self) -> Optional[str]:
        """Description of the VM size recorded for the recovery point."""
        return pulumi.get(self, "rp_vm_size_description")
    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[str]:
        """Id of the subscription, if set."""
        return pulumi.get(self, "subscription_id")
    @property
    @pulumi.getter(name="tokenExtendedInformation")
    def token_extended_information(self) -> Optional[str]:
        """Extended information carried with the token, if any."""
        return pulumi.get(self, "token_extended_information")
| true | true |
7901891fab450fcfb03bc8eb0dc2bfbb1fba0b44 | 1,181 | py | Python | djangocms_tacc_section/cms_plugins.py | tacc-wbomar/Core-CMS-Plugin-Section | 9b5f652c6e01e46df5d5caa09cfa9ea6663823d0 | [
"BSD-2-Clause"
] | null | null | null | djangocms_tacc_section/cms_plugins.py | tacc-wbomar/Core-CMS-Plugin-Section | 9b5f652c6e01e46df5d5caa09cfa9ea6663823d0 | [
"BSD-2-Clause"
] | null | null | null | djangocms_tacc_section/cms_plugins.py | tacc-wbomar/Core-CMS-Plugin-Section | 9b5f652c6e01e46df5d5caa09cfa9ea6663823d0 | [
"BSD-2-Clause"
] | null | null | null | from djangocms_style.cms_plugins import StylePlugin
from cms.plugin_pool import plugin_pool
from django.utils.translation import gettext_lazy as _
from .models import TaccsiteSection
# Plugins
@plugin_pool.register_plugin
class TaccsiteSectionPlugin(StylePlugin):
    """
    Patterns > "Section" Plugin
    https://confluence.tacc.utexas.edu/x/c5TtDg
    """
    module = 'TACC Site'       # grouping label in the CMS plugin picker
    model = TaccsiteSection    # plugin instance model (extends djangocms_style)
    name = _('Section')        # translatable display name of the plugin

    # Copied from djangocms_style sans 'Inline style settings'
    # FAQ: If user wants to override spacing, they may:
    # - use Style plugin (if they have permission)
    # - request Design & Dev standardize use case
    # https://github.com/django-cms/djangocms-style/blob/3.0.0/djangocms_style/cms_plugins.py#L15-L40
    # Main section exposes only label/class/tag; everything else is collapsed
    # under "Advanced settings".
    fieldsets = (
        (None, {
            'fields': (
                'label',
                ('class_name', 'tag_type'),
            )
        }),
        (_('Advanced settings'), {
            'classes': ('collapse',),
            'fields': (
                'additional_classes',
                'id_name',
                'template',
                'attributes',
            ),
        }),
    )
| 28.119048 | 101 | 0.58171 | from djangocms_style.cms_plugins import StylePlugin
from cms.plugin_pool import plugin_pool
from django.utils.translation import gettext_lazy as _
from .models import TaccsiteSection
@plugin_pool.register_plugin
class TaccsiteSectionPlugin(StylePlugin):
    """TACC "Section" pattern plugin: a djangocms_style StylePlugin with a
    reduced admin form (no inline style settings)."""
    module = 'TACC Site'
    model = TaccsiteSection
    name = _('Section')

    # BUG FIX: this attribute was misspelled ``ldsets``, so the admin never
    # saw it and fell back to StylePlugin's inherited fieldsets.  The reduced
    # layout below only takes effect under the name ``fieldsets``.
    fieldsets = (
        (None, {
            'fields': (
                'label',
                ('class_name', 'tag_type'),
            )
        }),
        (_('Advanced settings'), {
            'classes': ('collapse',),
            'fields': (
                'additional_classes',
                'id_name',
                'template',
                'attributes',
            ),
        }),
    )
| true | true |
79018926c625feed767a46b117b2755cd86f2c6a | 9,602 | py | Python | tests/test_transition.py | kishorehariram/django-logic | 955f18211443b30ce39a845495e136d7590183a6 | [
"MIT"
] | null | null | null | tests/test_transition.py | kishorehariram/django-logic | 955f18211443b30ce39a845495e136d7590183a6 | [
"MIT"
] | null | null | null | tests/test_transition.py | kishorehariram/django-logic | 955f18211443b30ce39a845495e136d7590183a6 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django_logic.state import State
from django_logic.transition import Transition
from tests.models import Invoice
def disable_invoice(invoice: Invoice, *args, **kwargs):
    """Transition handler: mark the invoice unavailable and persist it."""
    invoice.is_available = False
    invoice.save()
def update_invoice(invoice, is_available, customer_received, *args, **kwargs):
    """Transition handler: write both flags onto the invoice and persist it.

    Extra positional/keyword arguments forwarded by the transition machinery
    are accepted and ignored.
    """
    invoice.customer_received = customer_received
    invoice.is_available = is_available
    invoice.save()
def enable_invoice(invoice: Invoice, *args, **kwargs):
    """Transition handler: mark the invoice available and persist it."""
    invoice.is_available = True
    invoice.save()
def fail_invoice(invoice: Invoice, *args, **kwargs):
    """Handler that always raises, to simulate a failing side effect/callback."""
    raise Exception
def receive_invoice(invoice: Invoice, *args, **kwargs):
    """Transition handler: flag the invoice as received and persist it."""
    invoice.customer_received = True
    invoice.save()
def debug_action(*args, **kwargs):
    """No-op handler; exists only as a patch target for mock assertions."""
    return None
class TransitionSideEffectsTestCase(TestCase):
    """Tests for Transition.change_state() side-effect handling.

    The assertions here establish that: side effects complete before the
    target state is committed; a failing side effect leaves the state on the
    source (or moves it to ``failed_state`` when configured); and the state
    lock is always released afterwards.
    """
    def setUp(self) -> None:
        self.invoice = Invoice.objects.create(status='draft')

    def test_one_side_effect(self):
        """A single side effect runs and the target state is reached."""
        transition = Transition('test', sources=[], target='cancelled', side_effects=[disable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_many_side_effects(self):
        """Side effects run in order; the last write wins."""
        transition = Transition('test', sources=[], target='cancelled',
                                side_effects=[disable_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertTrue(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_failure_during_side_effect(self):
        """A failing side effect keeps the source state and stops the chain."""
        transition = Transition('test', sources=[], target='cancelled',
                                side_effects=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'draft')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_failure_during_side_effect_with_failed_state(self):
        """With ``failed_state`` set, a failing side effect moves to it."""
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                side_effects=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_side_effect_with_parameters(self):
        """Keyword arguments to change_state() are forwarded to side effects."""
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                side_effects=[update_invoice])
        self.invoice.refresh_from_db()
        self.assertTrue(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state, is_available=False, customer_received=False)
        self.invoice.refresh_from_db()
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        self.assertFalse(state.is_locked())
class TransitionCallbacksTestCase(TestCase):
    """Tests for Transition.change_state() callback handling.

    Per the assertions here, callbacks run after the target state has been
    committed: a failing callback still leaves the state on the target
    (see test_failure_during_callbacks), and the lock is always released.
    """
    def setUp(self) -> None:
        self.invoice = Invoice.objects.create(status='draft')

    def test_one_callback(self):
        """A single callback runs after the state change."""
        transition = Transition('test', sources=[], target='cancelled', callbacks=[disable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_many_callbacks(self):
        """Callbacks run in order; the last write wins."""
        transition = Transition('test', sources=[], target='cancelled',
                                callbacks=[disable_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertTrue(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_failure_during_callbacks(self):
        """A failing callback does not roll back the committed state."""
        transition = Transition('test', sources=[], target='cancelled',
                                callbacks=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'cancelled')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_failure_during_callbacks_with_failed_state(self):
        # NOTE(review): unlike the rest of this class, this test passes
        # ``side_effects=`` rather than ``callbacks=`` and so duplicates
        # TransitionSideEffectsTestCase.test_failure_during_side_effect_with_failed_state.
        # Confirm whether ``callbacks=[...]`` was intended here.
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                side_effects=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_callbacks_with_parameters(self):
        """Keyword arguments to change_state() are forwarded to callbacks."""
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                callbacks=[update_invoice])
        self.invoice.refresh_from_db()
        self.assertTrue(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state, is_available=False, customer_received=False)
        self.invoice.refresh_from_db()
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        self.assertFalse(state.is_locked())
class TransitionFailureCallbacksTestCase(TestCase):
    """Tests for ``failure_callbacks``: handlers invoked when a side effect
    raises. The assertions establish that they run with the state moved to
    ``failed_state``, receive forwarded kwargs plus the raised exception
    under the ``exception`` keyword, and that the lock is released.
    """
    def setUp(self) -> None:
        self.invoice = Invoice.objects.create(status='draft')

    def test_one_callback(self):
        """A single failure callback runs when a side effect raises."""
        transition = Transition('test', sources=[], target='success', side_effects=[fail_invoice],
                                failure_callbacks=[disable_invoice], failed_state='failed')
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())

    def test_many_callback(self):
        """All failure callbacks run, in order."""
        transition = Transition('test', sources=[], target='success', side_effects=[fail_invoice],
                                failure_callbacks=[disable_invoice, receive_invoice], failed_state='failed')
        self.assertTrue(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        self.assertFalse(state.is_locked())

    def test_callbacks_with_parameters(self):
        """change_state() kwargs are forwarded to failure callbacks."""
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='success', failed_state='failed',
                                side_effects=[fail_invoice], failure_callbacks=[update_invoice])
        self.invoice.refresh_from_db()
        self.assertTrue(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state, is_available=False, customer_received=False)
        self.invoice.refresh_from_db()
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        self.assertFalse(state.is_locked())

    @patch('tests.test_transition.debug_action')
    def test_failure_callback_exception_passed(self, debug_mock):
        """The raised exception is passed to failure callbacks as a kwarg."""
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='success', failed_state='failed',
                                side_effects=[fail_invoice], failure_callbacks=[debug_action])
        self.invoice.refresh_from_db()
        state = State(self.invoice, 'status')
        transition.change_state(state, foo="bar")
        self.assertTrue(debug_mock.called)
        self.assertEqual(debug_mock.call_count, 1)
        call_args = debug_mock.call_args[0]
        call_kwargs = debug_mock.call_args[1]
        self.assertEqual(call_args, (self.invoice,))
        self.assertEqual(len(call_kwargs), 2)
        self.assertTrue(isinstance(call_kwargs['exception'], Exception))
        self.assertEqual(call_kwargs['foo'], 'bar')
| 45.507109 | 108 | 0.687669 | from unittest.mock import patch
from django.test import TestCase
from django_logic.state import State
from django_logic.transition import Transition
from tests.models import Invoice
def disable_invoice(invoice: Invoice, *args, **kwargs):
    """Transition handler: mark the invoice unavailable and persist it."""
    invoice.is_available = False
    invoice.save()
def update_invoice(invoice, is_available, customer_received, *args, **kwargs):
    """Transition handler: write both flags onto the invoice and persist it.

    Extra positional/keyword arguments forwarded by the transition machinery
    are accepted and ignored.
    """
    invoice.customer_received = customer_received
    invoice.is_available = is_available
    invoice.save()
def enable_invoice(invoice: Invoice, *args, **kwargs):
    """Transition handler: mark the invoice available and persist it."""
    invoice.is_available = True
    invoice.save()
def fail_invoice(invoice: Invoice, *args, **kwargs):
    """Handler that always raises, to simulate a failing side effect/callback."""
    raise Exception
def receive_invoice(invoice: Invoice, *args, **kwargs):
    """Transition handler: flag the invoice as received and persist it."""
    invoice.customer_received = True
    invoice.save()
def debug_action(*args, **kwargs):
    """No-op handler; exists only as a patch target for mock assertions."""
    return None
class TransitionSideEffectsTestCase(TestCase):
    """Tests for Transition.change_state() side-effect handling: side effects
    complete before the target state is committed; a failure leaves the state
    on the source (or ``failed_state``); and the lock is always released."""
    def setUp(self) -> None:
        self.invoice = Invoice.objects.create(status='draft')
    def test_one_side_effect(self):
        transition = Transition('test', sources=[], target='cancelled', side_effects=[disable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_many_side_effects(self):
        transition = Transition('test', sources=[], target='cancelled',
                                side_effects=[disable_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertTrue(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_failure_during_side_effect(self):
        transition = Transition('test', sources=[], target='cancelled',
                                side_effects=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'draft')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_failure_during_side_effect_with_failed_state(self):
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                side_effects=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_side_effect_with_parameters(self):
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                side_effects=[update_invoice])
        self.invoice.refresh_from_db()
        self.assertTrue(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state, is_available=False, customer_received=False)
        self.invoice.refresh_from_db()
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        self.assertFalse(state.is_locked())
class TransitionCallbacksTestCase(TestCase):
    """Tests for Transition.change_state() callback handling: callbacks run
    after the target state is committed, so a failing callback does not roll
    the state back; the lock is always released."""
    def setUp(self) -> None:
        self.invoice = Invoice.objects.create(status='draft')
    def test_one_callback(self):
        transition = Transition('test', sources=[], target='cancelled', callbacks=[disable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_many_callbacks(self):
        transition = Transition('test', sources=[], target='cancelled',
                                callbacks=[disable_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, transition.target)
        self.assertTrue(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_failure_during_callbacks(self):
        transition = Transition('test', sources=[], target='cancelled',
                                callbacks=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'cancelled')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_failure_during_callbacks_with_failed_state(self):
        # NOTE(review): this test passes ``side_effects=`` in a callbacks test
        # class -- likely a copy-paste slip; confirm whether ``callbacks=[...]``
        # was intended.
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                side_effects=[disable_invoice, fail_invoice, enable_invoice])
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_callbacks_with_parameters(self):
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
                                callbacks=[update_invoice])
        self.invoice.refresh_from_db()
        self.assertTrue(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state, is_available=False, customer_received=False)
        self.invoice.refresh_from_db()
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        self.assertFalse(state.is_locked())
class TransitionFailureCallbacksTestCase(TestCase):
    """Tests for ``failure_callbacks``: invoked when a side effect raises,
    with the state moved to ``failed_state``, forwarded kwargs, and the raised
    exception available under the ``exception`` keyword."""
    def setUp(self) -> None:
        self.invoice = Invoice.objects.create(status='draft')
    def test_one_callback(self):
        transition = Transition('test', sources=[], target='success', side_effects=[fail_invoice],
                                failure_callbacks=[disable_invoice], failed_state='failed')
        self.assertTrue(self.invoice.is_available)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(state.is_locked())
    def test_many_callback(self):
        transition = Transition('test', sources=[], target='success', side_effects=[fail_invoice],
                                failure_callbacks=[disable_invoice, receive_invoice], failed_state='failed')
        self.assertTrue(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state)
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        self.assertFalse(state.is_locked())
    def test_callbacks_with_parameters(self):
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='success', failed_state='failed',
                                side_effects=[fail_invoice], failure_callbacks=[update_invoice])
        self.invoice.refresh_from_db()
        self.assertTrue(self.invoice.is_available)
        self.assertTrue(self.invoice.customer_received)
        state = State(self.invoice, 'status')
        transition.change_state(state, is_available=False, customer_received=False)
        self.invoice.refresh_from_db()
        self.assertEqual(self.invoice.status, 'failed')
        self.assertFalse(self.invoice.is_available)
        self.assertFalse(self.invoice.customer_received)
        self.assertFalse(state.is_locked())
    @patch('tests.test_transition.debug_action')
    def test_failure_callback_exception_passed(self, debug_mock):
        update_invoice(self.invoice, is_available=True, customer_received=True)
        transition = Transition('test', sources=[], target='success', failed_state='failed',
                                side_effects=[fail_invoice], failure_callbacks=[debug_action])
        self.invoice.refresh_from_db()
        state = State(self.invoice, 'status')
        transition.change_state(state, foo="bar")
        self.assertTrue(debug_mock.called)
        self.assertEqual(debug_mock.call_count, 1)
        call_args = debug_mock.call_args[0]
        call_kwargs = debug_mock.call_args[1]
        self.assertEqual(call_args, (self.invoice,))
        self.assertEqual(len(call_kwargs), 2)
        self.assertTrue(isinstance(call_kwargs['exception'], Exception))
        self.assertEqual(call_kwargs['foo'], 'bar')
| true | true |
79018a4edf7335e2c6b90bf59e8f83a975bfb1c9 | 1,625 | py | Python | Head/typer.py | D3crypT0r/D3crypt | 1e8b0a61e604442590d72c7df05921384584968e | [
"MIT"
] | null | null | null | Head/typer.py | D3crypT0r/D3crypt | 1e8b0a61e604442590d72c7df05921384584968e | [
"MIT"
] | null | null | null | Head/typer.py | D3crypT0r/D3crypt | 1e8b0a61e604442590d72c7df05921384584968e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import tty
import termios
from Head.d3crypt import ghost
class typer:
    """Reads raw keystrokes locally and forwards them to a connected device
    through a ghost session."""

    def __init__(self):
        # BUG FIX: the original assigned ``self.d3crypt = d3crypt()`` --
        # ``d3crypt`` is never imported (the module imports ``ghost`` from
        # Head.d3crypt), so construction raised NameError, and ``send_char``
        # reads ``self.ghost`` which was never set.
        self.ghost = ghost()

    def get_char(self):
        """Read one character from stdin in raw mode (no echo, no buffering),
        restoring the terminal settings afterwards."""
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            return sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old)

    def send_char(self, char):
        """Inject *char* as text input on the remote device via the ghost
        session's shell command."""
        self.ghost.send_command("shell", "input text " + char, False, False)
| 33.854167 | 80 | 0.720615 |
import sys
import tty
import termios
from Head.d3crypt import ghost
class typer:
    """Reads raw keystrokes locally and forwards them to a connected device
    through a ghost session."""

    def __init__(self):
        # BUG FIX: the original assigned ``self.d3crypt = d3crypt()`` --
        # ``d3crypt`` is never imported (the module imports ``ghost`` from
        # Head.d3crypt), so construction raised NameError, and ``send_char``
        # reads ``self.ghost`` which was never set.
        self.ghost = ghost()

    def get_char(self):
        """Read one character from stdin in raw mode (no echo, no buffering),
        restoring the terminal settings afterwards."""
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            return sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old)

    def send_char(self, char):
        """Inject *char* as text input on the remote device via the ghost
        session's shell command."""
        self.ghost.send_command("shell", "input text " + char, False, False)
| true | true |
79018a7b4549f9cc20a13df03126349d8fd41ee5 | 5,524 | py | Python | api/dailymed/tests/test_api.py | coderxio/dailymed-api | 90fe5f8b40a854ff7543ec9b8c4fc0232fdd0dfc | [
"MIT"
] | 10 | 2020-09-23T14:13:35.000Z | 2022-03-01T17:39:23.000Z | api/dailymed/tests/test_api.py | coderxio/dailymed-api | 90fe5f8b40a854ff7543ec9b8c4fc0232fdd0dfc | [
"MIT"
] | 30 | 2020-09-04T14:43:35.000Z | 2021-01-24T01:17:08.000Z | api/dailymed/tests/test_api.py | coderxio/dailymed-api | 90fe5f8b40a854ff7543ec9b8c4fc0232fdd0dfc | [
"MIT"
] | 3 | 2020-12-22T01:49:32.000Z | 2022-02-10T01:56:05.000Z | from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from dailymed.models import Set, Spl, InactiveIngredient
from dailymed.serializers import SplSerializer
import json
from pathlib import Path
# DRF router route names resolved to concrete URLs once at import time.
SPL_URL = reverse('spl-list')
PRODUCT_URL = reverse('product-list')
PACKAGE_URL = reverse('package-list')
class PublicApiTest(TestCase):
    """Test public daily med API"""

    def setUp(self):
        self.client = APIClient()
        """Creates sample data for database"""
        cwd = Path(__file__).parent.absolute()
        with open(f'{cwd}/test.json', 'r') as f:
            default = json.load(f)
        for data in default['results']:
            set_id = data.pop('set_id')
            products_data = data.pop('products')
            set_obj = Set.objects.create(id=set_id)
            spl_obj = set_obj.spls.create(**data)
            for product_data in products_data:
                # NOTE(review): 'name' is popped and discarded, so products are
                # created without it -- test_retrieve_spls_filter_by_drug_name
                # below then compares an empty queryset against an empty result.
                # Confirm whether the name should be kept.
                product_data.pop('name')
                packages_data = product_data.pop('packages')
                if 'inactive_ingredients' in product_data:
                    inactive_ingredients_data = product_data\
                        .pop('inactive_ingredients')
                    inactive_ingredients_list = []
                    # get-or-create by ingredient payload; the broad
                    # ``except Exception`` stands in for DoesNotExist here.
                    for inactive_ingredient_data in inactive_ingredients_data:
                        try:
                            ingredient = InactiveIngredient.objects.get(
                                **inactive_ingredient_data
                            )
                            inactive_ingredients_list.append(ingredient)
                        except Exception:
                            ingredient = InactiveIngredient.objects.create(
                                **inactive_ingredient_data
                            )
                            inactive_ingredients_list.append(ingredient)
                product_obj = spl_obj.products.create(**product_data)
                product_obj.inactive_ingredients\
                    .add(*inactive_ingredients_list)
                for package_data in packages_data:
                    product_obj.packages.create(**package_data)

    def test_retrieve_spls(self):
        """Test retrieving spls"""
        res = self.client.get(
            SPL_URL,
            format='json'
        )
        serializer = SplSerializer(Spl.objects.filter(), many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_set(self):
        """Test retrieving a spl by set filter"""
        set_id = Set.objects.first()
        res = self.client.get(
            SPL_URL,
            {'set_id': set_id.id},
            format='json')
        serializer = SplSerializer(
            Spl.objects.filter(set__id=set_id.id), many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_inactive_ing(self):
        """Test retrieving a spl by inactive ingredient filter"""
        inactive_ing = 'alcohol'
        res = self.client.get(
            SPL_URL,
            {'inactive_ingredient_name': inactive_ing},
            format='json')
        serializer = SplSerializer(
            Spl.objects.filter(
                products__inactive_ingredients__name__icontains=inactive_ing)
            .distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_schedule(self):
        """Test retrieving spls by schedule filter"""
        schedule = 'CIV'
        res = self.client.get(
            SPL_URL,
            {'schedule': schedule},
            format='json')
        serializer = SplSerializer(Spl.objects.filter(
            products__schedule=schedule).distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_drug_name(self):
        """Test retrieving spls by drug name filter"""
        name = 'Ciprofloxacin'
        res = self.client.get(
            SPL_URL,
            {'product_name': name},
            format='json')
        serializer = SplSerializer(Spl.objects.filter(
            products__name=name).distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])

    def test_retrieve_spls_filter_by_complex(self):
        """Test retrieving spls filtered by set & inactive ingredient"""
        set_id = 'b88efb93-f1d1-4606-a669-6896f432a27f'
        inactive_ing = 'alcohol'
        res = self.client.get(
            SPL_URL,
            {'set_id': set_id,
             'inactive_ingredient_name': inactive_ing},
            format='json'
        )
        serializer = SplSerializer(
            Spl.objects.filter(
                products__inactive_ingredients__name__icontains=inactive_ing,
                set__id=set_id)
            .distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['results']), 1)
        self.assertEqual(serializer.data, res.data['results'])
| 33.478788 | 78 | 0.589971 | from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from dailymed.models import Set, Spl, InactiveIngredient
from dailymed.serializers import SplSerializer
import json
from pathlib import Path
# DRF router route names resolved to concrete URLs once at import time.
SPL_URL = reverse('spl-list')
PRODUCT_URL = reverse('product-list')
PACKAGE_URL = reverse('package-list')
class PublicApiTest(TestCase):
    """Public DailyMed API tests: loads fixture data from test.json in setUp,
    then exercises the spl list endpoint and its query-parameter filters."""
    def setUp(self):
        self.client = APIClient()
        cwd = Path(__file__).parent.absolute()
        with open(f'{cwd}/test.json', 'r') as f:
            default = json.load(f)
        for data in default['results']:
            set_id = data.pop('set_id')
            products_data = data.pop('products')
            set_obj = Set.objects.create(id=set_id)
            spl_obj = set_obj.spls.create(**data)
            for product_data in products_data:
                # NOTE(review): 'name' is popped and discarded, which makes the
                # drug-name filter test below match zero rows -- confirm intent.
                product_data.pop('name')
                packages_data = product_data.pop('packages')
                if 'inactive_ingredients' in product_data:
                    inactive_ingredients_data = product_data\
                        .pop('inactive_ingredients')
                    inactive_ingredients_list = []
                    for inactive_ingredient_data in inactive_ingredients_data:
                        try:
                            ingredient = InactiveIngredient.objects.get(
                                **inactive_ingredient_data
                            )
                            inactive_ingredients_list.append(ingredient)
                        except Exception:
                            ingredient = InactiveIngredient.objects.create(
                                **inactive_ingredient_data
                            )
                            inactive_ingredients_list.append(ingredient)
                product_obj = spl_obj.products.create(**product_data)
                product_obj.inactive_ingredients\
                    .add(*inactive_ingredients_list)
                for package_data in packages_data:
                    product_obj.packages.create(**package_data)
    def test_retrieve_spls(self):
        res = self.client.get(
            SPL_URL,
            format='json'
        )
        serializer = SplSerializer(Spl.objects.filter(), many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])
    def test_retrieve_spls_filter_by_set(self):
        set_id = Set.objects.first()
        res = self.client.get(
            SPL_URL,
            {'set_id': set_id.id},
            format='json')
        serializer = SplSerializer(
            Spl.objects.filter(set__id=set_id.id), many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])
    def test_retrieve_spls_filter_by_inactive_ing(self):
        inactive_ing = 'alcohol'
        res = self.client.get(
            SPL_URL,
            {'inactive_ingredient_name': inactive_ing},
            format='json')
        serializer = SplSerializer(
            Spl.objects.filter(
                products__inactive_ingredients__name__icontains=inactive_ing)
            .distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])
    def test_retrieve_spls_filter_by_schedule(self):
        schedule = 'CIV'
        res = self.client.get(
            SPL_URL,
            {'schedule': schedule},
            format='json')
        serializer = SplSerializer(Spl.objects.filter(
            products__schedule=schedule).distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])
    def test_retrieve_spls_filter_by_drug_name(self):
        name = 'Ciprofloxacin'
        res = self.client.get(
            SPL_URL,
            {'product_name': name},
            format='json')
        serializer = SplSerializer(Spl.objects.filter(
            products__name=name).distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(serializer.data, res.data['results'])
    def test_retrieve_spls_filter_by_complex(self):
        set_id = 'b88efb93-f1d1-4606-a669-6896f432a27f'
        inactive_ing = 'alcohol'
        res = self.client.get(
            SPL_URL,
            {'set_id': set_id,
             'inactive_ingredient_name': inactive_ing},
            format='json'
        )
        serializer = SplSerializer(
            Spl.objects.filter(
                products__inactive_ingredients__name__icontains=inactive_ing,
                set__id=set_id)
            .distinct(),
            many=True
        )
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data['results']), 1)
        self.assertEqual(serializer.data, res.data['results'])
| true | true |
79018a9ee37928d5ce7164c02c528c9f2c624d84 | 16,071 | py | Python | botorch/acquisition/monte_carlo.py | BradyBromley/botorch | ea7f8fa2cead9c581309437a1f2f59ed070cb59e | [
"MIT"
] | 1 | 2020-07-21T21:25:16.000Z | 2020-07-21T21:25:16.000Z | botorch/acquisition/monte_carlo.py | zpao/botorch | 270599207f5b9bf8c66e1197ad2632bb69c3d3b9 | [
"MIT"
] | null | null | null | botorch/acquisition/monte_carlo.py | zpao/botorch | 270599207f5b9bf8c66e1197ad2632bb69c3d3b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
    r"""Abstract base class for Monte-Carlo based batch acquisition functions."""

    def __init__(
        self,
        model: Model,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        r"""Initialize an MC acquisition function.

        Args:
            model: A fitted model.
            sampler: The sampler used to draw base samples. When omitted, a
                `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`
                is constructed.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated. When omitted, an `IdentityMCObjective` is used.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but not yet evaluated.

        Raises:
            UnsupportedError: If `objective` is given but is not an
                `MCAcquisitionObjective`.
        """
        super().__init__(model=model)
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
        # Reject unsupported objectives up front, before registering modules.
        if objective is not None and not isinstance(objective, MCAcquisitionObjective):
            raise UnsupportedError(
                "Only objectives of type MCAcquisitionObjective are supported for "
                "MC acquisition functions."
            )
        self.add_module("sampler", sampler)
        self.add_module(
            "objective", IdentityMCObjective() if objective is None else objective
        )
        self.set_X_pending(X_pending)

    @abstractmethod
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate the acquisition function on a `(b) x q x d` batch `X` of
        `(b)` t-batches with `q` `d`-dim design points each, returning a
        one-dimensional `(b)`-element Tensor. Implementations should account
        for any points registered via `set_X_pending`.
        """
        pass  # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
    r"""MC-based batch Expected Improvement.

    qEI is approximated via the reparameterization trick: draw joint posterior
    samples over the q candidate points, compute each sample's improvement over
    the incumbent `best_f`, maximize over the q points, and average over the
    MC samples:

    `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> best_f = train_Y.max()[0]
        >>> sampler = SobolQMCNormalSampler(1000)
        >>> qEI = qExpectedImprovement(model, best_f, sampler)
        >>> qei = qEI(test_X)
    """

    def __init__(
        self,
        model: Model,
        best_f: Union[float, Tensor],
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        r"""q-Expected Improvement.

        Args:
            model: A fitted model.
            best_f: The best objective value observed so far (assumed noiseless).
            sampler: The sampler used to draw base samples; the base class
                supplies a 512-sample Sobol QMC sampler when omitted.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated; defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of pending design points,
                concatenated into `X` on each forward call. Copied and set to
                have no gradient.
        """
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        # Normalize best_f to a tensor buffer so it moves with the module.
        best_f = best_f if torch.is_tensor(best_f) else torch.tensor(float(best_f))
        self.register_buffer("best_f", best_f)

    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qExpectedImprovement on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            A `(b)`-dim Tensor of Expected Improvement values at the given
            design points `X`.
        """
        mc_samples = self.sampler(self.model.posterior(X))
        # Per-sample improvement over the incumbent, floored at zero.
        improvement = (self.objective(mc_samples) - self.best_f).clamp_min(0)
        # Best of the q points per sample, averaged over MC samples.
        return improvement.max(dim=-1)[0].mean(dim=0)
class qNoisyExpectedImprovement(MCAcquisitionFunction):
    r"""MC-based batch Noisy Expected Improvement.

    Unlike qEI, no noiseless incumbent `best_f` is assumed. Samples are drawn
    from the joint posterior over the `q` test points *and* the previously
    observed baseline points; each sample's improvement of the best test point
    over the best baseline point is averaged:

    `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
    `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> sampler = SobolQMCNormalSampler(1000)
        >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
        >>> qnei = qNEI(test_X)
    """

    def __init__(
        self,
        model: Model,
        X_baseline: Tensor,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
        prune_baseline: bool = False,
    ) -> None:
        r"""q-Noisy Expected Improvement.

        Args:
            model: A fitted model.
            X_baseline: A `r x d`-dim Tensor of `r` already-observed design
                points, considered as the potential best design points.
            sampler: The sampler used to draw base samples; the base class
                supplies a 512-sample Sobol QMC sampler when omitted.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated; defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of pending design points,
                concatenated into `X` on each forward call. Copied and set to
                have no gradient.
            prune_baseline: If True, drop baseline points that are highly
                unlikely to be the best point — generally recommended for
                performance. For custom pruning parameters, call
                `botorch.acquisition.utils.prune_inferior_points` on
                `X_baseline` yourself before constructing this object.
        """
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        baseline = (
            prune_inferior_points(model=model, X=X_baseline, objective=objective)
            if prune_baseline
            else X_baseline
        )
        self.register_buffer("X_baseline", baseline)

    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            A `(b)`-dim Tensor of Noisy Expected Improvement values at the
            given design points `X`.
        """
        q = X.shape[-2]
        # Evaluate candidates and baseline jointly so their sample paths are
        # correlated.
        # TODO (T41248036): Implement more efficient way to compute posterior
        # over both training and test points in GPyTorch
        joint = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
        obj = self.objective(self.sampler(self.model.posterior(joint)))
        best_candidate = obj[:, :, :q].max(dim=-1)[0]
        best_baseline = obj[:, :, q:].max(dim=-1)[0]
        return (best_candidate - best_baseline).clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
    r"""MC-based batch Probability of Improvement.

    Estimates the probability that the best of the q points improves over the
    incumbent `best_f` by sampling from the joint posterior. The indicator
    function inside the expectation is replaced by a sigmoid with temperature
    `tau` so that the estimate remains differentiable:

    `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> best_f = train_Y.max()[0]
        >>> sampler = SobolQMCNormalSampler(1000)
        >>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
        >>> qpi = qPI(test_X)
    """

    def __init__(
        self,
        model: Model,
        best_f: Union[float, Tensor],
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
        tau: float = 1e-3,
    ) -> None:
        r"""q-Probability of Improvement.

        Args:
            model: A fitted model.
            best_f: The best objective value observed so far (assumed noiseless).
            sampler: The sampler used to draw base samples; the base class
                supplies a 512-sample Sobol QMC sampler when omitted.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated; defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of pending design points,
                concatenated into `X` on each forward call. Copied and set to
                have no gradient.
            tau: Temperature of the sigmoid approximation of the step function.
                Smaller values approximate the step more closely but increase
                the variance of gradient estimates.
        """
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        # Store both scalars as tensor buffers so they track device/dtype.
        best_f = best_f if torch.is_tensor(best_f) else torch.tensor(float(best_f))
        self.register_buffer("best_f", best_f)
        tau = tau if torch.is_tensor(tau) else torch.tensor(float(tau))
        self.register_buffer("tau", tau)

    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            A `(b)`-dim Tensor of Probability of Improvement values at the
            given design points `X`.
        """
        obj = self.objective(self.sampler(self.model.posterior(X)))
        best_per_sample = obj.max(dim=-1)[0]
        # Smoothed indicator of improvement, averaged over MC samples.
        return torch.sigmoid((best_per_sample - self.best_f) / self.tau).mean(dim=0)
class qSimpleRegret(MCAcquisitionFunction):
    r"""MC-based batch Simple Regret.

    Draws joint posterior samples over the q-batch and averages the per-sample
    maximum:

    `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> sampler = SobolQMCNormalSampler(1000)
        >>> qSR = qSimpleRegret(model, sampler)
        >>> qsr = qSR(test_X)
    """

    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qSimpleRegret on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            A `(b)`-dim Tensor of Simple Regret values at the given design
            points `X`.
        """
        obj = self.objective(self.sampler(self.model.posterior(X)))
        return obj.max(dim=-1)[0].mean(dim=0)
class qUpperConfidenceBound(MCAcquisitionFunction):
    r"""MC-based batch Upper Confidence Bound.

    Extends UCB to q > 1 via the reparameterization of Appendix A in
    [Wilson2017reparam]:

    `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
    and `f(X)` has distribution `N(mu, Sigma)`.

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> sampler = SobolQMCNormalSampler(1000)
        >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
        >>> qucb = qUCB(test_X)
    """

    def __init__(
        self,
        model: Model,
        beta: float,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        r"""q-Upper Confidence Bound.

        Args:
            model: A fitted model.
            beta: Controls the tradeoff between mean and standard deviation in
                the UCB criterion.
            sampler: The sampler used to draw base samples; the base class
                supplies a 512-sample Sobol QMC sampler when omitted.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated; defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of pending design points,
                concatenated into `X` on each forward call. Copied and set to
                have no gradient.
        """
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        # Rescaling from the reparameterization: beta' = sqrt(beta * pi / 2).
        self.beta_prime = math.sqrt(beta * math.pi / 2)

    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qUpperConfidenceBound on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            A `(b)`-dim Tensor of Upper Confidence Bound values at the given
            design points `X`.
        """
        obj = self.objective(self.sampler(self.model.posterior(X)))
        mean = obj.mean(dim=0)
        ucb_samples = mean + self.beta_prime * (obj - mean).abs()
        return ucb_samples.max(dim=-1)[0].mean(dim=0)
| 39.197561 | 86 | 0.62759 |
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
    """Abstract base class for Monte-Carlo based batch acquisition functions."""
    def __init__(
        self,
        model: Model,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        """Register the sampler (512-sample Sobol QMC by default), the
        objective (identity by default) and any pending points.

        Raises:
            UnsupportedError: If `objective` is not an MCAcquisitionObjective.
        """
        super().__init__(model=model)
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
        self.add_module("sampler", sampler)
        if objective is None:
            objective = IdentityMCObjective()
        elif not isinstance(objective, MCAcquisitionObjective):
            raise UnsupportedError(
                "Only objectives of type MCAcquisitionObjective are supported for "
                "MC acquisition functions."
            )
        self.add_module("objective", objective)
        self.set_X_pending(X_pending)
    @abstractmethod
    def forward(self, X: Tensor) -> Tensor:
        """Evaluate the acquisition on a `(b) x q x d` batch; subclasses
        return a `(b)`-dim Tensor and should honor `set_X_pending`."""
        pass
class qExpectedImprovement(MCAcquisitionFunction):
    """MC-based batch Expected Improvement:
    `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X)`."""
    def __init__(
        self,
        model: Model,
        best_f: Union[float, Tensor],
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        """Store `best_f` (the incumbent value) as a tensor buffer."""
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        if not torch.is_tensor(best_f):
            best_f = torch.tensor(float(best_f))
        self.register_buffer("best_f", best_f)
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        """Sample the joint posterior, clamp per-sample improvement over
        `best_f` at zero, maximize over q, and average over samples."""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)
        obj = self.objective(samples)
        obj = (obj - self.best_f).clamp_min(0)
        q_ei = obj.max(dim=-1)[0].mean(dim=0)
        return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
    """MC-based batch Noisy Expected Improvement: improvement of the best
    candidate over the best baseline point, computed on joint posterior
    samples (no noiseless `best_f` required)."""
    def __init__(
        self,
        model: Model,
        X_baseline: Tensor,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
        prune_baseline: bool = False,
    ) -> None:
        """Store the observed baseline points; optionally prune those that
        are unlikely to ever be the best point."""
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        if prune_baseline:
            X_baseline = prune_inferior_points(
                model=model, X=X_baseline, objective=objective
            )
        self.register_buffer("X_baseline", X_baseline)
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        """Evaluate candidates and baseline jointly (correlated sample
        paths), then average the clamped best-candidate-minus-best-baseline
        difference over MC samples."""
        q = X.shape[-2]
        X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
        posterior = self.model.posterior(X_full)
        samples = self.sampler(posterior)
        obj = self.objective(samples)
        diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
        return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
    """MC-based batch Probability of Improvement with a sigmoid (temperature
    `tau`) replacing the indicator so the estimate stays differentiable."""
    def __init__(
        self,
        model: Model,
        best_f: Union[float, Tensor],
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
        tau: float = 1e-3,
    ) -> None:
        """Store `best_f` and the sigmoid temperature `tau` as buffers."""
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        if not torch.is_tensor(best_f):
            best_f = torch.tensor(float(best_f))
        self.register_buffer("best_f", best_f)
        if not torch.is_tensor(tau):
            tau = torch.tensor(float(tau))
        self.register_buffer("tau", tau)
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        """Average the smoothed improvement indicator over MC samples."""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)
        obj = self.objective(samples)
        max_obj = obj.max(dim=-1)[0]
        val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
        return val
class qSimpleRegret(MCAcquisitionFunction):
    """MC-based batch Simple Regret: `qSR(X) = E(max Y), Y ~ f(X)`."""
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        """Average the per-sample maximum of the q-batch over MC samples."""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)
        obj = self.objective(samples)
        val = obj.max(dim=-1)[0].mean(dim=0)
        return val
class qUpperConfidenceBound(MCAcquisitionFunction):
    """MC-based batch Upper Confidence Bound via the reparameterization
    `qUCB = E(max(mu + |Y_tilde - mu|))` with `Y_tilde ~ N(mu, beta pi/2 Sigma)`."""
    def __init__(
        self,
        model: Model,
        beta: float,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        """Precompute the rescaled exploration weight beta' = sqrt(beta*pi/2)."""
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        self.beta_prime = math.sqrt(beta * math.pi / 2)
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        """Average the per-sample maximum UCB value over MC samples."""
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)
        obj = self.objective(samples)
        mean = obj.mean(dim=0)
        ucb_samples = mean + self.beta_prime * (obj - mean).abs()
        return ucb_samples.max(dim=-1)[0].mean(dim=0)
| true | true |
79018b8a2227af6bfbfbee3c81e94b7f005b0dbb | 923 | py | Python | ml/rl/test/models/test_sequence_model.py | joshrose/Horizon | a2eb407b31a16560ae78aa6751eb83672a122a7e | [
"BSD-3-Clause"
] | 2 | 2021-01-11T18:16:32.000Z | 2021-11-30T09:34:58.000Z | ml/rl/test/models/test_sequence_model.py | joshrose/Horizon | a2eb407b31a16560ae78aa6751eb83672a122a7e | [
"BSD-3-Clause"
] | null | null | null | ml/rl/test/models/test_sequence_model.py | joshrose/Horizon | a2eb407b31a16560ae78aa6751eb83672a122a7e | [
"BSD-3-Clause"
] | 2 | 2021-01-06T01:06:50.000Z | 2021-06-24T01:12:52.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
from ml.rl.models.example_sequence_model import ExampleSequenceModel
from ml.rl.test.models.test_utils import check_save_load
logger = logging.getLogger(__name__)
class TestExampleSequenceModel(unittest.TestCase):
    """Unit tests for ExampleSequenceModel."""

    def test_basic(self):
        """A forward pass on the model's own input prototype yields a value
        of shape (1, 1)."""
        model = ExampleSequenceModel(8)
        prototype = model.input_prototype()
        result = model(prototype)
        self.assertEqual((1, 1), result.value.shape)

    def test_save_load(self):
        """The exported model round-trips with the expected parameter, input
        and output counts."""
        model = ExampleSequenceModel(8)
        # ONNX sure exports a lot of parameters...
        check_save_load(self, model, 133, 3, 1)
| 30.766667 | 87 | 0.713976 |
import logging
import unittest
from ml.rl.models.example_sequence_model import ExampleSequenceModel
from ml.rl.test.models.test_utils import check_save_load
logger = logging.getLogger(__name__)
class TestExampleSequenceModel(unittest.TestCase):
    """Unit tests for ExampleSequenceModel."""
    def test_basic(self):
        """A forward pass on the input prototype yields a (1, 1) value."""
        state_dim = 8
        model = ExampleSequenceModel(state_dim)
        input = model.input_prototype()
        output = model(input)
        self.assertEqual((1, 1), output.value.shape)
    def test_save_load(self):
        """The model round-trips through save/load with the expected counts."""
        state_dim = 8
        model = ExampleSequenceModel(state_dim)
        # Fixture values — presumably tied to the current architecture of the
        # exported model; TODO confirm if the model changes.
        expected_num_params, expected_num_inputs, expected_num_outputs = 133, 3, 1
        check_save_load(
            self, model, expected_num_params, expected_num_inputs, expected_num_outputs
        )
| true | true |
79018bc4d2b547771235d267e6304eb5b3a7f9bc | 5,093 | py | Python | RA_project/code_python/image_score_posi.py | erialc-cal/NLP-FOMC | 2a8ad113a87e79f5d7beefa6cfd4653f445c92d5 | [
"MIT"
] | null | null | null | RA_project/code_python/image_score_posi.py | erialc-cal/NLP-FOMC | 2a8ad113a87e79f5d7beefa6cfd4653f445c92d5 | [
"MIT"
] | null | null | null | RA_project/code_python/image_score_posi.py | erialc-cal/NLP-FOMC | 2a8ad113a87e79f5d7beefa6cfd4653f445c92d5 | [
"MIT"
] | null | null | null | import pandas as pd
import datetime
import matplotlib.pyplot as plt
import ast
from gensim.parsing.preprocessing import STOPWORDS
from nltk.corpus import stopwords
from collections import defaultdict
from nltk.stem import WordNetLemmatizer
import datetime
stop_words = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
"""
Dates and dico
"""
df_sentiment = pd.read_excel('/Users/etiennelenaour/Desktop/Stage/vocab_sentiment.xlsx')
project_directory = '/Users/etiennelenaour/Desktop/Stage/'
l_month = ['January','February','March','April','May','June','July','August','September','October','November','December']
l_dates = list()

# Read the FOMC meeting-dates CSV; the first line is a header.
with open ('/Users/etiennelenaour/Desktop/Stage/csv_files/dates_fomc.csv', 'r') as doc :
    head = doc.readline()
    dates = doc.readlines()

dates_to_chg = []
for line in dates :
    # NOTE(review): the code splits on ',' here but on ';' just below — the
    # file apparently mixes both delimiters; confirm against dates_fomc.csv.
    if line.split(',')[1] == ' Y' :
        dates_to_chg += [line.split(';')[0]]
        # Build an integer date of the form yyyymmdd.
        date = 0
        m = 1
        for month in l_month :
            # Match the 3-letter month abbreviation to its month number.
            if month[:3] == line.split(';')[0].split('/')[0] :
                date += 100 * m
            m += 1
        date += int(line.split(',')[0].split('/')[2])*10000
        date += int(line.split(',')[0].split('/')[1])
        l_dates.append(date)

# Only meetings after index 101 are used downstream.
l_dates_final = l_dates[101:]

# 2012-2014 meetings appended manually.
# NOTE(review): 20130130 and 20140129 each appear twice — confirm intended.
date_to_append = [20120125, 20120425, 20120620, 20120801, 20120913, 20121024, 20121212, 20130130,
                 20130130, 20130320, 20130501, 20130619, 20130918, 20131030, 20131218, 20140129,
                 20140129, 20140430, 20140618, 20140917, 20141029, 20141217]

for date in date_to_append:
    l_dates_final.append(date)
"""
cleaning functions
"""
def clean_dico_new_line(dico):
    """Lowercase every token and strip trailing newline markers.

    Args:
        dico: mapping speaker name -> list of string tokens.

    Returns:
        A new defaultdict with the same keys and the cleaned token lists.
    """
    new_dico = defaultdict(list)
    for key, tokens in dico.items():
        cleaned = []
        for token in tokens:
            # Remove literal "\n" escape sequences and real newlines from the
            # end of the token. The previous str.rstrip("\\n") treated its
            # argument as a *character set* and therefore also stripped
            # ordinary trailing 'n' characters ("inflation" -> "inflatio").
            while token.endswith("\\n"):
                token = token[:-2]
            cleaned.append(token.rstrip("\n").lower())
        new_dico[key] = cleaned
    return new_dico
def remove_stop_word(dico):
    """Return a new defaultdict mapping each speaker to their tokens with
    both gensim STOPWORDS and nltk English stopwords filtered out."""
    filtered = defaultdict(list)
    for speaker, tokens in dico.items():
        filtered[speaker] = [
            tok for tok in tokens if tok not in STOPWORDS and tok not in stop_words
        ]
    return filtered
def remove_nan_from_list(liste):
    """Return only the string entries of *liste*, dropping everything else.

    Presumably used to drop the NaN floats that pad the shorter column of the
    sentiment-lexicon spreadsheet — confirm against vocab_sentiment.xlsx.
    """
    # isinstance is the idiomatic type test (type(x) == str rejects subclasses).
    return [ele for ele in liste if isinstance(ele, str)]
"""
Score functions
"""
negative_word_list = [ele.lower() for ele in df_sentiment.Negative.tolist()]
positive_word_list = [ele.lower() for ele in remove_nan_from_list(df_sentiment.Positive.tolist())]
def compute_positivity(dico):
    """Compute the positivity score of each speaker's statement.

    Args:
        dico: mapping speaker name -> list of cleaned tokens.

    Returns:
        A defaultdict mapping speaker name -> (pos - neg) / (pos + neg).
        Speakers with fewer than 30 hits in either word list are skipped
        (too few sentiment tokens for a stable score).
    """
    # Sets make the per-token membership test O(1) instead of O(len(list)).
    neg_words = set(negative_word_list)
    pos_words = set(positive_word_list)
    dico_score = defaultdict(list)
    for name, tokens in dico.items():
        neg_score = 0
        pos_score = 0
        for tok in tokens:
            # A negative match takes precedence over a positive one (same
            # if/elif ordering as before for tokens present in both lists).
            if tok in neg_words:
                neg_score += 1
            elif tok in pos_words:
                pos_score += 1
        if neg_score < 30 or pos_score < 30:
            continue
        dico_score[name] = (pos_score - neg_score) / (pos_score + neg_score)
    return dico_score
def compute_mean_positivity(dico):
    """Compute the aggregate positivity score over all speakers combined.

    Returns (pos - neg) / (pos + neg) counted over every token of every
    speaker. NOTE(review): raises ZeroDivisionError when no token matches
    either word list — unchanged from the original behavior.
    """
    # Sets make the per-token membership test O(1) instead of O(len(list)).
    neg_words = set(negative_word_list)
    pos_words = set(positive_word_list)
    neg_score = 0
    pos_score = 0
    for tokens in dico.values():
        for tok in tokens:
            # Negative match takes precedence, mirroring the if/elif order.
            if tok in neg_words:
                neg_score += 1
            elif tok in pos_words:
                pos_score += 1
    score = (pos_score - neg_score) / (pos_score + neg_score)
    return score
"""
Date function
"""
def from_int_dates(integ):
    """Convert an integer date of the form yyyymmdd (e.g. 20140129) into a
    `datetime.datetime` at midnight of that day."""
    # strptime parses the 8-digit string directly; no manual slicing and
    # character-by-character reassembly needed.
    return datetime.datetime.strptime(str(integ), "%Y%m%d")
"""
plot positivity
"""
def plot_positivity_persons(date, dico_score, score_moyen):
    """Save a bar chart of each speaker's positivity score for one meeting.

    Args:
        date: integer meeting date (yyyymmdd); printed on the figure and used
            in the output file name.
        dico_score: mapping speaker name -> positivity score.
        score_moyen: meeting-wide mean score, drawn as a horizontal line.

    Writes the figure to <project_directory>/image_score_posi/ and closes it;
    returns None.
    """
    list_score = list()
    list_names = list()
    for name, score in dico_score.items():
        list_score.append(score)
        list_names.append(name)
    plt.bar(list_names, list_score, color='r')
    plt.grid()
    plt.xticks(rotation=90)
    # Meeting date annotated in the top-left corner of the axes.
    plt.text(-1, 0, date, horizontalalignment='left', verticalalignment='top', fontweight='bold')
    # Horizontal reference line at the meeting-wide average score.
    plt.hlines(y=score_moyen, xmin = -1, xmax = len(list_names))
    plt.ylabel("Score de positivité")
    plt.title("Score de positivité des principaux speakers")
    plt.tight_layout()
    #plt.show()
    plt.savefig(project_directory + 'image_score_posi/' + 'score_posi_' + str(date) + '.png')
    plt.close()
    return None
"""
Main
"""
# For the last 50 meetings, load the per-speaker sentence dump, clean it,
# and save the positivity bar chart for that meeting.
for date in l_dates_final[-50:]:
    with open (project_directory+'sentences_by_names/'+str(date)+'meeting.txt', 'r') as doc:
        # The file's first line holds the repr of a {speaker: [tokens]} dict.
        content = doc.readlines()[0]
    dictionary = ast.literal_eval(content)
    #Cleaning
    dico_clean = remove_stop_word(clean_dico_new_line(dictionary))
    plot_positivity_persons(date, compute_positivity(dico_clean), compute_mean_positivity(dico_clean))
| 20.703252 | 121 | 0.650697 | import pandas as pd
import datetime
import matplotlib.pyplot as plt
import ast
from gensim.parsing.preprocessing import STOPWORDS
from nltk.corpus import stopwords
from collections import defaultdict
from nltk.stem import WordNetLemmatizer
import datetime
stop_words = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
df_sentiment = pd.read_excel('/Users/etiennelenaour/Desktop/Stage/vocab_sentiment.xlsx')
project_directory = '/Users/etiennelenaour/Desktop/Stage/'
l_month = ['January','February','March','April','May','June','July','August','September','October','November','December']
l_dates = list()

# Read the FOMC meeting-dates CSV; the first line is a header.
with open ('/Users/etiennelenaour/Desktop/Stage/csv_files/dates_fomc.csv', 'r') as doc :
    head = doc.readline()
    dates = doc.readlines()

dates_to_chg = []
for line in dates :
    # NOTE(review): ',' is used as separator here but ';' below — the file
    # apparently mixes delimiters; confirm against dates_fomc.csv.
    if line.split(',')[1] == ' Y' :
        dates_to_chg += [line.split(';')[0]]
        # Build an integer date of the form yyyymmdd.
        date = 0
        m = 1
        for month in l_month :
            if month[:3] == line.split(';')[0].split('/')[0] :
                date += 100 * m
            m += 1
        date += int(line.split(',')[0].split('/')[2])*10000
        date += int(line.split(',')[0].split('/')[1])
        l_dates.append(date)

# Only meetings after index 101 are used; 2012-2014 dates appended manually.
l_dates_final = l_dates[101:]
date_to_append = [20120125, 20120425, 20120620, 20120801, 20120913, 20121024, 20121212, 20130130,
                 20130130, 20130320, 20130501, 20130619, 20130918, 20131030, 20131218, 20140129,
                 20140129, 20140430, 20140618, 20140917, 20141029, 20141217]
for date in date_to_append:
    l_dates_final.append(date)
def clean_dico_new_line(dico):
    """Lowercase each token and strip trailing backslash/'n' characters.

    NOTE(review): rstrip("\\n") treats its argument as a *character set*
    (backslash and 'n'), so it also removes ordinary trailing 'n' characters
    (e.g. "inflation" -> "inflatio") — likely unintended; confirm.
    """
    new_dico = defaultdict(lambda: list())
    for keys, list_dico in dico.items():
        new_liste = [string.rstrip("\\n").lower() for string in list_dico]
        new_dico[keys] = new_liste
    return new_dico
def remove_stop_word(dico):
    """Drop gensim STOPWORDS and nltk English stopwords from each speaker's
    token list, returning a new defaultdict."""
    new_dico = defaultdict(lambda: list())
    for keys, list_dico in dico.items():
        final_list = list()
        for ele in list_dico:
            if (ele not in STOPWORDS) and (ele not in stop_words):
                final_list.append(ele)
        new_dico[keys] = final_list
    return new_dico
def remove_nan_from_list(liste):
    """Keep only the string entries of *liste* (presumably filtering NaN
    floats that pad the spreadsheet column — confirm)."""
    new_liste = list()
    for ele in liste:
        if type(ele) == str:
            new_liste.append(ele)
        else:
            pass
    return new_liste
negative_word_list = [ele.lower() for ele in df_sentiment.Negative.tolist()]
positive_word_list = [ele.lower() for ele in remove_nan_from_list(df_sentiment.Positive.tolist())]
def compute_positivity(dico):
    """Score each speaker as (pos - neg) / (pos + neg) over sentiment-word
    hits; speakers with fewer than 30 hits in either list are skipped."""
    dico_score = defaultdict(lambda: list())
    for name, liste in dico.items():
        neg_score = 0
        pos_score = 0
        for ele in liste:
            # Negative match takes precedence for tokens in both lists.
            if ele in negative_word_list:
                neg_score += 1
            elif ele in positive_word_list:
                pos_score += 1
            else:
                pass
        # Too few sentiment tokens -> score considered unstable, skip.
        if neg_score < 30 or pos_score < 30:
            pass
        else:
            score = (pos_score - neg_score) / (pos_score + neg_score)
            dico_score[name] = score
    return dico_score
def compute_mean_positivity(dico):
    """Aggregate (pos - neg) / (pos + neg) over every token of every speaker.

    NOTE(review): raises ZeroDivisionError when no token matches either list.
    """
    neg_score = 0
    pos_score = 0
    for liste in dico.values():
        for ele in liste:
            # Negative match takes precedence for tokens in both lists.
            if ele in negative_word_list:
                neg_score += 1
            elif ele in positive_word_list:
                pos_score += 1
            else:
                pass
    score = (pos_score - neg_score) / (pos_score + neg_score)
    return score
def from_int_dates(integ):
    """Convert a yyyymmdd integer (e.g. 20140129) into a datetime.datetime."""
    string = str(integ)
    # Rebuild "yyyy/mm/dd" from the digit positions, then parse it.
    new_string = string[0]+ string[1] + string[2] + string[3] + "/" + string[4] + string[5] + "/" + string[6] + string[7]
    return datetime.datetime.strptime(new_string, "%Y/%m/%d")
def plot_positivity_persons(date, dico_score, score_moyen):
    """Save a per-speaker positivity bar chart for one meeting to
    <project_directory>/image_score_posi/; returns None."""
    list_score = list()
    list_names = list()
    for name, score in dico_score.items():
        list_score.append(score)
        list_names.append(name)
    plt.bar(list_names, list_score, color='r')
    plt.grid()
    plt.xticks(rotation=90)
    # Meeting date annotated in the top-left corner of the axes.
    plt.text(-1, 0, date, horizontalalignment='left', verticalalignment='top', fontweight='bold')
    # Horizontal reference line at the meeting-wide average score.
    plt.hlines(y=score_moyen, xmin = -1, xmax = len(list_names))
    plt.ylabel("Score de positivité")
    plt.title("Score de positivité des principaux speakers")
    plt.tight_layout()
    plt.savefig(project_directory + 'image_score_posi/' + 'score_posi_' + str(date) + '.png')
    plt.close()
    return None
# For the last 50 meetings, load the per-speaker sentence dump, clean it,
# and save the positivity bar chart for that meeting.
for date in l_dates_final[-50:]:
    with open (project_directory+'sentences_by_names/'+str(date)+'meeting.txt', 'r') as doc:
        # The file's first line holds the repr of a {speaker: [tokens]} dict.
        content = doc.readlines()[0]
    dictionary = ast.literal_eval(content)
    dico_clean = remove_stop_word(clean_dico_new_line(dictionary))
    plot_positivity_persons(date, compute_positivity(dico_clean), compute_mean_positivity(dico_clean))
| true | true |
79018e109b0d2d3d27d91efe0ab7e0e7574f3780 | 8,692 | py | Python | docs/conf.py | SherazKhan/cortex | f0430d11cc81a64c78edda1a62513f6d739ab8e1 | [
"MIT"
] | null | null | null | docs/conf.py | SherazKhan/cortex | f0430d11cc81a64c78edda1a62513f6d739ab8e1 | [
"MIT"
] | null | null | null | docs/conf.py | SherazKhan/cortex | f0430d11cc81a64c78edda1a62513f6d739ab8e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
# ReadTheDocs does not run `sphinx-apidoc` before `sphinx-build`, so when the
# READTHEDOCS environment variable is set we invoke apidoc ourselves to
# generate the API stubs (see the issue linked in the header comment).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    import inspect
    from sphinx import apidoc

    # Absolute directory containing this conf.py, resolved at import time.
    __location__ = os.path.join(os.getcwd(), os.path.dirname(
        inspect.getfile(inspect.currentframe())))
    output_dir = os.path.join(__location__, "../docs/api")
    module_dir = os.path.join(__location__, "../cortex")
    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    # NOTE(review): passing the full argv list to `apidoc.main` this way
    # relies on an older Sphinx API — confirm against the pinned Sphinx.
    apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cortex'
copyright = u'2017, sherazkhan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: both placeholders are overridden further down when the installed
# cortex package exposes __version__.
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'alabaster' ships with Sphinx itself, so no extra theme package is required.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# When the cortex package is importable, mirror its __version__ into the
# docs' `release`; otherwise keep the placeholders assigned above.
try:
    from cortex import __version__ as version
    release = version
except ImportError:
    pass
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(review): confirm the '_static' directory actually exists in the docs
# tree — sphinx-build warns when a listed path is missing.
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cortex-doc'

# -- Options for LaTeX output --------------------------------------------------
# Every LaTeX knob ('papersize', 'pointsize', 'preamble', ...) is left at its
# Sphinx default, so the options mapping is simply empty.
latex_elements = {}

# Grouping the document tree into LaTeX files. Each tuple is
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (
        'index',
        'user_guide.tex',
        u'cortex Documentation',
        u'sherazkhan',
        'manual',
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# Cross-project reference targets for intersphinx: link into the docs of the
# interpreter version this build runs under, plus the scientific-Python stack.
# NOTE(review): several hosts here (sphinx.pocoo.org, matplotlib.sourceforge.net,
# plain http://) are legacy addresses that now redirect — consider refreshing.
python_version = '%d.%d' % sys.version_info[:2]
intersphinx_mapping = {
    'sphinx': ('http://sphinx.pocoo.org', None),
    'python': ('http://docs.python.org/' + python_version, None),
    'matplotlib': ('http://matplotlib.sourceforge.net', None),
    'numpy': ('http://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| 34.768 | 85 | 0.70168 |
import sys
# -- Hack for ReadTheDocs: RTD runs `sphinx-build` without first issuing
# `sphinx-apidoc`, so generate the API stubs ourselves when building there.
# (Requires "Install your project inside a virtualenv using setup.py install"
# in the RTD Advanced Settings.)
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    import inspect
    from sphinx import apidoc
    # Absolute path of the directory containing this conf.py.
    __location__ = os.path.join(os.getcwd(), os.path.dirname(
        inspect.getfile(inspect.currentframe())))
    output_dir = os.path.join(__location__, "../docs/api")
    module_dir = os.path.join(__location__, "../cortex")
    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
              'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cortex'
copyright = u'2017, sherazkhan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|; both are overridden below when the installed cortex
# package exposes __version__.
version = ''
release = ''
# Patterns (relative to the source dir) to ignore when looking for sources.
exclude_patterns = ['_build']
# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'
# HTML theme; 'alabaster' ships with Sphinx itself.
html_theme = 'alabaster'
# Pull the real version string from the installed cortex package if available.
try:
    from cortex import __version__ as version
except ImportError:
    pass
else:
    release = version
# Custom static files (style sheets etc.), relative to this directory.
html_static_path = ['_static']
# Output file base name for the HTML help builder.
htmlhelp_basename = 'cortex-doc'
# LaTeX output options — everything left at the Sphinx defaults.
latex_elements = {
}
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'user_guide.tex', u'cortex Documentation',
     u'sherazkhan', 'manual'),
]
# Cross-project reference targets for intersphinx, keyed to the interpreter
# version the build runs under.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://sphinx.pocoo.org', None),
    'python': ('http://docs.python.org/' + python_version, None),
    'matplotlib': ('http://matplotlib.sourceforge.net', None),
    'numpy': ('http://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| true | true |
7901910f895ef5344f1b27738ca27ce8ce0d37e9 | 6,947 | py | Python | magenta/models/image_stylization/image_stylization_finetune.py | dubreuia/magenta | 2679a1a096001808957ad99a1859181f3926cfdf | [
"Apache-2.0"
] | 1 | 2019-11-29T15:18:32.000Z | 2019-11-29T15:18:32.000Z | magenta/models/image_stylization/image_stylization_finetune.py | hdanak/magenta | acd6dedc315ea159c6f15750dd09aabdadc47515 | [
"Apache-2.0"
] | null | null | null | magenta/models/image_stylization/image_stylization_finetune.py | hdanak/magenta | acd6dedc315ea159c6f15750dd09aabdadc47515 | [
"Apache-2.0"
] | 1 | 2021-09-22T18:37:38.000Z | 2021-09-22T18:37:38.000Z | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains an N-styles style transfer model on the cheap.
Training is done by finetuning the instance norm parameters of a pre-trained
N-styles style transfer model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
from magenta.models.image_stylization import vgg
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
# Default loss-layer weights: content is matched at one mid-level VGG layer,
# style at four layers (values are the JSON-ish strings the flags expect and
# that ast.literal_eval parses in main()).
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
                         ' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
# Command-line flags; values become attributes of FLAGS after tf.app.run parses
# argv in console_entry_point().
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_float('alpha', 1.0, 'Width multiplier')
flags.DEFINE_integer('ps_tasks', 0,
                     'Number of parameter servers. If 0, parameters '
                     'are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 15,
                     'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
                     'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0,
                     'Task ID. Used when training with multiple '
                     'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 40000, 'Number of training steps.')
flags.DEFINE_string('checkpoint', None,
                    'Checkpoint file for the pretrained model.')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
                    'Content weights')
flags.DEFINE_string('master', '',
                    'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
                    'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_string('train_dir', None,
                    'Directory for checkpoints and summaries.')
FLAGS = flags.FLAGS
def main(unused_argv=None):
  """Build the finetuning graph and run slim training until train_steps.

  Only the transformer's InstanceNorm parameters are trained; all other
  transformer weights are restored from FLAGS.checkpoint and frozen, and the
  VGG16 loss network is restored from its own checkpoint.
  """
  with tf.Graph().as_default():
    # Force all input processing onto CPU in order to reserve the GPU for the
    # forward inference and back-propagation.
    device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                  worker_device=device)):
      inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                              FLAGS.image_size)
      # Load style images and select one at random (for each graph execution, a
      # new random selection occurs)
      _, style_labels, style_gram_matrices = image_utils.style_image_inputs(
          os.path.expanduser(FLAGS.style_dataset_file),
          batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,
          square_crop=True, shuffle=True)
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
      # Process style and weight flags
      num_styles = FLAGS.num_styles
      if FLAGS.style_coefficients is None:
        style_coefficients = [1.0 for _ in range(num_styles)]
      else:
        style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
      if len(style_coefficients) != num_styles:
        raise ValueError(
            'number of style coefficients differs from number of styles')
      content_weights = ast.literal_eval(FLAGS.content_weights)
      style_weights = ast.literal_eval(FLAGS.style_weights)
      # Rescale style weights dynamically based on the current style image
      style_coefficient = tf.gather(
          tf.constant(style_coefficients), style_labels)
      style_weights = dict((key, style_coefficient * value)
                           for key, value in style_weights.items())
      # Define the model. The conditional instance norm is keyed on the
      # randomly selected style label.
      stylized_inputs = model.transform(
          inputs,
          alpha=FLAGS.alpha,
          normalizer_params={
              'labels': style_labels,
              'num_categories': num_styles,
              'center': True,
              'scale': True
          })
      # Compute losses.
      total_loss, loss_dict = learning.total_loss(
          inputs, stylized_inputs, style_gram_matrices, content_weights,
          style_weights)
      for key, value in loss_dict.items():
        tf.summary.scalar(key, value)
      # Split transformer variables: only the InstanceNorm parameters are
      # finetuned; everything else is restored and left frozen.
      instance_norm_vars = [var for var in slim.get_variables('transformer')
                            if 'InstanceNorm' in var.name]
      other_vars = [var for var in slim.get_variables('transformer')
                    if 'InstanceNorm' not in var.name]
      # Function to restore VGG16 parameters.
      init_fn_vgg = slim.assign_from_checkpoint_fn(vgg.checkpoint_file(),
                                                   slim.get_variables('vgg_16'))
      # Function to restore N-styles parameters.
      init_fn_n_styles = slim.assign_from_checkpoint_fn(
          os.path.expanduser(FLAGS.checkpoint), other_vars)
      def init_fn(session):
        # Restore both checkpoints once, at session creation time.
        init_fn_vgg(session)
        init_fn_n_styles(session)
      # Set up training.
      optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
      train_op = slim.learning.create_train_op(
          total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,
          variables_to_train=instance_norm_vars, summarize_gradients=False)
      # Run training.
      slim.learning.train(
          train_op=train_op,
          logdir=os.path.expanduser(FLAGS.train_dir),
          master=FLAGS.master,
          is_chief=FLAGS.task == 0,
          number_of_steps=FLAGS.train_steps,
          init_fn=init_fn,
          save_summaries_secs=FLAGS.save_summaries_secs,
          save_interval_secs=FLAGS.save_interval_secs)
def console_entry_point():
  """CLI entry point: defer to tf.app.run, which parses flags then calls main."""
  tf.app.run(main=main)
if __name__ == '__main__':
  console_entry_point()
| 41.35119 | 80 | 0.678854 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
from magenta.models.image_stylization import vgg
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
# Default loss-layer weights, as the string literals ast.literal_eval parses.
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
                         ' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
# Command-line flags (tf.app.flags); FLAGS is populated when argv is parsed.
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_float('alpha', 1.0, 'Width multiplier')
flags.DEFINE_integer('ps_tasks', 0,
                     'Number of parameter servers. If 0, parameters '
                     'are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 15,
                     'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
                     'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0,
                     'Task ID. Used when training with multiple '
                     'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 40000, 'Number of training steps.')
flags.DEFINE_string('checkpoint', None,
                    'Checkpoint file for the pretrained model.')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
                    'Content weights')
flags.DEFINE_string('master', '',
                    'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
                    'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_string('train_dir', None,
                    'Directory for checkpoints and summaries.')
FLAGS = flags.FLAGS
def main(unused_argv=None):
  """Finetune the InstanceNorm parameters of a pretrained N-styles model."""
  with tf.Graph().as_default():
    # Keep input processing on CPU so the GPU is free for the forward pass
    # and back-propagation.
    device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                  worker_device=device)):
      inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                              FLAGS.image_size)
      # Load style images; a new one is selected at random on each graph
      # execution.
      _, style_labels, style_gram_matrices = image_utils.style_image_inputs(
          os.path.expanduser(FLAGS.style_dataset_file),
          batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,
          square_crop=True, shuffle=True)
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
      # Parse the style/weight flag strings into Python structures.
      num_styles = FLAGS.num_styles
      if FLAGS.style_coefficients is None:
        style_coefficients = [1.0 for _ in range(num_styles)]
      else:
        style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
      if len(style_coefficients) != num_styles:
        raise ValueError(
            'number of style coefficients differs from number of styles')
      content_weights = ast.literal_eval(FLAGS.content_weights)
      style_weights = ast.literal_eval(FLAGS.style_weights)
      # Rescale the style weights according to the selected style image.
      style_coefficient = tf.gather(
          tf.constant(style_coefficients), style_labels)
      style_weights = dict((key, style_coefficient * value)
                           for key, value in style_weights.items())
      # Define the transform model with conditional instance normalization.
      stylized_inputs = model.transform(
          inputs,
          alpha=FLAGS.alpha,
          normalizer_params={
              'labels': style_labels,
              'num_categories': num_styles,
              'center': True,
              'scale': True
          })
      # Compute content + style losses and log each component.
      total_loss, loss_dict = learning.total_loss(
          inputs, stylized_inputs, style_gram_matrices, content_weights,
          style_weights)
      for key, value in loss_dict.items():
        tf.summary.scalar(key, value)
      # Only InstanceNorm parameters are trained; the remaining transformer
      # weights are restored from FLAGS.checkpoint and kept frozen.
      instance_norm_vars = [var for var in slim.get_variables('transformer')
                            if 'InstanceNorm' in var.name]
      other_vars = [var for var in slim.get_variables('transformer')
                    if 'InstanceNorm' not in var.name]
      # Restore functions for the VGG16 loss network and the N-styles model.
      init_fn_vgg = slim.assign_from_checkpoint_fn(vgg.checkpoint_file(),
                                                   slim.get_variables('vgg_16'))
      init_fn_n_styles = slim.assign_from_checkpoint_fn(
          os.path.expanduser(FLAGS.checkpoint), other_vars)
      def init_fn(session):
        # Restore both checkpoints once, at session creation time.
        init_fn_vgg(session)
        init_fn_n_styles(session)
      # Training setup and loop (slim drives checkpointing/summaries).
      optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
      train_op = slim.learning.create_train_op(
          total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,
          variables_to_train=instance_norm_vars, summarize_gradients=False)
      slim.learning.train(
          train_op=train_op,
          logdir=os.path.expanduser(FLAGS.train_dir),
          master=FLAGS.master,
          is_chief=FLAGS.task == 0,
          number_of_steps=FLAGS.train_steps,
          init_fn=init_fn,
          save_summaries_secs=FLAGS.save_summaries_secs,
          save_interval_secs=FLAGS.save_interval_secs)
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| true | true |
79019284060513350aab929d8bb76a390a968486 | 2,942 | py | Python | hmc5883lStream/code/quick2wire/test_gpio.py | roblee357/JRone | f6c3080f260858f12da0be9f353cc9b62fd47c06 | [
"MIT"
] | null | null | null | hmc5883lStream/code/quick2wire/test_gpio.py | roblee357/JRone | f6c3080f260858f12da0be9f353cc9b62fd47c06 | [
"MIT"
] | null | null | null | hmc5883lStream/code/quick2wire/test_gpio.py | roblee357/JRone | f6c3080f260858f12da0be9f353cc9b62fd47c06 | [
"MIT"
] | null | null | null |
import os
from quick2wire.gpio import pins, In, Out, PullDown, gpio_admin
import pytest
@pytest.mark.gpio
@pytest.mark.loopback
class TestGPIO:
    """Hardware-facing tests for the sysfs GPIO API.

    These exercise pins.pin(0) against the real /sys/class/gpio tree; the
    paths show that header pin index 0 maps to kernel GPIO 17. Requires the
    gpio/loopback test rig (see the pytest marks).
    """
    def test_pin_must_be_opened_before_use_and_is_unusable_after_being_closed(self):
        pin = pins.pin(0)
        with pytest.raises(IOError):
            pin.value
        pin.open()
        try:
            pin.value
        finally:
            pin.close()
        with pytest.raises(IOError):
            pin.value
    def test_opens_and_closes_itself_when_used_as_a_context_manager(self):
        pin = pins.pin(0)
        with pin:
            pin.value
        with pytest.raises(IOError):
            pin.value
    def test_exports_gpio_device_to_userspace_when_opened_and_unexports_when_closed(self):
        # Opening the pin must create the sysfs node; closing must remove it.
        with pins.pin(0) as pin:
            assert os.path.exists('/sys/class/gpio/gpio17/value')
        assert not os.path.exists('/sys/class/gpio/gpio17/value')
    def test_can_set_and_query_direction_of_pin_when_open(self):
        with pins.pin(0) as pin:
            pin.direction = Out
            assert pin.direction == Out
            assert content_of("/sys/class/gpio/gpio17/direction") == "out\n"
            pin.direction = In
            assert pin.direction == In
            assert content_of("/sys/class/gpio/gpio17/direction") == "in\n"
    def test_can_set_direction_on_construction(self):
        # Direction given to the constructor is only applied on open().
        pin = pins.pin(0, Out)
        assert pin.direction == Out
        assert not os.path.exists("/sys/class/gpio/gpio17/direction")
        with pin:
            assert content_of("/sys/class/gpio/gpio17/direction") == "out\n"
            assert pin.direction == Out
    def test_setting_value_of_output_pin_writes_to_device_file(self):
        with pins.pin(0) as pin:
            pin.direction = Out
            pin.value = 1
            assert pin.value == 1
            assert content_of('/sys/class/gpio/gpio17/value') == '1\n'
            pin.value = 0
            assert pin.value == 0
            assert content_of('/sys/class/gpio/gpio17/value') == '0\n'
    def test_direction_and_value_of_pin_is_reset_when_closed(self):
        with pins.pin(0, Out) as pin:
            pin.value = 1
        # Re-export via gpio_admin to inspect the state left behind by close().
        gpio_admin("export", 17, PullDown)
        try:
            assert content_of('/sys/class/gpio/gpio17/value') == '0\n'
            assert content_of('/sys/class/gpio/gpio17/direction') == 'in\n'
        finally:
            gpio_admin("unexport", 17)
    def test_cannot_get_a_pin_with_an_invalid_index(self):
        with pytest.raises(IndexError):
            pins.pin(-1)
        with pytest.raises(IndexError):
            pins.pin(len(pins))
def content_of(filename):
    """Return the entire text content of *filename* as a single string."""
    with open(filename, 'r') as source:
        return source.read()
| 28.563107 | 90 | 0.569001 |
import os
from quick2wire.gpio import pins, In, Out, PullDown, gpio_admin
import pytest
@pytest.mark.gpio
@pytest.mark.loopback
class TestGPIO:
    """Sysfs GPIO tests; header pin index 0 maps to /sys/class/gpio/gpio17."""
    def test_pin_must_be_opened_before_use_and_is_unusable_after_being_closed(self):
        pin = pins.pin(0)
        with pytest.raises(IOError):
            pin.value
        pin.open()
        try:
            pin.value
        finally:
            pin.close()
        with pytest.raises(IOError):
            pin.value
    def test_opens_and_closes_itself_when_used_as_a_context_manager(self):
        pin = pins.pin(0)
        with pin:
            pin.value
        with pytest.raises(IOError):
            pin.value
    def test_exports_gpio_device_to_userspace_when_opened_and_unexports_when_closed(self):
        # open() exports the sysfs node; close() removes it again.
        with pins.pin(0) as pin:
            assert os.path.exists('/sys/class/gpio/gpio17/value')
        assert not os.path.exists('/sys/class/gpio/gpio17/value')
    def test_can_set_and_query_direction_of_pin_when_open(self):
        with pins.pin(0) as pin:
            pin.direction = Out
            assert pin.direction == Out
            assert content_of("/sys/class/gpio/gpio17/direction") == "out\n"
            pin.direction = In
            assert pin.direction == In
            assert content_of("/sys/class/gpio/gpio17/direction") == "in\n"
    def test_can_set_direction_on_construction(self):
        # The constructor's direction takes effect only once the pin is open.
        pin = pins.pin(0, Out)
        assert pin.direction == Out
        assert not os.path.exists("/sys/class/gpio/gpio17/direction")
        with pin:
            assert content_of("/sys/class/gpio/gpio17/direction") == "out\n"
            assert pin.direction == Out
    def test_setting_value_of_output_pin_writes_to_device_file(self):
        with pins.pin(0) as pin:
            pin.direction = Out
            pin.value = 1
            assert pin.value == 1
            assert content_of('/sys/class/gpio/gpio17/value') == '1\n'
            pin.value = 0
            assert pin.value == 0
            assert content_of('/sys/class/gpio/gpio17/value') == '0\n'
    def test_direction_and_value_of_pin_is_reset_when_closed(self):
        with pins.pin(0, Out) as pin:
            pin.value = 1
        # Re-export manually to observe the state left behind after close().
        gpio_admin("export", 17, PullDown)
        try:
            assert content_of('/sys/class/gpio/gpio17/value') == '0\n'
            assert content_of('/sys/class/gpio/gpio17/direction') == 'in\n'
        finally:
            gpio_admin("unexport", 17)
    def test_cannot_get_a_pin_with_an_invalid_index(self):
        with pytest.raises(IndexError):
            pins.pin(-1)
        with pytest.raises(IndexError):
            pins.pin(len(pins))
def content_of(filename):
    """Helper: return the full text of *filename* (used to inspect sysfs)."""
    with open(filename, 'r') as f:
        return f.read()
| true | true |
790192adbfddfd2c9c18cf72bae9e8e8538e918a | 10,194 | py | Python | assignment3/a3_mongo_queries_abw.py | ekselan/DS-Unit-3-Sprint-2-SQL-and-Databases | cde39e22d82362f4ebb771677ab838946c89bb52 | [
"MIT"
] | null | null | null | assignment3/a3_mongo_queries_abw.py | ekselan/DS-Unit-3-Sprint-2-SQL-and-Databases | cde39e22d82362f4ebb771677ab838946c89bb52 | [
"MIT"
] | null | null | null | assignment3/a3_mongo_queries_abw.py | ekselan/DS-Unit-3-Sprint-2-SQL-and-Databases | cde39e22d82362f4ebb771677ab838946c89bb52 | [
"MIT"
] | null | null | null | # inclass/mongo_queries.py
import pymongo
import os
from dotenv import load_dotenv
import sqlite3

load_dotenv()

# Credentials come from the environment (.env); the "OOPS" defaults make a
# missing variable obvious in the connection error instead of failing silently.
DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")

connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE"

print("----------------")
# Security fix: never echo the raw URI — it embeds the password. Print a
# redacted copy instead.
print("URI:", connection_uri.replace(DB_PASSWORD, "********"))

client = pymongo.MongoClient(connection_uri)
print("----------------")
print("CLIENT:", type(client), client)

# Scratch database used by the in-class pokemon demo further below.
db = client.ds14_db

################## ASSIGNMENT III #############################
# INSERT RPG DATA INTO MONGODB INSTANCE

# Create RPG database (rebinding `db`: all assignment code targets rpg_data_db).
db = client.rpg_data_db

# Establish sqlite3 connection to access rpg data
sl_conn = sqlite3.connect("data/rpg_db_original.sqlite3")
sl_curs = sl_conn.cursor()
################# CHARACTERS ###########################
# ## Create new collection for RPG data
# col_characters = db.character_collection
# ## Establish SQL syntax for query
# rpg_characters = 'SELECT * FROM charactercreator_character'
# # Function to loop through characters and return list of dictionaries
# def all_chars():
# query = rpg_characters
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_id": row[0],
# "name": row[1],
# "level": row[2],
# "exp": row[3],
# "hp": row[4],
# "strength": row[5],
# "intelligence": row[6],
# "dexterity": row[7],
# "wisdom": row[8]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# # print(character_dict_list)
# col_characters.insert_many(character_dict_list)
# print("DOCS(Num Characters):", col_characters.count_documents({})) #
# SELECT count(distinct id) from characters
################# MAGES ###########################
# col_mage = db.mage_collection
# mages = 'SELECT * FROM charactercreator_mage'
# def all_chars():
# query = mages
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "has_pet": row[1],
# "mana": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_mage.insert_many(character_dict_list)
# print("DOCS:", col_mage.count_documents({}))
################# THIEVES ###########################
# col_thief = db.thief_collection
# thieves = 'SELECT * FROM charactercreator_thief'
# def all_chars():
# query = thieves
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "is_sneaking": row[1],
# "energy": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_thief.insert_many(character_dict_list)
# print("DOCS:", col_thief.count_documents({}))
################# CLERICS ###########################
# col_cleric = db.cleric_collection
# clerics = 'SELECT * FROM charactercreator_cleric'
# def all_chars():
# query = clerics
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "using_shield": row[1],
# "mana": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_cleric.insert_many(character_dict_list)
# print("DOCS:", col_cleric.count_documents({}))
################# FIGHTERS ###########################
# col_fighter = db.fighter_collection
# fighters = 'SELECT * FROM charactercreator_fighter'
# def all_chars():
# query = fighters
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "using_shield": row[1],
# "rage": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_fighter.insert_many(character_dict_list)
# print("DOCS:", col_fighter.count_documents({}))
################# NECROMANCERS ###########################
# col_mancer = db.mancer_collection
# mancers = 'SELECT * FROM charactercreator_necromancer'
# def all_chars():
# query = mancers
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "mage_ptr_id": row[0],
# "talisman_charged": row[1],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_mancer.insert_many(character_dict_list)
# print("DOCS:", col_mancer.count_documents({}))
################# ITEMS ###########################
# col_items = db.items_collection
# items = 'SELECT * FROM armory_item'
# def all_chars():
# query = items
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "item_id": row[0],
# "name": row[1],
# "value": row[2],
# "weight": row[3]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_items.insert_many(character_dict_list)
# print("DOCS:", col_items.count_documents({}))
################# WEAPONS ###########################
# col_weapons = db.weapons_collection
# weapons = 'SELECT * FROM armory_weapon'
# def all_chars():
# query = weapons
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "item_ptr_id": row[0],
# "power": row[1]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_weapons.insert_many(character_dict_list)
# print("DOCS:", col_weapons.count_documents({}))
################# INVENTORY ###########################
# col_inventory = db.inventory_collection
# records = 'SELECT * FROM charactercreator_character_inventory'
# def all_chars():
# query = records
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "id": row[0],
# "character_id": row[1],
# "item_id": row[2]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_inventory.insert_many(character_dict_list)
# print("DOCS:", col_inventory.count_documents({}))
# print("COLLECTIONS:")
# print(db.list_collection_names())
#################### IN-CLASS POKEMON INSERTS #############################
# collection.insert_one({
# "name": "Pikachu",
# "level": 30,
# "exp": 76000000000,
# "hp": 400,
# "fav_icecream_flavors":["vanila_bean", "choc"],
# "stats":{"a":1,"b":2,"c":[1,2,3]}
# })
# print("DOCS:", collection.count_documents({})) # SELECT count(distinct id) from pokemon
# print(collection.count_documents({"name": "Pikachu"})) # SELECT
# count(distinct id) from pokemon WHERE name = "Pikachu"
# mewtwo = {
# "name": "Mewtwo",
# "level": 100,
# "exp": 76000000000,
# "hp": 450,
# "strength": 550,
# "intelligence": 450,
# "dexterity": 300,
# "wisdom": 575
# }
# blastoise = {
# "name": "Blastoise",
# "lvl": 70, # OOPS we made a mistake with the structure of this dict
# }
# charmander = {
# "nameeeeeee": "Charmander",
# "level": 70,
# "random_stat": {"a":2}
# }
# skarmory = {
# "name": "Skarmory",
# "level": 22,
# "exp": 42000,
# "hp": 85,
# "strength": 750,
# "intelligence": 8,
# "dexterity": 57
# }
# cubone = {
# "name": "Cubone",
# "level": 20,
# "exp": 35000,
# "hp": 80,
# "strength": 600,
# "intelligence": 60,
# "dexterity": 200,
# "wisdom": 200
# }
# scyther = {
# "name": "Scyther",
# "level": 99,
# "exp": 7000,
# "hp": 40,
# "strength": 50,
# "intelligence": 40,
# "dexterity": 30,
# "wisdom": 57
# }
# slowpoke = {
# "name": "Slowpoke",
# "level": 1,
# "exp": 100,
# "hp": 80,
# "strength": 100,
# "intelligence": 10,
# "dexterity": 50,
# "wisdom": 200
# }
# pokemon_team = [mewtwo, blastoise, skarmory, cubone, scyther, slowpoke, charmander]
# collection.insert_many(pokemon_team)
# print("DOCS:", collection.count_documents({})) # SELECT count(distinct id) from pokemon
# #collection.insert_one({"_id": "OURVAL", "name":"TEST"})
# # can overwrite the _id but not insert duplicate _id values
# #breakpoint()
# pikas = list(collection.find({"name": "Pikachu"})) # SELECT * FROM pokemon WHERE name = "Pikachu"
# # print(len(pikas), "PIKAS")
# # print(pikas[0]["_id"]) #> ObjectId('5ebc31c79c171e43bb5ed469')
# # print(pikas[0]["name"])
# # strong = list(collection.find({"level": {"$gte": 60}} $or {"lvl": {"$gte": 60}}))
# # strong = list(collection.find({"level": {"$gte": 60}, "$or" "lvl": {"$gte": 60}}))
# strong = list(collection.find({"$or": [{"level": {"$gte": 60}}, {"lvl": {"$gte": 60}}]}))
# # TODO: also try to account for our mistakes "lvl" vs "level"
# breakpoint()
# print(strong)
| 26.685864 | 150 | 0.566314 |
import pymongo
import os
from dotenv import load_dotenv
import sqlite3
load_dotenv()
DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE"
print("----------------")
print("URI:", connection_uri)
client = pymongo.MongoClient(connection_uri)
print("----------------")
print("CLIENT:", type(client), client)
| true | true |
790193b2894b656e53418e9157030a6e99a9f686 | 2,909 | py | Python | django/contrib/contenttypes/tests.py | jamespacileo/django | 9d3f86c72f5d22113b8cb5cd006abb9297f2fd4e | [
"BSD-3-Clause"
] | 2 | 2016-01-21T14:59:43.000Z | 2017-09-21T09:50:36.000Z | django/contrib/contenttypes/tests.py | coderanger/django | 4d358bd07c81badb95bf672d60fc131fe1c28789 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T15:34:08.000Z | 2022-02-11T15:34:08.000Z | django/contrib/contenttypes/tests.py | jamespacileo/django | 9d3f86c72f5d22113b8cb5cd006abb9297f2fd4e | [
"BSD-3-Clause"
] | null | null | null | from django import db
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.contenttypes.views import shortcut
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class ContentTypesTests(TestCase):
def setUp(self):
# First, let's make sure we're dealing with a blank slate (and that
# DEBUG is on so that queries get logged)
self.old_DEBUG = settings.DEBUG
self.old_Site_meta_installed = Site._meta.installed
settings.DEBUG = True
ContentType.objects.clear_cache()
db.reset_queries()
def tearDown(self):
settings.DEBUG = self.old_DEBUG
Site._meta.installed = self.old_Site_meta_installed
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model or
by ID -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
# A second hit, though, won't hit the DB, nor will a lookup by ID
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries))
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username="john")
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % current_site.domain,
response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
| 38.786667 | 77 | 0.663802 | from django import db
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.contenttypes.views import shortcut
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class ContentTypesTests(TestCase):
def setUp(self):
self.old_DEBUG = settings.DEBUG
self.old_Site_meta_installed = Site._meta.installed
settings.DEBUG = True
ContentType.objects.clear_cache()
db.reset_queries()
def tearDown(self):
settings.DEBUG = self.old_DEBUG
Site._meta.installed = self.old_Site_meta_installed
def test_lookup_cache(self):
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries))
def test_shortcut_view(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username="john")
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % current_site.domain,
response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
| true | true |
790193e67c0db381911710bf7ce3a6907825f58b | 1,737 | py | Python | flash_examples/object_detection.py | tszumowski/lightning-flash | d094fee4065d3d8d1337eed451041ee17fdf50aa | [
"Apache-2.0"
] | null | null | null | flash_examples/object_detection.py | tszumowski/lightning-flash | d094fee4065d3d8d1337eed451041ee17fdf50aa | [
"Apache-2.0"
] | null | null | null | flash_examples/object_detection.py | tszumowski/lightning-flash | d094fee4065d3d8d1337eed451041ee17fdf50aa | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector
# 1. Create the DataModule
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data("https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip", "data/")
datamodule = ObjectDetectionData.from_coco(
train_folder="data/coco128/images/train2017/",
train_ann_file="data/coco128/annotations/instances_train2017.json",
val_split=0.1,
batch_size=2,
)
# 2. Build the task
model = ObjectDetector(model="retinanet", num_classes=datamodule.num_classes)
# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule)
# 4. Detect objects in a few images!
predictions = model.predict(
[
"data/coco128/images/train2017/000000000625.jpg",
"data/coco128/images/train2017/000000000626.jpg",
"data/coco128/images/train2017/000000000629.jpg",
]
)
print(predictions)
# 5. Save the model!
trainer.save_checkpoint("object_detection_model.pt")
| 34.74 | 106 | 0.763961 |
import torch
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector
download_data("https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip", "data/")
datamodule = ObjectDetectionData.from_coco(
train_folder="data/coco128/images/train2017/",
train_ann_file="data/coco128/annotations/instances_train2017.json",
val_split=0.1,
batch_size=2,
)
model = ObjectDetector(model="retinanet", num_classes=datamodule.num_classes)
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule)
predictions = model.predict(
[
"data/coco128/images/train2017/000000000625.jpg",
"data/coco128/images/train2017/000000000626.jpg",
"data/coco128/images/train2017/000000000629.jpg",
]
)
print(predictions)
trainer.save_checkpoint("object_detection_model.pt")
| true | true |
7901947e2a69429f5625edd884761977b10d5f6b | 1,815 | py | Python | tests/test_electricity.py | tngTUDOR/premise | f3ab48b590afaefe6ef431846561e934cac35de9 | [
"BSD-3-Clause"
] | null | null | null | tests/test_electricity.py | tngTUDOR/premise | f3ab48b590afaefe6ef431846561e934cac35de9 | [
"BSD-3-Clause"
] | null | null | null | tests/test_electricity.py | tngTUDOR/premise | f3ab48b590afaefe6ef431846561e934cac35de9 | [
"BSD-3-Clause"
] | null | null | null | # content of test_electricity.py
from premise import DATA_DIR
from premise.electricity import Electricity
from premise.data_collection import IAMDataCollection
REGION_MAPPING_FILEPATH = (DATA_DIR / "regionmappingH12.csv")
PRODUCTION_PER_TECH = (DATA_DIR / "electricity" / "electricity_production_volumes_per_tech.csv")
LOSS_PER_COUNTRY = (DATA_DIR / "electricity" / "losses_per_country.csv")
LHV_FUELS = (DATA_DIR / "fuels_lower_heating_value.txt")
def get_db():
dummy_db = [{
'name': 'fake activity',
'reference product': 'fake product',
'location': 'IAI Area, Africa',
'unit': 'kilogram',
'exchanges': [
{'name': 'fake activity',
'product': 'fake product',
'amount': 1,
'type': 'production',
'unit': 'kilogram',
'input': ('dummy_db', '6543541'), },
{'name': '1,4-Butanediol',
'categories': ('air', 'urban air close to ground'),
'amount': 1,
'type': 'biosphere',
'unit': 'kilogram',
'input': ('dummy_bio', '123'),
},
]
}]
version = 3.5
return dummy_db, version
rdc = IAMDataCollection(model="remind", pathway='SSP2-Base', year=2012, filepath_iam_files=DATA_DIR / "iam_output_files")
db, _ = get_db()
el = Electricity(db=db, iam_data=rdc, model="remind", pathway='SSP2-Base', year=2012)
def test_losses():
assert len(el.losses) == 174
assert el.losses['AL']['Production volume'] == 7630
def test_fuels_lhv():
assert float(el.fuels_lhv['hard coal']) == 20.1
def test_powerplant_map():
s = el.powerplant_map['Biomass IGCC CCS']
assert isinstance(s, set)
def test_emissions_map():
s = el.emissions_map['Sulfur dioxide']
assert isinstance(s, str)
| 30.25 | 121 | 0.616529 |
from premise import DATA_DIR
from premise.electricity import Electricity
from premise.data_collection import IAMDataCollection
REGION_MAPPING_FILEPATH = (DATA_DIR / "regionmappingH12.csv")
PRODUCTION_PER_TECH = (DATA_DIR / "electricity" / "electricity_production_volumes_per_tech.csv")
LOSS_PER_COUNTRY = (DATA_DIR / "electricity" / "losses_per_country.csv")
LHV_FUELS = (DATA_DIR / "fuels_lower_heating_value.txt")
def get_db():
dummy_db = [{
'name': 'fake activity',
'reference product': 'fake product',
'location': 'IAI Area, Africa',
'unit': 'kilogram',
'exchanges': [
{'name': 'fake activity',
'product': 'fake product',
'amount': 1,
'type': 'production',
'unit': 'kilogram',
'input': ('dummy_db', '6543541'), },
{'name': '1,4-Butanediol',
'categories': ('air', 'urban air close to ground'),
'amount': 1,
'type': 'biosphere',
'unit': 'kilogram',
'input': ('dummy_bio', '123'),
},
]
}]
version = 3.5
return dummy_db, version
rdc = IAMDataCollection(model="remind", pathway='SSP2-Base', year=2012, filepath_iam_files=DATA_DIR / "iam_output_files")
db, _ = get_db()
el = Electricity(db=db, iam_data=rdc, model="remind", pathway='SSP2-Base', year=2012)
def test_losses():
assert len(el.losses) == 174
assert el.losses['AL']['Production volume'] == 7630
def test_fuels_lhv():
assert float(el.fuels_lhv['hard coal']) == 20.1
def test_powerplant_map():
s = el.powerplant_map['Biomass IGCC CCS']
assert isinstance(s, set)
def test_emissions_map():
s = el.emissions_map['Sulfur dioxide']
assert isinstance(s, str)
| true | true |
7901954b2a84511020bb138f9a4bd5abd9e54aa6 | 11,497 | py | Python | optuna/structs.py | VladSkripniuk/optuna | 81d5b67a81ae14d606e6d6120ce50d02e90b0942 | [
"MIT"
] | null | null | null | optuna/structs.py | VladSkripniuk/optuna | 81d5b67a81ae14d606e6d6120ce50d02e90b0942 | [
"MIT"
] | null | null | null | optuna/structs.py | VladSkripniuk/optuna | 81d5b67a81ae14d606e6d6120ce50d02e90b0942 | [
"MIT"
] | null | null | null | import enum
import warnings
from optuna import exceptions
from optuna import logging
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from datetime import datetime # NOQA
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import Optional # NOQA
from optuna.distributions import BaseDistribution # NOQA
class TrialState(enum.Enum):
"""State of a :class:`~optuna.trial.Trial`.
Attributes:
RUNNING:
The :class:`~optuna.trial.Trial` is running.
COMPLETE:
The :class:`~optuna.trial.Trial` has been finished without any error.
PRUNED:
The :class:`~optuna.trial.Trial` has been pruned with
:class:`~optuna.exceptions.TrialPruned`.
FAIL:
The :class:`~optuna.trial.Trial` has failed due to an uncaught error.
"""
RUNNING = 0
COMPLETE = 1
PRUNED = 2
FAIL = 3
WAITING = 4
def __repr__(self):
# type: () -> str
return str(self)
def is_finished(self):
# type: () -> bool
return self != TrialState.RUNNING and self != TrialState.WAITING
class StudyDirection(enum.Enum):
"""Direction of a :class:`~optuna.study.Study`.
Attributes:
NOT_SET:
Direction has not been set.
MINIMIZE:
:class:`~optuna.study.Study` minimizes the objective function.
MAXIMIZE:
:class:`~optuna.study.Study` maximizes the objective function.
"""
NOT_SET = 0
MINIMIZE = 1
MAXIMIZE = 2
class FrozenTrial(object):
"""Status and results of a :class:`~optuna.trial.Trial`.
Attributes:
number:
Unique and consecutive number of :class:`~optuna.trial.Trial` for each
:class:`~optuna.study.Study`. Note that this field uses zero-based numbering.
state:
:class:`TrialState` of the :class:`~optuna.trial.Trial`.
value:
Objective value of the :class:`~optuna.trial.Trial`.
datetime_start:
Datetime where the :class:`~optuna.trial.Trial` started.
datetime_complete:
Datetime where the :class:`~optuna.trial.Trial` finished.
params:
Dictionary that contains suggested parameters.
distributions:
Dictionary that contains the distributions of :attr:`params`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with
:func:`optuna.trial.Trial.set_user_attr`.
intermediate_values:
Intermediate objective values set with :func:`optuna.trial.Trial.report`.
"""
def __init__(
self,
number, # type: int
state, # type: TrialState
value, # type: Optional[float]
datetime_start, # type: Optional[datetime]
datetime_complete, # type: Optional[datetime]
params, # type: Dict[str, Any]
distributions, # type: Dict[str, BaseDistribution]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
intermediate_values, # type: Dict[int, float]
trial_id, # type: int
):
# type: (...) -> None
self.number = number
self.state = state
self.value = value
self.datetime_start = datetime_start
self.datetime_complete = datetime_complete
self.params = params
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.intermediate_values = intermediate_values
self._distributions = distributions
self._trial_id = trial_id
# Ordered list of fields required for `__repr__`, `__hash__` and dataframe creation.
# TODO(hvy): Remove this list in Python 3.6 as the order of `self.__dict__` is preserved.
_ordered_fields = [
'number', 'value', 'datetime_start', 'datetime_complete', 'params', '_distributions',
'user_attrs', 'system_attrs', 'intermediate_values', '_trial_id', 'state', ]
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number < other.number
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number <= other.number
def __hash__(self):
# type: () -> int
return hash(tuple(getattr(self, field) for field in self._ordered_fields))
def __repr__(self):
# type: () -> str
return ('{cls}({kwargs})'.format(
cls=self.__class__.__name__,
kwargs=', '.join('{field}={value}'.format(
field=field if not field.startswith('_') else field[1:],
value=repr(getattr(self, field))) for field in self._ordered_fields)))
def _validate(self):
# type: () -> None
if self.datetime_start is None:
raise ValueError('`datetime_start` is supposed to be set.')
if self.state.is_finished():
if self.datetime_complete is None:
raise ValueError('`datetime_complete` is supposed to be set for a finished trial.')
else:
if self.datetime_complete is not None:
raise ValueError(
'`datetime_complete` is supposed to not be set for a finished trial.')
if self.state == TrialState.COMPLETE and self.value is None:
raise ValueError('`value` is supposed to be set for a complete trial.')
if set(self.params.keys()) != set(self.distributions.keys()):
raise ValueError('Inconsistent parameters {} and distributions {}.'.format(
set(self.params.keys()), set(self.distributions.keys())))
for param_name, param_value in self.params.items():
distribution = self.distributions[param_name]
param_value_in_internal_repr = distribution.to_internal_repr(param_value)
if not distribution._contains(param_value_in_internal_repr):
raise ValueError(
"The value {} of parameter '{}' isn't contained in the distribution {}.".
format(param_value, param_name, distribution))
@property
def distributions(self):
# type: () -> Dict[str, BaseDistribution]
"""Return the distributions for this trial.
Returns:
The distributions.
"""
return self._distributions
@distributions.setter
def distributions(self, value):
# type: (Dict[str, BaseDistribution]) -> None
"""Set the distributions for this trial.
Args:
value: The distributions.
"""
self._distributions = value
@property
def trial_id(self):
# type: () -> int
"""Return the trial ID.
.. deprecated:: 0.19.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.trial.FrozenTrial.number` instead.
Returns:
The trial ID.
"""
warnings.warn(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.', DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.')
return self._trial_id
@property
def last_step(self):
# type: () -> Optional[int]
if len(self.intermediate_values) == 0:
return None
else:
return max(self.intermediate_values.keys())
class StudySummary(object):
"""Basic attributes and aggregated results of a :class:`~optuna.study.Study`.
See also :func:`optuna.study.get_all_study_summaries`.
Attributes:
study_name:
Name of the :class:`~optuna.study.Study`.
direction:
:class:`StudyDirection` of the :class:`~optuna.study.Study`.
best_trial:
:class:`FrozenTrial` with best objective value in the :class:`~optuna.study.Study`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with
:func:`optuna.study.Study.set_user_attr`.
system_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally
set by Optuna.
n_trials:
The number of trials ran in the :class:`~optuna.study.Study`.
datetime_start:
Datetime where the :class:`~optuna.study.Study` started.
"""
def __init__(
self,
study_name, # type: str
direction, # type: StudyDirection
best_trial, # type: Optional[FrozenTrial]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
n_trials, # type: int
datetime_start, # type: Optional[datetime]
study_id, # type: int
):
# type: (...) -> None
self.study_name = study_name
self.direction = direction
self.best_trial = best_trial
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.n_trials = n_trials
self.datetime_start = datetime_start
self._study_id = study_id
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id < other._study_id
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id <= other._study_id
@property
def study_id(self):
# type: () -> int
"""Return the study ID.
.. deprecated:: 0.20.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.structs.StudySummary.study_name` instead.
Returns:
The study ID.
"""
message = 'The use of `StudySummary.study_id` is deprecated. ' \
'Please use `StudySummary.study_name` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
return self._study_id
class TrialPruned(exceptions.TrialPruned):
"""Exception for pruned trials.
.. deprecated:: 0.19.0
This class was moved to :mod:`~optuna.exceptions`. Please use
:class:`~optuna.exceptions.TrialPruned` instead.
"""
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
message = 'The use of `optuna.structs.TrialPruned` is deprecated. ' \
'Please use `optuna.exceptions.TrialPruned` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
| 32.02507 | 99 | 0.604593 | import enum
import warnings
from optuna import exceptions
from optuna import logging
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from datetime import datetime
from typing import Any
from typing import Dict
from typing import Optional
from optuna.distributions import BaseDistribution
class TrialState(enum.Enum):
RUNNING = 0
COMPLETE = 1
PRUNED = 2
FAIL = 3
WAITING = 4
def __repr__(self):
return str(self)
def is_finished(self):
return self != TrialState.RUNNING and self != TrialState.WAITING
class StudyDirection(enum.Enum):
NOT_SET = 0
MINIMIZE = 1
MAXIMIZE = 2
class FrozenTrial(object):
def __init__(
self,
number,
state,
value,
datetime_start,
datetime_complete,
params,
distributions,
user_attrs,
system_attrs,
intermediate_values,
trial_id,
):
self.number = number
self.state = state
self.value = value
self.datetime_start = datetime_start
self.datetime_complete = datetime_complete
self.params = params
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.intermediate_values = intermediate_values
self._distributions = distributions
self._trial_id = trial_id
_ordered_fields = [
'number', 'value', 'datetime_start', 'datetime_complete', 'params', '_distributions',
'user_attrs', 'system_attrs', 'intermediate_values', '_trial_id', 'state', ]
def __eq__(self, other):
if not isinstance(other, FrozenTrial):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number < other.number
def __le__(self, other):
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number <= other.number
def __hash__(self):
return hash(tuple(getattr(self, field) for field in self._ordered_fields))
def __repr__(self):
return ('{cls}({kwargs})'.format(
cls=self.__class__.__name__,
kwargs=', '.join('{field}={value}'.format(
field=field if not field.startswith('_') else field[1:],
value=repr(getattr(self, field))) for field in self._ordered_fields)))
def _validate(self):
if self.datetime_start is None:
raise ValueError('`datetime_start` is supposed to be set.')
if self.state.is_finished():
if self.datetime_complete is None:
raise ValueError('`datetime_complete` is supposed to be set for a finished trial.')
else:
if self.datetime_complete is not None:
raise ValueError(
'`datetime_complete` is supposed to not be set for a finished trial.')
if self.state == TrialState.COMPLETE and self.value is None:
raise ValueError('`value` is supposed to be set for a complete trial.')
if set(self.params.keys()) != set(self.distributions.keys()):
raise ValueError('Inconsistent parameters {} and distributions {}.'.format(
set(self.params.keys()), set(self.distributions.keys())))
for param_name, param_value in self.params.items():
distribution = self.distributions[param_name]
param_value_in_internal_repr = distribution.to_internal_repr(param_value)
if not distribution._contains(param_value_in_internal_repr):
raise ValueError(
"The value {} of parameter '{}' isn't contained in the distribution {}.".
format(param_value, param_name, distribution))
@property
def distributions(self):
# type: () -> Dict[str, BaseDistribution]
return self._distributions
@distributions.setter
def distributions(self, value):
# type: (Dict[str, BaseDistribution]) -> None
self._distributions = value
@property
def trial_id(self):
# type: () -> int
warnings.warn(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.', DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.')
return self._trial_id
@property
def last_step(self):
# type: () -> Optional[int]
if len(self.intermediate_values) == 0:
return None
else:
return max(self.intermediate_values.keys())
class StudySummary(object):
def __init__(
self,
study_name, # type: str
direction, # type: StudyDirection
best_trial, # type: Optional[FrozenTrial]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
n_trials, # type: int
datetime_start, # type: Optional[datetime]
study_id, # type: int
):
# type: (...) -> None
self.study_name = study_name
self.direction = direction
self.best_trial = best_trial
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.n_trials = n_trials
self.datetime_start = datetime_start
self._study_id = study_id
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id < other._study_id
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id <= other._study_id
@property
def study_id(self):
# type: () -> int
message = 'The use of `StudySummary.study_id` is deprecated. ' \
'Please use `StudySummary.study_name` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
return self._study_id
class TrialPruned(exceptions.TrialPruned):
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
message = 'The use of `optuna.structs.TrialPruned` is deprecated. ' \
'Please use `optuna.exceptions.TrialPruned` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
| true | true |
79019570f0ce5422b5ebfd62f0b79654a0cff3e6 | 4,541 | py | Python | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/spec.py | Daemonxiao/airbyte | 34146564ba17423da8000e983722094f2426367e | [
"MIT"
] | null | null | null | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/spec.py | Daemonxiao/airbyte | 34146564ba17423da8000e983722094f2426367e | [
"MIT"
] | null | null | null | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/spec.py | Daemonxiao/airbyte | 34146564ba17423da8000e983722094f2426367e | [
"MIT"
] | null | null | null | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import re
from typing import Union
from jsonschema import RefResolver
from pydantic import BaseModel, Field
from .streams import DEFAULT_START_DATE, ReportGranularity
class OauthCredSpec(BaseModel):
class Config:
title = "OAuth2.0"
auth_type: str = Field(default="oauth2.0", const=True, order=0)
app_id: str = Field(title="App ID", description="The App ID applied by the developer.", airbyte_secret=True)
secret: str = Field(title="Secret", description="The private key of the developer's application.", airbyte_secret=True)
access_token: str = Field(title="Access Token", description="Long-term Authorized Access Token.", airbyte_secret=True)
class SandboxEnvSpec(BaseModel):
class Config:
title = "Sandbox Access Token"
auth_type: str = Field(default="sandbox_access_token", const=True, order=0)
# it is string because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
advertiser_id: str = Field(
title="Advertiser ID", description="The Advertiser ID which generated for the developer's Sandbox application."
)
access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
class ProductionEnvSpec(BaseModel):
class Config:
title = "Production Access Token"
auth_type: str = Field(default="prod_access_token", const=True, order=0)
# it is float because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
app_id: str = Field(description="The App ID applied by the developer.", title="App ID")
secret: str = Field(title="Secret", description="The private key of the developer application.", airbyte_secret=True)
access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
class SourceTiktokMarketingSpec(BaseModel):
class Config:
title = "TikTok Marketing Source Spec"
start_date: str = Field(
title="Start Date",
default=DEFAULT_START_DATE,
pattern="^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
description="The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. "
"If this parameter is not set, all data will be replicated.",
order=0,
)
report_granularity: str = Field(
title="Report Granularity",
description="Which time granularity should be grouped by; for LIFETIME there will be no grouping. "
"This option is used for reports' streams only.",
default=ReportGranularity.default().value,
enum=[g.value for g in ReportGranularity],
order=1,
)
credentials: Union[OauthCredSpec, ProductionEnvSpec, SandboxEnvSpec] = Field(
title="Authorization Method", order=3, default={}, type="object"
)
@classmethod
def change_format_to_oneOf(cls, schema: dict) -> dict:
new_schema = {}
for key, value in schema.items():
if isinstance(value, dict):
value = cls.change_format_to_oneOf(value)
if key == "anyOf":
new_schema["oneOf"] = value
else:
new_schema[key] = value
return new_schema
@staticmethod
def resolve_refs(schema: dict) -> dict:
json_schema_ref_resolver = RefResolver.from_schema(schema)
str_schema = json.dumps(schema)
for ref_block in re.findall(r'{"\$ref": "#\/definitions\/.+?(?="})"}', str_schema):
ref = json.loads(ref_block)["$ref"]
str_schema = str_schema.replace(ref_block, json.dumps(json_schema_ref_resolver.resolve(ref)[1]))
pyschema = json.loads(str_schema)
del pyschema["definitions"]
return pyschema
@classmethod
def schema(cls) -> dict:
"""we're overriding the schema classmethod to enable some post-processing"""
schema = super().schema()
schema = cls.change_format_to_oneOf(schema)
return cls.resolve_refs(schema)
class CompleteOauthOutputSpecification(BaseModel):
access_token: str = Field(path_in_connector_config=["credentials", "access_token"])
class CompleteOauthServerInputSpecification(BaseModel):
app_id: str = Field()
secret: str = Field()
class CompleteOauthServerOutputSpecification(BaseModel):
app_id: str = Field(path_in_connector_config=["credentials", "app_id"])
secret: str = Field(path_in_connector_config=["credentials", "secret"])
| 36.328 | 126 | 0.68355 |
import json
import re
from typing import Union
from jsonschema import RefResolver
from pydantic import BaseModel, Field
from .streams import DEFAULT_START_DATE, ReportGranularity
# OAuth2.0 authentication option: developer app id/secret plus a long-term token.
class OauthCredSpec(BaseModel):
    class Config:
        title = "OAuth2.0"
    # const discriminator used by the UI to pick this auth variant
    auth_type: str = Field(default="oauth2.0", const=True, order=0)
    app_id: str = Field(title="App ID", description="The App ID applied by the developer.", airbyte_secret=True)
    secret: str = Field(title="Secret", description="The private key of the developer's application.", airbyte_secret=True)
    access_token: str = Field(title="Access Token", description="Long-term Authorized Access Token.", airbyte_secret=True)
# Sandbox-environment authentication: a sandbox advertiser id plus access token.
class SandboxEnvSpec(BaseModel):
    class Config:
        title = "Sandbox Access Token"
    # const discriminator used by the UI to pick this auth variant
    auth_type: str = Field(default="sandbox_access_token", const=True, order=0)
    # it is string because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
    advertiser_id: str = Field(
        title="Advertiser ID", description="The Advertiser ID which generated for the developer's Sandbox application."
    )
    access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
# Production-environment authentication: app id/secret plus a long-term token.
class ProductionEnvSpec(BaseModel):
    class Config:
        title = "Production Access Token"
    # const discriminator used by the UI to pick this auth variant
    auth_type: str = Field(default="prod_access_token", const=True, order=0)
    app_id: str = Field(description="The App ID applied by the developer.", title="App ID")
    secret: str = Field(title="Secret", description="The private key of the developer application.", airbyte_secret=True)
    access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
# Top-level connector specification for the TikTok Marketing source.
# Comments (not docstrings) are used so the generated JSON schema is unchanged.
class SourceTiktokMarketingSpec(BaseModel):
    class Config:
        title = "TikTok Marketing Source Spec"
    # Replication start date, validated as YYYY-MM-DD by the regex pattern.
    start_date: str = Field(
        title="Start Date",
        default=DEFAULT_START_DATE,
        pattern="^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
        description="The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. "
        "If this parameter is not set, all data will be replicated.",
        order=0,
    )
    # Grouping granularity for report streams; choices come from ReportGranularity.
    report_granularity: str = Field(
        title="Report Granularity",
        description="Which time granularity should be grouped by; for LIFETIME there will be no grouping. "
        "This option is used for reports' streams only.",
        default=ReportGranularity.default().value,
        enum=[g.value for g in ReportGranularity],
        order=1,
    )
    # One of the three auth variants; discriminated by each variant's auth_type.
    credentials: Union[OauthCredSpec, ProductionEnvSpec, SandboxEnvSpec] = Field(
        title="Authorization Method", order=3, default={}, type="object"
    )
    @classmethod
    def change_format_to_oneOf(cls, schema: dict) -> dict:
        # Recursively rename every "anyOf" key to "oneOf" (Airbyte spec convention).
        new_schema = {}
        for key, value in schema.items():
            if isinstance(value, dict):
                value = cls.change_format_to_oneOf(value)
            if key == "anyOf":
                new_schema["oneOf"] = value
            else:
                new_schema[key] = value
        return new_schema
    @staticmethod
    def resolve_refs(schema: dict) -> dict:
        # Inline every {"$ref": "#/definitions/..."} block via text substitution
        # on the serialized schema, then drop the unused "definitions" section.
        json_schema_ref_resolver = RefResolver.from_schema(schema)
        str_schema = json.dumps(schema)
        for ref_block in re.findall(r'{"\$ref": "#\/definitions\/.+?(?="})"}', str_schema):
            ref = json.loads(ref_block)["$ref"]
            str_schema = str_schema.replace(ref_block, json.dumps(json_schema_ref_resolver.resolve(ref)[1]))
        pyschema = json.loads(str_schema)
        del pyschema["definitions"]
        return pyschema
    @classmethod
    def schema(cls) -> dict:
        """we're overriding the schema classmethod to enable some post-processing"""
        schema = super().schema()
        schema = cls.change_format_to_oneOf(schema)
        return cls.resolve_refs(schema)
# Fields the OAuth flow writes back into the connector configuration.
class CompleteOauthOutputSpecification(BaseModel):
    access_token: str = Field(path_in_connector_config=["credentials", "access_token"])
# Server-side inputs required to complete the OAuth flow (app credentials).
class CompleteOauthServerInputSpecification(BaseModel):
    app_id: str = Field()
    secret: str = Field()
# Server-side OAuth outputs mapped into the connector config's "credentials" section.
class CompleteOauthServerOutputSpecification(BaseModel):
    app_id: str = Field(path_in_connector_config=["credentials", "app_id"])
    secret: str = Field(path_in_connector_config=["credentials", "secret"])
| true | true |
7901972788c7e85bdf00e42e2955aa26e1516f7f | 839 | py | Python | examples/Basic_Disease_Models/Example_1/generate_events.py | healthbadge/episimmer | fcb3f7df812be045e2a6d031cac42080ad850d60 | [
"BSD-3-Clause"
] | 16 | 2021-04-26T14:52:32.000Z | 2022-01-22T07:13:06.000Z | examples/Basic_Disease_Models/Example_1/generate_events.py | healthbadge/episimmer | fcb3f7df812be045e2a6d031cac42080ad850d60 | [
"BSD-3-Clause"
] | 34 | 2021-05-21T12:53:24.000Z | 2022-02-09T16:30:40.000Z | examples/Basic_Disease_Models/Example_1/generate_events.py | healthbadge/episimmer | fcb3f7df812be045e2a6d031cac42080ad850d60 | [
"BSD-3-Clause"
] | 4 | 2021-04-08T07:52:06.000Z | 2021-05-29T05:58:15.000Z | import random
import numpy as np
def write_to_file(filename, no_locations, no_agents):
    """Write a random events file.

    Layout: line 1 is the number of events n (random in [10, 20]), line 2 is a
    header, then n lines of ``<location>:<agent>,<agent>,...:<interval>`` where
    each event gets 1-21 random agent ids and an interval from {10, 30, 45, 60}.
    Agent and location IDs enumerate from 0 to count-1.

    Args:
        filename: path of the events file to create (overwritten if present).
        no_locations: number of locations; ids drawn from [0, no_locations-1].
        no_agents: number of agents; ids drawn from [0, no_agents-1].
    """
    header = 'Location Index:Agents:Time Interval'
    n = random.randint(10, 20)
    # Use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filename, 'w') as f:
        f.write(str(n) + '\n')
        f.write(header + '\n')
        for _ in range(n):
            line = str(random.randint(0, no_locations - 1)) + ':'
            # Distinct loop variable: the original reused `i` for both loops.
            for _extra in range(random.randint(0, 20)):
                line += str(random.randint(0, no_agents - 1)) + ','
            line += str(random.randint(0, no_agents - 1))
            line += ':' + str(random.choice([10, 30, 45, 60])) + '\n'
            f.write(line)
# Generate one random events file per weekday; weekend days use fewer locations.
for day_name, location_count in [
    ('monday', 10),
    ('tuesday', 10),
    ('wednesday', 10),
    ('thursday', 10),
    ('friday', 10),
    ('saturday', 5),
    ('sunday', 2),
]:
    write_to_file(day_name + '_events.txt', location_count, 100)
| 27.064516 | 51 | 0.72944 | import random
import numpy as np
# Write a random events file: line 1 is the event count n in [10, 20], line 2 a
# header, then n lines "<location>:<agent>,...:<interval>" with ids drawn from
# [0, no_locations-1] / [0, no_agents-1] and intervals from {10, 30, 45, 60}.
def write_to_file(filename,no_locations,no_agents):
    info_dict={}  # NOTE(review): never used — dead local
    header='Location Index:Agents:Time Interval'
    n=random.randint(10,20)
    # NOTE(review): file handle is never closed — should use `with open(...)`
    f=open(filename,'w')
    f.write(str(n)+'\n')
    f.write(header+'\n')
    for i in range(n):
        line=str(random.randint(0,no_locations-1))+':'
        # NOTE(review): inner loop shadows the outer loop variable `i`
        for i in range(random.randint(0,20)):
            line+=str(random.randint(0,no_agents-1))+','
        line+=str(random.randint(0,no_agents-1))
        line+=':'+str(random.choice([10,30,45,60]))+'\n'
        f.write(line)
# Generate one random events file per weekday; weekend days use fewer locations.
write_to_file('monday_events.txt',10,100)
write_to_file('tuesday_events.txt',10,100)
write_to_file('wednesday_events.txt',10,100)
write_to_file('thursday_events.txt',10,100)
write_to_file('friday_events.txt',10,100)
write_to_file('saturday_events.txt',5,100)
write_to_file('sunday_events.txt',2,100)
| true | true |
790198c2c1cf564a270c200b70a30d603910f570 | 7,354 | py | Python | python_modules/dagster/dagster/core/storage/event_log/polling_event_watcher.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | python_modules/dagster/dagster/core/storage/event_log/polling_event_watcher.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | python_modules/dagster/dagster/core/storage/event_log/polling_event_watcher.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | import threading
from typing import Callable, List, MutableMapping, NamedTuple
from dagster import check
from dagster.core.events.log import EventLogEntry
from .sql_event_log import SqlEventLogStorage
POLLING_CADENCE = 0.1 # 100 ms
class CallbackAfterCursor(NamedTuple):
    """Callback passed from Observer class in event polling.

    start_cursor (int): Only process EventLogEntrys with an id >= start_cursor
        (earlier ones have presumably already been processed)
    callback (Callable[[EventLogEntry], None]): callback passed from Observer
        to call on new EventLogEntrys
    """
    start_cursor: int
    callback: Callable[[EventLogEntry], None]
class SqlPollingEventWatcher:
    """Event Log Watcher that uses a multithreaded polling approach to retrieving new events for run_ids
    This class' job is to manage a collection of threads that each poll the event log for a given run_id
    Uses one thread (SqlPollingRunIdEventWatcherThread) per watched run_id
    LOCKING INFO:
        ORDER: _dict_lock -> run_id_thread.callback_fn_list_lock
        INVARIANTS: _dict_lock protects _run_id_to_watcher_dict
    """
    def __init__(self, event_log_storage: SqlEventLogStorage):
        self._event_log_storage = check.inst_param(
            event_log_storage, "event_log_storage", SqlEventLogStorage
        )
        # INVARIANT: dict_lock protects _run_id_to_watcher_dict
        self._dict_lock: threading.Lock = threading.Lock()
        self._run_id_to_watcher_dict: MutableMapping[str, SqlPollingRunIdEventWatcherThread] = {}
        self._disposed = False
    def has_run_id(self, run_id: str) -> bool:
        """Return True if a polling thread currently exists for *run_id*."""
        run_id = check.str_param(run_id, "run_id")
        with self._dict_lock:
            _has_run_id = run_id in self._run_id_to_watcher_dict
        return _has_run_id
    def watch_run(self, run_id: str, start_cursor: int, callback: Callable[[EventLogEntry], None]):
        """Register *callback* for *run_id*, lazily spawning its daemon polling thread.

        Only events with id >= start_cursor are delivered to this callback.
        """
        run_id = check.str_param(run_id, "run_id")
        start_cursor = check.int_param(start_cursor, "start_cursor")
        callback = check.callable_param(callback, "callback")
        with self._dict_lock:
            if run_id not in self._run_id_to_watcher_dict:
                self._run_id_to_watcher_dict[run_id] = SqlPollingRunIdEventWatcherThread(
                    self._event_log_storage, run_id
                )
                self._run_id_to_watcher_dict[run_id].daemon = True
                self._run_id_to_watcher_dict[run_id].start()
            self._run_id_to_watcher_dict[run_id].add_callback(start_cursor, callback)
    def unwatch_run(self, run_id: str, handler: Callable[[EventLogEntry], None]):
        """Deregister *handler* for *run_id*; drop the thread entry once it has no callbacks left."""
        run_id = check.str_param(run_id, "run_id")
        handler = check.callable_param(handler, "handler")
        with self._dict_lock:
            if run_id in self._run_id_to_watcher_dict:
                self._run_id_to_watcher_dict[run_id].remove_callback(handler)
                if self._run_id_to_watcher_dict[run_id].should_thread_exit.is_set():
                    del self._run_id_to_watcher_dict[run_id]
    def __del__(self):
        # Best-effort cleanup if the watcher is garbage-collected without close().
        self.close()
    def close(self):
        """Signal all polling threads to exit and join them; idempotent via _disposed."""
        if not self._disposed:
            self._disposed = True
            with self._dict_lock:
                for watcher_thread in self._run_id_to_watcher_dict.values():
                    if not watcher_thread.should_thread_exit.is_set():
                        watcher_thread.should_thread_exit.set()
                for run_id in self._run_id_to_watcher_dict:
                    self._run_id_to_watcher_dict[run_id].join()
                del self._run_id_to_watcher_dict
class SqlPollingRunIdEventWatcherThread(threading.Thread):
    """subclass of Thread that watches a given run_id for new Events by polling every POLLING_CADENCE
    Holds a list of callbacks (_callback_fn_list) each passed in by an `Observer`. Note that
    the callbacks have a cursor associated; this means that the callbacks should be
    only executed on EventLogEntrys with an associated id >= callback.start_cursor
    Exits when `self.should_thread_exit` is set.
    LOCKING INFO:
        INVARIANTS: _callback_fn_list_lock protects _callback_fn_list
    """
    def __init__(self, event_log_storage: SqlEventLogStorage, run_id: str):
        super(SqlPollingRunIdEventWatcherThread, self).__init__()
        self._event_log_storage = check.inst_param(
            event_log_storage, "event_log_storage", SqlEventLogStorage
        )
        self._run_id = check.str_param(run_id, "run_id")
        # INVARIANT: _callback_fn_list_lock protects _callback_fn_list
        self._callback_fn_list_lock: threading.Lock = threading.Lock()
        self._callback_fn_list: List[CallbackAfterCursor] = []
        # Event used to signal this thread to stop polling and exit run().
        self._should_thread_exit = threading.Event()
        self.name = f"mysql-event-watch-run-id-{self._run_id}"
    @property
    def should_thread_exit(self) -> threading.Event:
        return self._should_thread_exit
    def add_callback(self, start_cursor: int, callback: Callable[[EventLogEntry], None]):
        """Observer has started watching this run.
        Add a callback to execute on new EventLogEntrys st. id >= start_cursor
        Args:
            start_cursor (int): minimum event_id for the callback to execute
            callback (Callable[[EventLogEntry], None]): callback to update the Dagster UI
        """
        start_cursor = check.int_param(start_cursor, "start_cursor")
        callback = check.callable_param(callback, "callback")
        with self._callback_fn_list_lock:
            self._callback_fn_list.append(CallbackAfterCursor(start_cursor, callback))
    def remove_callback(self, callback: Callable[[EventLogEntry], None]):
        """Observer has stopped watching this run;
        Remove a callback from the list of callbacks to execute on new EventLogEntrys
        Also kill thread if no callbacks remaining (i.e. no Observers are watching this run_id)
        Args:
            callback (Callable[[EventLogEntry], None]): callback to remove from list of callbacks
        """
        callback = check.callable_param(callback, "callback")
        with self._callback_fn_list_lock:
            # Rebuild the list without this callback (a callback may appear once per add).
            self._callback_fn_list = [
                callback_with_cursor
                for callback_with_cursor in self._callback_fn_list
                if callback_with_cursor.callback != callback
            ]
            if not self._callback_fn_list:
                self._should_thread_exit.set()
    def run(self):
        """Polling function to update Observers with EventLogEntrys from Event Log DB.
        Wakes every POLLING_CADENCE &
        1. executes a SELECT query to get new EventLogEntrys
        2. fires each callback (taking into account the callback.cursor) on the new EventLogEntrys
        Uses max_index_so_far as a cursor in the DB to make sure that only new records are retrieved
        """
        cursor = -1
        # wait() doubles as both the poll interval and the exit check.
        while not self._should_thread_exit.wait(POLLING_CADENCE):
            events = self._event_log_storage.get_logs_for_run(self._run_id, cursor=cursor)
            for event_record in events:
                cursor += 1
                with self._callback_fn_list_lock:
                    for callback_with_cursor in self._callback_fn_list:
                        if callback_with_cursor.start_cursor < cursor:
                            callback_with_cursor.callback(event_record)
| 44.841463 | 104 | 0.68371 | import threading
from typing import Callable, List, MutableMapping, NamedTuple
from dagster import check
from dagster.core.events.log import EventLogEntry
from .sql_event_log import SqlEventLogStorage
POLLING_CADENCE = 0.1
class CallbackAfterCursor(NamedTuple):
    """Observer callback plus the minimum event id (start_cursor) it should receive."""
    start_cursor: int
    callback: Callable[[EventLogEntry], None]
class SqlPollingEventWatcher:
    """Polling-based event log watcher: one SqlPollingRunIdEventWatcherThread per watched run_id.

    LOCKING: _dict_lock protects _run_id_to_watcher_dict and is acquired before any
    per-thread callback lock.
    """
    def __init__(self, event_log_storage: SqlEventLogStorage):
        self._event_log_storage = check.inst_param(
            event_log_storage, "event_log_storage", SqlEventLogStorage
        )
        # _dict_lock protects _run_id_to_watcher_dict
        self._dict_lock: threading.Lock = threading.Lock()
        self._run_id_to_watcher_dict: MutableMapping[str, SqlPollingRunIdEventWatcherThread] = {}
        self._disposed = False
    def has_run_id(self, run_id: str) -> bool:
        """Return True if a polling thread currently exists for *run_id*."""
        run_id = check.str_param(run_id, "run_id")
        with self._dict_lock:
            _has_run_id = run_id in self._run_id_to_watcher_dict
        return _has_run_id
    def watch_run(self, run_id: str, start_cursor: int, callback: Callable[[EventLogEntry], None]):
        """Register *callback* for *run_id*, lazily spawning its daemon polling thread."""
        run_id = check.str_param(run_id, "run_id")
        start_cursor = check.int_param(start_cursor, "start_cursor")
        callback = check.callable_param(callback, "callback")
        with self._dict_lock:
            if run_id not in self._run_id_to_watcher_dict:
                self._run_id_to_watcher_dict[run_id] = SqlPollingRunIdEventWatcherThread(
                    self._event_log_storage, run_id
                )
                self._run_id_to_watcher_dict[run_id].daemon = True
                self._run_id_to_watcher_dict[run_id].start()
            self._run_id_to_watcher_dict[run_id].add_callback(start_cursor, callback)
    def unwatch_run(self, run_id: str, handler: Callable[[EventLogEntry], None]):
        """Deregister *handler*; drop the thread entry once no callbacks remain."""
        run_id = check.str_param(run_id, "run_id")
        handler = check.callable_param(handler, "handler")
        with self._dict_lock:
            if run_id in self._run_id_to_watcher_dict:
                self._run_id_to_watcher_dict[run_id].remove_callback(handler)
                if self._run_id_to_watcher_dict[run_id].should_thread_exit.is_set():
                    del self._run_id_to_watcher_dict[run_id]
    def __del__(self):
        # Best-effort cleanup if the watcher is garbage-collected without close().
        self.close()
    def close(self):
        """Signal all polling threads to exit and join them; idempotent via _disposed."""
        if not self._disposed:
            self._disposed = True
            with self._dict_lock:
                for watcher_thread in self._run_id_to_watcher_dict.values():
                    if not watcher_thread.should_thread_exit.is_set():
                        watcher_thread.should_thread_exit.set()
                for run_id in self._run_id_to_watcher_dict:
                    self._run_id_to_watcher_dict[run_id].join()
                del self._run_id_to_watcher_dict
class SqlPollingRunIdEventWatcherThread(threading.Thread):
    """Thread that polls the event log for one run_id every POLLING_CADENCE seconds.

    Holds Observer callbacks (each with a start_cursor); exits when
    should_thread_exit is set. _callback_fn_list_lock protects _callback_fn_list.
    """
    def __init__(self, event_log_storage: SqlEventLogStorage, run_id: str):
        super(SqlPollingRunIdEventWatcherThread, self).__init__()
        self._event_log_storage = check.inst_param(
            event_log_storage, "event_log_storage", SqlEventLogStorage
        )
        self._run_id = check.str_param(run_id, "run_id")
        # _callback_fn_list_lock protects _callback_fn_list
        self._callback_fn_list_lock: threading.Lock = threading.Lock()
        self._callback_fn_list: List[CallbackAfterCursor] = []
        # Event used to signal this thread to stop polling and exit run().
        self._should_thread_exit = threading.Event()
        self.name = f"mysql-event-watch-run-id-{self._run_id}"
    @property
    def should_thread_exit(self) -> threading.Event:
        return self._should_thread_exit
    def add_callback(self, start_cursor: int, callback: Callable[[EventLogEntry], None]):
        """Register *callback* to run on new events with id >= start_cursor."""
        start_cursor = check.int_param(start_cursor, "start_cursor")
        callback = check.callable_param(callback, "callback")
        with self._callback_fn_list_lock:
            self._callback_fn_list.append(CallbackAfterCursor(start_cursor, callback))
    def remove_callback(self, callback: Callable[[EventLogEntry], None]):
        """Deregister *callback*; signal thread exit once no callbacks remain."""
        callback = check.callable_param(callback, "callback")
        with self._callback_fn_list_lock:
            self._callback_fn_list = [
                callback_with_cursor
                for callback_with_cursor in self._callback_fn_list
                if callback_with_cursor.callback != callback
            ]
            if not self._callback_fn_list:
                self._should_thread_exit.set()
    def run(self):
        """Poll for new events and fire each callback whose start_cursor permits it."""
        cursor = -1
        # wait() doubles as both the poll interval and the exit check.
        while not self._should_thread_exit.wait(POLLING_CADENCE):
            events = self._event_log_storage.get_logs_for_run(self._run_id, cursor=cursor)
            for event_record in events:
                cursor += 1
                with self._callback_fn_list_lock:
                    for callback_with_cursor in self._callback_fn_list:
                        if callback_with_cursor.start_cursor < cursor:
                            callback_with_cursor.callback(event_record)
| true | true |
790199eeccb4e794f0e44752ad662d398aed9d94 | 1,332 | py | Python | djangocms_newsletter/cmsplugin_newsletter/cms_plugins.py | nephila/djangocms-newsletter | 5ebd8d3e1e2c85b2791d0261a954469f2548c840 | [
"BSD-3-Clause"
] | null | null | null | djangocms_newsletter/cmsplugin_newsletter/cms_plugins.py | nephila/djangocms-newsletter | 5ebd8d3e1e2c85b2791d0261a954469f2548c840 | [
"BSD-3-Clause"
] | null | null | null | djangocms_newsletter/cmsplugin_newsletter/cms_plugins.py | nephila/djangocms-newsletter | 5ebd8d3e1e2c85b2791d0261a954469f2548c840 | [
"BSD-3-Clause"
] | 2 | 2021-03-15T13:33:53.000Z | 2021-05-18T20:34:47.000Z | """Plugins for CMS"""
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from emencia.django.newsletter.cmsplugin_newsletter import settings
from emencia.django.newsletter.cmsplugin_newsletter.models import SubscriptionFormPlugin
from emencia.django.newsletter.forms import MailingListSubscriptionForm
class CMSSubscriptionFormPlugin(CMSPluginBase):
    """CMS plugin that renders (and processes) a mailing-list subscription form."""
    module = _('newsletter')
    model = SubscriptionFormPlugin
    name = _('Subscription Form')
    render_template = 'newsletter/cms/subscription_form.html'
    text_enabled = False
    admin_preview = False

    def render(self, context, instance, placeholder):
        request = context['request']
        # Only treat the request as a submission of *this* form if our form
        # name is among the POSTed fields.
        submitted = request.method == "POST" and (settings.FORM_NAME in request.POST.keys())
        if submitted:
            form = MailingListSubscriptionForm(data=request.POST)
            if form.is_valid():
                form.save(instance.mailing_list)
                form.saved = True
        else:
            form = MailingListSubscriptionForm()
        extra_context = {
            'object': instance,
            'form': form,
            'form_name': settings.FORM_NAME,
            'placeholder': placeholder,
        }
        context.update(extra_context)
        return context
# Make the plugin available in the CMS plugin picker.
plugin_pool.register_plugin(CMSSubscriptionFormPlugin)
| 34.153846 | 88 | 0.698949 | from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from emencia.django.newsletter.cmsplugin_newsletter import settings
from emencia.django.newsletter.cmsplugin_newsletter.models import SubscriptionFormPlugin
from emencia.django.newsletter.forms import MailingListSubscriptionForm
class CMSSubscriptionFormPlugin(CMSPluginBase):
    """CMS plugin that renders (and processes) a mailing-list subscription form."""
    module = _('newsletter')
    model = SubscriptionFormPlugin
    name = _('Subscription Form')
    render_template = 'newsletter/cms/subscription_form.html'
    text_enabled = False
    admin_preview = False
    def render(self, context, instance, placeholder):
        request = context['request']
        # Only process the POST if our form name is among the submitted fields.
        if request.method == "POST" and (settings.FORM_NAME in request.POST.keys()):
            form = MailingListSubscriptionForm(data=request.POST)
            if form.is_valid():
                form.save(instance.mailing_list)
                form.saved = True
        else:
            form = MailingListSubscriptionForm()
        context.update({
            'object': instance,
            'form': form,
            'form_name': settings.FORM_NAME,
            'placeholder': placeholder,
        })
        return context
# Make the plugin available in the CMS plugin picker.
plugin_pool.register_plugin(CMSSubscriptionFormPlugin)
| true | true |
79019cd943ae685404eb86da4eca1e080ec0167d | 3,361 | py | Python | lab01/lab01/settings.py | car1os1/TECSUP-DAE-2021-2-B | 263be9e52814ec96708650d4e417ab393075d74e | [
"MIT"
] | null | null | null | lab01/lab01/settings.py | car1os1/TECSUP-DAE-2021-2-B | 263be9e52814ec96708650d4e417ab393075d74e | [
"MIT"
] | null | null | null | lab01/lab01/settings.py | car1os1/TECSUP-DAE-2021-2-B | 263be9e52814ec96708650d4e417ab393075d74e | [
"MIT"
] | null | null | null | """
Django settings for lab01 project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this committed key and DEBUG=True are development-only;
# load them from the environment before deploying.
SECRET_KEY = 'django-insecure-7-8hv&pc-$$1)7eiiy2m#m^o6cx%oqqv9@z071ec0%218iwt0!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'lab01.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'lab01.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Default SQLite database stored next to the project; fine for development.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.674603 | 92 | 0.675097 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-7-8hv&pc-$$1)7eiiy2m#m^o6cx%oqqv9@z071ec0%218iwt0!'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lab01.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lab01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true | true |
79019d36bbcb74e56567c705cb5879346cda1bc4 | 11,711 | py | Python | test/test_client.py | roidnn/google-maps-services-python | ca439ee9b5aaca21ffba54134c91e991dcccb4b4 | [
"Apache-2.0"
] | 1 | 2021-09-01T16:52:26.000Z | 2021-09-01T16:52:26.000Z | test/test_client.py | lamantin/google-maps-services-python | 396e03ce3ffc7d1d98634c9932408272cfc20c18 | [
"Apache-2.0"
] | null | null | null | test/test_client.py | lamantin/google-maps-services-python | 396e03ce3ffc7d1d98634c9932408272cfc20c18 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Tests for client module."""
import responses
import time
import googlemaps
from googlemaps import client as _client
import test as _test
import requests
class ClientTest(_test.TestCase):
    def test_no_api_key(self):
        """Constructing a Client without any credentials must raise."""
        with self.assertRaises(Exception):
            client = googlemaps.Client()
            client.directions("Sydney", "Melbourne")
    def test_invalid_api_key(self):
        """A malformed API key must be rejected before any request is made."""
        with self.assertRaises(Exception):
            client = googlemaps.Client(key="Invalid key.")
            client.directions("Sydney", "Melbourne")
    def test_urlencode(self):
        """urlencode_params must percent-encode '=' and use '+' for spaces."""
        # See GH #72.
        encoded_params = _client.urlencode_params([("address", "=Sydney ~")])
        self.assertEqual("address=%3DSydney+~", encoded_params)
    @responses.activate
    def test_queries_per_second(self):
        """Rate limiting: 2x the per-second quota of queries must take 1-2 s."""
        # This test assumes that the time to run a mocked query is
        # relatively small, eg a few milliseconds. We define a rate of
        # 3 queries per second, and run double that, which should take at
        # least 1 second but no more than 2.
        queries_per_second = 3
        query_range = range(queries_per_second * 2)
        for _ in query_range:
            responses.add(responses.GET,
                          "https://maps.googleapis.com/maps/api/geocode/json",
                          body='{"status":"OK","results":[]}',
                          status=200,
                          content_type="application/json")
        client = googlemaps.Client(key="AIzaasdf",
                                   queries_per_second=queries_per_second)
        start = time.time()
        for _ in query_range:
            client.geocode("Sesame St.")
        end = time.time()
        self.assertTrue(start + 1 < end < start + 2)
    @responses.activate
    def test_key_sent(self):
        """The API key must be attached as a query parameter on each request."""
        responses.add(responses.GET,
                      "https://maps.googleapis.com/maps/api/geocode/json",
                      body='{"status":"OK","results":[]}',
                      status=200,
                      content_type="application/json")
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
                            "key=AIzaasdf&address=Sesame+St.",
                            responses.calls[0].request.url)
    @responses.activate
    def test_extra_params(self):
        """extra_params entries must be appended to the request query string."""
        responses.add(responses.GET,
                      "https://maps.googleapis.com/maps/api/geocode/json",
                      body='{"status":"OK","results":[]}',
                      status=200,
                      content_type="application/json")
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.", extra_params={"foo": "bar"})
        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
                            "key=AIzaasdf&address=Sesame+St.&foo=bar",
                            responses.calls[0].request.url)
    def test_hmac(self):
        """
        From http://en.wikipedia.org/wiki/Hash-based_message_authentication_code

        HMAC_SHA1("key", "The quick brown fox jumps over the lazy dog")
           = 0xde7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9
        """
        message = "The quick brown fox jumps over the lazy dog"
        key = "a2V5"  # "key" -> base64
        signature = "3nybhbi3iqa8ino29wqQcBydtNk="
        self.assertEqual(signature, _client.sign_hmac(key, message))
    @responses.activate
    def test_url_signed(self):
        """client_id/client_secret auth must produce a signed URL with sorted params."""
        responses.add(responses.GET,
                      "https://maps.googleapis.com/maps/api/geocode/json",
                      body='{"status":"OK","results":[]}',
                      status=200,
                      content_type="application/json")
        client = googlemaps.Client(client_id="foo", client_secret="a2V5")
        client.geocode("Sesame St.")
        self.assertEqual(1, len(responses.calls))
        # Check ordering of parameters.
        self.assertIn("address=Sesame+St.&client=foo&signature",
                      responses.calls[0].request.url)
        self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
                            "address=Sesame+St.&client=foo&"
                            "signature=fxbWUIcNPZSekVOhp2ul9LW5TpY=",
                            responses.calls[0].request.url)
    @responses.activate
    def test_ua_sent(self):
        """Requests must carry the library's User-Agent prefix."""
        responses.add(responses.GET,
                      "https://maps.googleapis.com/maps/api/geocode/json",
                      body='{"status":"OK","results":[]}',
                      status=200,
                      content_type="application/json")
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(1, len(responses.calls))
        user_agent = responses.calls[0].request.headers["User-Agent"]
        self.assertTrue(user_agent.startswith("GoogleGeoApiClientPython"))
    @responses.activate
    def test_retry(self):
        """An OVER_QUERY_LIMIT status must trigger one retry of the same URL."""
        class request_callback:
            def __init__(self):
                self.first_req = True

            def __call__(self, req):
                # First call reports quota exhaustion; the retry succeeds.
                if self.first_req:
                    self.first_req = False
                    return (200, {}, '{"status":"OVER_QUERY_LIMIT"}')
                return (200, {}, '{"status":"OK","results":[]}')

        responses.add_callback(responses.GET,
                               "https://maps.googleapis.com/maps/api/geocode/json",
                               content_type='application/json',
                               callback=request_callback())
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(2, len(responses.calls))
        self.assertEqual(responses.calls[0].request.url, responses.calls[1].request.url)
    @responses.activate
    def test_transport_error(self):
        """A non-retryable HTTP error (404) must surface as HTTPError with the status code."""
        responses.add(responses.GET,
                      "https://maps.googleapis.com/maps/api/geocode/json",
                      status=404,
                      content_type='application/json')
        client = googlemaps.Client(key="AIzaasdf")
        with self.assertRaises(googlemaps.exceptions.HTTPError) as e:
            client.geocode("Foo")
        self.assertEqual(e.exception.status_code, 404)
    @responses.activate
    def test_host_override(self):
        """_get must honor an explicit base_url instead of the default host."""
        responses.add(responses.GET,
                      "https://foo.com/bar",
                      body='{"status":"OK","results":[]}',
                      status=200,
                      content_type="application/json")
        client = googlemaps.Client(key="AIzaasdf")
        client._get("/bar", {}, base_url="https://foo.com")
        self.assertEqual(1, len(responses.calls))
    @responses.activate
    def test_custom_extract(self):
        """A custom extract_body can consume non-2xx responses without raising."""
        def custom_extract(resp):
            return resp.json()

        responses.add(responses.GET,
                      "https://maps.googleapis.com/bar",
                      body='{"error":"errormessage"}',
                      status=403,
                      content_type="application/json")
        client = googlemaps.Client(key="AIzaasdf")
        b = client._get("/bar", {}, extract_body=custom_extract)
        self.assertEqual(1, len(responses.calls))
        self.assertEqual("errormessage", b["error"])
@responses.activate
def test_retry_intermittent(self):
class request_callback:
def __init__(self):
self.first_req = True
def __call__(self, req):
if self.first_req:
self.first_req = False
return (500, {}, 'Internal Server Error.')
return (200, {}, '{"status":"OK","results":[]}')
responses.add_callback(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
content_type="application/json",
callback=request_callback())
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(2, len(responses.calls))
def test_channel_without_client_id(self):
with self.assertRaises(ValueError):
client = googlemaps.Client(key="AIzaasdf", channel="mychannel")
def test_invalid_channel(self):
# Cf. limitations here:
# https://developers.google.com/maps/premium/reports
# /usage-reports#channels
with self.assertRaises(ValueError):
client = googlemaps.Client(client_id="foo", client_secret="a2V5",
channel="auieauie$? ")
def test_auth_url_with_channel(self):
client = googlemaps.Client(key="AIzaasdf",
client_id="foo",
client_secret="a2V5",
channel="MyChannel_1")
# Check ordering of parameters + signature.
auth_url = client._generate_auth_url("/test",
{"param": "param"},
accepts_clientid=True)
self.assertEqual(auth_url, "/test?param=param"
"&channel=MyChannel_1"
"&client=foo"
"&signature=OH18GuQto_mEpxj99UimKskvo4k=")
# Check if added to requests to API with accepts_clientid=False
auth_url = client._generate_auth_url("/test",
{"param": "param"},
accepts_clientid=False)
self.assertEqual(auth_url, "/test?param=param&key=AIzaasdf")
def test_requests_version(self):
client_args_timeout = {
"key": "AIzaasdf",
"client_id": "foo",
"client_secret": "a2V5",
"channel": "MyChannel_1",
"connect_timeout": 5,
"read_timeout": 5
}
client_args = client_args_timeout.copy()
del client_args["connect_timeout"]
del client_args["read_timeout"]
requests.__version__ = '2.3.0'
with self.assertRaises(NotImplementedError):
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
requests.__version__ = '2.4.0'
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
@responses.activate
def test_no_retry_over_query_limit(self):
responses.add(responses.GET,
"https://maps.googleapis.com/foo",
body='{"status":"OVER_QUERY_LIMIT"}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf",
retry_over_query_limit=False)
with self.assertRaises(googlemaps.exceptions.ApiError):
client._request("/foo", {})
self.assertEqual(1, len(responses.calls))
| 38.14658 | 88 | 0.571343 |
import responses
import time
import googlemaps
from googlemaps import client as _client
import test as _test
import requests
class ClientTest(_test.TestCase):
def test_no_api_key(self):
with self.assertRaises(Exception):
client = googlemaps.Client()
client.directions("Sydney", "Melbourne")
def test_invalid_api_key(self):
with self.assertRaises(Exception):
client = googlemaps.Client(key="Invalid key.")
client.directions("Sydney", "Melbourne")
def test_urlencode(self):
encoded_params = _client.urlencode_params([("address", "=Sydney ~")])
self.assertEqual("address=%3DSydney+~", encoded_params)
@responses.activate
def test_queries_per_second(self):
queries_per_second = 3
query_range = range(queries_per_second * 2)
for _ in query_range:
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf",
queries_per_second=queries_per_second)
start = time.time()
for _ in query_range:
client.geocode("Sesame St.")
end = time.time()
self.assertTrue(start + 1 < end < start + 2)
@responses.activate
def test_key_sent(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(1, len(responses.calls))
self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
"key=AIzaasdf&address=Sesame+St.",
responses.calls[0].request.url)
@responses.activate
def test_extra_params(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.", extra_params={"foo": "bar"})
self.assertEqual(1, len(responses.calls))
self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
"key=AIzaasdf&address=Sesame+St.&foo=bar",
responses.calls[0].request.url)
def test_hmac(self):
message = "The quick brown fox jumps over the lazy dog"
key = "a2V5"
signature = "3nybhbi3iqa8ino29wqQcBydtNk="
self.assertEqual(signature, _client.sign_hmac(key, message))
@responses.activate
def test_url_signed(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(client_id="foo", client_secret="a2V5")
client.geocode("Sesame St.")
self.assertEqual(1, len(responses.calls))
self.assertIn("address=Sesame+St.&client=foo&signature",
responses.calls[0].request.url)
self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
"address=Sesame+St.&client=foo&"
"signature=fxbWUIcNPZSekVOhp2ul9LW5TpY=",
responses.calls[0].request.url)
@responses.activate
def test_ua_sent(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(1, len(responses.calls))
user_agent = responses.calls[0].request.headers["User-Agent"]
self.assertTrue(user_agent.startswith("GoogleGeoApiClientPython"))
@responses.activate
def test_retry(self):
class request_callback:
def __init__(self):
self.first_req = True
def __call__(self, req):
if self.first_req:
self.first_req = False
return (200, {}, '{"status":"OVER_QUERY_LIMIT"}')
return (200, {}, '{"status":"OK","results":[]}')
responses.add_callback(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
content_type='application/json',
callback=request_callback())
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(2, len(responses.calls))
self.assertEqual(responses.calls[0].request.url, responses.calls[1].request.url)
@responses.activate
def test_transport_error(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
status=404,
content_type='application/json')
client = googlemaps.Client(key="AIzaasdf")
with self.assertRaises(googlemaps.exceptions.HTTPError) as e:
client.geocode("Foo")
self.assertEqual(e.exception.status_code, 404)
@responses.activate
def test_host_override(self):
responses.add(responses.GET,
"https://foo.com/bar",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client._get("/bar", {}, base_url="https://foo.com")
self.assertEqual(1, len(responses.calls))
@responses.activate
def test_custom_extract(self):
def custom_extract(resp):
return resp.json()
responses.add(responses.GET,
"https://maps.googleapis.com/bar",
body='{"error":"errormessage"}',
status=403,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
b = client._get("/bar", {}, extract_body=custom_extract)
self.assertEqual(1, len(responses.calls))
self.assertEqual("errormessage", b["error"])
@responses.activate
def test_retry_intermittent(self):
class request_callback:
def __init__(self):
self.first_req = True
def __call__(self, req):
if self.first_req:
self.first_req = False
return (500, {}, 'Internal Server Error.')
return (200, {}, '{"status":"OK","results":[]}')
responses.add_callback(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
content_type="application/json",
callback=request_callback())
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(2, len(responses.calls))
def test_channel_without_client_id(self):
with self.assertRaises(ValueError):
client = googlemaps.Client(key="AIzaasdf", channel="mychannel")
def test_invalid_channel(self):
with self.assertRaises(ValueError):
client = googlemaps.Client(client_id="foo", client_secret="a2V5",
channel="auieauie$? ")
def test_auth_url_with_channel(self):
client = googlemaps.Client(key="AIzaasdf",
client_id="foo",
client_secret="a2V5",
channel="MyChannel_1")
auth_url = client._generate_auth_url("/test",
{"param": "param"},
accepts_clientid=True)
self.assertEqual(auth_url, "/test?param=param"
"&channel=MyChannel_1"
"&client=foo"
"&signature=OH18GuQto_mEpxj99UimKskvo4k=")
auth_url = client._generate_auth_url("/test",
{"param": "param"},
accepts_clientid=False)
self.assertEqual(auth_url, "/test?param=param&key=AIzaasdf")
def test_requests_version(self):
client_args_timeout = {
"key": "AIzaasdf",
"client_id": "foo",
"client_secret": "a2V5",
"channel": "MyChannel_1",
"connect_timeout": 5,
"read_timeout": 5
}
client_args = client_args_timeout.copy()
del client_args["connect_timeout"]
del client_args["read_timeout"]
requests.__version__ = '2.3.0'
with self.assertRaises(NotImplementedError):
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
requests.__version__ = '2.4.0'
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
@responses.activate
def test_no_retry_over_query_limit(self):
responses.add(responses.GET,
"https://maps.googleapis.com/foo",
body='{"status":"OVER_QUERY_LIMIT"}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf",
retry_over_query_limit=False)
with self.assertRaises(googlemaps.exceptions.ApiError):
client._request("/foo", {})
self.assertEqual(1, len(responses.calls))
| true | true |
79019d60a3b662abbc61d3a75776f4170b15dd80 | 1,294 | py | Python | autoshort.py | lawja/AutoSHRTNR | 7c9242df3b40913449a7a714fdd02abf4c608f26 | [
"MIT"
] | 1 | 2017-12-09T21:23:53.000Z | 2017-12-09T21:23:53.000Z | autoshort.py | lawja/AutoSHRTNR | 7c9242df3b40913449a7a714fdd02abf4c608f26 | [
"MIT"
] | null | null | null | autoshort.py | lawja/AutoSHRTNR | 7c9242df3b40913449a7a714fdd02abf4c608f26 | [
"MIT"
] | null | null | null | import inspect
import os
import pyperclip
import requests
import time
from urllib.parse import quote
# All exception classes exported by requests.exceptions.  Kept so any
# existing importer of this module still finds the name; the loop below now
# relies on requests.exceptions.RequestException, their documented base class.
request_errors = [obj for name, obj in inspect.getmembers(requests.exceptions)
                  if inspect.isclass(obj) and issubclass(obj, Exception)]

# main daemon loop: shorten whatever URL lands on the clipboard
while True:
    # snapshot the current clipboard value
    clipboard = pyperclip.paste()
    try:
        # bitly API access token; fail loudly with a clear message when it is
        # unset instead of crashing later with a TypeError on concatenation
        token = os.environ.get('BITLY_TOKEN')
        if not token:
            raise RuntimeError('BITLY_TOKEN environment variable is not set')
        # percent-encode the clipboard value so it survives as a query param
        safe_cb = quote(clipboard, safe='')
        # URL that will make the API call
        bitly_url = 'https://api-ssl.bitly.com/v3/shorten?' + \
            'access_token=' + token + '&longUrl=' + safe_cb
        # get the json return from the API call
        short_url = requests.get(bitly_url).json()
        # if everything went as planned, put the short link on the clipboard
        if short_url['status_txt'] == 'OK':
            pyperclip.copy(short_url['data']['url'])
    except requests.exceptions.RequestException:
        # the request failed, e.g. the clipboard content was not a link;
        # ignore it and keep the daemon alive (other errors still propagate)
        pass
    # poll (with a short sleep) until the clipboard changes again
    while pyperclip.paste() == clipboard:
        time.sleep(.1)
| 34.972973 | 78 | 0.629057 | import inspect
import os
import pyperclip
import requests
import time
from urllib.parse import quote
request_errors = [obj for name, obj in inspect.getmembers(requests.exceptions)
if inspect.isclass(obj) and issubclass(obj, Exception)]
while True:
clipboard = pyperclip.paste()
try:
safe_cb = quote(clipboard,safe='')
token = os.environ.get('BITLY_TOKEN')
bitly_url = 'https://api-ssl.bitly.com/v3/shorten?' + \
'access_token=' + token + '&longUrl=' + safe_cb
short_url = requests.get(bitly_url).json()
if(short_url['status_txt'] == 'OK'):
pyperclip.copy(short_url['data']['url'])
except Exception as e:
if(any(issubclass(e.__class__, lv) for lv in request_errors)):
pass
else:
raise(e)
while(pyperclip.paste() == clipboard):
time.sleep(.1)
| true | true |
79019e6692b7080cea95100ec92b84990cdcb1bc | 761 | py | Python | setup.py | adalekin/ngenix-test | 8d0b001e614cc6d18002ccd224cb8c3568128774 | [
"MIT"
] | null | null | null | setup.py | adalekin/ngenix-test | 8d0b001e614cc6d18002ccd224cb8c3568128774 | [
"MIT"
] | null | null | null | setup.py | adalekin/ngenix-test | 8d0b001e614cc6d18002ccd224cb8c3568128774 | [
"MIT"
] | null | null | null | import versioneer
# versioneer supplies version-aware build commands; copy so we never mutate
# versioneer's own mapping.
commands = versioneer.get_cmdclass().copy()

try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup, find_packages

# Read the long description up front with a context manager so the file
# handle is closed deterministically (the old inline open() leaked it).
with open('README.md', 'rt') as readme:
    long_description = readme.read()

setup(
    name='ngenix-test',
    version=versioneer.get_version(),
    packages=find_packages(),
    url='https://github.com/adalekin/ngenix-test',
    license='MIT',
    author='Aleksey Dalekin',
    author_email='adalekin@gmail.com',
    description='A te.',
    long_description=long_description,
    package_dir={'ngenix_test': 'ngenix_test'},
    include_package_data=True,
    install_requires=[
    ],
    cmdclass=commands,
    # Fixed: previously read "nginx-test=nginx_test.run:main", but the
    # package is ngenix_test (see name/url/package_dir above), so the
    # console script could never import its entry point.
    entry_points='''
        [console_scripts]
        ngenix-test=ngenix_test.run:main
    '''
)
| 26.241379 | 53 | 0.65703 | import versioneer
commands = versioneer.get_cmdclass().copy()
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
setup(
name='ngenix-test',
version=versioneer.get_version(),
packages=find_packages(),
url='https://github.com/adalekin/ngenix-test',
license='MIT',
author='Aleksey Dalekin',
author_email='adalekin@gmail.com',
description='A te.',
long_description=open('README.md', 'rt').read(),
package_dir={'ngenix_test': 'ngenix_test'},
include_package_data=True,
install_requires=[
],
cmdclass=commands,
entry_points='''
[console_scripts]
nginx-test=nginx_test.run:main
'''
)
| true | true |
79019ffe232bf2e663c2267cee507d701388e4be | 8,841 | py | Python | wandb/fastai/__init__.py | MPGek/client | 541d760c5cb8776b1ad5fcf1362d7382811cbc61 | [
"Apache-2.0"
] | 1 | 2020-08-20T14:02:47.000Z | 2020-08-20T14:02:47.000Z | wandb/fastai/__init__.py | MPGek/client | 541d760c5cb8776b1ad5fcf1362d7382811cbc61 | [
"Apache-2.0"
] | null | null | null | wandb/fastai/__init__.py | MPGek/client | 541d760c5cb8776b1ad5fcf1362d7382811cbc61 | [
"Apache-2.0"
] | null | null | null | '''
This module hooks fast.ai Learners to Weights & Biases through a callback.
Requested logged data can be configured through the callback constructor.
Examples:
WandbCallback can be used when initializing the Learner::
```
from wandb.fastai import WandbCallback
[...]
learn = Learner(data, ..., callback_fns=WandbCallback)
learn.fit(epochs)
```
Custom parameters can be given using functools.partial::
```
from wandb.fastai import WandbCallback
from functools import partial
[...]
learn = Learner(data, ..., callback_fns=partial(WandbCallback, ...))
learn.fit(epochs)
```
Finally, it is possible to use WandbCallback only when starting
training. In this case it must be instantiated::
```
learn.fit(..., callbacks=WandbCallback(learn))
```
or, with custom parameters::
```
learn.fit(..., callbacks=WandbCallback(learn, ...))
```
'''
import wandb
import fastai
from fastai.callbacks import TrackerCallback
from pathlib import Path
import random
# matplotlib is only needed for logging sample image predictions; select the
# non-interactive Agg backend to avoid tkinter issues on headless machines.
try:
    import matplotlib
    matplotlib.use('Agg')  # must run before pyplot is imported
    import matplotlib.pyplot as plt
except ImportError:
    # Narrowed from a bare "except:" so that e.g. KeyboardInterrupt is not
    # swallowed; only a genuinely missing matplotlib is tolerated.
    print('Warning: matplotlib required if logging sample image predictions')
class WandbCallback(TrackerCallback):
    """
    Automatically saves model topology, losses & metrics.
    Optionally logs weights, gradients, sample predictions and best trained model.

    Args:
        learn (fastai.basic_train.Learner): the fast.ai learner to hook.
        log (str): "gradients", "parameters", "all", or None. Losses & metrics are always logged.
        save_model (bool): save model at the end of each epoch. It will also load best model at the end of training.
        monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.
        mode (str): "auto", "min" or "max" to compare "monitor" values and define best model.
        input_type (str): "images" or None. Used to display sample predictions.
        validation_data (list): data used for sample predictions if input_type is set.
        predictions (int): number of predictions to make if input_type is set and validation_data is None.
        seed (int): initialize random generator for sample predictions if input_type is set and validation_data is None.
    """

    # Record if watch has been called previously (even in another instance)
    _watch_called = False

    def __init__(self,
                 learn,
                 log="gradients",
                 save_model=True,
                 monitor=None,
                 mode='auto',
                 input_type=None,
                 validation_data=None,
                 predictions=36,
                 seed=12345):

        # Check if wandb.init has been called
        if wandb.run is None:
            raise ValueError(
                'You must call wandb.init() before WandbCallback()')

        # Adapted from fast.ai "SaveModelCallback"
        if monitor is None:
            # use default TrackerCallback monitor value
            super().__init__(learn, mode=mode)
        else:
            super().__init__(learn, monitor=monitor, mode=mode)
        self.save_model = save_model
        self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'

        self.log = log
        self.input_type = input_type
        self.best = None

        # Select items for sample predictions to see evolution along training
        self.validation_data = validation_data
        if input_type and not self.validation_data:
            wandbRandom = random.Random(seed)  # For repeatability
            predictions = min(predictions, len(learn.data.valid_ds))
            indices = wandbRandom.sample(range(len(learn.data.valid_ds)),
                                         predictions)
            self.validation_data = [learn.data.valid_ds[i] for i in indices]

    def on_train_begin(self, **kwargs):
        "Call watch method to log model topology, gradients & weights"

        # Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback"
        super().on_train_begin()

        # Ensure we don't call "watch" multiple times
        if not WandbCallback._watch_called:
            WandbCallback._watch_called = True

            # Logs model topology and optionally gradients and weights
            wandb.watch(self.learn.model, log=self.log)

    def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
        "Logs training loss, validation loss and custom metrics & log prediction samples & save model"

        if self.save_model:
            # Adapted from fast.ai "SaveModelCallback"
            current = self.get_monitor_value()
            if current is not None and self.operator(current, self.best):
                print(
                    'Better model found at epoch {} with {} value: {}.'.format(
                        epoch, self.monitor, current))
                self.best = current

                # Save within wandb folder
                with self.model_path.open('wb') as model_file:
                    self.learn.save(model_file)

        # Log sample predictions if learn.predict is available
        if self.validation_data:
            try:
                self._wandb_log_predictions()
            except FastaiError as e:
                wandb.termwarn(e.message)
                self.validation_data = None  # prevent from trying again on next loop
            except Exception as e:
                wandb.termwarn("Unable to log prediction samples.\n{}".format(e))
                self.validation_data = None  # prevent from trying again on next loop

        # Log losses & metrics
        # Adapted from fast.ai "CSVLogger"
        logs = {
            name: stat
            for name, stat in list(
                zip(self.learn.recorder.names, [epoch, smooth_loss] +
                    last_metrics))
        }
        wandb.log(logs)

    def on_train_end(self, **kwargs):
        "Load the best model."

        if self.save_model:
            # Adapted from fast.ai "SaveModelCallback"
            if self.model_path.is_file():
                with self.model_path.open('rb') as model_file:
                    self.learn.load(model_file, purge=False)
                    print('Loaded best saved model from {}'.format(
                        self.model_path))

    def _wandb_log_predictions(self):
        "Log prediction samples"

        pred_log = []

        for x, y in self.validation_data:
            try:
                pred = self.learn.predict(x)
            except Exception as e:
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit are not swallowed; chain the original cause so
                # the underlying failure stays visible in the traceback.
                raise FastaiError('Unable to run "predict" method from Learner to log prediction samples.') from e

            # scalar -> likely to be a category
            if not pred[1].shape:
                pred_log.append(
                    wandb.Image(
                        x.data,
                        caption='Ground Truth: {}\nPrediction: {}'.format(
                            y, pred[0])))

            # most vision datasets have a "show" function we can use
            elif hasattr(x, "show"):
                # log input data
                pred_log.append(
                    wandb.Image(x.data, caption='Input data', grouping=3))

                # log label and prediction
                for im, capt in ((pred[0], "Prediction"),
                                 (y, "Ground Truth")):
                    # Resize plot to image resolution
                    # from https://stackoverflow.com/a/13714915
                    my_dpi = 100
                    fig = plt.figure(frameon=False, dpi=my_dpi)
                    h, w = x.size
                    fig.set_size_inches(w / my_dpi, h / my_dpi)
                    ax = plt.Axes(fig, [0., 0., 1., 1.])
                    ax.set_axis_off()
                    fig.add_axes(ax)

                    # Superpose label or prediction to input image
                    x.show(ax=ax, y=im)
                    pred_log.append(wandb.Image(fig, caption=capt))
                    plt.close(fig)

            # likely to be an image
            elif hasattr(y, "shape") and (
                (len(y.shape) == 2) or
                    (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):
                pred_log.extend([
                    wandb.Image(x.data, caption='Input data', grouping=3),
                    wandb.Image(pred[0].data, caption='Prediction'),
                    wandb.Image(y.data, caption='Ground Truth')
                ])

            # we just log input data
            else:
                pred_log.append(wandb.Image(x.data, caption='Input data'))

        wandb.log({"Prediction Samples": pred_log}, commit=False)
class FastaiError(wandb.Error):
    """Raised when interacting with the fast.ai Learner fails (e.g. predict)."""
    pass
| 37.944206 | 120 | 0.575274 | import wandb
import fastai
from fastai.callbacks import TrackerCallback
from pathlib import Path
import random
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
print('Warning: matplotlib required if logging sample image predictions')
class WandbCallback(TrackerCallback):
_watch_called = False
def __init__(self,
learn,
log="gradients",
save_model=True,
monitor=None,
mode='auto',
input_type=None,
validation_data=None,
predictions=36,
seed=12345):
if wandb.run is None:
raise ValueError(
'You must call wandb.init() before WandbCallback()')
if monitor is None:
super().__init__(learn, mode=mode)
else:
super().__init__(learn, monitor=monitor, mode=mode)
self.save_model = save_model
self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'
self.log = log
self.input_type = input_type
self.best = None
self.validation_data = validation_data
if input_type and not self.validation_data:
wandbRandom = random.Random(seed)
predictions = min(predictions, len(learn.data.valid_ds))
indices = wandbRandom.sample(range(len(learn.data.valid_ds)),
predictions)
self.validation_data = [learn.data.valid_ds[i] for i in indices]
def on_train_begin(self, **kwargs):
super().on_train_begin()
if not WandbCallback._watch_called:
WandbCallback._watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log)
def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
current = self.get_monitor_value()
if current is not None and self.operator(current, self.best):
print(
'Better model found at epoch {} with {} value: {}.'.format(
epoch, self.monitor, current))
self.best = current
# Save within wandb folder
with self.model_path.open('wb') as model_file:
self.learn.save(model_file)
# Log sample predictions if learn.predict is available
if self.validation_data:
try:
self._wandb_log_predictions()
except FastaiError as e:
wandb.termwarn(e.message)
self.validation_data = None # prevent from trying again on next loop
except Exception as e:
wandb.termwarn("Unable to log prediction samples.\n{}".format(e))
self.validation_data=None # prevent from trying again on next loop
# Log losses & metrics
# Adapted from fast.ai "CSVLogger"
logs = {
name: stat
for name, stat in list(
zip(self.learn.recorder.names, [epoch, smooth_loss] +
last_metrics))
}
wandb.log(logs)
def on_train_end(self, **kwargs):
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
if self.model_path.is_file():
with self.model_path.open('rb') as model_file:
self.learn.load(model_file, purge=False)
print('Loaded best saved model from {}'.format(
self.model_path))
def _wandb_log_predictions(self):
pred_log = []
for x, y in self.validation_data:
try:
pred=self.learn.predict(x)
except:
raise FastaiError('Unable to run "predict" method from Learner to log prediction samples.')
# scalar -> likely to be a category
if not pred[1].shape:
pred_log.append(
wandb.Image(
x.data,
caption='Ground Truth: {}\nPrediction: {}'.format(
y, pred[0])))
# most vision datasets have a "show" function we can use
elif hasattr(x, "show"):
# log input data
pred_log.append(
wandb.Image(x.data, caption='Input data', grouping=3))
# log label and prediction
for im, capt in ((pred[0], "Prediction"),
(y, "Ground Truth")):
# Resize plot to image resolution
# from https://stackoverflow.com/a/13714915
my_dpi = 100
fig = plt.figure(frameon=False, dpi=my_dpi)
h, w = x.size
fig.set_size_inches(w / my_dpi, h / my_dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# Superpose label or prediction to input image
x.show(ax=ax, y=im)
pred_log.append(wandb.Image(fig, caption=capt))
plt.close(fig)
# likely to be an image
elif hasattr(y, "shape") and (
(len(y.shape) == 2) or
(len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):
pred_log.extend([
wandb.Image(x.data, caption='Input data', grouping=3),
wandb.Image(pred[0].data, caption='Prediction'),
wandb.Image(y.data, caption='Ground Truth')
])
# we just log input data
else:
pred_log.append(wandb.Image(x.data, caption='Input data'))
wandb.log({"Prediction Samples": pred_log}, commit=False)
class FastaiError(wandb.Error):
pass
| true | true |
7901a00a8ef641d86ee6c4066844159e728c9071 | 1,193 | py | Python | app/controllers/auth/register.py | TheSynt4x/flask-blog | 11176c15e390f5652ad286b5395f5a27af1c9989 | [
"MIT"
] | null | null | null | app/controllers/auth/register.py | TheSynt4x/flask-blog | 11176c15e390f5652ad286b5395f5a27af1c9989 | [
"MIT"
] | null | null | null | app/controllers/auth/register.py | TheSynt4x/flask-blog | 11176c15e390f5652ad286b5395f5a27af1c9989 | [
"MIT"
] | null | null | null | from flask import render_template, flash, redirect, url_for, request
from flask.views import MethodView
from app.middleware import auth
from app.models.user import User
from app.validators.register_form import RegisterForm
from app.services import avatar_service
class RegisterController(MethodView):
    """Sign-up endpoint: renders the registration form and creates users."""

    @auth.optional
    def get(self):
        """Render an empty registration form."""
        return render_template('auth/register.html', form=RegisterForm())

    @auth.optional
    def post(self):
        """Create the account when the submitted form validates.

        Returns a redirect to the login page on success, otherwise the
        registration template annotated with the validation errors.
        """
        form = RegisterForm()
        if not form.validate_on_submit():
            return render_template('auth/register.html', form=form)

        form.validate_username(form.username)

        # An empty-filename upload is falsy, matching the original check.
        uploaded = request.files.get('avatar')
        avatar = avatar_service.save(form.avatar.data) if uploaded \
            else 'no-image.png'

        User.create(form.username.data, form.password.data, avatar)
        flash('Your account has been created. You may now login.', 'info')
        return redirect(url_for('login'))
| 25.934783 | 74 | 0.709975 | from flask import render_template, flash, redirect, url_for, request
from flask.views import MethodView
from app.middleware import auth
from app.models.user import User
from app.validators.register_form import RegisterForm
from app.services import avatar_service
class RegisterController(MethodView):
@auth.optional
def get(self):
return render_template('auth/register.html', form=RegisterForm())
@auth.optional
def post(self):
form = RegisterForm()
if form.validate_on_submit():
form.validate_username(form.username)
avatar = 'no-image.png'
if 'avatar' in request.files and request.files['avatar']:
avatar = avatar_service.save(form.avatar.data)
User.create(form.username.data, form.password.data, avatar)
flash('Your account has been created. You may now login.', 'info')
return redirect(url_for('login'))
return render_template('auth/register.html', form=form)
| true | true |
7901a0eb62057284280314ec3af6eb03662f1df9 | 1,602 | py | Python | hue/hue_api.py | BenDoan/playground | 2d9dea78eccb22c7118414b163fb434c52eec078 | [
"MIT"
] | 1 | 2015-05-24T08:36:04.000Z | 2015-05-24T08:36:04.000Z | hue/hue_api.py | BenDoan/playground | 2d9dea78eccb22c7118414b163fb434c52eec078 | [
"MIT"
] | 9 | 2021-02-08T20:47:00.000Z | 2022-02-18T03:22:11.000Z | hue/hue_api.py | BenDoan/playground | 2d9dea78eccb22c7118414b163fb434c52eec078 | [
"MIT"
] | null | null | null | import json
import requests
HUE_NUPNP_URL = "https://www.meethue.com/api/nupnp"
class APIException(Exception):
    """Raised for any failed or malformed Hue/meethue HTTP interaction."""
    pass
class HueAPI(object):
    """Minimal Philips Hue client for switching light groups on and off.

    The bridge's LAN IP is discovered once at construction time through the
    public meethue N-UPnP discovery service.
    """

    def __init__(self, username):
        self.username = username
        self.ip = self.discover_hub_ip()

    @property
    def base_url(self):
        """Root URL for authenticated API calls against the local bridge."""
        return "http://{}/api/{}".format(self.ip, self.username)

    def get_groups(self):
        """Return the identifiers of all light groups on the bridge.

        Raises:
            APIException: if the request fails or the reply is not JSON.
        """
        url = "{}/groups".format(self.base_url)
        try:
            r = requests.get(url)
        except requests.exceptions.RequestException:  # narrowed from bare except
            raise APIException("Failed to send group get GET")
        try:
            return list(r.json().keys())
        except ValueError:  # json.JSONDecodeError subclasses ValueError
            raise APIException("Failed to decode group get json response")

    def set_group(self, group_id, state):
        """Switch one group on (state=True) or off (state=False).

        Raises:
            APIException: if the PUT request cannot be sent.
        """
        url = "{}/groups/{}/action".format(self.base_url, group_id)
        try:
            requests.put(url, data=json.dumps({"on": state}))
        except requests.exceptions.RequestException:
            raise APIException("Failed to send group set PUT")

    def set_groups(self, state):
        """Apply ``state`` to every group known to the bridge."""
        for group in self.get_groups():
            self.set_group(group, state)

    def discover_hub_ip(self):
        """Return the bridge's LAN IP via the meethue N-UPnP service.

        Raises:
            APIException: on network failure, bad JSON, or when no bridge is
                registered for this network.
        """
        try:
            r = requests.get(HUE_NUPNP_URL)
        except requests.exceptions.RequestException:
            raise APIException("Failed to send hub ip GET")
        try:
            json_resp = r.json()
        except ValueError:
            raise APIException("Failed to decode hub ip json response")
        if len(json_resp) > 0:
            # Fixed: previously indexed a literal list ``[0]`` with a string
            # key, which always raised TypeError instead of returning the IP.
            return json_resp[0]['internalipaddress']
        else:
            raise APIException("Failed to find hub ip")
def _main():
    # Placeholder command-line entry point; extend with argument parsing
    # and HueAPI usage as needed.
    pass
if __name__ == '__main__':
    _main()
| 23.910448 | 74 | 0.576779 | import json
import requests
HUE_NUPNP_URL = "https://www.meethue.com/api/nupnp"
class APIException(Exception):
pass
class HueAPI(object):
def __init__(self, username):
self.username = username
self.ip = self.discover_hub_ip()
@property
def base_url(self):
return "http://{}/api/{}".format(self.ip, self.username)
def get_groups(self):
url = "{}/groups".format(self.base_url)
try:
r = requests.get(url)
except:
raise APIException("Failed to send group get GET")
try:
return list(r.json().keys())
except:
raise APIException("Failed to decode group get json response")
def set_group(self, group_id, state):
url = "{}/groups/{}/action".format(self.base_url, group_id)
try:
r = requests.put(url, data=json.dumps({"on": state}))
except:
raise APIException("Failed to send group set PUT")
def set_groups(self, state):
for group in self.get_groups():
self.set_group(group, state)
def discover_hub_ip(self):
try:
r = requests.get(HUE_NUPNP_URL)
except:
raise APIException("Failed to send hub ip GET")
try:
json_resp = r.json()
except:
raise APIException("Failed to decode hub ip json response")
if len(json_resp) > 0:
return [0]['internalipaddress']
else:
raise APIException("Failed to find hub ip")
def _main():
pass
if __name__ == '__main__':
_main()
| true | true |
7901a103c51919e85415fd5af4bf3af003105056 | 1,330 | py | Python | fastapi_example/util/auth_util.py | pkyosx/fastapi-example | 234b2da6b3d60989f9e75483671bc0c2710592bd | [
"MIT"
] | null | null | null | fastapi_example/util/auth_util.py | pkyosx/fastapi-example | 234b2da6b3d60989f9e75483671bc0c2710592bd | [
"MIT"
] | null | null | null | fastapi_example/util/auth_util.py | pkyosx/fastapi-example | 234b2da6b3d60989f9e75483671bc0c2710592bd | [
"MIT"
] | null | null | null | import time
from dataclasses import dataclass
import jwt
from util.enum_util import EnumBase
class Role(EnumBase):
    # Authorization roles assignable to an identity.
    USER = "USER"
    ADMIN = "ADMIN"
class Perm(EnumBase):
    # Fine-grained permissions granted through roles (see Identity.perm_mapping).
    NONE = "NONE"
    READ_MSG = "READ_MSG"
    WRITE_MSG = "WRITE_MSG"
@dataclass
class Identity:
    """An authenticated principal: a user name plus its assigned role."""
    user: str
    role: Role.to_enum()
    # Class-level role -> permissions table, shared by all instances.
    perm_mapping = {
        Role.USER: [Perm.READ_MSG],
        Role.ADMIN: [Perm.READ_MSG, Perm.WRITE_MSG],
    }
    def has_permission(self, perm: Perm) -> bool:
        """Return True when this identity's role grants ``perm``."""
        return perm in self.perm_mapping[self.role]
class JWTAuthenticator(object):
    """Issue and verify HS256-signed access tokens."""

    ACCESS_JWT_ALGORITHM = "HS256"

    @classmethod
    def dump_access_token(cls, key: str, identity: Identity, exp: int) -> str:
        """Serialize ``identity`` into a JWT valid for ``exp`` seconds."""
        now = int(time.time())
        claims = {
            "user": identity.user,
            "role": identity.role,
            "nbf": now - 300,  # "not before", with a small clock-skew allowance
            "exp": now + exp,
        }
        return jwt.encode(payload=claims, key=key,
                          algorithm=cls.ACCESS_JWT_ALGORITHM)

    @classmethod
    def load_access_token(cls, key: str, access_token: str) -> Identity:
        """Verify ``access_token`` and rebuild the embedded Identity."""
        claims = jwt.decode(jwt=access_token, key=key,
                            algorithms=[cls.ACCESS_JWT_ALGORITHM])
        return Identity(user=claims["user"], role=claims["role"])
| 23.333333 | 78 | 0.605263 | import time
from dataclasses import dataclass
import jwt
from util.enum_util import EnumBase
class Role(EnumBase):
USER = "USER"
ADMIN = "ADMIN"
class Perm(EnumBase):
NONE = "NONE"
READ_MSG = "READ_MSG"
WRITE_MSG = "WRITE_MSG"
@dataclass
class Identity:
user: str
role: Role.to_enum()
perm_mapping = {
Role.USER: [Perm.READ_MSG],
Role.ADMIN: [Perm.READ_MSG, Perm.WRITE_MSG],
}
def has_permission(self, perm: Perm) -> bool:
return perm in self.perm_mapping[self.role]
class JWTAuthenticator(object):
ACCESS_JWT_ALGORITHM = "HS256"
@classmethod
def dump_access_token(cls, key: str, identity: Identity, exp: int) -> str:
current_ts = int(time.time())
return jwt.encode(
payload=dict(
user=identity.user,
role=identity.role,
nbf=current_ts - 300,
exp=current_ts + exp,
),
key=key,
algorithm=cls.ACCESS_JWT_ALGORITHM,
)
@classmethod
def load_access_token(cls, key: str, access_token: str) -> Identity:
payload = jwt.decode(
jwt=access_token, key=key, algorithms=[cls.ACCESS_JWT_ALGORITHM]
)
return Identity(user=payload["user"], role=payload["role"])
| true | true |
7901a15e89880b5c6796a693c8ee4e1f1b87d075 | 42,243 | py | Python | devito/passes/clusters/aliases.py | garg-aayush/devito | b1e8fffdee7d6b556ff19a372d69ed1aebee675a | [
"MIT"
] | 1 | 2021-05-31T04:56:33.000Z | 2021-05-31T04:56:33.000Z | devito/passes/clusters/aliases.py | garg-aayush/devito | b1e8fffdee7d6b556ff19a372d69ed1aebee675a | [
"MIT"
] | null | null | null | devito/passes/clusters/aliases.py | garg-aayush/devito | b1e8fffdee7d6b556ff19a372d69ed1aebee675a | [
"MIT"
] | null | null | null | from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
    """
    Cross-iteration redundancies elimination.

    Parameters
    ----------
    cluster : Cluster
        Input Cluster, subject of the optimization pass.
    mode : str
        The transformation mode. Accepted: ['invariants', 'sops'].
        * 'invariants' is for sub-expressions that are invariant w.r.t. one or
          more Dimensions.
        * 'sops' stands for sums-of-products, that is redundancies are searched
          across all expressions in sum-of-product form.
    sregistry : SymbolRegistry
        The symbol registry, to create unique temporary names.
    options : dict
        The optimization options.
        Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].
        * 'min-storage': if True, the pass will try to minimize the amount of
          storage introduced for the tensor temporaries. This might also reduce
          the operation count. On the other hand, this might affect fusion and
          therefore data locality. Defaults to False (legacy).
        * 'cire-maxpar': if True, privilege parallelism over working set size,
          that is the pass will try to create as many parallel loops as possible,
          even though this will require more space (Dimensions) for the temporaries.
          Defaults to False.
        * 'cire-rotate': if True, the pass will use modulo indexing for the
          outermost Dimension iterated over by the temporaries. This will sacrifice
          a parallel loop for a reduced working set size. Defaults to False (legacy).
        * 'cire-maxalias': if True, capture the largest redundancies. This will
          minimize the flop count while maximizing the number of tensor temporaries,
          thus increasing the working set size.
    platform : Platform
        The underlying platform. Used to optimize the shape of the introduced
        tensor symbols.

    Examples
    --------
    1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`

        t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]

    which after CIRE becomes

        t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
        t0 = t1[x,y,z]*c[t,x,y,z]

    2) 'sops'. Below we see two expressions in sum-of-product form (in this
    case, the sum degenerates to a single product).

        t0 = 2.0*a[x,y,z]*b[x,y,z]
        t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]

    CIRE detects that these two expressions are actually redundant and rewrites
    them as:

        t2[x,y,z] = a[x,y,z]*b[x,y,z]
        t0 = 2.0*t2[x,y,z]
        t1 = 3.0*t2[x,y,z+1]
    """
    # Map the user-facing mode onto the internal transformers to be applied
    if mode == 'invariants':
        space = ('inv-basic', 'inv-compound')
    elif mode in ('sops',):
        space = (mode,)
    else:
        assert False, "Unknown CIRE mode `%s`" % mode

    processed = []
    for c in clusters:
        if not c.is_dense:
            # Sparse Clusters are left untouched: their computational cost is
            # negligible, so processing them would only increase compilation
            # time and potentially make the generated code more chaotic
            processed.append(c)
        else:
            # Some of the CIRE transformers must inspect all scopes surrounding
            # `c` to perform data dependencies analysis
            context = Context(c).process(clusters)

            # CIRE may alter `c` as well as create one or more new Clusters
            processed.extend(_cire(c, context, space, sregistry, options, platform))

    return processed
def _cire(cluster, context, space, sregistry, options, platform):
    # Build one candidate schedule ("variant") per CIRE flavour in `space`
    variants = []
    for mode in space:
        transformer = modes[mode](sregistry, options)
        variants.append(transformer.make_schedule(cluster, context))

    # Nothing to do if no flavour detected any alias
    if all(not v.schedule for v in variants):
        return [cluster]

    # Retain the variant offering the best trade-off between operation count
    # reduction and working set size increase
    schedule, exprs = pick_best(variants)

    # Lower the winning Schedule into the Clusters computing the temporaries
    schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
    clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
    clusters.append(rebuild(cluster, exprs, subs, schedule))

    return clusters
class Cire(object):

    """
    Base class for CIRE transformers.

    Subclasses customize what gets extracted (`_extract`), how many extraction
    rounds are performed (`_nrepeats`), when a Dimension belongs to the
    write-to region (`_in_writeto`), and the cost model (`_selector`).
    """

    # Key into the `cire-mincost` options dict
    optname = None
    # Name identifying this transformer within the `modes` registry
    mode = None

    def __init__(self, sregistry, options):
        self.sregistry = sregistry

        self._opt_minstorage = options['min-storage']
        self._opt_mincost = options['cire-mincost'][self.optname]
        self._opt_maxpar = options['cire-maxpar']
        self._opt_maxalias = options['cire-maxalias']

    def make_schedule(self, cluster, context):
        """
        Build a SpacePoint for `cluster`: a candidate Schedule, the rewritten
        expressions, and a <flop-reduction, working-set> score.
        """
        # Capture aliases within `exprs`
        aliases = AliasMapper()
        score = 0
        exprs = cluster.exprs
        ispace = cluster.ispace
        for n in range(self._nrepeats(cluster)):
            # Extract potentially aliasing expressions
            mapper = self._extract(exprs, context, n)

            # Search aliasing expressions
            found = collect(mapper.extracted, ispace, self._opt_minstorage)

            # Choose the aliasing expressions with a good flops/memory trade-off
            exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
            aliases.update(chosen)
            score += pscore

        # AliasMapper -> Schedule
        schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)

        # The actual score is a 2-tuple <flop-reduction-score, working-set-score>
        score = (score, len(aliases))

        return SpacePoint(schedule, exprs, score)

    def _make_symbol(self):
        # Fresh placeholder Symbol naming an extracted sub-expression
        return Symbol(name=self.sregistry.make_name('dummy'))

    def _nrepeats(self, cluster):
        # Number of extraction rounds to perform on `cluster`
        raise NotImplementedError

    def _extract(self, exprs, context, n):
        # Return a Uxmapper of potentially aliasing sub-expressions
        raise NotImplementedError

    def _in_writeto(self, dim, cluster):
        # True if `dim` should be part of a temporary's write-to region
        raise NotImplementedError

    def _selector(self, e, naliases):
        # Cost-model score of alias `e`; 0 means "do not capture"
        raise NotImplementedError
class CireInvariants(Cire):

    """
    Capture sub-expressions invariant w.r.t. at least one Dimension.
    """

    optname = 'invariants'

    def _nrepeats(self, cluster):
        # A single extraction round suffices for invariants
        return 1

    def _rule(self, e):
        # Candidates are function applications (e.g. sin, cos) and
        # fractional powers
        return (e.is_Function or
                (e.is_Pow and e.exp.is_Number and e.exp < 1))

    def _extract(self, exprs, context, n):
        mapper = Uxmapper()
        for prefix, clusters in context.items():
            if not prefix:
                continue

            # A sub-expression qualifies only if independent of the innermost
            # Dimension of `prefix` and of anything written within its scope
            exclude = set().union(*[c.scope.writes for c in clusters])
            exclude.add(prefix[-1].dim)

            for e in exprs:
                for i in search(e, self._rule, 'all', 'bfs_first_hit'):
                    if {a.function for a in i.free_symbols} & exclude:
                        continue
                    mapper.add(i, self._make_symbol)

        return mapper

    def _in_writeto(self, dim, cluster):
        return PARALLEL in cluster.properties[dim]

    def _selector(self, e, naliases):
        if all(i.function.is_Symbol for i in e.free_symbols):
            # E.g., `dt**(-2)`
            mincost = self._opt_mincost['scalar']
        else:
            mincost = self._opt_mincost['tensor']
        return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):

    """
    Capture the invariant sub-expressions matched by CireInvariants._rule.
    """

    mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):

    """
    Capture commutative expressions that embed sub-expressions found by the
    basic invariants extraction.
    """

    mode = 'inv-compound'

    def _extract(self, exprs, context, n):
        # First run the basic extraction ...
        extracted = super()._extract(exprs, context, n).extracted

        # ... then hunt for commutative nodes having an extracted argument
        rule = lambda e: any(a in extracted for a in e.args)

        mapper = Uxmapper()
        for e in exprs:
            for i in search(e, rule, 'all', 'dfs'):
                if not i.is_commutative:
                    continue

                key = lambda a: a in extracted
                terms, others = split(i.args, key)

                mapper.add(i, self._make_symbol, terms)

        return mapper
class CireSOPS(Cire):

    """
    Capture redundancies across expressions in sum-of-product form.
    """

    optname = 'sops'
    mode = 'sops'

    def _nrepeats(self, cluster):
        # The `nrepeats` is calculated such that we analyze all potential derivatives
        # in `cluster`
        return potential_max_deriv_order(cluster.exprs)

    def _extract(self, exprs, context, n):
        # Forbid CIRE involving Dimension-independent dependencies, e.g.:
        # r0 = ...
        # u[x, y] = ... r0*a[x, y] ...
        # NOTE: if one uses the DSL in a conventional way and sticks to the default
        # compilation pipelines where CSE always happens after CIRE, then `exclude`
        # will always be empty
        exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}

        mapper = Uxmapper()
        for e in exprs:
            for i in search_potential_deriv(e, n):
                if i.free_symbols & exclude:
                    continue

                key = lambda a: a.is_Add
                terms, others = split(i.args, key)

                if self._opt_maxalias:
                    # Treat `e` as an FD expression and pull out the derivative
                    # coefficient from `i`
                    # Note: typically derivative coefficients are numbers, but
                    # sometimes they could be provided in symbolic form through an
                    # arbitrary Function. In the latter case, we rely on the
                    # heuristic that such Function's basically never span the whole
                    # grid, but rather a single Grid dimension (e.g., `c[z, n]` for a
                    # stencil of diameter `n` along `z`)
                    if e.grid is not None and terms:
                        key = partial(maybe_coeff_key, e.grid)
                        others, more_terms = split(others, key)
                        terms += more_terms

                mapper.add(i, self._make_symbol, terms)

        return mapper

    def _in_writeto(self, dim, cluster):
        return self._opt_maxpar and PARALLEL in cluster.properties[dim]

    def _selector(self, e, naliases):
        if naliases <= 1:
            # No redundancy, hence nothing to gain from capturing `e`
            return 0
        else:
            return estimate_cost(e, True)*naliases // self._opt_mincost
# Registry mapping each CIRE mode name to the transformer implementing it
modes = {
    CireInvariantsBasic.mode: CireInvariantsBasic,
    CireInvariantsCompound.mode: CireInvariantsCompound,
    CireSOPS.mode: CireSOPS
}
def collect(extracted, ispace, min_storage):
    """
    Find groups of aliasing expressions.

    We shall introduce the following (loose) terminology:

        * A ``terminal`` is the leaf of a mathematical operation. Terminals
          can be numbers (n), literals (l), or Indexeds (I).
        * ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
          ``R(I) = J``, where ``J`` has the same base as ``I`` but with all
          offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
        * A ``relaxed expression`` is an expression in which all of the
          terminals are relaxed.

    Now we define the concept of aliasing. We say that an expression A
    aliases an expression B if:

        * ``R(A) == R(B)``
        * all pairwise Indexeds in A and B access memory locations at a
          fixed constant distance along each Dimension.

    For example, consider the following expressions:

        * a[i+1] + b[i+1]
        * a[i+1] + b[j+1]
        * a[i] + c[i]
        * a[i+2] - b[i+2]
        * a[i+2] + b[i]
        * a[i-1] + b[i-1]

    Out of the expressions above, the following alias to `a[i] + b[i]`:

        * a[i+1] + b[i+1] : same operands and operations, distance along i: 1
        * a[i-1] + b[i-1] : same operands and operations, distance along i: -1

    Whereas the following do not:

        * a[i+1] + b[j+1] : because at least one index differs
        * a[i] + c[i] : because at least one of the operands differs
        * a[i+2] - b[i+2] : because at least one operation differs
        * a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)

    Returns an AliasMapper: basis alias -> AliasedGroup.
    """
    # Find the potential aliases
    found = []
    for expr in extracted:
        assert not expr.is_Equality

        indexeds = retrieve_indexed(expr)

        bases = []
        offsets = []
        for i in indexeds:
            ii = IterationInstance(i)
            if ii.is_irregular:
                # Only regular (affine) accesses may alias one another
                break

            base = []
            offset = []
            for e, ai in zip(ii, ii.aindices):
                if q_constant(e):
                    base.append(e)
                else:
                    base.append(ai)
                    offset.append((ai, e - ai))
            bases.append(tuple(base))
            offsets.append(LabeledVector(offset))

        if not indexeds or len(bases) == len(indexeds):
            found.append(Candidate(expr, ispace, indexeds, bases, offsets))

    # Create groups of aliasing expressions
    mapper = OrderedDict()
    unseen = list(found)
    while unseen:
        c = unseen.pop(0)
        group = [c]
        for u in list(unseen):
            # Is the arithmetic structure of `c` and `u` equivalent ?
            if not compare_ops(c.expr, u.expr):
                continue

            # Is `c` translated w.r.t. `u` ?
            if not c.translated(u):
                continue

            group.append(u)
            unseen.remove(u)
        group = Group(group)

        if min_storage:
            k = group.dimensions_translated
        else:
            k = group.dimensions
        mapper.setdefault(k, []).append(group)

    aliases = AliasMapper()
    queue = list(mapper.values())
    while queue:
        groups = queue.pop(0)

        while groups:
            # For each Dimension, determine the Minimum Intervals (MI) spanning
            # all of the Groups diameters
            # Example: x's largest_diameter=2 => [x[-2,0], x[-1,1], x[0,2]]
            # Note: Groups that cannot evaluate their diameter are dropped
            mapper = defaultdict(int)
            for g in list(groups):
                try:
                    mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
                except ValueError:
                    groups.remove(g)
            intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}

            # For each Group, find a rotation that is compatible with a given MI
            mapper = {}
            for d, intervals in intervalss.items():
                # Not all groups may access all dimensions
                # Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
                impacted = [g for g in groups if d in g.dimensions]

                for interval in list(intervals):
                    found = {g: g.find_rotation_distance(d, interval) for g in impacted}
                    if all(distance is not None for distance in found.values()):
                        # `interval` is OK !
                        mapper[interval] = found
                        break

            if len(mapper) == len(intervalss):
                # Every Dimension has a compatible rotation -- done
                break

            # Try again with fewer groups
            # Heuristic: first try retaining the larger ones
            smallest = len(min(groups, key=len))
            fallback = groups
            groups, remainder = split(groups, lambda g: len(g) > smallest)
            if groups:
                queue.append(remainder)
            elif len(remainder) > 1:
                # No luck with the heuristic, e.g. there are two groups
                # and both have same `len`
                queue.append(fallback[1:])
                groups = [fallback.pop(0)]
            else:
                break

        for g in groups:
            c = g.pivot
            distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])

            # Create the basis alias
            offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
                       for v in c.offsets]
            subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
                    for i, b, v in zip(c.indexeds, c.bases, offsets)}
            alias = uxreplace(c.expr, subs)

            # All aliased expressions
            aliaseds = [extracted[i.expr] for i in g]

            # Distance of each aliased expression from the basis alias
            distances = []
            for i in g:
                distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
                distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
                distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))

            aliases.add(alias, list(mapper), aliaseds, distances)

    return aliases
def choose(aliases, exprs, mapper, selector):
    """
    Analyze the detected aliases and, after applying a cost model to rule out
    the aliases with a bad flops/memory trade-off, inject them into the original
    expressions.

    Returns a 3-tuple: the rewritten expressions, the retained AliasMapper,
    and the cumulative score of the retained aliases.
    """
    tot = 0
    retained = AliasMapper()

    # Pass 1: a set of aliasing expressions is retained only if its cost
    # exceeds the mode's threshold
    candidates = OrderedDict()
    aliaseds = []
    others = []
    for e, v in aliases.items():
        score = selector(e, len(v.aliaseds))
        if score > 0:
            candidates[e] = score
            aliaseds.extend(v.aliaseds)
        else:
            others.append(e)

    # Do not waste time if unnecessary
    if not candidates:
        return exprs, retained, tot

    # Project the candidate aliases into exprs to determine what the new
    # working set would be
    mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
    templated = [uxreplace(e, mapper) for e in exprs]

    # Pass 2: a set of aliasing expressions is retained only if the tradeoff
    # between operation count reduction and working set increase is favorable
    owset = wset(others + templated)
    for e, v in aliases.items():
        try:
            score = candidates[e]
        except KeyError:
            score = 0
        if score > 1 or \
           score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset):
            retained[e] = v
            tot += score

    # Do not waste time if unnecessary
    if not retained:
        return exprs, retained, tot

    # Substitute the chosen aliasing sub-expressions
    mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
    exprs = [uxreplace(e, mapper) for e in exprs]

    return exprs, retained, tot
def lower_aliases(cluster, aliases, in_writeto, maxpar):
    """
    Create a Schedule from an AliasMapper.

    For each alias, derive its write-to region, iteration space, and -- per
    aliased expression -- the indices at which the temporary will be read.
    """
    dmapper = {}
    processed = []
    for alias, v in aliases.items():
        imapper = {**{i.dim: i for i in v.intervals},
                   **{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}

        intervals = []
        writeto = []
        sub_iterators = {}
        indicess = [[] for _ in v.distances]
        for i in cluster.ispace.intervals:
            try:
                interval = imapper[i.dim]
            except KeyError:
                # E.g., `x0_blk0` or (`a[y_m+1]` => `y not in imapper`)
                intervals.append(i)
                continue

            assert i.stamp >= interval.stamp

            if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
                # The alias doesn't require a temporary Dimension along i.dim
                intervals.append(i)
                continue

            assert not i.dim.is_NonlinearDerived

            # `i.dim` is necessarily part of the write-to region, so
            # we have to adjust the Interval's stamp. For example, consider
            # `i=x[0,0]<1>` and `interval=x[-4,4]<0>`; here we need to
            # use `<1>` as stamp, which is what appears in `cluster`
            interval = interval.lift(i.stamp)

            # We further bump the interval stamp if we were requested to trade
            # fusion for more collapse-parallelism
            interval = interval.lift(interval.stamp + int(maxpar))

            writeto.append(interval)
            intervals.append(interval)

            if i.dim.is_Incr:
                # Suitable IncrDimensions must be used to avoid OOB accesses.
                # E.g., r[xs][ys][z] => both `xs` and `ys` must be initialized such
                # that all accesses are within bounds. This requires traversing the
                # hierarchy of IncrDimensions to set `xs` (`ys`) in a way that
                # consecutive blocks access consecutive regions in `r` (e.g.,
                # `xs=x0_blk1-x0_blk0` with `blocklevels=2`; `xs=0` with
                # `blocklevels=1`, that is it degenerates in this case)
                try:
                    d = dmapper[i.dim]
                except KeyError:
                    dd = i.dim.parent
                    assert dd.is_Incr
                    if dd.parent.is_Incr:
                        # An IncrDimension in between IncrDimensions
                        m = i.dim.symbolic_min - i.dim.parent.symbolic_min
                    else:
                        m = 0
                    d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
                                                       dd.symbolic_size, 1, dd.step)
                sub_iterators[i.dim] = d
            else:
                d = i.dim

            # Given the iteration `interval`, lower distances to indices
            for distance, indices in zip(v.distances, indicess):
                indices.append(d - interval.lower + distance[interval.dim])

        # The alias write-to space
        writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)

        # The alias iteration space
        intervals = IntervalGroup(intervals, cluster.ispace.relations)
        ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
        ispace = ispace.augment(sub_iterators)

        processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))

    # The [ScheduledAliases] must be ordered so as to reuse as many of the
    # `cluster`'s IterationIntervals as possible in order to honor the
    # write-to region. Another fundamental reason for ordering is to ensure
    # deterministic code generation
    processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))

    return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
    """
    Rewrite a Schedule for performance: optionally make the temporaries
    rotate along the outermost Dimension, then pad the innermost write
    Dimension for vectorization.
    """
    if options['cire-rotate']:
        schedule = _optimize_schedule_rotations(schedule, sregistry)

    return _optimize_schedule_padding(cluster, schedule, platform)
def _optimize_schedule_rotations(schedule, sregistry):
    """
    Transform the schedule such that the tensor temporaries "rotate" along
    the outermost Dimension. This trades a parallel Dimension for a smaller
    working set size.
    """
    # The rotations Dimension is the outermost
    ridx = 0

    rmapper = defaultdict(list)
    processed = []
    for k, group in groupby(schedule, key=lambda i: i.writeto):
        g = list(group)

        candidate = k[ridx]
        d = candidate.dim
        try:
            ds = schedule.dmapper[d]
        except KeyError:
            # Can't do anything if `d` isn't an IncrDimension over a block
            processed.extend(g)
            continue

        n = candidate.min_size
        assert n > 0

        iis = candidate.lower
        iib = candidate.upper

        ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
        cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
                             symbolic_size=n)
        dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)

        mapper = OrderedDict()
        for i in g:
            # Update `indicess` to use `xs0`, `xs1`, ...
            mds = []
            for indices in i.indicess:
                v = indices[ridx]
                try:
                    md = mapper[v]
                except KeyError:
                    name = sregistry.make_name(prefix='%sr' % d.name)
                    md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
                mds.append(md)
            indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
                        for md, indices in zip(mds, i.indicess)]

            # Update `writeto` by switching `d` to `dsi`
            intervals = k.intervals.switch(d, dsi).zero(dsi)
            sub_iterators = dict(k.sub_iterators)
            sub_iterators[d] = dsi
            writeto = IterationSpace(intervals, sub_iterators)

            # Transform `alias` by adding `i`
            alias = i.alias.xreplace({d: d + cd})

            # Extend `ispace` to iterate over rotations
            d1 = writeto[ridx+1].dim  # Note: we're by construction in-bounds here
            intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
            rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
            aispace = i.ispace.zero(d)
            aispace = aispace.augment({d: mds + [ii]})
            ispace = IterationSpace.union(rispace, aispace)

            processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))

        # Update the rotations mapper
        rmapper[d].extend(list(mapper.values()))

    return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
    """
    Round up the innermost IterationInterval of the tensor temporaries IterationSpace
    to a multiple of the SIMD vector length. This is not always possible though (it
    depends on how much halo is safely accessible in all read Functions).
    """
    processed = []
    for i in schedule:
        try:
            it = i.ispace.itintervals[-1]
            if ROUNDABLE in cluster.properties[it.dim]:
                vl = platform.simd_items_per_reg(cluster.dtype)
                ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
            else:
                ispace = i.ispace
            processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
                                            i.indicess))
        except (TypeError, KeyError):
            # Padding couldn't be computed (e.g., symbolic interval size) --
            # leave the ScheduledAlias untouched
            processed.append(i)

    return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
    """
    Turn a Schedule into a sequence of Clusters.

    Returns a 2-tuple: the Clusters computing the temporaries, and the
    substitution rules mapping each aliased expression to a temporary access.
    """
    ftemps = options['cire-ftemps']

    if ftemps:
        make = TempFunction
    else:
        # Typical case -- the user does *not* "see" the CIRE-created temporaries
        make = Array

    clusters = []
    subs = {}
    for alias, writeto, ispace, aliaseds, indicess in schedule:
        # Basic info to create the temporary that will hold the alias
        name = sregistry.make_name()
        dtype = cluster.dtype

        if writeto:
            # The Dimensions defining the shape of Array
            # Note: with SubDimensions, we may have the following situation:
            #
            # for zi = z_m + zi_ltkn; zi <= z_M - zi_rtkn; ...
            #   r[zi] = ...
            #
            # Instead of `r[zi - z_m - zi_ltkn]` we have just `r[zi]`, so we'll need
            # as much room as in `zi`'s parent to avoid going OOB
            # Aside from ugly generated code, the reason we do not rather shift the
            # indices is that it prevents future passes to transform the loop bounds
            # (e.g., MPI's comp/comm overlap does that)
            dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]

            # The halo must be set according to the size of writeto space
            halo = [(abs(i.lower), abs(i.upper)) for i in writeto]

            # The indices used to write into the Array
            indices = []
            for i in writeto:
                try:
                    # E.g., `xs`
                    sub_iterators = writeto.sub_iterators[i.dim]
                    assert len(sub_iterators) == 1
                    indices.append(sub_iterators[0])
                except KeyError:
                    # E.g., `z` -- a non-shifted Dimension
                    indices.append(i.dim - i.lower)

            obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
            expression = Eq(obj[indices], alias)

            callback = lambda idx: obj[idx]
        else:
            # Degenerate case: scalar expression
            assert writeto.size == 0

            obj = Symbol(name=name, dtype=dtype)
            expression = Eq(obj, alias)

            callback = lambda idx: obj

        # Create the substitution rules for the aliasing expressions
        subs.update({aliased: callback(indices)
                     for aliased, indices in zip(aliaseds, indicess)})

        # Construct the `alias` DataSpace
        accesses = detect_accesses(expression)
        parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
                 for k, v in accesses.items() if k}
        dspace = DataSpace(cluster.dspace.intervals, parts)

        # Drop or weaken parallelism if necessary
        properties = dict(cluster.properties)
        for d, v in cluster.properties.items():
            if any(i.is_Modulo for i in ispace.sub_iterators[d]):
                properties[d] = normalize_properties(v, {SEQUENTIAL})
            elif d not in writeto.dimensions:
                properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})

        # Finally, build the `alias` Cluster
        clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
                                        dspace=dspace, properties=properties))

    return clusters, subs
def pick_best(variants):
    """
    Use the variant score and heuristics to return the variant with the best
    trade-off between operation count reduction and working set increase.

    Each variant is expected to carry a `.score` 2-tuple
    <flop-reduction-score, working-set-score> and unpack to
    (schedule, exprs, score). Returns (schedule, exprs) of the winner.
    """
    best = variants.pop(0)
    for i in variants:
        best_flop_score, best_ws_score = best.score
        if best_flop_score == 0:
            # `best` reduces no operations, so any other variant is at least
            # as good
            best = i
            continue

        i_flop_score, i_ws_score = i.score
        if i_flop_score == 0:
            # `i` reduces no operations, so it can never beat `best`; also
            # prevents the division by zero in the `delta < 0` branch below
            continue

        # The current heuristic is fairly basic: the one with smaller working
        # set size increase wins, unless there's a massive reduction in operation
        # count in the other one
        delta = i_ws_score - best_ws_score
        if (delta > 0 and i_flop_score / best_flop_score > 100) or \
           (delta == 0 and i_flop_score > best_flop_score) or \
           (delta < 0 and best_flop_score / i_flop_score <= 100):
            best = i

    schedule, exprs, _ = best

    return schedule, exprs
def rebuild(cluster, exprs, subs, schedule):
    """
    Plug the optimized aliases into the input Cluster, producing a new
    Cluster with suitable IterationSpace and DataSpace.
    """
    # Replace the aliased sub-expressions with accesses to the temporaries
    processed = [uxreplace(e, subs) for e in exprs]

    # The IterationSpace gains the Dimensions introduced by the Schedule
    ispace = cluster.ispace.augment(schedule.dmapper).augment(schedule.rmapper)

    # Recompute the DataSpace from the rewritten expressions
    parts = {}
    for f, v in detect_accesses(processed).items():
        if f:
            parts[f] = IntervalGroup(build_intervals(v)).relaxed
    dspace = DataSpace(cluster.dspace.intervals, parts)

    return cluster.rebuild(exprs=processed, ispace=ispace, dspace=dspace)
# Utilities
class Candidate(object):

    """
    A potentially aliasing expression, together with the bases and offsets of
    its (regular) Indexed accesses.
    """

    def __init__(self, expr, ispace, indexeds, bases, offsets):
        self.expr = expr
        self.shifts = ispace.intervals
        self.indexeds = indexeds
        self.bases = bases
        self.offsets = offsets

    def __repr__(self):
        return "Candidate(expr=%s)" % self.expr

    def translated(self, other):
        """
        True if ``self`` is translated w.r.t. ``other``, False otherwise.

        Examples
        --------
        Two candidates are translated if their bases are the same and
        their offsets are pairwise translated.

        c := A[i,j] op A[i,j+1]     -> Toffsets = {i: [0,0], j: [0,1]}
        u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}

        Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
        """
        if len(self.Toffsets) != len(other.Toffsets):
            return False
        if len(self.bases) != len(other.bases):
            return False

        # Check the bases
        if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
            return False

        # Check the offsets
        for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
            if d0 is not d1:
                return False

            # Translated iff the distance is the same for all accesses
            distance = set(o0 - o1)
            if len(distance) != 1:
                return False

        return True

    @cached_property
    def Toffsets(self):
        # Offsets transposed: one (Dimension, vector-of-offsets) pair per Dimension
        return LabeledVector.transpose(*self.offsets)

    @cached_property
    def dimensions(self):
        # The Dimensions along which this Candidate carries a non-constant offset
        return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):

    """
    A collection of aliasing expressions (Candidates).
    """

    def __repr__(self):
        return "Group(%s)" % ", ".join([str(i) for i in self])

    def find_rotation_distance(self, d, interval):
        """
        The distance from the Group pivot of a rotation along Dimension ``d`` that
        can safely iterate over the ``interval``.

        Returns None if no legal rotation covers ``interval``.
        """
        assert d is interval.dim

        for rotation, distance in self._pivot_legal_rotations[d]:
            # Does `rotation` cover the `interval` ?
            if rotation.union(interval) != rotation:
                continue

            # Infer the `rotation`'s min_intervals from the pivot's
            min_interval = self._pivot_min_intervals[d].translate(-distance)

            # Does the `interval` actually cover the `rotation`'s `min_interval`?
            if interval.union(min_interval) == interval:
                return distance

        return None

    @cached_property
    def Toffsets(self):
        # Transposed offsets, access-by-access across all Candidates
        return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]

    @cached_property
    def diameter(self):
        """
        The size of the iteration space required to evaluate all aliasing expressions
        in this Group, along each Dimension.

        Raises ValueError if a diameter cannot be computed (symbolic and
        non-uniform offsets).
        """
        ret = defaultdict(int)
        for i in self.Toffsets:
            for d, v in i:
                try:
                    distance = int(max(v) - min(v))
                except TypeError:
                    # An entry in `v` has symbolic components, e.g. `x_m + 2`
                    if len(set(v)) == 1:
                        continue
                    else:
                        raise ValueError
                ret[d] = max(ret[d], distance)

        return ret

    @property
    def pivot(self):
        """
        A deterministically chosen Candidate for this Group.
        """
        return self[0]

    @property
    def dimensions(self):
        return self.pivot.dimensions

    @property
    def dimensions_translated(self):
        # Only the Dimensions along which the Group members actually differ
        return frozenset(d for d, v in self.diameter.items() if v > 0)

    @cached_property
    def _pivot_legal_rotations(self):
        """
        All legal rotations along each Dimension for the Group pivot.
        """
        ret = {}
        for d, (maxd, mini) in self._pivot_legal_shifts.items():
            # Rotation size = mini (min-increment) - maxd (max-decrement)
            v = mini - maxd

            # Build the table of all possible rotations
            m = make_rotations_table(d, v)

            distances = []
            for rotation in m:
                # Distance of the rotation `i` from `c`
                distance = maxd - rotation.lower
                assert distance == mini - rotation.upper
                distances.append(distance)

            ret[d] = list(zip(m, distances))

        return ret

    @cached_property
    def _pivot_min_intervals(self):
        """
        The minimum Interval along each Dimension such that by evaluating the
        pivot, all Candidates are evaluated too.
        """
        c = self.pivot

        ret = defaultdict(lambda: [np.inf, -np.inf])
        for i in self:
            distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
            distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]

            for d, v in distance:
                value = v.pop()
                ret[d][0] = min(ret[d][0], value)
                ret[d][1] = max(ret[d][1], value)

        ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}

        return ret

    @cached_property
    def _pivot_legal_shifts(self):
        """
        The max decrement and min increment along each Dimension such that the
        Group pivot does not go OOB.
        """
        c = self.pivot

        ret = defaultdict(lambda: (-np.inf, np.inf))
        for i, ofs in zip(c.indexeds, c.offsets):
            f = i.function

            for l in ofs.labels:
                # `f`'s cumulative halo size along `l`
                hsize = sum(f._size_halo[l])

                # Any `ofs`'s shift due to non-[0,0] iteration space
                lower, upper = c.shifts[l].offsets

                try:
                    # Assume `ofs[d]` is a number (typical case)
                    maxd = min(0, max(ret[l][0], -ofs[l] - lower))
                    mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))

                    ret[l] = (maxd, mini)
                except TypeError:
                    # E.g., `ofs[d] = x_m - x + 5` -- symbolic, no legal shift
                    ret[l] = (0, 0)

        return ret
# A group of aliasing expressions: the iteration Intervals they span, the
# aliased expressions themselves, and each one's distance from the basis alias
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')

# An alias lowered into concrete write-to and iteration spaces, plus the
# indices at which each aliased expression reads the temporary
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)

# A point in the CIRE search space: a candidate Schedule, the rewritten
# expressions, and a <flop-reduction, working-set> score
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):

    """
    An immutable sequence of ScheduledAlias, carrying the Dimension mappers
    produced while lowering (`dmapper`) and rotating (`rmapper`) the aliases.
    """

    def __new__(cls, *items, dmapper=None, rmapper=None):
        """Build a Schedule out of `items`, attaching the given mappers."""
        instance = tuple.__new__(cls, items)
        instance.dmapper = dmapper or {}
        instance.rmapper = rmapper or {}
        return instance
class AliasMapper(OrderedDict):

    """An ordered mapping from alias expressions to AliasedGroups."""

    def add(self, alias, intervals, aliaseds, distances):
        """Map `alias` to a new AliasedGroup built from the arguments."""
        # One distance vector per aliased expression
        assert len(aliaseds) == len(distances)
        self[alias] = AliasedGroup(intervals, aliaseds, distances)

    def update(self, aliases):
        """
        Merge another AliasMapper into `self`, extending the aliaseds and
        distances of any alias appearing in both.

        NOTE: this deliberately overrides `OrderedDict.update` with
        AliasedGroup-aware semantics. A ValueError propagates if the same
        alias describes two different iteration regions.
        """
        for k, v in aliases.items():
            try:
                v0 = self[k]
                # Not caught below -- incompatible regions are a hard error
                if v0.intervals != v.intervals:
                    raise ValueError
                v0.aliaseds.extend(v.aliaseds)
                v0.distances.extend(v.distances)
            except KeyError:
                # Previously unseen alias
                self[k] = v

    @property
    def aliaseds(self):
        """All the aliased expressions, across all AliasedGroups."""
        return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
    """
    All possible rotations of `range(v+1)`.
    """
    # Upper-triangular matrix of positive shifts ...
    upper = np.array([[max(col - row, 0) for col in range(v + 1)]
                      for row in range(v + 1)])
    # ... antisymmetrized and flipped to enumerate all rotations
    table = (upper - upper.T)[::-1, :]
    # Shift the table so that the middle rotation is at the top
    table = np.roll(table, int(-np.floor(v / 2)), axis=0)
    # Turn into a more compact representation as a list of Intervals
    return [Interval(d, min(row), max(row)) for row in table]
def cit(ispace0, ispace1):
    """
    The Common IterationIntervals of two IterationSpaces, i.e. the longest
    shared prefix of their `itintervals`.
    """
    common = []
    for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
        if it0 != it1:
            break
        common.append(it0)
    return tuple(common)
def maybe_coeff_key(grid, expr):
    """
    True if `expr` could be the coefficient of an FD derivative, False otherwise.
    """
    # Plain numbers are always legal coefficients
    if expr.is_Number:
        return True
    # Otherwise, look for an Indexed whose function does not span the whole grid
    grid_dims = set(grid.dimensions)
    return any(not grid_dims <= set(s.function.dimensions)
               for s in expr.free_symbols if s.is_Indexed)
def wset(exprs):
    """
    Extract the working set out of a set of equations.
    """
    functions = set()
    for e in as_tuple(exprs):
        for s in e.free_symbols:
            # Only AbstractFunctions contribute to the working set
            if s.function.is_AbstractFunction:
                functions.add(s.function)
    return functions
def potential_max_deriv_order(exprs):
    """
    The maximum FD derivative order in a list of expressions.
    """
    # Instead of looking for Derivative(...) nodes -- which users may bypass by
    # writing their own expansions "by hand" -- count the nesting depth of Adds:
    #   add(mul, mul, ...)                     -> first order derivative
    #   add(mul(add(mul, mul, ...), ...), ...) -> second order derivative
    #   ...
    def add_depth(e):
        if q_leaf(e):
            return 0
        return int(e.is_Add) + max([add_depth(a) for a in e.args], default=0)
    return max([add_depth(e) for e in exprs], default=0)
def search_potential_deriv(expr, n, c=0):
    """
    Retrieve the expressions at depth `n` that potentially stem from FD derivatives.
    """
    assert n >= c >= 0
    # Leaves and Pows cannot stem from a derivative expansion
    if q_leaf(expr) or expr.is_Pow:
        return []
    if expr.is_Mul:
        # A Mul at the target depth is a potential derivative term
        if c == n:
            return [expr]
        return flatten([search_potential_deriv(a, n, c + 1) for a in expr.args])
    # Any other node: descend without increasing the depth counter
    return flatten([search_potential_deriv(a, n, c) for a in expr.args])
| 35.173189 | 90 | 0.581848 | from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
if mode == 'invariants':
space = ('inv-basic', 'inv-compound')
elif mode in ('sops',):
space = (mode,)
else:
assert False, "Unknown CIRE mode `%s`" % mode
processed = []
for c in clusters:
# negligible and processing all of them would only increase compilation
# time and potentially make the generated code more chaotic
if not c.is_dense:
processed.append(c)
continue
# Some of the CIRE transformers need to look inside all scopes
# surrounding `c` to perform data dependencies analysis
context = Context(c).process(clusters)
# Applying CIRE may change `c` as well as creating one or more new Clusters
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed
def _cire(cluster, context, space, sregistry, options, platform):
# Construct the space of variants
variants = [modes[mode](sregistry, options).make_schedule(cluster, context)
for mode in space]
if not any(i.schedule for i in variants):
return [cluster]
# Pick the variant with the highest score, that is the variant with the best
# trade-off between operation count reduction and working set size increase
schedule, exprs = pick_best(variants)
# Schedule -> [Clusters]
schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
clusters.append(rebuild(cluster, exprs, subs, schedule))
return clusters
class Cire(object):
optname = None
mode = None
def __init__(self, sregistry, options):
self.sregistry = sregistry
self._opt_minstorage = options['min-storage']
self._opt_mincost = options['cire-mincost'][self.optname]
self._opt_maxpar = options['cire-maxpar']
self._opt_maxalias = options['cire-maxalias']
def make_schedule(self, cluster, context):
# Capture aliases within `exprs`
aliases = AliasMapper()
score = 0
exprs = cluster.exprs
ispace = cluster.ispace
for n in range(self._nrepeats(cluster)):
# Extract potentially aliasing expressions
mapper = self._extract(exprs, context, n)
# Search aliasing expressions
found = collect(mapper.extracted, ispace, self._opt_minstorage)
# Choose the aliasing expressions with a good flops/memory trade-off
exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
aliases.update(chosen)
score += pscore
# AliasMapper -> Schedule
schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)
# The actual score is a 2-tuple <flop-reduction-score, workin-set-score>
score = (score, len(aliases))
return SpacePoint(schedule, exprs, score)
def _make_symbol(self):
return Symbol(name=self.sregistry.make_name('dummy'))
def _nrepeats(self, cluster):
raise NotImplementedError
def _extract(self, exprs, context, n):
raise NotImplementedError
def _in_writeto(self, dim, cluster):
raise NotImplementedError
def _selector(self, e, naliases):
raise NotImplementedError
class CireInvariants(Cire):
optname = 'invariants'
def _nrepeats(self, cluster):
return 1
def _rule(self, e):
return (e.is_Function or
(e.is_Pow and e.exp.is_Number and e.exp < 1))
def _extract(self, exprs, context, n):
mapper = Uxmapper()
for prefix, clusters in context.items():
if not prefix:
continue
exclude = set().union(*[c.scope.writes for c in clusters])
exclude.add(prefix[-1].dim)
for e in exprs:
for i in search(e, self._rule, 'all', 'bfs_first_hit'):
if {a.function for a in i.free_symbols} & exclude:
continue
mapper.add(i, self._make_symbol)
return mapper
def _in_writeto(self, dim, cluster):
return PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if all(i.function.is_Symbol for i in e.free_symbols):
# E.g., `dt**(-2)`
mincost = self._opt_mincost['scalar']
else:
mincost = self._opt_mincost['tensor']
return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):
mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):
mode = 'inv-compound'
def _extract(self, exprs, context, n):
extracted = super()._extract(exprs, context, n).extracted
rule = lambda e: any(a in extracted for a in e.args)
mapper = Uxmapper()
for e in exprs:
for i in search(e, rule, 'all', 'dfs'):
if not i.is_commutative:
continue
key = lambda a: a in extracted
terms, others = split(i.args, key)
mapper.add(i, self._make_symbol, terms)
return mapper
class CireSOPS(Cire):
optname = 'sops'
mode = 'sops'
def _nrepeats(self, cluster):
# The `nrepeats` is calculated such that we analyze all potential derivatives
# in `cluster`
return potential_max_deriv_order(cluster.exprs)
def _extract(self, exprs, context, n):
# Forbid CIRE involving Dimension-independent dependencies, e.g.:
# r0 = ...
# u[x, y] = ... r0*a[x, y] ...
# NOTE: if one uses the DSL in a conventional way and sticks to the default
# compilation pipelines where CSE always happens after CIRE, then `exclude`
# will always be empty
exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}
mapper = Uxmapper()
for e in exprs:
for i in search_potential_deriv(e, n):
if i.free_symbols & exclude:
continue
key = lambda a: a.is_Add
terms, others = split(i.args, key)
if self._opt_maxalias:
# Treat `e` as an FD expression and pull out the derivative
# coefficient from `i`
# Note: typically derivative coefficients are numbers, but
# sometimes they could be provided in symbolic form through an
# arbitrary Function. In the latter case, we rely on the
# heuristic that such Function's basically never span the whole
if e.grid is not None and terms:
key = partial(maybe_coeff_key, e.grid)
others, more_terms = split(others, key)
terms += more_terms
mapper.add(i, self._make_symbol, terms)
return mapper
def _in_writeto(self, dim, cluster):
return self._opt_maxpar and PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if naliases <= 1:
return 0
else:
return estimate_cost(e, True)*naliases // self._opt_mincost
modes = {
CireInvariantsBasic.mode: CireInvariantsBasic,
CireInvariantsCompound.mode: CireInvariantsCompound,
CireSOPS.mode: CireSOPS
}
def collect(extracted, ispace, min_storage):
found = []
for expr in extracted:
assert not expr.is_Equality
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for e, ai in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, e - ai))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if not indexeds or len(bases) == len(indexeds):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
if not compare_ops(c.expr, u.expr):
continue
if not c.translated(u):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
# Note: Groups that cannot evaluate their diameter are dropped
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}
# For each Group, find a rotation that is compatible with a given MI
mapper = {}
for d, intervals in intervalss.items():
# Not all groups may access all dimensions
# Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
impacted = [g for g in groups if d in g.dimensions]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(distance is not None for distance in found.values()):
# `interval` is OK !
mapper[interval] = found
break
if len(mapper) == len(intervalss):
break
# Try again with fewer groups
# Heuristic: first try retaining the larger ones
smallest = len(min(groups, key=len))
fallback = groups
groups, remainder = split(groups, lambda g: len(g) > smallest)
if groups:
queue.append(remainder)
elif len(remainder) > 1:
# No luck with the heuristic, e.g. there are two groups
# and both have same `len`
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])
# Create the basis alias
offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
for v in c.offsets]
subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
for i, b, v in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
# All aliased expressions
aliaseds = [extracted[i.expr] for i in g]
# Distance of each aliased expression from the basis alias
distances = []
for i in g:
distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases
def choose(aliases, exprs, mapper, selector):
tot = 0
retained = AliasMapper()
# Pass 1: a set of aliasing expressions is retained only if its cost
# exceeds the mode's threshold
candidates = OrderedDict()
aliaseds = []
others = []
for e, v in aliases.items():
score = selector(e, len(v.aliaseds))
if score > 0:
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
if not candidates:
return exprs, retained, tot
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
templated = [uxreplace(e, mapper) for e in exprs]
owset = wset(others + templated)
for e, v in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
if score > 1 or \
score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset):
retained[e] = v
tot += score
if not retained:
return exprs, retained, tot
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
exprs = [uxreplace(e, mapper) for e in exprs]
return exprs, retained, tot
def lower_aliases(cluster, aliases, in_writeto, maxpar):
dmapper = {}
processed = []
for alias, v in aliases.items():
imapper = {**{i.dim: i for i in v.intervals},
**{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
intervals.append(i)
continue
assert i.stamp >= interval.stamp
if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
intervals.append(i)
continue
assert not i.dim.is_NonlinearDerived
# `i.dim` is necessarily part of the write-to region, so
# we have to adjust the Interval's stamp. For example, consider
interval = interval.lift(i.stamp)
interval = interval.lift(interval.stamp + int(maxpar))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
m = i.dim.symbolic_min - i.dim.parent.symbolic_min
else:
m = 0
d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
for distance, indices in zip(v.distances, indicess):
indices.append(d - interval.lower + distance[interval.dim])
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
# write-to region. Another fundamental reason for ordering is to ensure
# deterministic code generation
processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))
return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule
def _optimize_schedule_rotations(schedule, sregistry):
# The rotations Dimension is the outermost
ridx = 0
rmapper = defaultdict(list)
processed = []
for k, group in groupby(schedule, key=lambda i: i.writeto):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
# Can't do anything if `d` isn't an IncrDimension over a block
processed.extend(g)
continue
n = candidate.min_size
assert n > 0
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
symbolic_size=n)
dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
mapper = OrderedDict()
for i in g:
# Update `indicess` to use `xs0`, `xs1`, ...
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix='%sr' % d.name)
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
for md, indices in zip(mds, i.indicess)]
# Update `writeto` by switching `d` to `dsi`
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
# Transform `alias` by adding `i`
alias = i.alias.xreplace({d: d + cd})
# Extend `ispace` to iterate over rotations
d1 = writeto[ridx+1].dim # Note: we're by construction in-bounds here
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: mds + [ii]})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[-1]
if ROUNDABLE in cluster.properties[it.dim]:
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
make = Array
clusters = []
subs = {}
for alias, writeto, ispace, aliaseds, indicess in schedule:
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
# as much room as in `zi`'s parent to avoid going OOB
dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]
# The halo must be set according to the size of writeto space
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
# The indices used to write into the Array
indices = []
for i in writeto:
try:
# E.g., `xs`
sub_iterators = writeto.sub_iterators[i.dim]
assert len(sub_iterators) == 1
indices.append(sub_iterators[0])
except KeyError:
# E.g., `z` -- a non-shifted Dimension
indices.append(i.dim - i.lower)
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = lambda idx: obj[idx]
else:
# Degenerate case: scalar expression
assert writeto.size == 0
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = lambda idx: obj
# Create the substitution rules for the aliasing expressions
subs.update({aliased: callback(indices)
for aliased, indices in zip(aliaseds, indicess)})
# Construct the `alias` DataSpace
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
# Drop or weaken parallelism if necessary
properties = dict(cluster.properties)
for d, v in cluster.properties.items():
if any(i.is_Modulo for i in ispace.sub_iterators[d]):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif d not in writeto.dimensions:
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
# Finally, build the `alias` Cluster
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
dspace=dspace, properties=properties))
return clusters, subs
def pick_best(variants):
    """
    Return the (schedule, exprs) of the variant with the best trade-off
    between operation count reduction (flop score) and working set size
    increase (ws score).
    """
    best = variants.pop(0)
    for candidate in variants:
        best_flops, best_ws = best.score
        # A zero flop score carries no reduction at all: anything beats it
        if best_flops == 0:
            best = candidate
            continue
        flops, ws = candidate.score
        # Heuristic: the variant with the smaller working-set increase wins,
        # unless the other one brings a massive (> 100x) flop reduction
        delta = ws - best_ws
        if (delta > 0 and flops / best_flops > 100) or \
           (delta == 0 and flops > best_flops) or \
           (delta < 0 and best_flops / flops <= 100):
            best = candidate
    schedule, exprs, _ = best
    return schedule, exprs
def rebuild(cluster, exprs, subs, schedule):
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace)
class Candidate(object):
def __init__(self, expr, ispace, indexeds, bases, offsets):
self.expr = expr
self.shifts = ispace.intervals
self.indexeds = indexeds
self.bases = bases
self.offsets = offsets
def __repr__(self):
return "Candidate(expr=%s)" % self.expr
def translated(self, other):
if len(self.Toffsets) != len(other.Toffsets):
return False
if len(self.bases) != len(other.bases):
return False
if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
return False
for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
if d0 is not d1:
return False
distance = set(o0 - o1)
if len(distance) != 1:
return False
return True
@cached_property
def Toffsets(self):
return LabeledVector.transpose(*self.offsets)
@cached_property
def dimensions(self):
return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):
def __repr__(self):
return "Group(%s)" % ", ".join([str(i) for i in self])
def find_rotation_distance(self, d, interval):
assert d is interval.dim
for rotation, distance in self._pivot_legal_rotations[d]:
if rotation.union(interval) != rotation:
continue
min_interval = self._pivot_min_intervals[d].translate(-distance)
if interval.union(min_interval) == interval:
return distance
return None
@cached_property
def Toffsets(self):
return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]
@cached_property
def diameter(self):
ret = defaultdict(int)
for i in self.Toffsets:
for d, v in i:
try:
distance = int(max(v) - min(v))
except TypeError:
# An entry in `v` has symbolic components, e.g. `x_m + 2`
if len(set(v)) == 1:
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret
@property
def pivot(self):
return self[0]
@property
def dimensions(self):
return self.pivot.dimensions
@property
def dimensions_translated(self):
return frozenset(d for d, v in self.diameter.items() if v > 0)
@cached_property
def _pivot_legal_rotations(self):
ret = {}
for d, (maxd, mini) in self._pivot_legal_shifts.items():
# Rotation size = mini (min-increment) - maxd (max-decrement)
v = mini - maxd
# Build the table of all possible rotations
m = make_rotations_table(d, v)
distances = []
for rotation in m:
# Distance of the rotation `i` from `c`
distance = maxd - rotation.lower
assert distance == mini - rotation.upper
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret
@cached_property
def _pivot_min_intervals(self):
c = self.pivot
ret = defaultdict(lambda: [np.inf, -np.inf])
for i in self:
distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
for d, v in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}
return ret
@cached_property
def _pivot_legal_shifts(self):
c = self.pivot
ret = defaultdict(lambda: (-np.inf, np.inf))
for i, ofs in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
# `f`'s cumulative halo size along `l`
hsize = sum(f._size_halo[l])
lower, upper = c.shifts[l].offsets
try:
# Assume `ofs[d]` is a number (typical case)
maxd = min(0, max(ret[l][0], -ofs[l] - lower))
mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))
ret[l] = (maxd, mini)
except TypeError:
# E.g., `ofs[d] = x_m - x + 5`
ret[l] = (0, 0)
return ret
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):
def __new__(cls, *items, dmapper=None, rmapper=None):
obj = super(Schedule, cls).__new__(cls, items)
obj.dmapper = dmapper or {}
obj.rmapper = rmapper or {}
return obj
class AliasMapper(OrderedDict):
def add(self, alias, intervals, aliaseds, distances):
assert len(aliaseds) == len(distances)
self[alias] = AliasedGroup(intervals, aliaseds, distances)
def update(self, aliases):
for k, v in aliases.items():
try:
v0 = self[k]
if v0.intervals != v.intervals:
raise ValueError
v0.aliaseds.extend(v.aliaseds)
v0.distances.extend(v.distances)
except KeyError:
self[k] = v
@property
def aliaseds(self):
return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
m = np.array([[j-i if j > i else 0 for j in range(v+1)] for i in range(v+1)])
m = (m - m.T)[::-1, :]
# Shift the table so that the middle rotation is at the top
m = np.roll(m, int(-np.floor(v/2)), axis=0)
# Turn into a more compact representation as a list of Intervals
m = [Interval(d, min(i), max(i)) for i in m]
return m
def cit(ispace0, ispace1):
found = []
for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
if it0 == it1:
found.append(it0)
else:
break
return tuple(found)
def maybe_coeff_key(grid, expr):
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(not set(grid.dimensions) <= set(i.function.dimensions) for i in indexeds)
def wset(exprs):
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)])
if i.function.is_AbstractFunction}
def potential_max_deriv_order(exprs):
# NOTE: e might propagate the Derivative(...) information down from the
# symbolic language, but users may do crazy things and write their own custom
# expansions "by hand" (i.e., not resorting to Derivative(...)), hence instead
# of looking for Derivative(...) we use the following heuristic:
# add(mul, mul, ...) -> stems from first order derivative
# add(mul(add(mul, mul, ...), ...), ...) -> stems from second order derivative
# ...
nadds = lambda e: (int(e.is_Add) +
max([nadds(a) for a in e.args], default=0) if not q_leaf(e) else 0)
return max([nadds(e) for e in exprs], default=0)
def search_potential_deriv(expr, n, c=0):
assert n >= c >= 0
if q_leaf(expr) or expr.is_Pow:
return []
elif expr.is_Mul:
if c == n:
return [expr]
else:
return flatten([search_potential_deriv(a, n, c+1) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args])
| true | true |
7901a174287c6ae84ab3d0881bd2c6713655d9cf | 1,879 | py | Python | tests/incident/test_get.py | asyncee/pycamunda | f4834d224ff99fcf80874efeaedf68a8a2efa926 | [
"MIT"
] | null | null | null | tests/incident/test_get.py | asyncee/pycamunda | f4834d224ff99fcf80874efeaedf68a8a2efa926 | [
"MIT"
] | null | null | null | tests/incident/test_get.py | asyncee/pycamunda | f4834d224ff99fcf80874efeaedf68a8a2efa926 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_get_params(engine_url):
    # The incident id must be appended to the endpoint URL, and Get must send
    # neither query nor body parameters
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    assert get_incident.url == engine_url + '/incident/anId'
    assert get_incident.query_parameters() == {}
    assert get_incident.body_parameters() == {}
@unittest.mock.patch('pycamunda.incident.Incident.load', unittest.mock.MagicMock())
@unittest.mock.patch('requests.Session.request')
def test_get_calls_requests(mock, engine_url):
    # Executing Get must issue exactly one HTTP request through
    # requests.Session.request, using the GET method
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    get_incident()
    assert mock.called
    assert mock.call_args[1]['method'].upper() == 'GET'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_get_raises_pycamunda_exception(engine_url):
    # A requests-level failure must surface as a PyCamundaException
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    with pytest.raises(pycamunda.PyCamundaException):
        get_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.incident.Incident', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_get_raises_for_status(mock, engine_url):
    # A non-ok HTTP response must be routed through pycamunda.base._raise_for_status
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    get_incident()
    assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base.from_isoformat', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.incident.IncidentType', unittest.mock.MagicMock())
def test_get_returns_incident(engine_url):
    # A successful call must deserialize the HTTP response into an Incident
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    incident = get_incident()
    assert isinstance(incident, pycamunda.incident.Incident)
| 34.796296 | 83 | 0.77009 |
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_get_params(engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
assert get_incident.url == engine_url + '/incident/anId'
assert get_incident.query_parameters() == {}
assert get_incident.body_parameters() == {}
@unittest.mock.patch('pycamunda.incident.Incident.load', unittest.mock.MagicMock())
@unittest.mock.patch('requests.Session.request')
def test_get_calls_requests(mock, engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
get_incident()
assert mock.called
assert mock.call_args[1]['method'].upper() == 'GET'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_get_raises_pycamunda_exception(engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
with pytest.raises(pycamunda.PyCamundaException):
get_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.incident.Incident', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_get_raises_for_status(mock, engine_url):
get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
get_incident()
assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base.from_isoformat', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.incident.IncidentType', unittest.mock.MagicMock())
def test_get_returns_incident(engine_url):
    """With HTTP and parsing mocked out, Get() returns an Incident instance."""
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    incident = get_incident()
    assert isinstance(incident, pycamunda.incident.Incident)
| true | true |
7901a26f2959e2b1afa84e883181b3ce059e4fa4 | 25,608 | py | Python | lib/googlecloudsdk/api_lib/compute/containers_utils.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/compute/containers_utils.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/api_lib/compute/containers_utils.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating GCE container (Docker) deployments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import re
import enum
from googlecloudsdk.api_lib.compute import exceptions
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
import six
USER_INIT_TEMPLATE = """#cloud-config
runcmd:
- ['/usr/bin/kubelet',
'--allow-privileged=%s',
'--manifest-url=http://metadata.google.internal/computeMetadata/v1/instance/attributes/google-container-manifest',
'--manifest-url-header=Metadata-Flavor:Google',
'--config=/etc/kubernetes/manifests']
"""
MANIFEST_DISCLAIMER = """# DISCLAIMER:
# This container declaration format is not a public API and may change without
# notice. Please use gcloud command-line tool or Google Cloud Console to run
# Containers on Google Compute Engine.
"""
USER_DATA_KEY = 'user-data'
CONTAINER_MANIFEST_KEY = 'google-container-manifest'
GCE_CONTAINER_DECLARATION = 'gce-container-declaration'
STACKDRIVER_LOGGING_AGENT_CONFIGURATION = 'google-logging-enabled'
GKE_DOCKER = 'gci-ensure-gke-docker'
ALLOWED_PROTOCOLS = ['TCP', 'UDP']
# Prefix of all COS image major release names
COS_MAJOR_RELEASE_PREFIX = 'cos-stable-'
# Pin this version of gcloud to COS image major release version
COS_MAJOR_RELEASE = COS_MAJOR_RELEASE_PREFIX + '55'
COS_PROJECT = 'cos-cloud'
_MIN_PREFERRED_COS_VERSION = 63
# Translation from CLI to API wording
RESTART_POLICY_API = {
'never': 'Never',
'on-failure': 'OnFailure',
'always': 'Always'
}
class MountVolumeMode(enum.Enum):
  """Whether a volume is mounted into the container read-only or read-write."""

  # FIX: the original members were declared with trailing commas
  # (`READ_ONLY = 1,`), which made each member's value a one-element tuple
  # (1,)/(2,) instead of an int. Member identity and comparisons were
  # unaffected, but `.value` was surprising; the stray commas are removed.
  READ_ONLY = 1
  READ_WRITE = 2

  def isReadOnly(self):
    """Returns True if this mode denotes a read-only mount."""
    return self == MountVolumeMode.READ_ONLY
_DEFAULT_MODE = MountVolumeMode.READ_WRITE
def _GetUserInit(allow_privileged):
  """Builds the user-init metadata value for a COS image.

  Args:
    allow_privileged: bool, whether kubelet runs with --allow-privileged=true.

  Returns:
    str, cloud-config content for the user-data metadata key.
  """
  privileged_flag = 'true' if allow_privileged else 'false'
  return USER_INIT_TEMPLATE % privileged_flag
class Error(exceptions.Error):
  """Base exception for all container-deployment errors in this module."""
class InvalidMetadataKeyException(Error):
  """Raised when user metadata uses a key reserved for container VMs."""

  def __init__(self, metadata_key):
    message = ('Metadata key "{0}" is not allowed when running containerized '
               'VM.'.format(metadata_key))
    super(InvalidMetadataKeyException, self).__init__(message)
class NoGceContainerDeclarationMetadataKey(Error):
  """Raised on attempt to update-container on instance without containers."""

  def __init__(self):
    message = ("Instance doesn't have {} metadata key - it is not a "
               'container.'.format(GCE_CONTAINER_DECLARATION))
    super(NoGceContainerDeclarationMetadataKey, self).__init__(message)
def ValidateUserMetadata(metadata):
  """Rejects user metadata keys that conflict with container deployment.

  Args:
    metadata: user-specified VM metadata message.

  Raises:
    InvalidMetadataKeyException: if any entry uses a reserved key.
  """
  reserved_keys = (USER_DATA_KEY, CONTAINER_MANIFEST_KEY, GKE_DOCKER)
  for entry in metadata.items:
    if entry.key in reserved_keys:
      raise InvalidMetadataKeyException(entry.key)
def CreateTagsMessage(messages, tags):
  """Builds a Tags message for a container VM or VM template.

  Args:
    messages: compute API messages module.
    tags: list of tag strings, possibly empty or None.

  Returns:
    messages.Tags when tags were supplied, otherwise None (field absent).
  """
  if not tags:
    return None
  return messages.Tags(items=tags)
def GetLabelsMessageWithCosVersion(
    labels, image_uri, resources, resource_class):
  """Builds a LabelsValue message that includes the COS version label.

  The COS version is derived from image_uri and stored under the
  'container-vm' key (mutating the passed dict) next to any user labels.

  Args:
    labels: dict or None, labels to assign to the resource; mutated in place.
    image_uri: URI of the image the resource is based on.
    resources: registry able to parse image_uri.
    resource_class: class exposing LabelsValue and
      LabelsValue.AdditionalProperty.

  Returns:
    resource_class.LabelsValue with all labels sorted by key.
  """
  cos_version = resources.Parse(
      image_uri, collection='compute.images').Name().replace('/', '-')
  if labels is None:
    labels = {}
  labels['container-vm'] = cos_version
  sorted_labels = sorted(six.iteritems(labels))
  properties = [
      resource_class.LabelsValue.AdditionalProperty(key=key, value=value)
      for key, value in sorted_labels
  ]
  return resource_class.LabelsValue(additionalProperties=properties)
class NoCosImageException(Error):
  """Raised when no COS image for the pinned release family is found."""

  def __init__(self):
    message = ('Could not find COS (Cloud OS) for release family '
               "'{0}'".format(COS_MAJOR_RELEASE))
    super(NoCosImageException, self).__init__(message)
def ExpandCosImageFlag(compute_client):
  """Selects the newest COS image in the pinned release family to run Docker.

  Args:
    compute_client: ClientAdapter, the Compute API client adapter.

  Returns:
    str, selfLink of the chosen image.
  """
  list_request = compute_client.messages.ComputeImagesListRequest(
      project=COS_PROJECT)
  images = compute_client.MakeRequests(
      [(compute_client.apitools_client.images, 'List', list_request)])
  return _SelectNewestCosImage(images)
def _SelectNewestCosImage(images):
  """Returns the selfLink of the newest image in the pinned COS release.

  Raises:
    NoCosImageException: if no image belongs to COS_MAJOR_RELEASE.
  """
  in_release = [
      image for image in images
      if image.name.startswith(COS_MAJOR_RELEASE)
  ]
  if not in_release:
    raise NoCosImageException()
  in_release.sort(key=lambda img: times.ParseDateTime(img.creationTimestamp))
  return in_release[-1].selfLink
def _ValidateAndParsePortMapping(port_mappings):
  """Parses PORT:TARGET_PORT:PROTOCOL strings into container port configs.

  Args:
    port_mappings: list of str values of the --port-mappings flag.

  Returns:
    list of dicts with containerPort/hostPort/protocol keys.

  Raises:
    calliope_exceptions.InvalidArgumentException: on malformed mapping or
      unsupported protocol.
  """
  mapping_pattern = re.compile(r'^(\d+):(\d+):(\S+)$')
  ports_config = []
  for port_mapping in port_mappings:
    match = mapping_pattern.match(port_mapping)
    if not match:
      raise calliope_exceptions.InvalidArgumentException(
          '--port-mappings',
          'Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.')
    port, target_port, protocol = match.groups()
    if protocol not in ALLOWED_PROTOCOLS:
      raise calliope_exceptions.InvalidArgumentException(
          '--port-mappings',
          'Protocol should be one of [{0}]'.format(
              ', '.join(ALLOWED_PROTOCOLS)))
    ports_config.append({
        'containerPort': int(target_port),
        'hostPort': int(port),
        'protocol': protocol
    })
  return ports_config
def ExpandKonletCosImageFlag(compute_client):
  """Select a COS image to run Konlet.

  This function scans three families in order:
  - stable
  - beta
  - dev
  looking for the first image with version at least _MIN_PREFERRED_COS_VERSION.

  Args:
    compute_client: ClientAdapter, The Compute API client adapter

  Returns:
    COS image at version _MIN_PREFERRED_COS_VERSION or later.

  Raises:
    NoCosImageException: No COS image at version at least
    _MIN_PREFERRED_COS_VERSION was found. This should not happen if backend is
    healthy.
  """
  compute = compute_client.apitools_client
  images = compute_client.MakeRequests(
      [(compute.images,
        'List',
        compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))])
  # The first numeric group in an image name is its major version
  # (e.g. 'cos-stable-63-...' -> 63).
  name_re_template = r'cos-{}-(\d+)-.*'
  image_families = ['stable', 'beta', 'dev']
  for family in image_families:
    name_re = name_re_template.format(family)
    # The factory binds name_re eagerly, avoiding the late-binding closure
    # pitfall inside this loop; the key orders by (major version, timestamp).
    def MakeCreateComparisonKey(name_re):
      def CreateComparisonKey(image):
        version = int(re.match(name_re, image.name).group(1))
        timestamp = times.ParseDateTime(image.creationTimestamp)
        return version, timestamp
      return CreateComparisonKey
    cos_images = sorted(
        [image for image in images if re.match(name_re, image.name)],
        key=MakeCreateComparisonKey(name_re))
    # Use this family's newest image only if its major version meets the
    # minimum Konlet supports; otherwise fall through to the next family.
    if (cos_images and MakeCreateComparisonKey(name_re)(cos_images[-1])[0] >=
        _MIN_PREFERRED_COS_VERSION):
      return cos_images[-1].selfLink
  raise NoCosImageException()
def _ReadDictionary(filename):
  # pylint:disable=line-too-long
  r"""Read environment variable from file.

  File format:
  It is intended (but not guaranteed) to follow standard docker format
  [](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file)
  but without capturing environment variables from host machine.
  Lines starting by "#" character are comments.
  Empty lines are ignored.
  Below grammar production follow in EBNF format.

  file = (whitespace* statement '\n')*
  statement = comment
            | definition
  whitespace = ' '
             | '\t'
  comment = '#' [^\n]*
  definition = [^#=\n] [^= \t\n]* '=' [^\n]*

  Args:
    filename: str, name of the file to read; falsy values yield an empty dict.

  Returns:
    A dictionary mapping environment variable names to their values.

  Raises:
    calliope_exceptions.BadFileException: on a line with no '=' or with
      whitespace in the variable name.
  """
  env_vars = {}
  if not filename:
    return env_vars
  with files.FileReader(filename) as f:
    for i, line in enumerate(f):
      # Strip whitespace at the beginning and end of line
      line = line.strip()
      # Ignore comments and empty lines
      # NOTE(review): `len(line) <= 1` also silently skips any
      # single-character non-comment line (e.g. "x") instead of reporting a
      # syntax error -- confirm this is intended.
      if len(line) <= 1 or line[0] == '#':
        continue
      # Find first left '=' character
      assignment_op_loc = line.find('=')
      if assignment_op_loc == -1:
        # NOTE(review): `i` from enumerate is 0-based, so reported line
        # numbers are off by one from editor line numbers.
        raise calliope_exceptions.BadFileException(
            'Syntax error in {}:{}: Expected VAR=VAL, got {}'.format(
                filename, i, line))
      env = line[:assignment_op_loc]
      val = line[assignment_op_loc+1:]
      if ' ' in env or '\t' in env:
        raise calliope_exceptions.BadFileException(
            'Syntax error in {}:{} Variable name cannot contain whitespaces,'
            ' got "{}"'.format(filename, i, env))
      env_vars[env] = val
  return env_vars
def _GetHostPathDiskName(idx):
  """Returns the generated volume name for the idx-th hostPath mount."""
  return 'host-path-' + str(idx)
def _GetTmpfsDiskName(idx):
  """Returns the generated volume name for the idx-th tmpfs mount."""
  return 'tmpfs-' + str(idx)
def _GetPersistentDiskName(idx):
  """Returns the generated volume name for the idx-th persistent disk."""
  return 'pd-' + str(idx)
def _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
                               used_names=None, disks=None):
  """Add volume specs from --container-mount-disk.

  Args:
    container_mount_disk: list of dicts from --container-mount-disk; each may
      carry 'name', 'partition', 'mount-path' and 'mode' keys.
    volumes: list of volume specs in the manifest; extended in place.
    volume_mounts: list of volumeMounts in the manifest; extended in place.
    used_names: optional list of volume names already present; extended in
      place as new names are generated.
    disks: optional list of dicts describing disks attached to the instance
      ('name' and optionally 'device-name').

  Raises:
    calliope_exceptions.InvalidArgumentException: when the disk to mount is
      ambiguous or its device-name differs from the disk name.
  """
  used_names = used_names or []
  disks = disks or []

  # Hoisted out of the loop (it does not depend on loop state): finds an
  # existing gcePersistentDisk volume spec for the same disk/partition so two
  # mounts of one disk share a single volume entry.
  def _GetMatchingVolume(device_name, partition):
    for volume_spec in volumes:
      pd = volume_spec.get('gcePersistentDisk', {})
      if (pd.get('pdName') == device_name
          and pd.get('partition') == partition):
        return volume_spec

  idx = 0
  for mount_disk in container_mount_disk:
    while _GetPersistentDiskName(idx) in used_names:
      idx += 1
    device_name = mount_disk.get('name')
    partition = mount_disk.get('partition')
    repeated = _GetMatchingVolume(device_name, partition)
    if repeated:
      name = repeated['name']
    else:
      name = _GetPersistentDiskName(idx)
      used_names.append(name)
    if not device_name:
      # This should not be needed - any command that accepts container mount
      # disks should validate that there is only one disk before calling this
      # function.
      if len(disks) != 1:
        raise calliope_exceptions.InvalidArgumentException(
            '--container-mount-disk',
            'Must specify the name of the disk to be mounted unless exactly '
            'one disk is attached to the instance.')
      device_name = disks[0].get('name')
      if disks[0].get('device-name', device_name) != device_name:
        # FIX: previously raised exceptions.InvalidArgumentException, but the
        # compute api_lib exceptions module does not define that class (the
        # sibling raise above uses calliope_exceptions), so this error path
        # would have crashed with AttributeError instead of reporting nicely.
        raise calliope_exceptions.InvalidArgumentException(
            '--container-mount-disk',
            'Must not have a device-name that is different from disk name if '
            'disk is being attached to the instance and mounted to a container:'
            ' [{}]'.format(disks[0].get('device-name')))
    volume_mounts.append({
        'name': name,
        'mountPath': mount_disk['mount-path'],
        'readOnly': mount_disk.get('mode', _DEFAULT_MODE).isReadOnly()})
    if repeated:
      continue
    volume_spec = {
        'name': name,
        'gcePersistentDisk': {
            'pdName': device_name,
            'fsType': 'ext4'}}
    if partition:
      volume_spec['gcePersistentDisk'].update({'partition': partition})
    volumes.append(volume_spec)
    idx += 1
def _CreateContainerManifest(args, instance_name,
                             container_mount_disk_enabled=False,
                             container_mount_disk=None):
  """Create container manifest from argument namespace and instance name.

  Args:
    args: argparse namespace carrying the container-* flags.
    instance_name: str, VM instance name; also used as the container name.
    container_mount_disk_enabled: bool, whether --container-mount-disk is
      supported by the calling surface.
    container_mount_disk: parsed --container-mount-disk values, if any.

  Returns:
    dict, the Konlet single-container manifest.
  """
  container = {'image': args.container_image, 'name': instance_name}
  if args.container_command is not None:
    container['command'] = [args.container_command]
  if args.container_arg is not None:
    container['args'] = args.container_arg
  container['stdin'] = args.container_stdin
  container['tty'] = args.container_tty
  container['securityContext'] = {'privileged': args.container_privileged}
  # --container-env entries override variables loaded from
  # --container-env-file.
  env_vars = _ReadDictionary(args.container_env_file)
  for env_var_dict in args.container_env or []:
    for env, val in six.iteritems(env_var_dict):
      env_vars[env] = val
  if env_vars:
    container['env'] = [{
        'name': env,
        'value': val
    } for env, val in six.iteritems(env_vars)]
  volumes = []
  volume_mounts = []
  # Host directory mounts: one hostPath volume + volumeMount per flag value.
  for idx, volume in enumerate(args.container_mount_host_path or []):
    volumes.append({
        'name': _GetHostPathDiskName(idx),
        'hostPath': {
            'path': volume['host-path']
        },
    })
    volume_mounts.append({
        'name': _GetHostPathDiskName(idx),
        'mountPath': volume['mount-path'],
        'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()
    })
  # tmpfs mounts are modeled as memory-backed emptyDir volumes.
  for idx, tmpfs in enumerate(args.container_mount_tmpfs or []):
    volumes.append(
        {'name': _GetTmpfsDiskName(idx), 'emptyDir': {'medium': 'Memory'}})
    volume_mounts.append(
        {'name': _GetTmpfsDiskName(idx), 'mountPath': tmpfs['mount-path']})
  if container_mount_disk_enabled:
    container_mount_disk = container_mount_disk or []
    disks = (args.disk or []) + (args.create_disk or [])
    _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
                               disks=disks)
  container['volumeMounts'] = volume_mounts
  manifest = {
      'spec': {
          'containers': [container],
          'volumes': volumes,
          'restartPolicy': RESTART_POLICY_API[args.container_restart_policy]
      }
  }
  return manifest
def DumpYaml(data):
  """Serializes data to YAML prefixed with the Konlet format disclaimer."""
  return '{}{}'.format(MANIFEST_DISCLAIMER, yaml.dump(data))
def _CreateYamlContainerManifest(args, instance_name,
                                 container_mount_disk_enabled=False,
                                 container_mount_disk=None):
  """Builds the container manifest and serializes it to Konlet YAML."""
  manifest = _CreateContainerManifest(
      args,
      instance_name,
      container_mount_disk_enabled=container_mount_disk_enabled,
      container_mount_disk=container_mount_disk)
  return DumpYaml(manifest)
def CreateKonletMetadataMessage(messages, args, instance_name, user_metadata,
                                container_mount_disk_enabled=False,
                                container_mount_disk=None):
  """Builds the VM metadata message carrying the Konlet container declaration."""
  manifest_yaml = _CreateYamlContainerManifest(
      args, instance_name,
      container_mount_disk_enabled=container_mount_disk_enabled,
      container_mount_disk=container_mount_disk)
  konlet_metadata = {
      GCE_CONTAINER_DECLARATION: manifest_yaml,
      # Since COS 69, having logs for Container-VMs written requires enabling
      # Stackdriver Logging agent.
      STACKDRIVER_LOGGING_AGENT_CONFIGURATION: 'true',
  }
  return metadata_utils.ConstructMetadataMessage(
      messages, metadata=konlet_metadata, existing_metadata=user_metadata)
def UpdateInstance(holder, client, instance_ref, instance, args,
                   container_mount_disk_enabled=False,
                   container_mount_disk=None):
  """Update an instance and its container metadata.

  Rewrites the gce-container-declaration metadata entry according to the
  update-container flags, pushes the metadata to the API, then cycles the VM
  so Konlet picks up the change (TERMINATED VMs are left as-is, SUSPENDED
  VMs are stopped).

  Args:
    holder: ComputeApiHolder with the resources registry.
    client: compute client adapter.
    instance_ref: resource reference of the instance.
    instance: Instance message fetched from the API.
    args: argparse namespace with the update-container flags.
    container_mount_disk_enabled: bool, whether --container-mount-disk is
      supported by the calling surface.
    container_mount_disk: parsed --container-mount-disk values, if any.

  Raises:
    NoGceContainerDeclarationMetadataKey: if the instance has no
      gce-container-declaration metadata, i.e. it is not a container VM.
  """
  # find gce-container-declaration metadata entry
  for metadata in instance.metadata.items:
    if metadata.key == GCE_CONTAINER_DECLARATION:
      UpdateMetadata(
          holder, metadata, args, instance,
          container_mount_disk_enabled=container_mount_disk_enabled,
          container_mount_disk=container_mount_disk)

      # update Google Compute Engine resource
      operation = client.apitools_client.instances.SetMetadata(
          client.messages.ComputeInstancesSetMetadataRequest(
              metadata=instance.metadata, **instance_ref.AsDict()))
      operation_ref = holder.resources.Parse(
          operation.selfLink, collection='compute.zoneOperations')
      operation_poller = poller.Poller(client.apitools_client.instances)
      set_metadata_waiter = waiter.WaitFor(
          operation_poller, operation_ref,
          'Updating specification of container [{0}]'.format(
              instance_ref.Name()))

      # TERMINATED: metadata update alone suffices. SUSPENDED: stop so the
      # next manual start applies the change. Otherwise restart the VM.
      if (instance.status ==
          client.messages.Instance.StatusValueValuesEnum.TERMINATED):
        return set_metadata_waiter
      elif (instance.status ==
            client.messages.Instance.StatusValueValuesEnum.SUSPENDED):
        return _StopVm(holder, client, instance_ref)
      else:
        _StopVm(holder, client, instance_ref)
        return _StartVm(holder, client, instance_ref)

  # Reached only when the loop found no gce-container-declaration entry.
  raise NoGceContainerDeclarationMetadataKey()
def _StopVm(holder, client, instance_ref):
  """Stops the VM and blocks until the stop operation completes."""
  stop_request = client.messages.ComputeInstancesStopRequest(
      **instance_ref.AsDict())
  operation = client.apitools_client.instances.Stop(stop_request)
  operation_ref = holder.resources.Parse(
      operation.selfLink, collection='compute.zoneOperations')
  return waiter.WaitFor(
      poller.Poller(client.apitools_client.instances), operation_ref,
      'Stopping instance [{0}]'.format(instance_ref.Name()))
def _StartVm(holder, client, instance_ref):
  """Starts the VM and blocks until the start operation completes."""
  start_request = client.messages.ComputeInstancesStartRequest(
      **instance_ref.AsDict())
  operation = client.apitools_client.instances.Start(start_request)
  operation_ref = holder.resources.Parse(
      operation.selfLink, collection='compute.zoneOperations')
  return waiter.WaitFor(
      poller.Poller(client.apitools_client.instances), operation_ref,
      'Starting instance [{0}]'.format(instance_ref.Name()))
def UpdateMetadata(holder, metadata, args, instance,
                   container_mount_disk_enabled=False,
                   container_mount_disk=None):
  """Update konlet metadata entry using user-supplied data.

  Mutates metadata.value in place: parses the YAML manifest, applies every
  explicitly-set container-* flag to the single container spec, and
  re-serializes the manifest.

  Args:
    holder: ComputeApiHolder with the resources registry.
    metadata: metadata entry whose key is gce-container-declaration.
    args: argparse namespace with the update-container flags.
    instance: Instance message; its disks are consulted only when
      container_mount_disk_enabled is True.
    container_mount_disk_enabled: bool, whether --container-mount-disk is
      supported by the calling surface.
    container_mount_disk: parsed --container-mount-disk values, if any.
  """
  # precondition: metadata.key == GCE_CONTAINER_DECLARATION
  manifest = yaml.load(metadata.value)
  if args.IsSpecified('container_image'):
    manifest['spec']['containers'][0]['image'] = args.container_image
  if args.IsSpecified('container_command'):
    manifest['spec']['containers'][0]['command'] = [args.container_command]
  if args.IsSpecified('clear_container_command'):
    manifest['spec']['containers'][0].pop('command', None)
  if args.IsSpecified('container_arg'):
    manifest['spec']['containers'][0]['args'] = args.container_arg
  if args.IsSpecified('clear_container_args'):
    manifest['spec']['containers'][0].pop('args', None)
  # Tri-state flags below: only explicit True/False change the manifest;
  # None (flag not given) leaves the existing value untouched.
  if args.container_privileged is True:
    manifest['spec']['containers'][0]['securityContext']['privileged'] = True
  if args.container_privileged is False:
    manifest['spec']['containers'][0]['securityContext']['privileged'] = False
  if container_mount_disk_enabled:
    container_mount_disk = container_mount_disk or []
    disks = instance.disks
  else:
    container_mount_disk = []
    # Only need disks for updating the container mount disk.
    disks = []
  _UpdateMounts(holder, manifest, args.remove_container_mounts or [],
                args.container_mount_host_path or [],
                args.container_mount_tmpfs or [],
                container_mount_disk,
                disks)
  _UpdateEnv(manifest,
             itertools.chain.from_iterable(args.remove_container_env or []),
             args.container_env_file, args.container_env or [])
  if args.container_stdin is True:
    manifest['spec']['containers'][0]['stdin'] = True
  if args.container_stdin is False:
    manifest['spec']['containers'][0]['stdin'] = False
  if args.container_tty is True:
    manifest['spec']['containers'][0]['tty'] = True
  if args.container_tty is False:
    manifest['spec']['containers'][0]['tty'] = False
  if args.IsSpecified('container_restart_policy'):
    manifest['spec']['restartPolicy'] = RESTART_POLICY_API[
        args.container_restart_policy]
  metadata.value = DumpYaml(manifest)
def _UpdateMounts(holder, manifest, remove_container_mounts,
                  container_mount_host_path, container_mount_tmpfs,
                  container_mount_disk, disks):
  """Updates mounts in container manifest.

  First removes mounts slated for removal or about to be re-declared, then
  appends the new hostPath, tmpfs and persistent-disk mounts, generating
  volume names that do not collide with ones already in the manifest.

  Args:
    holder: ComputeApiHolder; used to resolve disk names from source URIs.
    manifest: dict, the Konlet manifest; mutated in place.
    remove_container_mounts: list of mount paths to drop.
    container_mount_host_path: parsed --container-mount-host-path values.
    container_mount_tmpfs: parsed --container-mount-tmpfs values.
    container_mount_disk: parsed --container-mount-disk values.
    disks: disks attached to the instance (API messages).
  """
  _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path,
                 container_mount_tmpfs,
                 container_mount_disk=container_mount_disk)

  used_names = [volume['name'] for volume in manifest['spec']['volumes']]
  volumes = []
  volume_mounts = []
  next_volume_index = 0
  for volume in container_mount_host_path:
    # Skip generated names already taken in the manifest.
    while _GetHostPathDiskName(next_volume_index) in used_names:
      next_volume_index += 1
    name = _GetHostPathDiskName(next_volume_index)
    next_volume_index += 1
    volumes.append({
        'name': name,
        'hostPath': {
            'path': volume['host-path']
        },
    })
    volume_mounts.append({
        'name': name,
        'mountPath': volume['mount-path'],
        'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()
    })
  for tmpfs in container_mount_tmpfs:
    while _GetTmpfsDiskName(next_volume_index) in used_names:
      next_volume_index += 1
    name = _GetTmpfsDiskName(next_volume_index)
    next_volume_index += 1
    volumes.append({'name': name, 'emptyDir': {'medium': 'Memory'}})
    volume_mounts.append({'name': name, 'mountPath': tmpfs['mount-path']})
  if container_mount_disk:
    # Convert to dict to match helper input needs.
    # The disk must already have a device name that matches its
    # name. For disks that were attached to the instance already.
    disks = [{'device-name': disk.deviceName,
              'name': holder.resources.Parse(disk.source).Name()}
             for disk in disks]
    _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
                               used_names=used_names, disks=disks)
  manifest['spec']['containers'][0]['volumeMounts'].extend(volume_mounts)
  manifest['spec']['volumes'].extend(volumes)
def _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path,
                   container_mount_tmpfs, container_mount_disk=None):
  """Remove all specified mounts from container manifest.

  Drops every volumeMount whose mountPath is explicitly removed or about to
  be re-declared by one of the new mount flags, then garbage-collects volumes
  that became orphaned as a result. Volumes that were already orphaned
  before this call are deliberately kept.

  Args:
    manifest: dict, the Konlet manifest; mutated in place.
    remove_container_mounts: list of mount paths to drop.
    container_mount_host_path: parsed --container-mount-host-path values.
    container_mount_tmpfs: parsed --container-mount-tmpfs values.
    container_mount_disk: parsed --container-mount-disk values, if any.
  """
  container_mount_disk = container_mount_disk or []
  # volumeMounts stored in this list should be removed
  mount_paths_to_remove = remove_container_mounts[:]
  for host_path in container_mount_host_path:
    mount_paths_to_remove.append(host_path['mount-path'])
  for tmpfs in container_mount_tmpfs:
    mount_paths_to_remove.append(tmpfs['mount-path'])
  for disk in container_mount_disk:
    mount_paths_to_remove.append(disk['mount-path'])

  # volumeMounts stored in this list are used
  used_mounts = []
  used_mounts_names = []
  removed_mount_names = []
  for mount in manifest['spec']['containers'][0].get('volumeMounts', []):
    if mount['mountPath'] not in mount_paths_to_remove:
      used_mounts.append(mount)
      used_mounts_names.append(mount['name'])
    else:
      removed_mount_names.append(mount['name'])

  # override volumeMounts
  manifest['spec']['containers'][0]['volumeMounts'] = used_mounts

  # garbage collect volumes which become orphaned, skip volumes orphaned before
  # start of the procedure
  used_volumes = []
  for volume in manifest['spec'].get('volumes', []):
    if (volume['name'] in used_mounts_names or
        volume['name'] not in removed_mount_names):
      used_volumes.append(volume)

  # override volumes
  manifest['spec']['volumes'] = used_volumes
def _UpdateEnv(manifest, remove_container_env, container_env_file,
               container_env):
  """Update environment variables in container manifest.

  Precedence (lowest to highest): existing manifest env, env file,
  --container-env flags. Removals are applied before new values.
  """
  container = manifest['spec']['containers'][0]
  current_env = {
      env_val['name']: env_val['value']
      for env_val in container.get('env', [])
  }
  for env in remove_container_env:
    current_env.pop(env, None)
  current_env.update(_ReadDictionary(container_env_file))
  for env_var_dict in container_env:
    current_env.update(env_var_dict)
  if current_env:
    container['env'] = [
        {'name': env, 'value': val}
        for env, val in six.iteritems(current_env)
    ]
| 34.512129 | 117 | 0.694275 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import re
import enum
from googlecloudsdk.api_lib.compute import exceptions
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
import six
USER_INIT_TEMPLATE = """#cloud-config
runcmd:
- ['/usr/bin/kubelet',
'--allow-privileged=%s',
'--manifest-url=http://metadata.google.internal/computeMetadata/v1/instance/attributes/google-container-manifest',
'--manifest-url-header=Metadata-Flavor:Google',
'--config=/etc/kubernetes/manifests']
"""
MANIFEST_DISCLAIMER = """# DISCLAIMER:
# This container declaration format is not a public API and may change without
# notice. Please use gcloud command-line tool or Google Cloud Console to run
# Containers on Google Compute Engine.
"""
USER_DATA_KEY = 'user-data'
CONTAINER_MANIFEST_KEY = 'google-container-manifest'
GCE_CONTAINER_DECLARATION = 'gce-container-declaration'
STACKDRIVER_LOGGING_AGENT_CONFIGURATION = 'google-logging-enabled'
GKE_DOCKER = 'gci-ensure-gke-docker'
ALLOWED_PROTOCOLS = ['TCP', 'UDP']
COS_MAJOR_RELEASE_PREFIX = 'cos-stable-'
COS_MAJOR_RELEASE = COS_MAJOR_RELEASE_PREFIX + '55'
COS_PROJECT = 'cos-cloud'
_MIN_PREFERRED_COS_VERSION = 63
RESTART_POLICY_API = {
'never': 'Never',
'on-failure': 'OnFailure',
'always': 'Always'
}
class MountVolumeMode(enum.Enum):
READ_ONLY = 1,
READ_WRITE = 2,
def isReadOnly(self):
return self == MountVolumeMode.READ_ONLY
_DEFAULT_MODE = MountVolumeMode.READ_WRITE
def _GetUserInit(allow_privileged):
allow_privileged_val = 'true' if allow_privileged else 'false'
return USER_INIT_TEMPLATE % (allow_privileged_val)
class Error(exceptions.Error):
class InvalidMetadataKeyException(Error):
def __init__(self, metadata_key):
super(InvalidMetadataKeyException, self).__init__(
'Metadata key "{0}" is not allowed when running containerized VM.'
.format(metadata_key))
class NoGceContainerDeclarationMetadataKey(Error):
def __init__(self):
super(NoGceContainerDeclarationMetadataKey, self).__init__(
"Instance doesn't have {} metadata key - it is not a container.".format(
GCE_CONTAINER_DECLARATION))
def ValidateUserMetadata(metadata):
for entry in metadata.items:
if entry.key in [USER_DATA_KEY, CONTAINER_MANIFEST_KEY, GKE_DOCKER]:
raise InvalidMetadataKeyException(entry.key)
def CreateTagsMessage(messages, tags):
if tags:
return messages.Tags(items=tags)
def GetLabelsMessageWithCosVersion(
labels, image_uri, resources, resource_class):
cos_version = resources.Parse(
image_uri, collection='compute.images').Name().replace('/', '-')
if labels is None:
labels = {}
labels['container-vm'] = cos_version
additional_properties = [
resource_class.LabelsValue.AdditionalProperty(key=k, value=v)
for k, v in sorted(six.iteritems(labels))]
return resource_class.LabelsValue(additionalProperties=additional_properties)
class NoCosImageException(Error):
def __init__(self):
super(NoCosImageException, self).__init__(
'Could not find COS (Cloud OS) for release family \'{0}\''
.format(COS_MAJOR_RELEASE))
def ExpandCosImageFlag(compute_client):
compute = compute_client.apitools_client
images = compute_client.MakeRequests([(
compute.images,
'List',
compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT)
)])
return _SelectNewestCosImage(images)
def _SelectNewestCosImage(images):
cos_images = sorted([image for image in images
if image.name.startswith(COS_MAJOR_RELEASE)],
key=lambda x: times.ParseDateTime(x.creationTimestamp))
if not cos_images:
raise NoCosImageException()
return cos_images[-1].selfLink
def _ValidateAndParsePortMapping(port_mappings):
ports_config = []
for port_mapping in port_mappings:
mapping_match = re.match(r'^(\d+):(\d+):(\S+)$', port_mapping)
if not mapping_match:
raise calliope_exceptions.InvalidArgumentException(
'--port-mappings',
'Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.')
port, target_port, protocol = mapping_match.groups()
if protocol not in ALLOWED_PROTOCOLS:
raise calliope_exceptions.InvalidArgumentException(
'--port-mappings',
'Protocol should be one of [{0}]'.format(
', '.join(ALLOWED_PROTOCOLS)))
ports_config.append({
'containerPort': int(target_port),
'hostPort': int(port),
'protocol': protocol})
return ports_config
def ExpandKonletCosImageFlag(compute_client):
compute = compute_client.apitools_client
images = compute_client.MakeRequests(
[(compute.images,
'List',
compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))])
name_re_template = r'cos-{}-(\d+)-.*'
image_families = ['stable', 'beta', 'dev']
for family in image_families:
name_re = name_re_template.format(family)
def MakeCreateComparisonKey(name_re):
def CreateComparisonKey(image):
version = int(re.match(name_re, image.name).group(1))
timestamp = times.ParseDateTime(image.creationTimestamp)
return version, timestamp
return CreateComparisonKey
cos_images = sorted(
[image for image in images if re.match(name_re, image.name)],
key=MakeCreateComparisonKey(name_re))
if (cos_images and MakeCreateComparisonKey(name_re)(cos_images[-1])[0] >=
_MIN_PREFERRED_COS_VERSION):
return cos_images[-1].selfLink
raise NoCosImageException()
def _ReadDictionary(filename):
# pylint:disable=line-too-long
env_vars = {}
if not filename:
return env_vars
with files.FileReader(filename) as f:
for i, line in enumerate(f):
# Strip whitespace at the beginning and end of line
line = line.strip()
# Ignore comments and empty lines
if len(line) <= 1 or line[0] == '
continue
# Find first left '=' character
assignment_op_loc = line.find('=')
if assignment_op_loc == -1:
raise calliope_exceptions.BadFileException(
'Syntax error in {}:{}: Expected VAR=VAL, got {}'.format(
filename, i, line))
env = line[:assignment_op_loc]
val = line[assignment_op_loc+1:]
if ' ' in env or '\t' in env:
raise calliope_exceptions.BadFileException(
'Syntax error in {}:{} Variable name cannot contain whitespaces,'
' got "{}"'.format(filename, i, env))
env_vars[env] = val
return env_vars
def _GetHostPathDiskName(idx):
return 'host-path-{}'.format(idx)
def _GetTmpfsDiskName(idx):
return 'tmpfs-{}'.format(idx)
def _GetPersistentDiskName(idx):
return 'pd-{}'.format(idx)
def _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
used_names=None, disks=None):
used_names = used_names or []
disks = disks or []
idx = 0
for mount_disk in container_mount_disk:
while _GetPersistentDiskName(idx) in used_names:
idx += 1
device_name = mount_disk.get('name')
partition = mount_disk.get('partition')
def _GetMatchingVolume(device_name, partition):
for volume_spec in volumes:
pd = volume_spec.get('gcePersistentDisk', {})
if (pd.get('pdName') == device_name
and pd.get('partition') == partition):
return volume_spec
repeated = _GetMatchingVolume(device_name, partition)
if repeated:
name = repeated['name']
else:
name = _GetPersistentDiskName(idx)
used_names.append(name)
if not device_name:
# This should not be needed - any command that accepts container mount
# disks should validate that there is only one disk before calling this
# function.
if len(disks) != 1:
raise calliope_exceptions.InvalidArgumentException(
'--container-mount-disk',
'Must specify the name of the disk to be mounted unless exactly '
'one disk is attached to the instance.')
device_name = disks[0].get('name')
if disks[0].get('device-name', device_name) != device_name:
raise exceptions.InvalidArgumentException(
'--container-mount-disk',
'Must not have a device-name that is different from disk name if '
'disk is being attached to the instance and mounted to a container:'
' [{}]'.format(disks[0].get('device-name')))
volume_mounts.append({
'name': name,
'mountPath': mount_disk['mount-path'],
'readOnly': mount_disk.get('mode', _DEFAULT_MODE).isReadOnly()})
if repeated:
continue
volume_spec = {
'name': name,
'gcePersistentDisk': {
'pdName': device_name,
'fsType': 'ext4'}}
if partition:
volume_spec['gcePersistentDisk'].update({'partition': partition})
volumes.append(volume_spec)
idx += 1
def _CreateContainerManifest(args, instance_name,
                             container_mount_disk_enabled=False,
                             container_mount_disk=None):
  """Build the konlet container manifest dict from parsed CLI *args*.

  Returns a dict with a single-container pod-style spec: image, command/args,
  stdin/tty/privileged settings, merged environment variables, and the
  volumes/volumeMounts derived from the host-path, tmpfs and (optionally)
  persistent-disk mount flags.
  """
  container = {'image': args.container_image, 'name': instance_name}
  if args.container_command is not None:
    container['command'] = [args.container_command]
  if args.container_arg is not None:
    container['args'] = args.container_arg
  container['stdin'] = args.container_stdin
  container['tty'] = args.container_tty
  container['securityContext'] = {'privileged': args.container_privileged}
  # Env file values are applied first; explicit --container-env wins.
  env_vars = _ReadDictionary(args.container_env_file)
  for env_var_dict in args.container_env or []:
    for env, val in six.iteritems(env_var_dict):
      env_vars[env] = val
  if env_vars:
    container['env'] = [{
        'name': env,
        'value': val
    } for env, val in six.iteritems(env_vars)]
  volumes = []
  volume_mounts = []
  for idx, volume in enumerate(args.container_mount_host_path or []):
    volumes.append({
        'name': _GetHostPathDiskName(idx),
        'hostPath': {
            'path': volume['host-path']
        },
    })
    volume_mounts.append({
        'name': _GetHostPathDiskName(idx),
        'mountPath': volume['mount-path'],
        'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()
    })
  for idx, tmpfs in enumerate(args.container_mount_tmpfs or []):
    volumes.append(
        {'name': _GetTmpfsDiskName(idx), 'emptyDir': {'medium': 'Memory'}})
    volume_mounts.append(
        {'name': _GetTmpfsDiskName(idx), 'mountPath': tmpfs['mount-path']})
  if container_mount_disk_enabled:
    container_mount_disk = container_mount_disk or []
    # Both pre-existing and to-be-created disks may back a container mount.
    disks = (args.disk or []) + (args.create_disk or [])
    _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
                               disks=disks)
  container['volumeMounts'] = volume_mounts
  manifest = {
      'spec': {
          'containers': [container],
          'volumes': volumes,
          'restartPolicy': RESTART_POLICY_API[args.container_restart_policy]
      }
  }
  return manifest
def DumpYaml(data):
  """Serialize *data* to YAML, prefixed with the manifest disclaimer."""
  rendered = yaml.dump(data)
  return MANIFEST_DISCLAIMER + rendered
def _CreateYamlContainerManifest(args, instance_name,
                                 container_mount_disk_enabled=False,
                                 container_mount_disk=None):
  """Build the konlet container manifest for *args* and render it as YAML."""
  manifest = _CreateContainerManifest(
      args, instance_name,
      container_mount_disk_enabled=container_mount_disk_enabled,
      container_mount_disk=container_mount_disk)
  return DumpYaml(manifest)
def CreateKonletMetadataMessage(messages, args, instance_name, user_metadata,
                                container_mount_disk_enabled=False,
                                container_mount_disk=None):
  """Build the instance metadata message carrying the konlet declaration.

  Merges the rendered gce-container-declaration YAML (and the Stackdriver
  Logging agent switch) into *user_metadata* via ConstructMetadataMessage.
  """
  konlet_metadata = {
      GCE_CONTAINER_DECLARATION:
          _CreateYamlContainerManifest(
              args, instance_name,
              container_mount_disk_enabled=container_mount_disk_enabled,
              container_mount_disk=container_mount_disk),
      # Since COS 69, having logs for Container-VMs written requires enabling
      # Stackdriver Logging agent.
      STACKDRIVER_LOGGING_AGENT_CONFIGURATION: 'true',
  }
  return metadata_utils.ConstructMetadataMessage(
      messages, metadata=konlet_metadata, existing_metadata=user_metadata)
def UpdateInstance(holder, client, instance_ref, instance, args,
                   container_mount_disk_enabled=False,
                   container_mount_disk=None):
  """Rewrite the instance's container declaration and cycle the VM.

  Updates the gce-container-declaration metadata entry in place, pushes the
  new metadata to GCE, then restarts the VM so the change takes effect:
  TERMINATED instances are left off, SUSPENDED instances are stopped, and
  running instances are stopped and started again.

  Raises:
    NoGceContainerDeclarationMetadataKey: if the instance has no
        gce-container-declaration metadata entry.
  """
  # find gce-container-declaration metadata entry
  for metadata in instance.metadata.items:
    if metadata.key == GCE_CONTAINER_DECLARATION:
      UpdateMetadata(
          holder, metadata, args, instance,
          container_mount_disk_enabled=container_mount_disk_enabled,
          container_mount_disk=container_mount_disk)
      # update Google Compute Engine resource
      operation = client.apitools_client.instances.SetMetadata(
          client.messages.ComputeInstancesSetMetadataRequest(
              metadata=instance.metadata, **instance_ref.AsDict()))
      operation_ref = holder.resources.Parse(
          operation.selfLink, collection='compute.zoneOperations')
      operation_poller = poller.Poller(client.apitools_client.instances)
      set_metadata_waiter = waiter.WaitFor(
          operation_poller, operation_ref,
          'Updating specification of container [{0}]'.format(
              instance_ref.Name()))
      # Choose the restart behaviour that matches the VM's current state.
      if (instance.status ==
          client.messages.Instance.StatusValueValuesEnum.TERMINATED):
        return set_metadata_waiter
      elif (instance.status ==
            client.messages.Instance.StatusValueValuesEnum.SUSPENDED):
        return _StopVm(holder, client, instance_ref)
      else:
        _StopVm(holder, client, instance_ref)
        return _StartVm(holder, client, instance_ref)
  # Reached only when the loop found no matching metadata key.
  raise NoGceContainerDeclarationMetadataKey()
def _StopVm(holder, client, instance_ref):
  """Stop the VM and block until the zone operation completes."""
  stop_request = client.messages.ComputeInstancesStopRequest(
      **instance_ref.AsDict())
  operation = client.apitools_client.instances.Stop(stop_request)
  operation_ref = holder.resources.Parse(
      operation.selfLink, collection='compute.zoneOperations')
  return waiter.WaitFor(
      poller.Poller(client.apitools_client.instances), operation_ref,
      'Stopping instance [{0}]'.format(instance_ref.Name()))
def _StartVm(holder, client, instance_ref):
  """Start the VM and block until the zone operation completes."""
  start_request = client.messages.ComputeInstancesStartRequest(
      **instance_ref.AsDict())
  operation = client.apitools_client.instances.Start(start_request)
  operation_ref = holder.resources.Parse(
      operation.selfLink, collection='compute.zoneOperations')
  return waiter.WaitFor(
      poller.Poller(client.apitools_client.instances), operation_ref,
      'Starting instance [{0}]'.format(instance_ref.Name()))
def UpdateMetadata(holder, metadata, args, instance,
                   container_mount_disk_enabled=False,
                   container_mount_disk=None):
  """Apply the update flags in *args* to the container declaration metadata.

  Parses the YAML manifest stored in metadata.value, mutates only the fields
  whose flags were specified (image, command/args, privileged, mounts, env,
  stdin/tty, restart policy), and writes the re-rendered YAML back to
  metadata.value.
  """
  # precondition: metadata.key == GCE_CONTAINER_DECLARATION
  # NOTE(review): yaml.load without an explicit Loader; metadata is
  # machine-written by this tool, but SafeLoader would be safer — confirm.
  manifest = yaml.load(metadata.value)
  if args.IsSpecified('container_image'):
    manifest['spec']['containers'][0]['image'] = args.container_image
  if args.IsSpecified('container_command'):
    manifest['spec']['containers'][0]['command'] = [args.container_command]
  if args.IsSpecified('clear_container_command'):
    manifest['spec']['containers'][0].pop('command', None)
  if args.IsSpecified('container_arg'):
    manifest['spec']['containers'][0]['args'] = args.container_arg
  if args.IsSpecified('clear_container_args'):
    manifest['spec']['containers'][0].pop('args', None)
  # Tri-state flag: only True/False change the manifest; None leaves it.
  if args.container_privileged is True:
    manifest['spec']['containers'][0]['securityContext']['privileged'] = True
  if args.container_privileged is False:
    manifest['spec']['containers'][0]['securityContext']['privileged'] = False
  if container_mount_disk_enabled:
    container_mount_disk = container_mount_disk or []
    disks = instance.disks
  else:
    container_mount_disk = []
    # Only need disks for updating the container mount disk.
    disks = []
  _UpdateMounts(holder, manifest, args.remove_container_mounts or [],
                args.container_mount_host_path or [],
                args.container_mount_tmpfs or [],
                container_mount_disk,
                disks)
  _UpdateEnv(manifest,
             itertools.chain.from_iterable(args.remove_container_env or []),
             args.container_env_file, args.container_env or [])
  if args.container_stdin is True:
    manifest['spec']['containers'][0]['stdin'] = True
  if args.container_stdin is False:
    manifest['spec']['containers'][0]['stdin'] = False
  if args.container_tty is True:
    manifest['spec']['containers'][0]['tty'] = True
  if args.container_tty is False:
    manifest['spec']['containers'][0]['tty'] = False
  if args.IsSpecified('container_restart_policy'):
    manifest['spec']['restartPolicy'] = RESTART_POLICY_API[
        args.container_restart_policy]
  metadata.value = DumpYaml(manifest)
def _UpdateMounts(holder, manifest, remove_container_mounts,
                  container_mount_host_path, container_mount_tmpfs,
                  container_mount_disk, disks):
  """Replace/add volume mounts in *manifest* according to the mount flags.

  First removes mounts being deleted or overridden (_CleanupMounts), then
  appends new host-path, tmpfs and persistent-disk volumes, generating names
  that do not collide with the volumes that survived cleanup.
  """
  _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path,
                 container_mount_tmpfs,
                 container_mount_disk=container_mount_disk)
  used_names = [volume['name'] for volume in manifest['spec']['volumes']]
  volumes = []
  volume_mounts = []
  next_volume_index = 0
  for volume in container_mount_host_path:
    # Skip generated names already taken by surviving volumes.
    while _GetHostPathDiskName(next_volume_index) in used_names:
      next_volume_index += 1
    name = _GetHostPathDiskName(next_volume_index)
    next_volume_index += 1
    volumes.append({
        'name': name,
        'hostPath': {
            'path': volume['host-path']
        },
    })
    volume_mounts.append({
        'name': name,
        'mountPath': volume['mount-path'],
        'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()
    })
  for tmpfs in container_mount_tmpfs:
    while _GetTmpfsDiskName(next_volume_index) in used_names:
      next_volume_index += 1
    name = _GetTmpfsDiskName(next_volume_index)
    next_volume_index += 1
    volumes.append({'name': name, 'emptyDir': {'medium': 'Memory'}})
    volume_mounts.append({'name': name, 'mountPath': tmpfs['mount-path']})
  if container_mount_disk:
    # Convert to dict to match helper input needs.
    # The disk must already have a device name that matches its
    # name. For disks that were attached to the instance already.
    disks = [{'device-name': disk.deviceName,
              'name': holder.resources.Parse(disk.source).Name()}
             for disk in disks]
    _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts,
                               used_names=used_names, disks=disks)
  manifest['spec']['containers'][0]['volumeMounts'].extend(volume_mounts)
  manifest['spec']['volumes'].extend(volumes)
def _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path,
container_mount_tmpfs, container_mount_disk=None):
container_mount_disk = container_mount_disk or []
# volumeMounts stored in this list should be removed
mount_paths_to_remove = remove_container_mounts[:]
for host_path in container_mount_host_path:
mount_paths_to_remove.append(host_path['mount-path'])
for tmpfs in container_mount_tmpfs:
mount_paths_to_remove.append(tmpfs['mount-path'])
for disk in container_mount_disk:
mount_paths_to_remove.append(disk['mount-path'])
# volumeMounts stored in this list are used
used_mounts = []
used_mounts_names = []
removed_mount_names = []
for mount in manifest['spec']['containers'][0].get('volumeMounts', []):
if mount['mountPath'] not in mount_paths_to_remove:
used_mounts.append(mount)
used_mounts_names.append(mount['name'])
else:
removed_mount_names.append(mount['name'])
# override volumeMounts
manifest['spec']['containers'][0]['volumeMounts'] = used_mounts
# garbage collect volumes which become orphaned, skip volumes orphaned before
# start of the procedure
used_volumes = []
for volume in manifest['spec'].get('volumes', []):
if (volume['name'] in used_mounts_names or
volume['name'] not in removed_mount_names):
used_volumes.append(volume)
# override volumes
manifest['spec']['volumes'] = used_volumes
def _UpdateEnv(manifest, remove_container_env, container_env_file,
               container_env):
  """Merge environment-variable updates into the container manifest in place.

  Removals are applied first, then values from the env file, then explicit
  --container-env overrides (highest precedence).
  """
  container = manifest['spec']['containers'][0]
  env_map = {item['name']: item['value'] for item in container.get('env', [])}
  for name in remove_container_env:
    env_map.pop(name, None)
  env_map.update(_ReadDictionary(container_env_file))
  for overrides in container_env:
    for name, value in six.iteritems(overrides):
      env_map[name] = value
  if env_map:
    container['env'] = [{
        'name': name,
        'value': value
    } for name, value in six.iteritems(env_map)]
| true | true |
7901a3e8f02d8f8d5c8f255e43848232d4a5ec4e | 41 | py | Python | tcodtest.py | Rosuav/libtcodpy | 7d76cac7cd3e6930f09558c6735ef44859ebb4e3 | [
"Unlicense"
] | 3 | 2018-03-14T23:48:00.000Z | 2019-02-15T17:50:21.000Z | tcodtest.py | Rosuav/libtcodpy | 7d76cac7cd3e6930f09558c6735ef44859ebb4e3 | [
"Unlicense"
] | null | null | null | tcodtest.py | Rosuav/libtcodpy | 7d76cac7cd3e6930f09558c6735ef44859ebb4e3 | [
"Unlicense"
] | null | null | null | import libtcodpy
libtcodpy.say_hello()
| 8.2 | 21 | 0.804878 | import libtcodpy
libtcodpy.say_hello()
| true | true |
7901a405bba454615704364a1d7b2850bd853fea | 151 | py | Python | suggestions/urls.py | MindMantraSIH/paathshaala | 28fcee05f49e7b5dec734d6b9c46a5630e687c5d | [
"MIT"
] | null | null | null | suggestions/urls.py | MindMantraSIH/paathshaala | 28fcee05f49e7b5dec734d6b9c46a5630e687c5d | [
"MIT"
] | null | null | null | suggestions/urls.py | MindMantraSIH/paathshaala | 28fcee05f49e7b5dec734d6b9c46a5630e687c5d | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from django.urls import path, include
urlpatterns = [
path('',views.savedata,name="savedata"),
]
| 18.875 | 44 | 0.728477 | from django.urls import path
from . import views
from django.urls import path, include
urlpatterns = [
path('',views.savedata,name="savedata"),
]
| true | true |
7901a54bf20b6019a3838d28b9a1ebd9102164c7 | 196 | py | Python | CodeChef/problems/PROBCAT/main.py | object-oriented-human/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | 1 | 2022-02-21T15:43:01.000Z | 2022-02-21T15:43:01.000Z | CodeChef/problems/PROBCAT/main.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | null | null | null | CodeChef/problems/PROBCAT/main.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | null | null | null | tc = int(input())
while tc:
tc -= 1
x = int(input())
if 1 <= x and x < 100:
print("Easy")
elif 100 <= x and x < 200:
print("Medium")
else:
print("Hard") | 19.6 | 30 | 0.44898 | tc = int(input())
while tc:
tc -= 1
x = int(input())
if 1 <= x and x < 100:
print("Easy")
elif 100 <= x and x < 200:
print("Medium")
else:
print("Hard") | true | true |
7901a5c2e696a85bd807869a99aea8105a03f612 | 30,278 | py | Python | experiment code/GPU Experiments Code/task_submit_save.py | qore-dl/qore-dl-code | dc60df8fd072df5c641005992630f43892b7f78e | [
"Apache-2.0"
] | null | null | null | experiment code/GPU Experiments Code/task_submit_save.py | qore-dl/qore-dl-code | dc60df8fd072df5c641005992630f43892b7f78e | [
"Apache-2.0"
] | null | null | null | experiment code/GPU Experiments Code/task_submit_save.py | qore-dl/qore-dl-code | dc60df8fd072df5c641005992630f43892b7f78e | [
"Apache-2.0"
] | null | null | null | #https://blog.csdn.net/orangefly0214/article/details/81387077
import MultiTemplate
from MultiTemplate import TaskTemplate
# https://blog.csdn.net/u013812710/article/details/72886491
# https://blog.csdn.net/ismr_m/article/details/53100896
#https://blog.csdn.net/bcfdsagbfcisbg/article/details/78134172
import kubernetes
import os
import influxdb
import time
import yaml
def check_path(name, base_dir='/tfdata/k8snfs/'):
    """Return the training directory for *name*, creating it if necessary.

    Args:
        name: job/namespace name; becomes the directory's basename.
        base_dir: parent directory (default preserves the original NFS root).

    Returns:
        The path of the (now existing) training directory.
    """
    train_dir = os.path.join(base_dir, name)
    print(train_dir)
    # exist_ok avoids the check-then-create race of an explicit exists() test.
    os.makedirs(train_dir, exist_ok=True)
    return train_dir
def check_ns(name):
    """Return True iff a Kubernetes namespace called *name* already exists.

    Loads the local kubeconfig and queries the live cluster on every call.
    """
    kubernetes.config.load_kube_config()
    v1 = kubernetes.client.CoreV1Api()
    # Membership test replaces the verbose if/else returning True/False.
    exist_ns_name = [ns.metadata.name for ns in v1.list_namespace().items]
    return name in exist_ns_name
class SubTask():
    """Shared configuration for every distributed-TF training job builder.

    Holds replica counts and training hyper-parameters, the static cluster
    inventory (per-node CPU in millicores and memory in MiB, matching the
    capacities used to normalise utilisation), and the InfluxDB client that
    serves node-load samples from the NODEMESSAGE database.
    """

    def __init__(self, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag):
        self.template_id = template_id
        self.ps_replicas = ps_replicas
        self.worker_replicas = worker_replicas
        self.training_step = training_step
        self.interval = interval
        self.batch_size = batch_size
        self.task_id = task_id
        self.tag = tag
        self.rtimes = rtimes
        # Metrics backend storing recent per-node utilisation samples.
        self.influx_client = influxdb.InfluxDBClient(
            host='192.168.128.10', port=8086, username='admin',
            password='admin', database="NODEMESSAGE")
        self.node_list = ['k8s-master', 'k8s-worker0', 'k8s-worker2',
                          'k8sworker1', 'k8s-worker3', 'k8s-worker4',
                          'k8s-worker5']
        # Allocatable CPU per node, in millicores.
        self.node_cpu = {
            'k8s-master': 32000,
            'k8s-worker0': 24000,
            'k8s-worker2': 24000,
            'k8sworker1': 16000,
            'k8s-worker3': 24000,
            'k8s-worker4': 16000,
            'k8s-worker5': 24000,
        }
        # Allocatable memory per node, in MiB.
        self.node_memory = {
            'k8s-master': float(251 * 1024),
            'k8s-worker0': float(94 * 1024),
            'k8s-worker2': float(94 * 1024),
            'k8sworker1': float(125 * 1024),
            'k8s-worker3': float(94 * 1024),
            'k8s-worker4': float(125 * 1024),
            'k8s-worker5': float(94 * 1024),
        }
        # Command-line arguments common to PS and Worker containers;
        # subclasses append their model-specific flags via make_args().
        self.args = [
            '--training_step=' + str(self.training_step),
            '--batch_size=' + str(self.batch_size),
            '--interval=' + str(self.interval),
            '--task_id=' + str(self.task_id),
            '--rtimes=' + str(self.rtimes),
            "--tag=" + self.tag,
        ]
class VGGTask(SubTask):
    """Builds and submits a TFJob running a configurable VGG-style network.

    The five ``channel*``/``num_layer*`` pairs set the width and depth of
    each convolutional stage; ``num_layers`` adds 3 for the tail layers.
    """
    def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,channel1,channel2,channel3,channel4,channel5,num_layer1,num_layer2,num_layer3,num_layer4,num_layer5):
        """Store model hyper-parameters; *v1* is a kubernetes CoreV1Api client."""
        SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.channel5 = channel5
        self.num_layer1 = num_layer1
        self.num_layer2 = num_layer2
        self.num_layer3 = num_layer3
        self.num_layer4 = num_layer4
        self.num_layer5 = num_layer5
        self.num_layers = num_layer1+num_layer2+num_layer3+num_layer4+num_layer5+3
        # NOTE(review): TaskTemplate.VGG is a module-level dict shared by all
        # instances; create_tf() mutates it in place.
        self.template = TaskTemplate.VGG
        self.v1 = v1
        self.name = 'vgg-'+str(self.task_id)+'-'+str(self.rtimes)
    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        node_list = [i.metadata.name for i in self.v1.list_node().items]
        return node_list
    def make_args(self):
        """Append the VGG-specific hyper-parameter flags to the container args."""
        self.args.append('--channel1='+str(self.channel1))
        self.args.append('--channel2='+str(self.channel2))
        self.args.append('--channel3='+str(self.channel3))
        self.args.append('--channel4='+str(self.channel4))
        self.args.append('--channel5='+str(self.channel5))
        self.args.append('--num_layer1='+str(self.num_layer1))
        self.args.append('--num_layer2='+str(self.num_layer2))
        self.args.append('--num_layer3='+str(self.num_layer3))
        self.args.append('--num_layer4='+str(self.num_layer4))
        self.args.append('--num_layer5='+str(self.num_layer5))
        self.args.append('--num_layers='+str(self.num_layers))
    def create_tf(self):
        """Create namespace and train dir, label nodes by load, submit the TFJob.

        Samples the last 3 NODEMESSAGE points per node, scores each node as
        0.6*cpu + 0.4*memory utilisation, labels nodes with a priority rank
        (wokpro) and schedulability flag (woksch), fills in the shared VGG
        TFJob template, writes it to YAML and applies it with kubectl.
        """
        name = 'vgg-'+str(self.task_id)+'-'+str(self.rtimes)
        ns_body = TaskTemplate.NS
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Wait so fresh utilisation samples are available before querying.
        time.sleep(12)
        result = self.influx_client.query("select * from "+"NODEMESSAGE"+" group by nodes order by desc limit 3")
        # NOTE(review): node_list is computed but never used below.
        node_list = self.get_node_list()
        result_keys = result.keys()
        nodes = [i[-1]['nodes'] for i in result_keys]
        node_mg = [list(result[i]) for i in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        point_base_list = []
        # Average the sampled points per node and normalise by capacity.
        for i in range(len(node_mg)):
            cpu_base[nodes[i]] = 0
            memory_base[nodes[i]] = 0
            point_base[nodes[i]] = 0.0
            for j in range(len(node_mg[0])):
                cpu_base[nodes[i]] += node_mg[i][j]['cpu']
                memory_base[nodes[i]] += node_mg[i][j]['memory']
            cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0]))/self.node_cpu[nodes[i]]
            memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
            tmp = cpu_base[nodes[i]]*0.6+memory_base[nodes[i]]*0.4
            point_base[nodes[i]] = tmp
            point_base_list.append(tmp)
        list.sort(point_base_list)
        # Relabel every node: clear old labels, set rank and schedulability.
        for key in nodes:
            command = 'kubectl label nodes '+key+' woksch-'
            os.system(command)
            command2 = 'kubectl label nodes '+key+' wokpro-'
            os.system(command2)
            nod_prori = point_base_list.index(point_base[key])
            priori = ' wokpro=%d' % nod_prori
            command3 = 'kubectl label nodes '+key+priori
            os.system(command3)
            # NOTE(review): CPU threshold is 0.57 here but 0.6 in the other
            # task classes — confirm whether that difference is intentional.
            if cpu_base[key] <= 0.57 and memory_base[key] <= 0.6:
                command = 'kubectl label nodes '+key+' woksch=true'
                os.system(command)
            else:
                command = 'kubectl label nodes ' + key + ' woksch=false'
                os.system(command)
        # Fill the shared TFJob template for this concrete job.
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
        self.make_args()
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
        log_dir = '/tfdata/tfcnn/expjob/'
        f = open(log_dir + str(name) + '.yaml', "w")
        yaml.dump(self.template, f)
        f.close()
        response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
        if response == 0:
            print('create task sucess')
        else:
            print("Error code:"+str(response))
    def delete_tf(self):
        """Delete the submitted TFJob and its namespace."""
        name = 'vgg-'+str(self.task_id)+'-'+str(self.rtimes)
        log_dir = '/tfdata/tfcnn/expjob/'
        response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
        if response == 0:
            print('delete task sucess')
        else:
            print("Error code:" + str(response))
        self.v1.delete_namespace(name=name)
class RESTask(SubTask):
    """Builds and submits a TFJob running a configurable ResNet network.

    ``bottle`` selects bottleneck (3 convs per block) vs basic (2 convs)
    residual blocks; ``layer1..4`` give block counts per stage and
    ``channel1..4`` the stage widths.
    """
    def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,bottle,layer1,layer2,layer3,layer4,channel1,channel2,channel3,channel4):
        """Store model hyper-parameters; *v1* is a kubernetes CoreV1Api client."""
        SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.bottle = bottle
        self.layer1 = layer1
        self.layer2 = layer2
        self.layer3 = layer3
        self.layer4 = layer4
        self.name = 'res-'+str(self.task_id)+'-'+str(self.rtimes)
        # Depth: convs per residual block (3 if bottleneck, else 2) times the
        # total block count, plus 2 for the stem and final layer.
        if self.bottle == 1:
            self.num_layers = 3*(layer1+layer4+layer3+layer2)+2
        else:
            self.num_layers = 2 * (layer1 + layer4 + layer3 + layer2) + 2
        # NOTE(review): TaskTemplate.RES is a module-level dict shared by all
        # instances; create_tf() mutates it in place.
        self.template = TaskTemplate.RES
        self.v1 = v1
    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        node_list = [i.metadata.name for i in self.v1.list_node().items]
        return node_list
    def make_args(self):
        """Append the ResNet-specific hyper-parameter flags to the container args."""
        self.args.append('--bottle=' + str(self.bottle))
        self.args.append('--channel1='+str(self.channel1))
        self.args.append('--channel2='+str(self.channel2))
        self.args.append('--channel3='+str(self.channel3))
        self.args.append('--channel4='+str(self.channel4))
        self.args.append('--layer1='+str(self.layer1))
        self.args.append('--layer2='+str(self.layer2))
        self.args.append('--layer3='+str(self.layer3))
        self.args.append('--layer4='+str(self.layer4))
    def create_tf(self):
        """Create namespace and train dir, label nodes by load, submit the TFJob.

        Samples the last 3 NODEMESSAGE points per node, scores each node as
        0.6*cpu + 0.4*memory utilisation, labels nodes with a priority rank
        (wokpro) and schedulability flag (woksch), fills in the shared RES
        TFJob template, writes it to YAML and applies it with kubectl.
        """
        name = 'res-'+str(self.task_id)+'-'+str(self.rtimes)
        ns_body = TaskTemplate.NS
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Wait so fresh utilisation samples are available before querying.
        time.sleep(12)
        result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
        # NOTE(review): node_list is computed but never used below.
        node_list = self.get_node_list()
        result_keys = result.keys()
        nodes = [i[-1]['nodes'] for i in result_keys]
        node_mg = [list(result[i]) for i in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        point_base_list = []
        # Average the sampled points per node and normalise by capacity.
        for i in range(len(node_mg)):
            cpu_base[nodes[i]] = 0
            memory_base[nodes[i]] = 0
            point_base[nodes[i]] = 0.0
            for j in range(len(node_mg[0])):
                cpu_base[nodes[i]] += node_mg[i][j]['cpu']
                memory_base[nodes[i]] += node_mg[i][j]['memory']
            cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
            memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
            tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
            point_base[nodes[i]] = tmp
            point_base_list.append(tmp)
        list.sort(point_base_list)
        # Relabel every node: clear old labels, set rank and schedulability.
        for key in nodes:
            command = 'kubectl label nodes ' + key + ' woksch-'
            os.system(command)
            command2 = 'kubectl label nodes ' + key + ' wokpro-'
            os.system(command2)
            nod_prori = point_base_list.index(point_base[key])
            priori = ' wokpro=%d' % nod_prori
            command3 = 'kubectl label nodes ' + key + priori
            os.system(command3)
            if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
                command = 'kubectl label nodes ' + key + ' woksch=true'
                os.system(command)
            else:
                command = 'kubectl label nodes ' + key + ' woksch=false'
                os.system(command)
        # Fill the shared TFJob template for this concrete job.
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
        self.make_args()
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
        log_dir = '/tfdata/tfcnn/expjob/'
        f = open(log_dir+str(name)+'.yaml', "w")
        yaml.dump(self.template, f)
        f.close()
        response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
        if response == 0:
            print('create task sucess')
        else:
            print("Error code:"+str(response))
    def delete_tf(self):
        """Delete the submitted TFJob and its namespace."""
        name = 'res-'+str(self.task_id)+'-'+str(self.rtimes)
        log_dir = '/tfdata/tfcnn/expjob/'
        response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
        if response == 0:
            print('delete task sucess')
        else:
            print("Error code:" + str(response))
        self.v1.delete_namespace(name=name)
class RETask(SubTask):
    """Builds and submits a TFJob running a stacked residual network.

    ``stack`` is the number of residual blocks per stage; the resulting
    depth is 6*stack + 2. ``channel1..4`` give the stage widths.
    """
    def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,stack,channel1,channel2,channel3,channel4):
        """Store model hyper-parameters; *v1* is a kubernetes CoreV1Api client."""
        SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.stack = stack
        self.num_layers = 6*self.stack+2
        # NOTE(review): TaskTemplate.RE is a module-level dict shared by all
        # instances; create_tf() mutates it in place.
        self.template = TaskTemplate.RE
        self.name = 're-'+str(self.task_id)+'-'+str(self.rtimes)
        self.v1 = v1
    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        node_list = [i.metadata.name for i in self.v1.list_node().items]
        return node_list
    def make_args(self):
        """Append the model-specific hyper-parameter flags to the container args."""
        self.args.append('--stack='+str(self.stack))
        self.args.append('--channel1='+str(self.channel1))
        self.args.append('--channel2='+str(self.channel2))
        self.args.append('--channel3='+str(self.channel3))
        self.args.append('--channel4='+str(self.channel4))
    def create_tf(self):
        """Create namespace and train dir, label nodes by load, submit the TFJob.

        Samples the last 3 NODEMESSAGE points per node, scores each node as
        0.6*cpu + 0.4*memory utilisation, labels nodes with a priority rank
        (wokpro) and schedulability flag (woksch), fills in the shared RE
        TFJob template, writes it to YAML and applies it with kubectl.
        """
        name = 're-'+str(self.task_id)+'-'+str(self.rtimes)
        ns_body = TaskTemplate.NS
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Wait so fresh utilisation samples are available before querying.
        time.sleep(12)
        result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
        # NOTE(review): node_list is computed but never used below.
        node_list = self.get_node_list()
        result_keys = result.keys()
        nodes = [i[-1]['nodes'] for i in result_keys]
        node_mg = [list(result[i]) for i in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        point_base_list = []
        # Average the sampled points per node and normalise by capacity.
        for i in range(len(node_mg)):
            cpu_base[nodes[i]] = 0
            memory_base[nodes[i]] = 0
            point_base[nodes[i]] = 0.0
            for j in range(len(node_mg[0])):
                cpu_base[nodes[i]] += node_mg[i][j]['cpu']
                memory_base[nodes[i]] += node_mg[i][j]['memory']
            cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
            memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
            tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
            point_base[nodes[i]] = tmp
            point_base_list.append(tmp)
        list.sort(point_base_list)
        # Relabel every node: clear old labels, set rank and schedulability.
        for key in nodes:
            command = 'kubectl label nodes ' + key + ' woksch-'
            os.system(command)
            command2 = 'kubectl label nodes ' + key + ' wokpro-'
            os.system(command2)
            nod_prori = point_base_list.index(point_base[key])
            priori = ' wokpro=%d' % nod_prori
            command3 = 'kubectl label nodes ' + key + priori
            os.system(command3)
            if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
                command = 'kubectl label nodes ' + key + ' woksch=true'
                os.system(command)
            else:
                command = 'kubectl label nodes ' + key + ' woksch=false'
                os.system(command)
        # Fill the shared TFJob template for this concrete job.
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
        self.make_args()
        self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
        self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
        log_dir = '/tfdata/tfcnn/expjob/'
        f = open(log_dir+str(name)+'.yaml', "w")
        yaml.dump(self.template, f)
        f.close()
        response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
        if response == 0:
            print('create task sucess')
        else:
            print("Error code:"+str(response))
    def delete_tf(self):
        """Delete the submitted TFJob and its namespace."""
        name = 're-'+str(self.task_id)+'-'+str(self.rtimes)
        log_dir = '/tfdata/tfcnn/expjob/'
        response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
        if response == 0:
            print('delete task sucess')
        else:
            print("Error code:" + str(response))
        self.v1.delete_namespace(name=name)
class XCETask(SubTask):
    def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,repeat,channel1,channel2,channel3,channel4,channel5,channel6,channel7,channel8):
        """Store Xception hyper-parameters; *v1* is a kubernetes CoreV1Api client.

        ``channel1..8`` configure the block widths and ``repeat`` the number
        of repeated middle-flow blocks passed through to the training script.
        """
        SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.channel5 = channel5
        self.channel6 = channel6
        self.channel7 = channel7
        self.channel8 = channel8
        self.repeat = repeat
        # NOTE(review): TaskTemplate.XCEPTION is a module-level dict shared by
        # all instances; create_tf() mutates it in place.
        self.template = TaskTemplate.XCEPTION
        self.v1 = v1
        self.name = 'xception-'+str(self.task_id)+'-'+str(self.rtimes)
    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        node_list = [i.metadata.name for i in self.v1.list_node().items]
        return node_list
    def make_args(self):
        """Append the Xception-specific hyper-parameter flags to the container args."""
        self.args.append('--repeat='+str(self.repeat))
        self.args.append('--channel1='+str(self.channel1))
        self.args.append('--channel2='+str(self.channel2))
        self.args.append('--channel3='+str(self.channel3))
        self.args.append('--channel4='+str(self.channel4))
        self.args.append('--channel5=' + str(self.channel5))
        self.args.append('--channel6=' + str(self.channel6))
        self.args.append('--channel7=' + str(self.channel7))
        self.args.append('--channel8=' + str(self.channel8))
def create_tf(self):
name = 'xception-'+str(self.task_id)+'-'+str(self.rtimes)
ns_body = TaskTemplate.NS
ns_body['metadata']['name'] = name
if not check_ns(name):
self.v1.create_namespace(ns_body)
train_dir = check_path(name)
time.sleep(12)
result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
node_list = self.get_node_list()
result_keys = result.keys()
nodes = [i[-1]['nodes'] for i in result_keys]
node_mg = [list(result[i]) for i in result_keys]
cpu_base = {}
memory_base = {}
point_base = {}
point_base_list = []
for i in range(len(node_mg)):
cpu_base[nodes[i]] = 0
memory_base[nodes[i]] = 0
point_base[nodes[i]] = 0.0
for j in range(len(node_mg[0])):
cpu_base[nodes[i]] += node_mg[i][j]['cpu']
memory_base[nodes[i]] += node_mg[i][j]['memory']
cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
point_base[nodes[i]] = tmp
point_base_list.append(tmp)
list.sort(point_base_list)
for key in nodes:
command = 'kubectl label nodes ' + key + ' woksch-'
os.system(command)
command2 = 'kubectl label nodes ' + key + ' wokpro-'
os.system(command2)
nod_prori = point_base_list.index(point_base[key])
priori = ' wokpro=%d' % nod_prori
command3 = 'kubectl label nodes ' + key + priori
os.system(command3)
if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
command = 'kubectl label nodes ' + key + ' woksch=true'
os.system(command)
else:
command = 'kubectl label nodes ' + key + ' woksch=false'
os.system(command)
self.template['metadata']['name'] = name
self.template['metadata']['namespace'] = name
self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.make_args()
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
log_dir = '/tfdata/tfcnn/expjob/'
f = open(log_dir+str(name)+'.yaml', "w")
yaml.dump(self.template, f)
f.close()
response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
if response == 0:
print('create task sucess')
else:
print("Error code:"+str(response))
def delete_tf(self):
name = 'xception-'+str(self.task_id)+'-'+str(self.rtimes)
log_dir = '/tfdata/tfcnn/expjob/'
response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
if response == 0:
print('delete task sucess')
else:
print("Error code:" + str(response))
self.v1.delete_namespace(name=name)
class DENTask(SubTask):
    """Wraps a DenseNet-style distributed training run as a Kubeflow TFJob.

    Renders the job manifest from ``TaskTemplate.DEN``, labels cluster nodes
    with a load-based scheduling rank, writes the manifest to
    ``/tfdata/tfcnn/expjob/<name>.yaml`` and submits/removes it via kubectl.

    Fixes vs. the original: shared module-level template dicts are
    deep-copied (previously every instance mutated the same object), the
    manifest file is written inside a ``with`` block, an unused node-list
    API call was dropped, and the success messages' spelling was corrected.
    """

    def __init__(self, v1, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag,
                 L, k, BC):
        import copy  # local import keeps this fix self-contained
        SubTask.__init__(self, template_id, ps_replicas, worker_replicas,
                         training_step, batch_size, interval, task_id,
                         rtimes, tag)
        self.L = L
        self.k = k
        self.BC = BC
        # BUGFIX: the original aliased the shared dict (self.template =
        # TaskTemplate.DEN); create_tf() then mutated template state for
        # every other task instance. Work on a private deep copy.
        self.template = copy.deepcopy(TaskTemplate.DEN)
        self.v1 = v1
        self.name = 'den-' + str(self.task_id) + '-' + str(self.rtimes)

    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        return [node.metadata.name for node in self.v1.list_node().items]

    def make_args(self):
        """Append the DenseNet hyper-parameters to the base CLI argument list."""
        self.args.append('--L=' + str(self.L))
        self.args.append('--k=' + str(self.k))
        self.args.append('--BC=' + str(self.BC))

    def _score_nodes(self, result):
        """Average the latest CPU/memory samples per node and score the load.

        Returns ``(nodes, cpu_base, memory_base, point_base)`` where the
        ``*_base`` dicts hold utilisation ratios and ``point_base`` the
        weighted 0.6*cpu + 0.4*memory score used for ranking.
        """
        result_keys = result.keys()
        nodes = [key[-1]['nodes'] for key in result_keys]
        node_mg = [list(result[key]) for key in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        # NOTE(review): like the original, the sample count of the FIRST
        # series (len(node_mg[0])) is assumed valid for every node.
        for i, node in enumerate(nodes):
            cpu = 0
            mem = 0
            for j in range(len(node_mg[0])):
                cpu += node_mg[i][j]['cpu']
                mem += node_mg[i][j]['memory']
            cpu_base[node] = (cpu / len(node_mg[0])) / self.node_cpu[node]
            memory_base[node] = (mem / len(node_mg[0])) / self.node_memory[node]
            point_base[node] = cpu_base[node] * 0.6 + memory_base[node] * 0.4
        return nodes, cpu_base, memory_base, point_base

    def _label_nodes(self, nodes, cpu_base, memory_base, point_base):
        """Relabel every node with its load rank and schedulability flag."""
        ranking = sorted(point_base[node] for node in nodes)
        for node in nodes:
            # A trailing '-' removes a possibly stale label before re-adding.
            os.system('kubectl label nodes ' + node + ' woksch-')
            os.system('kubectl label nodes ' + node + ' wokpro-')
            os.system('kubectl label nodes ' + node +
                      ' wokpro=%d' % ranking.index(point_base[node]))
            if cpu_base[node] <= 0.6 and memory_base[node] <= 0.6:
                os.system('kubectl label nodes ' + node + ' woksch=true')
            else:
                os.system('kubectl label nodes ' + node + ' woksch=false')

    def _fill_template(self, name, train_dir):
        """Patch job name, replica counts, volumes and args into the manifest."""
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        # NOTE(review): make_args() appends to self.args, so create_tf() is
        # effectively single-shot per instance (as in the original).
        self.make_args()
        for role in ('PS', 'Worker'):
            pod_spec = self.template['spec']['tfReplicaSpecs'][role]['template']['spec']
            pod_spec['volumes'][0]['name'] = name
            pod_spec['volumes'][0]['hostPath']['path'] = train_dir
            pod_spec['containers'][0]['volumeMounts'][0]['name'] = name
            pod_spec['containers'][0]['args'] = self.args[:]

    def create_tf(self):
        """Prepare namespace/dir, rank nodes, render the manifest, submit it."""
        import copy
        name = self.name
        # NS template is shared module state as well -- mutate a private copy.
        ns_body = copy.deepcopy(TaskTemplate.NS)
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Presumably gives the monitor time to publish fresh NODEMESSAGE
        # samples -- TODO confirm the sampling interval.
        time.sleep(12)
        result = self.influx_client.query(
            "select * from NODEMESSAGE group by nodes order by desc limit 3")
        # (The original also fetched get_node_list() here without using it.)
        nodes, cpu_base, memory_base, point_base = self._score_nodes(result)
        self._label_nodes(nodes, cpu_base, memory_base, point_base)
        self._fill_template(name, train_dir)
        yaml_path = '/tfdata/tfcnn/expjob/' + str(name) + '.yaml'
        with open(yaml_path, "w") as f:  # 'with' closes the file even on error
            yaml.dump(self.template, f)
        response = os.system('kubectl create -f ' + yaml_path)
        if response == 0:
            print('create task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))

    def delete_tf(self):
        """Delete the submitted TFJob manifest and then its namespace."""
        yaml_path = '/tfdata/tfcnn/expjob/' + str(self.name) + '.yaml'
        response = os.system('kubectl delete -f ' + yaml_path)
        if response == 0:
            print('delete task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))
        # Best-effort namespace removal even when kubectl failed (as before).
        self.v1.delete_namespace(name=self.name)
if __name__ == '__main__':
    # Smoke test: connect to the cluster and verify the shared NFS dir works.
    kubernetes.config.load_kube_config()
    v1 = kubernetes.client.CoreV1Api()
    # v1.create_namespace()
    v1.list_namespace()  # simple connectivity check against the API server
    check_path('ceshi')  # creates /tfdata/k8snfs/ceshi as a write test
    # vgg = VGGTask(1,2,4,80,1.0,2,1,"ms",32,64,128,256,512,2,3,3,4,4)
    # vgg.create_tf()
| 46.155488 | 219 | 0.594986 |
import MultiTemplate
from MultiTemplate import TaskTemplate
import kubernetes
import os
import influxdb
import time
import yaml
def check_path(name):
    """Ensure ``/tfdata/k8snfs/<name>`` exists and return its path.

    The directory is the per-job training dir mounted into the TFJob pods.
    """
    train_dir = os.path.join('/tfdata/k8snfs/', name)
    print(train_dir)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(train_dir, exist_ok=True)
    return train_dir
def check_ns(name):
    """Return True when a Kubernetes namespace called *name* already exists."""
    kubernetes.config.load_kube_config()
    v1 = kubernetes.client.CoreV1Api()
    existing = [ns.metadata.name for ns in v1.list_namespace().items]
    return name in existing
class SubTask():
    """Common state shared by every concrete TFJob task wrapper."""

    def __init__(self, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag):
        self.template_id = template_id
        self.ps_replicas = ps_replicas
        self.worker_replicas = worker_replicas
        self.training_step = training_step
        self.interval = interval
        self.batch_size = batch_size
        self.task_id = task_id
        self.tag = tag
        self.rtimes = rtimes
        # Monitoring backend holding the per-node utilisation samples.
        self.influx_client = influxdb.InfluxDBClient(
            host='192.168.128.10', port=8086, username='admin',
            password='admin', database="NODEMESSAGE")
        self.node_list = ['k8s-master', 'k8s-worker0', 'k8s-worker2',
                          'k8sworker1', 'k8s-worker3', 'k8s-worker4',
                          'k8s-worker5']
        # Capacity tables used to normalise the NODEMESSAGE samples
        # (presumably millicores / MiB -- verify against the monitor).
        self.node_cpu = {
            'k8s-master': 32000,
            'k8s-worker0': 24000,
            'k8s-worker2': 24000,
            'k8sworker1': 16000,
            'k8s-worker3': 24000,
            'k8s-worker4': 16000,
            'k8s-worker5': 24000,
        }
        self.node_memory = {
            'k8s-master': float(251 * 1024),
            'k8s-worker0': float(94 * 1024),
            'k8s-worker2': float(94 * 1024),
            'k8sworker1': float(125 * 1024),
            'k8s-worker3': float(94 * 1024),
            'k8s-worker4': float(125 * 1024),
            'k8s-worker5': float(94 * 1024),
        }
        # Base CLI arguments common to all tasks; subclasses append theirs
        # via make_args().
        self.args = ['--training_step=' + str(self.training_step),
                     '--batch_size=' + str(self.batch_size),
                     '--interval=' + str(self.interval),
                     '--task_id=' + str(self.task_id),
                     '--rtimes=' + str(self.rtimes),
                     '--tag=' + self.tag]
class VGGTask(SubTask):
    """Wraps a VGG-style distributed training run as a Kubeflow TFJob.

    Renders the job manifest from ``TaskTemplate.VGG``, labels cluster nodes
    with a load-based scheduling rank, writes the manifest to
    ``/tfdata/tfcnn/expjob/<name>.yaml`` and submits/removes it via kubectl.

    Fixes vs. the original: shared module-level template dicts are
    deep-copied (previously every instance mutated the same object), the
    manifest file is written inside a ``with`` block, an unused node-list
    API call was dropped, and the success messages' spelling was corrected.
    """

    def __init__(self, v1, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag,
                 channel1, channel2, channel3, channel4, channel5,
                 num_layer1, num_layer2, num_layer3, num_layer4, num_layer5):
        import copy  # local import keeps this fix self-contained
        SubTask.__init__(self, template_id, ps_replicas, worker_replicas,
                         training_step, batch_size, interval, task_id,
                         rtimes, tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.channel5 = channel5
        self.num_layer1 = num_layer1
        self.num_layer2 = num_layer2
        self.num_layer3 = num_layer3
        self.num_layer4 = num_layer4
        self.num_layer5 = num_layer5
        # +3: presumably the fully-connected head on top of the five conv
        # stages (matches the original formula) -- TODO confirm.
        self.num_layers = (num_layer1 + num_layer2 + num_layer3 +
                           num_layer4 + num_layer5 + 3)
        # BUGFIX: the original aliased the shared dict (self.template =
        # TaskTemplate.VGG); create_tf() then mutated template state for
        # every other task instance. Work on a private deep copy.
        self.template = copy.deepcopy(TaskTemplate.VGG)
        self.v1 = v1
        self.name = 'vgg-' + str(self.task_id) + '-' + str(self.rtimes)

    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        return [node.metadata.name for node in self.v1.list_node().items]

    def make_args(self):
        """Append the VGG hyper-parameters to the base CLI argument list."""
        self.args.append('--channel1=' + str(self.channel1))
        self.args.append('--channel2=' + str(self.channel2))
        self.args.append('--channel3=' + str(self.channel3))
        self.args.append('--channel4=' + str(self.channel4))
        self.args.append('--channel5=' + str(self.channel5))
        self.args.append('--num_layer1=' + str(self.num_layer1))
        self.args.append('--num_layer2=' + str(self.num_layer2))
        self.args.append('--num_layer3=' + str(self.num_layer3))
        self.args.append('--num_layer4=' + str(self.num_layer4))
        self.args.append('--num_layer5=' + str(self.num_layer5))
        self.args.append('--num_layers=' + str(self.num_layers))

    def _score_nodes(self, result):
        """Average the latest CPU/memory samples per node and score the load.

        Returns ``(nodes, cpu_base, memory_base, point_base)`` where the
        ``*_base`` dicts hold utilisation ratios and ``point_base`` the
        weighted 0.6*cpu + 0.4*memory score used for ranking.
        """
        result_keys = result.keys()
        nodes = [key[-1]['nodes'] for key in result_keys]
        node_mg = [list(result[key]) for key in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        # NOTE(review): like the original, the sample count of the FIRST
        # series (len(node_mg[0])) is assumed valid for every node.
        for i, node in enumerate(nodes):
            cpu = 0
            mem = 0
            for j in range(len(node_mg[0])):
                cpu += node_mg[i][j]['cpu']
                mem += node_mg[i][j]['memory']
            cpu_base[node] = (cpu / len(node_mg[0])) / self.node_cpu[node]
            memory_base[node] = (mem / len(node_mg[0])) / self.node_memory[node]
            point_base[node] = cpu_base[node] * 0.6 + memory_base[node] * 0.4
        return nodes, cpu_base, memory_base, point_base

    def _label_nodes(self, nodes, cpu_base, memory_base, point_base):
        """Relabel every node with its load rank and schedulability flag."""
        ranking = sorted(point_base[node] for node in nodes)
        for node in nodes:
            # A trailing '-' removes a possibly stale label before re-adding.
            os.system('kubectl label nodes ' + node + ' woksch-')
            os.system('kubectl label nodes ' + node + ' wokpro-')
            os.system('kubectl label nodes ' + node +
                      ' wokpro=%d' % ranking.index(point_base[node]))
            # NOTE(review): VGG uses a 0.57 CPU cut-off while the other task
            # classes use 0.6 -- kept as-is; confirm whether intentional.
            if cpu_base[node] <= 0.57 and memory_base[node] <= 0.6:
                os.system('kubectl label nodes ' + node + ' woksch=true')
            else:
                os.system('kubectl label nodes ' + node + ' woksch=false')

    def _fill_template(self, name, train_dir):
        """Patch job name, replica counts, volumes and args into the manifest."""
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        # NOTE(review): make_args() appends to self.args, so create_tf() is
        # effectively single-shot per instance (as in the original).
        self.make_args()
        for role in ('PS', 'Worker'):
            pod_spec = self.template['spec']['tfReplicaSpecs'][role]['template']['spec']
            pod_spec['volumes'][0]['name'] = name
            pod_spec['volumes'][0]['hostPath']['path'] = train_dir
            pod_spec['containers'][0]['volumeMounts'][0]['name'] = name
            pod_spec['containers'][0]['args'] = self.args[:]

    def create_tf(self):
        """Prepare namespace/dir, rank nodes, render the manifest, submit it."""
        import copy
        name = self.name
        # NS template is shared module state as well -- mutate a private copy.
        ns_body = copy.deepcopy(TaskTemplate.NS)
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Presumably gives the monitor time to publish fresh NODEMESSAGE
        # samples -- TODO confirm the sampling interval.
        time.sleep(12)
        result = self.influx_client.query(
            "select * from NODEMESSAGE group by nodes order by desc limit 3")
        # (The original also fetched get_node_list() here without using it.)
        nodes, cpu_base, memory_base, point_base = self._score_nodes(result)
        self._label_nodes(nodes, cpu_base, memory_base, point_base)
        self._fill_template(name, train_dir)
        yaml_path = '/tfdata/tfcnn/expjob/' + str(name) + '.yaml'
        with open(yaml_path, "w") as f:  # 'with' closes the file even on error
            yaml.dump(self.template, f)
        response = os.system('kubectl create -f ' + yaml_path)
        if response == 0:
            print('create task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))

    def delete_tf(self):
        """Delete the submitted TFJob manifest and then its namespace."""
        yaml_path = '/tfdata/tfcnn/expjob/' + str(self.name) + '.yaml'
        response = os.system('kubectl delete -f ' + yaml_path)
        if response == 0:
            print('delete task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))
        # Best-effort namespace removal even when kubectl failed (as before).
        self.v1.delete_namespace(name=self.name)
class RESTask(SubTask):
    """Wraps a ResNet-style distributed training run as a Kubeflow TFJob.

    Renders the job manifest from ``TaskTemplate.RES``, labels cluster nodes
    with a load-based scheduling rank, writes the manifest to
    ``/tfdata/tfcnn/expjob/<name>.yaml`` and submits/removes it via kubectl.

    Fixes vs. the original: shared module-level template dicts are
    deep-copied (previously every instance mutated the same object), the
    manifest file is written inside a ``with`` block, an unused node-list
    API call was dropped, and the success messages' spelling was corrected.
    """

    def __init__(self, v1, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag,
                 bottle, layer1, layer2, layer3, layer4,
                 channel1, channel2, channel3, channel4):
        import copy  # local import keeps this fix self-contained
        SubTask.__init__(self, template_id, ps_replicas, worker_replicas,
                         training_step, batch_size, interval, task_id,
                         rtimes, tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.bottle = bottle
        self.layer1 = layer1
        self.layer2 = layer2
        self.layer3 = layer3
        self.layer4 = layer4
        self.name = 'res-' + str(self.task_id) + '-' + str(self.rtimes)
        # Presumably: bottleneck units (bottle == 1) have 3 convs per block,
        # basic units 2; +2 for stem and head (mirrors original formula).
        if self.bottle == 1:
            self.num_layers = 3 * (layer1 + layer2 + layer3 + layer4) + 2
        else:
            self.num_layers = 2 * (layer1 + layer2 + layer3 + layer4) + 2
        # BUGFIX: the original aliased the shared dict (self.template =
        # TaskTemplate.RES); create_tf() then mutated template state for
        # every other task instance. Work on a private deep copy.
        self.template = copy.deepcopy(TaskTemplate.RES)
        self.v1 = v1

    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        return [node.metadata.name for node in self.v1.list_node().items]

    def make_args(self):
        """Append the ResNet hyper-parameters to the base CLI argument list."""
        self.args.append('--bottle=' + str(self.bottle))
        self.args.append('--channel1=' + str(self.channel1))
        self.args.append('--channel2=' + str(self.channel2))
        self.args.append('--channel3=' + str(self.channel3))
        self.args.append('--channel4=' + str(self.channel4))
        self.args.append('--layer1=' + str(self.layer1))
        self.args.append('--layer2=' + str(self.layer2))
        self.args.append('--layer3=' + str(self.layer3))
        self.args.append('--layer4=' + str(self.layer4))

    def _score_nodes(self, result):
        """Average the latest CPU/memory samples per node and score the load.

        Returns ``(nodes, cpu_base, memory_base, point_base)`` where the
        ``*_base`` dicts hold utilisation ratios and ``point_base`` the
        weighted 0.6*cpu + 0.4*memory score used for ranking.
        """
        result_keys = result.keys()
        nodes = [key[-1]['nodes'] for key in result_keys]
        node_mg = [list(result[key]) for key in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        # NOTE(review): like the original, the sample count of the FIRST
        # series (len(node_mg[0])) is assumed valid for every node.
        for i, node in enumerate(nodes):
            cpu = 0
            mem = 0
            for j in range(len(node_mg[0])):
                cpu += node_mg[i][j]['cpu']
                mem += node_mg[i][j]['memory']
            cpu_base[node] = (cpu / len(node_mg[0])) / self.node_cpu[node]
            memory_base[node] = (mem / len(node_mg[0])) / self.node_memory[node]
            point_base[node] = cpu_base[node] * 0.6 + memory_base[node] * 0.4
        return nodes, cpu_base, memory_base, point_base

    def _label_nodes(self, nodes, cpu_base, memory_base, point_base):
        """Relabel every node with its load rank and schedulability flag."""
        ranking = sorted(point_base[node] for node in nodes)
        for node in nodes:
            # A trailing '-' removes a possibly stale label before re-adding.
            os.system('kubectl label nodes ' + node + ' woksch-')
            os.system('kubectl label nodes ' + node + ' wokpro-')
            os.system('kubectl label nodes ' + node +
                      ' wokpro=%d' % ranking.index(point_base[node]))
            if cpu_base[node] <= 0.6 and memory_base[node] <= 0.6:
                os.system('kubectl label nodes ' + node + ' woksch=true')
            else:
                os.system('kubectl label nodes ' + node + ' woksch=false')

    def _fill_template(self, name, train_dir):
        """Patch job name, replica counts, volumes and args into the manifest."""
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        # NOTE(review): make_args() appends to self.args, so create_tf() is
        # effectively single-shot per instance (as in the original).
        self.make_args()
        for role in ('PS', 'Worker'):
            pod_spec = self.template['spec']['tfReplicaSpecs'][role]['template']['spec']
            pod_spec['volumes'][0]['name'] = name
            pod_spec['volumes'][0]['hostPath']['path'] = train_dir
            pod_spec['containers'][0]['volumeMounts'][0]['name'] = name
            pod_spec['containers'][0]['args'] = self.args[:]

    def create_tf(self):
        """Prepare namespace/dir, rank nodes, render the manifest, submit it."""
        import copy
        name = self.name
        # NS template is shared module state as well -- mutate a private copy.
        ns_body = copy.deepcopy(TaskTemplate.NS)
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Presumably gives the monitor time to publish fresh NODEMESSAGE
        # samples -- TODO confirm the sampling interval.
        time.sleep(12)
        result = self.influx_client.query(
            "select * from NODEMESSAGE group by nodes order by desc limit 3")
        # (The original also fetched get_node_list() here without using it.)
        nodes, cpu_base, memory_base, point_base = self._score_nodes(result)
        self._label_nodes(nodes, cpu_base, memory_base, point_base)
        self._fill_template(name, train_dir)
        yaml_path = '/tfdata/tfcnn/expjob/' + str(name) + '.yaml'
        with open(yaml_path, "w") as f:  # 'with' closes the file even on error
            yaml.dump(self.template, f)
        response = os.system('kubectl create -f ' + yaml_path)
        if response == 0:
            print('create task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))

    def delete_tf(self):
        """Delete the submitted TFJob manifest and then its namespace."""
        yaml_path = '/tfdata/tfcnn/expjob/' + str(self.name) + '.yaml'
        response = os.system('kubectl delete -f ' + yaml_path)
        if response == 0:
            print('delete task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))
        # Best-effort namespace removal even when kubectl failed (as before).
        self.v1.delete_namespace(name=self.name)
class RETask(SubTask):
    """Wraps a small ResNet (6n+2) distributed training run as a TFJob.

    Renders the job manifest from ``TaskTemplate.RE``, labels cluster nodes
    with a load-based scheduling rank, writes the manifest to
    ``/tfdata/tfcnn/expjob/<name>.yaml`` and submits/removes it via kubectl.

    Fixes vs. the original: shared module-level template dicts are
    deep-copied (previously every instance mutated the same object), the
    manifest file is written inside a ``with`` block, an unused node-list
    API call was dropped, and the success messages' spelling was corrected.
    """

    def __init__(self, v1, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag,
                 stack, channel1, channel2, channel3, channel4):
        import copy  # local import keeps this fix self-contained
        SubTask.__init__(self, template_id, ps_replicas, worker_replicas,
                         training_step, batch_size, interval, task_id,
                         rtimes, tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.stack = stack
        # 6n+2 depth formula (mirrors the original computation).
        self.num_layers = 6 * self.stack + 2
        # BUGFIX: the original aliased the shared dict (self.template =
        # TaskTemplate.RE); create_tf() then mutated template state for
        # every other task instance. Work on a private deep copy.
        self.template = copy.deepcopy(TaskTemplate.RE)
        self.name = 're-' + str(self.task_id) + '-' + str(self.rtimes)
        self.v1 = v1

    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        return [node.metadata.name for node in self.v1.list_node().items]

    def make_args(self):
        """Append the ResNet hyper-parameters to the base CLI argument list."""
        self.args.append('--stack=' + str(self.stack))
        self.args.append('--channel1=' + str(self.channel1))
        self.args.append('--channel2=' + str(self.channel2))
        self.args.append('--channel3=' + str(self.channel3))
        self.args.append('--channel4=' + str(self.channel4))

    def _score_nodes(self, result):
        """Average the latest CPU/memory samples per node and score the load.

        Returns ``(nodes, cpu_base, memory_base, point_base)`` where the
        ``*_base`` dicts hold utilisation ratios and ``point_base`` the
        weighted 0.6*cpu + 0.4*memory score used for ranking.
        """
        result_keys = result.keys()
        nodes = [key[-1]['nodes'] for key in result_keys]
        node_mg = [list(result[key]) for key in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        # NOTE(review): like the original, the sample count of the FIRST
        # series (len(node_mg[0])) is assumed valid for every node.
        for i, node in enumerate(nodes):
            cpu = 0
            mem = 0
            for j in range(len(node_mg[0])):
                cpu += node_mg[i][j]['cpu']
                mem += node_mg[i][j]['memory']
            cpu_base[node] = (cpu / len(node_mg[0])) / self.node_cpu[node]
            memory_base[node] = (mem / len(node_mg[0])) / self.node_memory[node]
            point_base[node] = cpu_base[node] * 0.6 + memory_base[node] * 0.4
        return nodes, cpu_base, memory_base, point_base

    def _label_nodes(self, nodes, cpu_base, memory_base, point_base):
        """Relabel every node with its load rank and schedulability flag."""
        ranking = sorted(point_base[node] for node in nodes)
        for node in nodes:
            # A trailing '-' removes a possibly stale label before re-adding.
            os.system('kubectl label nodes ' + node + ' woksch-')
            os.system('kubectl label nodes ' + node + ' wokpro-')
            os.system('kubectl label nodes ' + node +
                      ' wokpro=%d' % ranking.index(point_base[node]))
            if cpu_base[node] <= 0.6 and memory_base[node] <= 0.6:
                os.system('kubectl label nodes ' + node + ' woksch=true')
            else:
                os.system('kubectl label nodes ' + node + ' woksch=false')

    def _fill_template(self, name, train_dir):
        """Patch job name, replica counts, volumes and args into the manifest."""
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        # NOTE(review): make_args() appends to self.args, so create_tf() is
        # effectively single-shot per instance (as in the original).
        self.make_args()
        for role in ('PS', 'Worker'):
            pod_spec = self.template['spec']['tfReplicaSpecs'][role]['template']['spec']
            pod_spec['volumes'][0]['name'] = name
            pod_spec['volumes'][0]['hostPath']['path'] = train_dir
            pod_spec['containers'][0]['volumeMounts'][0]['name'] = name
            pod_spec['containers'][0]['args'] = self.args[:]

    def create_tf(self):
        """Prepare namespace/dir, rank nodes, render the manifest, submit it."""
        import copy
        name = self.name
        # NS template is shared module state as well -- mutate a private copy.
        ns_body = copy.deepcopy(TaskTemplate.NS)
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Presumably gives the monitor time to publish fresh NODEMESSAGE
        # samples -- TODO confirm the sampling interval.
        time.sleep(12)
        result = self.influx_client.query(
            "select * from NODEMESSAGE group by nodes order by desc limit 3")
        # (The original also fetched get_node_list() here without using it.)
        nodes, cpu_base, memory_base, point_base = self._score_nodes(result)
        self._label_nodes(nodes, cpu_base, memory_base, point_base)
        self._fill_template(name, train_dir)
        yaml_path = '/tfdata/tfcnn/expjob/' + str(name) + '.yaml'
        with open(yaml_path, "w") as f:  # 'with' closes the file even on error
            yaml.dump(self.template, f)
        response = os.system('kubectl create -f ' + yaml_path)
        if response == 0:
            print('create task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))

    def delete_tf(self):
        """Delete the submitted TFJob manifest and then its namespace."""
        yaml_path = '/tfdata/tfcnn/expjob/' + str(self.name) + '.yaml'
        response = os.system('kubectl delete -f ' + yaml_path)
        if response == 0:
            print('delete task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))
        # Best-effort namespace removal even when kubectl failed (as before).
        self.v1.delete_namespace(name=self.name)
class XCETask(SubTask):
    """Wraps an Xception-style distributed training run as a Kubeflow TFJob.

    Renders the job manifest from ``TaskTemplate.XCEPTION``, labels cluster
    nodes with a load-based scheduling rank, writes the manifest to
    ``/tfdata/tfcnn/expjob/<name>.yaml`` and submits/removes it via kubectl.

    Fixes vs. the original: shared module-level template dicts are
    deep-copied (previously every instance mutated the same object), the
    manifest file is written inside a ``with`` block, an unused node-list
    API call was dropped, and the success messages' spelling was corrected.
    """

    def __init__(self, v1, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag,
                 repeat, channel1, channel2, channel3, channel4, channel5,
                 channel6, channel7, channel8):
        import copy  # local import keeps this fix self-contained
        SubTask.__init__(self, template_id, ps_replicas, worker_replicas,
                         training_step, batch_size, interval, task_id,
                         rtimes, tag)
        self.channel1 = channel1
        self.channel2 = channel2
        self.channel3 = channel3
        self.channel4 = channel4
        self.channel5 = channel5
        self.channel6 = channel6
        self.channel7 = channel7
        self.channel8 = channel8
        self.repeat = repeat
        # BUGFIX: the original aliased the shared dict (self.template =
        # TaskTemplate.XCEPTION); create_tf() then mutated template state
        # for every other task instance. Work on a private deep copy.
        self.template = copy.deepcopy(TaskTemplate.XCEPTION)
        self.v1 = v1
        self.name = 'xception-' + str(self.task_id) + '-' + str(self.rtimes)

    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        return [node.metadata.name for node in self.v1.list_node().items]

    def make_args(self):
        """Append the Xception hyper-parameters to the base CLI argument list."""
        self.args.append('--repeat=' + str(self.repeat))
        self.args.append('--channel1=' + str(self.channel1))
        self.args.append('--channel2=' + str(self.channel2))
        self.args.append('--channel3=' + str(self.channel3))
        self.args.append('--channel4=' + str(self.channel4))
        self.args.append('--channel5=' + str(self.channel5))
        self.args.append('--channel6=' + str(self.channel6))
        self.args.append('--channel7=' + str(self.channel7))
        self.args.append('--channel8=' + str(self.channel8))

    def _score_nodes(self, result):
        """Average the latest CPU/memory samples per node and score the load.

        Returns ``(nodes, cpu_base, memory_base, point_base)`` where the
        ``*_base`` dicts hold utilisation ratios and ``point_base`` the
        weighted 0.6*cpu + 0.4*memory score used for ranking.
        """
        result_keys = result.keys()
        nodes = [key[-1]['nodes'] for key in result_keys]
        node_mg = [list(result[key]) for key in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        # NOTE(review): like the original, the sample count of the FIRST
        # series (len(node_mg[0])) is assumed valid for every node.
        for i, node in enumerate(nodes):
            cpu = 0
            mem = 0
            for j in range(len(node_mg[0])):
                cpu += node_mg[i][j]['cpu']
                mem += node_mg[i][j]['memory']
            cpu_base[node] = (cpu / len(node_mg[0])) / self.node_cpu[node]
            memory_base[node] = (mem / len(node_mg[0])) / self.node_memory[node]
            point_base[node] = cpu_base[node] * 0.6 + memory_base[node] * 0.4
        return nodes, cpu_base, memory_base, point_base

    def _label_nodes(self, nodes, cpu_base, memory_base, point_base):
        """Relabel every node with its load rank and schedulability flag."""
        ranking = sorted(point_base[node] for node in nodes)
        for node in nodes:
            # A trailing '-' removes a possibly stale label before re-adding.
            os.system('kubectl label nodes ' + node + ' woksch-')
            os.system('kubectl label nodes ' + node + ' wokpro-')
            os.system('kubectl label nodes ' + node +
                      ' wokpro=%d' % ranking.index(point_base[node]))
            if cpu_base[node] <= 0.6 and memory_base[node] <= 0.6:
                os.system('kubectl label nodes ' + node + ' woksch=true')
            else:
                os.system('kubectl label nodes ' + node + ' woksch=false')

    def _fill_template(self, name, train_dir):
        """Patch job name, replica counts, volumes and args into the manifest."""
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        # NOTE(review): make_args() appends to self.args, so create_tf() is
        # effectively single-shot per instance (as in the original).
        self.make_args()
        for role in ('PS', 'Worker'):
            pod_spec = self.template['spec']['tfReplicaSpecs'][role]['template']['spec']
            pod_spec['volumes'][0]['name'] = name
            pod_spec['volumes'][0]['hostPath']['path'] = train_dir
            pod_spec['containers'][0]['volumeMounts'][0]['name'] = name
            pod_spec['containers'][0]['args'] = self.args[:]

    def create_tf(self):
        """Prepare namespace/dir, rank nodes, render the manifest, submit it."""
        import copy
        name = self.name
        # NS template is shared module state as well -- mutate a private copy.
        ns_body = copy.deepcopy(TaskTemplate.NS)
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Presumably gives the monitor time to publish fresh NODEMESSAGE
        # samples -- TODO confirm the sampling interval.
        time.sleep(12)
        result = self.influx_client.query(
            "select * from NODEMESSAGE group by nodes order by desc limit 3")
        # (The original also fetched get_node_list() here without using it.)
        nodes, cpu_base, memory_base, point_base = self._score_nodes(result)
        self._label_nodes(nodes, cpu_base, memory_base, point_base)
        self._fill_template(name, train_dir)
        yaml_path = '/tfdata/tfcnn/expjob/' + str(name) + '.yaml'
        with open(yaml_path, "w") as f:  # 'with' closes the file even on error
            yaml.dump(self.template, f)
        response = os.system('kubectl create -f ' + yaml_path)
        if response == 0:
            print('create task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))

    def delete_tf(self):
        """Delete the submitted TFJob manifest and then its namespace."""
        yaml_path = '/tfdata/tfcnn/expjob/' + str(self.name) + '.yaml'
        response = os.system('kubectl delete -f ' + yaml_path)
        if response == 0:
            print('delete task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))
        # Best-effort namespace removal even when kubectl failed (as before).
        self.v1.delete_namespace(name=self.name)
class DENTask(SubTask):
    """Wraps a DenseNet-style distributed training run as a Kubeflow TFJob.

    Renders the job manifest from ``TaskTemplate.DEN``, labels cluster nodes
    with a load-based scheduling rank, writes the manifest to
    ``/tfdata/tfcnn/expjob/<name>.yaml`` and submits/removes it via kubectl.

    Fixes vs. the original: shared module-level template dicts are
    deep-copied (previously every instance mutated the same object), the
    manifest file is written inside a ``with`` block, an unused node-list
    API call was dropped, and the success messages' spelling was corrected.
    """

    def __init__(self, v1, template_id, ps_replicas, worker_replicas,
                 training_step, batch_size, interval, task_id, rtimes, tag,
                 L, k, BC):
        import copy  # local import keeps this fix self-contained
        SubTask.__init__(self, template_id, ps_replicas, worker_replicas,
                         training_step, batch_size, interval, task_id,
                         rtimes, tag)
        self.L = L
        self.k = k
        self.BC = BC
        # BUGFIX: the original aliased the shared dict (self.template =
        # TaskTemplate.DEN); create_tf() then mutated template state for
        # every other task instance. Work on a private deep copy.
        self.template = copy.deepcopy(TaskTemplate.DEN)
        self.v1 = v1
        self.name = 'den-' + str(self.task_id) + '-' + str(self.rtimes)

    def get_node_list(self):
        """Return the names of all nodes currently registered in the cluster."""
        return [node.metadata.name for node in self.v1.list_node().items]

    def make_args(self):
        """Append the DenseNet hyper-parameters to the base CLI argument list."""
        self.args.append('--L=' + str(self.L))
        self.args.append('--k=' + str(self.k))
        self.args.append('--BC=' + str(self.BC))

    def _score_nodes(self, result):
        """Average the latest CPU/memory samples per node and score the load.

        Returns ``(nodes, cpu_base, memory_base, point_base)`` where the
        ``*_base`` dicts hold utilisation ratios and ``point_base`` the
        weighted 0.6*cpu + 0.4*memory score used for ranking.
        """
        result_keys = result.keys()
        nodes = [key[-1]['nodes'] for key in result_keys]
        node_mg = [list(result[key]) for key in result_keys]
        cpu_base = {}
        memory_base = {}
        point_base = {}
        # NOTE(review): like the original, the sample count of the FIRST
        # series (len(node_mg[0])) is assumed valid for every node.
        for i, node in enumerate(nodes):
            cpu = 0
            mem = 0
            for j in range(len(node_mg[0])):
                cpu += node_mg[i][j]['cpu']
                mem += node_mg[i][j]['memory']
            cpu_base[node] = (cpu / len(node_mg[0])) / self.node_cpu[node]
            memory_base[node] = (mem / len(node_mg[0])) / self.node_memory[node]
            point_base[node] = cpu_base[node] * 0.6 + memory_base[node] * 0.4
        return nodes, cpu_base, memory_base, point_base

    def _label_nodes(self, nodes, cpu_base, memory_base, point_base):
        """Relabel every node with its load rank and schedulability flag."""
        ranking = sorted(point_base[node] for node in nodes)
        for node in nodes:
            # A trailing '-' removes a possibly stale label before re-adding.
            os.system('kubectl label nodes ' + node + ' woksch-')
            os.system('kubectl label nodes ' + node + ' wokpro-')
            os.system('kubectl label nodes ' + node +
                      ' wokpro=%d' % ranking.index(point_base[node]))
            if cpu_base[node] <= 0.6 and memory_base[node] <= 0.6:
                os.system('kubectl label nodes ' + node + ' woksch=true')
            else:
                os.system('kubectl label nodes ' + node + ' woksch=false')

    def _fill_template(self, name, train_dir):
        """Patch job name, replica counts, volumes and args into the manifest."""
        self.template['metadata']['name'] = name
        self.template['metadata']['namespace'] = name
        self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
        self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
        # NOTE(review): make_args() appends to self.args, so create_tf() is
        # effectively single-shot per instance (as in the original).
        self.make_args()
        for role in ('PS', 'Worker'):
            pod_spec = self.template['spec']['tfReplicaSpecs'][role]['template']['spec']
            pod_spec['volumes'][0]['name'] = name
            pod_spec['volumes'][0]['hostPath']['path'] = train_dir
            pod_spec['containers'][0]['volumeMounts'][0]['name'] = name
            pod_spec['containers'][0]['args'] = self.args[:]

    def create_tf(self):
        """Prepare namespace/dir, rank nodes, render the manifest, submit it."""
        import copy
        name = self.name
        # NS template is shared module state as well -- mutate a private copy.
        ns_body = copy.deepcopy(TaskTemplate.NS)
        ns_body['metadata']['name'] = name
        if not check_ns(name):
            self.v1.create_namespace(ns_body)
        train_dir = check_path(name)
        # Presumably gives the monitor time to publish fresh NODEMESSAGE
        # samples -- TODO confirm the sampling interval.
        time.sleep(12)
        result = self.influx_client.query(
            "select * from NODEMESSAGE group by nodes order by desc limit 3")
        # (The original also fetched get_node_list() here without using it.)
        nodes, cpu_base, memory_base, point_base = self._score_nodes(result)
        self._label_nodes(nodes, cpu_base, memory_base, point_base)
        self._fill_template(name, train_dir)
        yaml_path = '/tfdata/tfcnn/expjob/' + str(name) + '.yaml'
        with open(yaml_path, "w") as f:  # 'with' closes the file even on error
            yaml.dump(self.template, f)
        response = os.system('kubectl create -f ' + yaml_path)
        if response == 0:
            print('create task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))

    def delete_tf(self):
        """Delete the submitted TFJob manifest and then its namespace."""
        yaml_path = '/tfdata/tfcnn/expjob/' + str(self.name) + '.yaml'
        response = os.system('kubectl delete -f ' + yaml_path)
        if response == 0:
            print('delete task success')  # fixed typo: was 'sucess'
        else:
            print("Error code:" + str(response))
        # Best-effort namespace removal even when kubectl failed (as before).
        self.v1.delete_namespace(name=self.name)
if __name__ == '__main__':
    # Load credentials from the local kubeconfig and verify API access by
    # listing namespaces once.
    kubernetes.config.load_kube_config()
    v1 = kubernetes.client.CoreV1Api()
    v1.list_namespace()
    # check_path is defined elsewhere in this file; 'ceshi' ("test" in
    # Chinese pinyin) is presumably a scratch directory name — TODO confirm.
    check_path('ceshi')
| true | true |
7901a7ae2350f0fe3414e33aee4cd0df685ec183 | 15,431 | py | Python | python/mxnet/gluon/nn/basic_layers.py | IIMarch/mxnet | 64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b | [
"Apache-2.0"
] | null | null | null | python/mxnet/gluon/nn/basic_layers.py | IIMarch/mxnet | 64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b | [
"Apache-2.0"
] | null | null | null | python/mxnet/gluon/nn/basic_layers.py | IIMarch/mxnet | 64c35f2d41f5bad3f9cbf4d4fda9cf3bf3dadb4b | [
"Apache-2.0"
] | 1 | 2018-11-30T21:34:24.000Z | 2018-11-30T21:34:24.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Basic neural network layers."""
from ..block import Block, HybridBlock
from ..utils import _indent
class Sequential(Block):
    """A sequential container of `Block`s, executed in insertion order.

    Example::

        net = nn.Sequential()
        # use net's name_scope to give child Blocks appropriate names.
        with net.name_scope():
            net.add(nn.Dense(10, activation='relu'))
            net.add(nn.Dense(20))
    """
    def __init__(self, prefix=None, params=None):
        super(Sequential, self).__init__(prefix=prefix, params=params)

    def add(self, block):
        """Adds block on top of the stack."""
        self.register_child(block)

    def forward(self, x):
        # Feed the output of each child into the next one.
        out = x
        for child in self._children:
            out = child(out)
        return out

    def __repr__(self):
        lines = []
        for idx, child in enumerate(self._children):
            if not isinstance(child, Block):
                continue
            lines.append('  ({key}): {block}'.format(
                key=idx, block=_indent(repr(child), 2)))
        return '{name}(\n{modstr}\n)'.format(
            name=self.__class__.__name__, modstr='\n'.join(lines))

    def __getitem__(self, i):
        return self._children[i]

    def __len__(self):
        return len(self._children)
class HybridSequential(HybridBlock):
    """Stacks `HybridBlock`s sequentially.

    Example::

        net = nn.HybridSequential()
        # use net's name_scope to give child Blocks appropriate names.
        with net.name_scope():
            net.add(nn.Dense(10, activation='relu'))
            net.add(nn.Dense(20))
    """
    def __init__(self, prefix=None, params=None):
        super(HybridSequential, self).__init__(prefix=prefix, params=params)

    def add(self, block):
        """Adds block on top of the stack."""
        self.register_child(block)

    def hybrid_forward(self, F, x):
        # Chain the children: each child consumes the previous output.
        for block in self._children:
            x = block(x)
        return x

    def __repr__(self):
        s = '{name}(\n{modstr}\n)'
        modstr = '\n'.join(['  ({key}): {block}'.format(key=key,
                                                        block=_indent(block.__repr__(), 2))
                            for key, block in enumerate(self._children)
                            if isinstance(block, Block)])
        return s.format(name=self.__class__.__name__,
                        modstr=modstr)

    def __getitem__(self, i):
        return self._children[i]

    def __len__(self):
        return len(self._children)
class Dense(HybridBlock):
    """Densely-connected layer: ``output = activation(dot(input, weight) + bias)``.

    Note: the input must be a rank-2 tensor; use `flatten` first if needed.

    Parameters
    ----------
    units : int
        Dimensionality of the output space.
    activation : str, optional
        Activation applied to the output (see `Activation`); linear if None.
    use_bias : bool
        Whether the layer adds a bias vector.
    weight_initializer : str or `Initializer`
        Initializer for the weight matrix.
    bias_initializer : str or `Initializer`
        Initializer for the bias vector.
    in_units : int, optional
        Input feature size. When 0, initialization is deferred until the
        first forward call and the size is inferred from the input.
    prefix : str or None
        See document of `Block`.
    params : ParameterDict or None
        See document of `Block`.

    Input shape: ``(batch_size, in_units)``.
    Output shape: ``(batch_size, units)``.
    """
    def __init__(self, units, activation=None, use_bias=True,
                 weight_initializer=None, bias_initializer='zeros',
                 in_units=0, **kwargs):
        super(Dense, self).__init__(**kwargs)
        with self.name_scope():
            self._units = units
            self._in_units = in_units
            # Weight is stored as (out, in); in_units == 0 defers shape
            # inference to the first forward call.
            self.weight = self.params.get('weight', shape=(units, in_units),
                                          init=weight_initializer,
                                          allow_deferred_init=True)
            self.bias = (self.params.get('bias', shape=(units,),
                                         init=bias_initializer,
                                         allow_deferred_init=True)
                         if use_bias else None)
            self.act = (Activation(activation, prefix=activation + '_')
                        if activation is not None else None)

    def hybrid_forward(self, F, x, weight, bias=None):
        if bias is None:
            out = F.FullyConnected(x, weight, no_bias=True,
                                   num_hidden=self._units, name='fwd')
        else:
            out = F.FullyConnected(x, weight, bias,
                                   num_hidden=self._units, name='fwd')
        return out if self.act is None else self.act(out)

    def __repr__(self):
        shape = ('{0} -> {1}'.format(self._in_units, self._units)
                 if self._in_units else self._units)
        return '{name}({layout}, {act})'.format(
            name=self.__class__.__name__,
            layout=shape,
            act=self.act if self.act else 'linear')
class Activation(HybridBlock):
    """Applies a named activation function element-wise.

    Parameters
    ----------
    activation : str
        Name of the activation function; see
        :func:`~mxnet.ndarray.Activation` for available choices.

    Output shape equals input shape.
    """
    def __init__(self, activation, **kwargs):
        # Set before super().__init__() so _alias() is usable during naming.
        self._act_type = activation
        super(Activation, self).__init__(**kwargs)

    def _alias(self):
        return self._act_type

    def hybrid_forward(self, F, x):
        return F.Activation(x, act_type=self._act_type, name='fwd')

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__, self._act_type)
class Dropout(HybridBlock):
    """Randomly zeroes a fraction `rate` of the input units at each
    training update, which helps prevent overfitting.

    Parameters
    ----------
    rate : float
        Fraction of input units to drop; must be between 0 and 1.

    Output shape equals input shape.

    References
    ----------
    `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
    <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
    """
    def __init__(self, rate, **kwargs):
        super(Dropout, self).__init__(**kwargs)
        self._rate = rate

    def hybrid_forward(self, F, x):
        return F.Dropout(x, p=self._rate, name='fwd')

    def __repr__(self):
        return '{0}(p = {1})'.format(self.__class__.__name__, self._rate)
class BatchNorm(HybridBlock):
    """Batch normalization layer (Ioffe and Szegedy, 2014).

    Normalizes each batch so that the mean activation stays close to 0
    and the activation standard deviation close to 1.

    Parameters
    ----------
    axis : int, default 1
        Axis to normalize — typically the channel axis (1 for 'NCHW'
        layouts, 3 for 'NHWC').
    momentum : float, default 0.9
        Momentum for the moving averages.
    epsilon : float, default 1e-5
        Small constant added to the variance to avoid division by zero.
    center : bool, default True
        If True, add the learned offset `beta`; otherwise `beta` is ignored.
    scale : bool, default True
        If True, multiply by the learned `gamma`. Can be disabled when the
        following layer is linear, since it rescales anyway.
    beta_initializer : str or `Initializer`, default 'zeros'
        Initializer for beta.
    gamma_initializer : str or `Initializer`, default 'ones'
        Initializer for gamma.
    running_mean_initializer : str or `Initializer`, default 'zeros'
        Initializer for the moving mean.
    running_variance_initializer : str or `Initializer`, default 'ones'
        Initializer for the moving variance.
    in_channels : int, default 0
        Number of input channels; when 0, inferred on the first forward call.

    Output shape equals input shape.
    """
    def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
                 beta_initializer='zeros', gamma_initializer='ones',
                 running_mean_initializer='zeros', running_variance_initializer='ones',
                 in_channels=0, **kwargs):
        super(BatchNorm, self).__init__(**kwargs)
        # fix_gamma=True makes the backend freeze gamma at 1 when scaling
        # is disabled.
        self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
                        'fix_gamma': not scale}
        if in_channels != 0:
            self.in_channels = in_channels
        # Disabled parameters use grad_req='null' so they are never updated.
        self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
                                     shape=(in_channels,), init=gamma_initializer,
                                     allow_deferred_init=True,
                                     differentiable=scale)
        self.beta = self.params.get('beta', grad_req='write' if center else 'null',
                                    shape=(in_channels,), init=beta_initializer,
                                    allow_deferred_init=True,
                                    differentiable=center)
        # The running statistics are state, not trainable parameters.
        self.running_mean = self.params.get('running_mean', grad_req='null',
                                            shape=(in_channels,),
                                            init=running_mean_initializer,
                                            allow_deferred_init=True,
                                            differentiable=False)
        self.running_var = self.params.get('running_var', grad_req='null',
                                           shape=(in_channels,),
                                           init=running_variance_initializer,
                                           allow_deferred_init=True,
                                           differentiable=False)

    def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
        return F.BatchNorm(x, gamma, beta, running_mean, running_var,
                           name='fwd', **self._kwargs)

    def __repr__(self):
        content = ', '.join('{0}={1!r}'.format(k, v)
                            for k, v in self._kwargs.items())
        if hasattr(self, 'in_channels'):
            content += ', in_channels={0}'.format(self.in_channels)
        return '{0}({1})'.format(self.__class__.__name__, content)
class LeakyReLU(HybridBlock):
    """Leaky version of a Rectified Linear Unit.

    It allows a small gradient when the unit is not active::

        `f(x) = alpha * x for x < 0`,
        `f(x) = x for x >= 0`.

    Parameters
    ----------
    alpha : float
        Slope coefficient for the negative half axis. Must be >= 0.

    Raises
    ------
    ValueError
        If `alpha` is negative.

    Output shape equals input shape.
    """
    def __init__(self, alpha, **kwargs):
        super(LeakyReLU, self).__init__(**kwargs)
        # Enforce the documented contract: a negative slope would silently
        # change the semantics of the activation.
        if alpha < 0:
            raise ValueError("alpha must be >= 0, but got %s" % alpha)
        self._alpha = alpha

    def hybrid_forward(self, F, x):
        return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')

    def __repr__(self):
        s = '{name}({alpha})'
        return s.format(name=self.__class__.__name__,
                        alpha=self._alpha)
class Embedding(HybridBlock):
    """Maps non-negative integer indices to dense vectors of fixed size,
    e.g. ``[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]``.

    Parameters
    ----------
    input_dim : int
        Vocabulary size, i.e. maximum integer index + 1.
    output_dim : int
        Dimension of each embedding vector.
    dtype : str or np.dtype, default 'float32'
        Data type of the output embeddings.
    weight_initializer : Initializer
        Initializer for the embedding matrix.

    A ``(N, M)`` input yields a ``(N, M, output_dim)`` output.
    """
    def __init__(self, input_dim, output_dim, dtype='float32',
                 weight_initializer=None, **kwargs):
        super(Embedding, self).__init__(**kwargs)
        # These are forwarded verbatim to the backend operator.
        self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
                        'dtype': dtype}
        self.weight = self.params.get('weight', shape=(input_dim, output_dim),
                                      init=weight_initializer,
                                      allow_deferred_init=True)

    def hybrid_forward(self, F, x, weight):
        return F.Embedding(x, weight, name='fwd', **self._kwargs)

    def __repr__(self):
        return '{block_name}({input_dim} -> {output_dim}, {dtype})'.format(
            block_name=self.__class__.__name__, **self._kwargs)
class Flatten(HybridBlock):
    """Collapses every dimension after the batch axis into one.

    Input shape ``(N, a, b, c, ...)`` becomes ``(N, a*b*c*...)``.
    """
    def __init__(self, **kwargs):
        super(Flatten, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        # 0 keeps the batch dimension; -1 absorbs all remaining dims.
        return x.reshape((0, -1))

    def __repr__(self):
        return self.__class__.__name__
| 35.637413 | 97 | 0.580325 |
from ..block import Block, HybridBlock
from ..utils import _indent
class Sequential(Block):
def __init__(self, prefix=None, params=None):
super(Sequential, self).__init__(prefix=prefix, params=params)
def add(self, block):
self.register_child(block)
def forward(self, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, i):
return self._children[i]
def __len__(self):
return len(self._children)
class HybridSequential(HybridBlock):
def __init__(self, prefix=None, params=None):
super(HybridSequential, self).__init__(prefix=prefix, params=params)
def add(self, block):
self.register_child(block)
def hybrid_forward(self, F, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, i):
return self._children[i]
def __len__(self):
return len(self._children)
class Dense(HybridBlock):
def __init__(self, units, activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_units=0, **kwargs):
super(Dense, self).__init__(**kwargs)
with self.name_scope():
self._units = units
self._in_units = in_units
self.weight = self.params.get('weight', shape=(units, in_units),
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=(units,),
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
if bias is None:
act = F.FullyConnected(x, weight, no_bias=True, num_hidden=self._units,
name='fwd')
else:
act = F.FullyConnected(x, weight, bias, num_hidden=self._units,
name='fwd')
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(self._in_units, self._units) if self._in_units
else self._units)
class Activation(HybridBlock):
def __init__(self, activation, **kwargs):
self._act_type = activation
super(Activation, self).__init__(**kwargs)
def _alias(self):
return self._act_type
def hybrid_forward(self, F, x):
return F.Activation(x, act_type=self._act_type, name='fwd')
def __repr__(self):
s = '{name}({_act_type})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class Dropout(HybridBlock):
def __init__(self, rate, **kwargs):
super(Dropout, self).__init__(**kwargs)
self._rate = rate
def hybrid_forward(self, F, x):
return F.Dropout(x, p=self._rate, name='fwd')
def __repr__(self):
s = '{name}(p = {_rate})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class BatchNorm(HybridBlock):
def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
running_mean_initializer='zeros', running_variance_initializer='ones',
in_channels=0, **kwargs):
super(BatchNorm, self).__init__(**kwargs)
self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale}
if in_channels != 0:
self.in_channels = in_channels
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True,
differentiable=scale)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True,
differentiable=center)
self.running_mean = self.params.get('running_mean', grad_req='null',
shape=(in_channels,),
init=running_mean_initializer,
allow_deferred_init=True,
differentiable=False)
self.running_var = self.params.get('running_var', grad_req='null',
shape=(in_channels,),
init=running_variance_initializer,
allow_deferred_init=True,
differentiable=False)
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.BatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}({content}'
if hasattr(self, 'in_channels'):
s += ', in_channels={0}'.format(self.in_channels)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class LeakyReLU(HybridBlock):
def __init__(self, alpha, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self._alpha = alpha
def hybrid_forward(self, F, x):
return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')
def __repr__(self):
s = '{name}({alpha})'
return s.format(name=self.__class__.__name__,
alpha=self._alpha)
class Embedding(HybridBlock):
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(Embedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, x, weight):
return F.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
class Flatten(HybridBlock):
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return x.reshape((0, -1))
def __repr__(self):
return self.__class__.__name__
| true | true |
7901a80de89eb080152807d040a6b0cd565910ee | 3,346 | py | Python | Clases/Palabras.py | JorgeSchelotto/TrabajoFinalSeminarioPython | aae8cc914ff55cc09b7538722f27e1ec22954e57 | [
"MIT"
] | null | null | null | Clases/Palabras.py | JorgeSchelotto/TrabajoFinalSeminarioPython | aae8cc914ff55cc09b7538722f27e1ec22954e57 | [
"MIT"
] | null | null | null | Clases/Palabras.py | JorgeSchelotto/TrabajoFinalSeminarioPython | aae8cc914ff55cc09b7538722f27e1ec22954e57 | [
"MIT"
] | null | null | null | __author__ = 'Burgos, Agustin - Schelotto, Jorge'
# -*- coding: utf-8 -*-
# Copyright 2018 autors: Burgos Agustin, Schelotto Jorge
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pygame
class Palabras(pygame.sprite.Sprite):
    """Draggable word-image sprite that shrinks and spins away after it
    collides with its target."""
    def __init__(self, ruta, nombre, x, y):
        # ruta: path to the image file; nombre: the word this sprite
        # represents; (x, y): resting center position on screen.
        super().__init__()
        self.__palabra = nombre
        self.__click = False
        self.image = pygame.image.load(ruta).convert_alpha()
        self.rect = self.image.get_rect()
        self.collide = False
        self.posX = x
        self.posY = y
    def getPosX(self):
        return self.posX
    def getPosY(self):
        return self.posY
    def getPalabra(self):
        """Return the word associated with this sprite."""
        return self.__palabra
    def getPalabraImagen(self):
        """Return the sprite's current image surface."""
        return self.image
    def setClick(self, bool):
        # NOTE(review): the parameter shadows the builtin `bool`; renaming it
        # would be safer but would change the keyword-call interface.
        self.__click = bool
    def getClick(self):
        return self.__click
    def getRect(self):
        return self.rect
    def colli(self, x, y):
        """Shrink-and-rotate animation step applied while colliding.

        x, y are the current width/height of the image in pixels.
        """
        if x > 20:
            # Shrink the image by one pixel per call, keeping it centered.
            center = self.rect.center
            x = x - 1
            y = y - 1
            self.image = pygame.transform.scale(self.image, (x, y))
            self.rect = self.image.get_rect()
            self.rect.center = center
            # Rotation/zoom is applied to the already-transformed image, so
            # the effect accumulates across frames.
            self.image = pygame.transform.rotozoom(self.image, -90, 0.8)
        elif x <= 20:
            # Clamp at zero size so the width never goes negative.
            center = self.rect.center
            self.image = pygame.transform.scale(self.image, (0, 0))
            self.rect = self.image.get_rect()
            self.rect.center = center
            self.image = pygame.transform.rotozoom(self.image, -90, 0.5)
    def update(self,surface):
        """Handles the events and collisions of the Palabras sprites."""
        if not self.getClick() and not self.collide:
            # Idle: sit at the resting position.
            self.rect.center = (self.posX, self.posY)
        if self.getClick():
            # While clicked, the sprite follows the mouse cursor.
            self.rect.center = pygame.mouse.get_pos()
        if self.collide:
            # On collision, run one step of the shrink/rotate animation.
            x = self.image.get_rect().size[0]
            y = self.image.get_rect().size[1]
            self.colli(x,y)
            # Once small enough, move the image out of the collision zone.
            if self.image.get_rect().size[0] <= 20:
                self.rect.center = (0,0)
        surface.blit(self.getPalabraImagen(), self.getRect())
| 35.978495 | 128 | 0.633293 | __author__ = 'Burgos, Agustin - Schelotto, Jorge'
import pygame
class Palabras(pygame.sprite.Sprite):
def __init__(self, ruta, nombre, x, y):
super().__init__()
self.__palabra = nombre
self.__click = False
self.image = pygame.image.load(ruta).convert_alpha()
self.rect = self.image.get_rect()
self.collide = False
self.posX = x
self.posY = y
def getPosX(self):
return self.posX
def getPosY(self):
return self.posY
def getPalabra(self):
return self.__palabra
def getPalabraImagen(self):
return self.image
def setClick(self, bool):
self.__click = bool
def getClick(self):
return self.__click
def getRect(self):
return self.rect
def colli(self, x, y):
if x > 20:
center = self.rect.center
x = x - 1
y = y - 1
self.image = pygame.transform.scale(self.image, (x, y))
self.rect = self.image.get_rect()
self.rect.center = center
self.image = pygame.transform.rotozoom(self.image, -90, 0.8)
elif x <= 20:
center = self.rect.center
self.image = pygame.transform.scale(self.image, (0, 0))
self.rect = self.image.get_rect()
self.rect.center = center
self.image = pygame.transform.rotozoom(self.image, -90, 0.5)
def update(self,surface):
if not self.getClick() and not self.collide:
self.rect.center = (self.posX, self.posY)
if self.getClick():
self.rect.center = pygame.mouse.get_pos()
if self.collide:
x = self.image.get_rect().size[0]
y = self.image.get_rect().size[1]
self.colli(x,y)
if self.image.get_rect().size[0] <= 20:
self.rect.center = (0,0)
surface.blit(self.getPalabraImagen(), self.getRect())
| true | true |
7901a83dc9858dc4b704285fb8cf552df259a434 | 7,107 | py | Python | examples/pytorch/ogb/line/reading_data.py | harshgrovr/Graphs_Thesis | 9ffd0d23c8f8b4bd53db9fd5b9bf5776666814e0 | [
"Apache-2.0"
] | 2 | 2020-08-05T07:21:51.000Z | 2021-02-20T10:22:23.000Z | examples/pytorch/ogb/line/reading_data.py | xyanAI/dgl | 36daf66f6216bad4d30651311bcb87aa45dd33d5 | [
"Apache-2.0"
] | 1 | 2019-02-06T02:02:41.000Z | 2019-02-06T20:22:32.000Z | examples/pytorch/ogb/line/reading_data.py | xyanAI/dgl | 36daf66f6216bad4d30651311bcb87aa45dd33d5 | [
"Apache-2.0"
] | 3 | 2019-03-04T12:46:05.000Z | 2019-08-14T18:53:19.000Z | import os
import numpy as np
import scipy.sparse as sp
import pickle
import torch
from torch.utils.data import DataLoader
from dgl.data.utils import download, _get_dgl_url, get_download_dir, extract_archive
import random
import time
import dgl
def ReadTxtNet(file_path="", undirected=True):
    """ Read a plain-text edge-list network file.

    Each line is ``src dst`` or ``src dst weight``; edges without an
    explicit weight default to weight 1.

    Parameters
    ----------
    file_path str : path of the network file, or one of the bundled
        dataset names 'youtube' / 'blog' (downloaded on demand)
    undirected bool : whether to also insert every edge in the reverse
        direction

    Return
    ------
    net dict : a dict recording the connections in the graph
    node2id dict : a dict mapping the nodes to their embedding indices
    id2node dict : a dict mapping nodes embedding indices to the nodes
    sm scipy.sparse.coo_matrix : the weighted adjacency matrix
    """
    if file_path == 'youtube' or file_path == 'blog':
        name = file_path
        dir = get_download_dir()
        zip_file_path = '{}/{}.zip'.format(dir, name)
        download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
        extract_archive(zip_file_path,
                        '{}/{}'.format(dir, name))
        file_path = "{}/{}/{}-net.txt".format(dir, name, name)

    node2id = {}
    id2node = {}
    cid = 0
    src = []
    dst = []
    weight = []
    net = {}

    def _to_id(n):
        # Assign a dense embedding index the first time a node is seen.
        nonlocal cid
        if n not in node2id:
            node2id[n] = cid
            id2node[cid] = n
            cid += 1
        return node2id[n]

    def _add_edge(a, b, w):
        # Insert edge a->b unless it already exists, and record it for the
        # COO adjacency matrix.
        if a not in net:
            net[a] = {b: w}
        elif b in net[a]:
            return
        else:
            net[a][b] = w
        src.append(a)
        dst.append(b)
        weight.append(w)

    with open(file_path, "r") as f:
        for line in f:
            tup = list(map(int, line.strip().split(" ")))
            assert len(tup) in [2, 3], "The format of network file is unrecognizable."
            if len(tup) == 3:
                n1, n2, w = tup
            else:
                n1, n2 = tup
                w = 1
            n1 = _to_id(n1)
            n2 = _to_id(n2)
            _add_edge(n1, n2, w)
            if undirected:
                _add_edge(n2, n1, w)

    print("node num: %d" % len(net))
    print("edge num: %d" % len(src))
    assert max(net.keys()) == len(net) - 1, "error reading net, quit"

    sm = sp.coo_matrix(
        (np.array(weight), (src, dst)),
        dtype=np.float32)

    return net, node2id, id2node, sm
def net2graph(net_sm):
    """Build a DGLGraph from the scipy sparse adjacency matrix `net_sm`,
    printing how long construction took."""
    started = time.time()
    graph = dgl.DGLGraph(net_sm)
    elapsed = time.time() - started
    print("Building DGLGraph in %.2fs" % elapsed)
    return graph
def make_undirected(G):
    """Add the reverse of every edge so the graph is symmetric (in place)."""
    u, v = G.edges()
    G.add_edges(v, u)
    return G
def find_connected_nodes(G):
    """Return the ids of nodes that have at least one outgoing edge."""
    return torch.nonzero(G.out_degrees()).squeeze(-1)
class LineDataset:
    def __init__(self,
            net_file,
            batch_size,
            num_samples,
            negative=5,
            gpus=[0],
            load_from_ogbl=False,
            ogbn_name="",
            load_from_ogbn=False,
            ):
        """ This class has the following functions:
        1. Load the network as a DGL graph (from a saved graph file or
           an OGB link/node-property-prediction dataset);
        2. Pre-draw the edge indices each trainer process will sample;
        3. Provide the negative table if the user hopes to sample negative
        nodes according to nodes' degrees;

        Parameter
        ---------
        net_file str : path of the dgl network file
        batch_size int : number of edge samples in each batch
        num_samples int : total number of edge samples to draw
        negative int : negative samples for each positve node pair
        gpus list : device ids; its length sets the number of processes
        fast_neg bool : whether do negative sampling inside a batch
        ogbl_name / ogbn_name str : OGB dataset name when loading from OGB
        load_from_ogbl / load_from_ogbn bool : which loader to use

        NOTE(review): `gpus=[0]` is a mutable default argument; it is only
        read here, but replacing it with None + a default inside would be
        safer.
        """
        self.batch_size = batch_size
        self.negative = negative
        self.num_samples = num_samples
        self.num_procs = len(gpus)
        self.fast_neg = fast_neg

        if load_from_ogbl:
            assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
            from load_dataset import load_from_ogbl_with_name
            self.G = load_from_ogbl_with_name(ogbl_name)
        elif load_from_ogbn:
            assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
            from load_dataset import load_from_ogbn_with_name
            self.G = load_from_ogbn_with_name(ogbn_name)
        else:
            self.G = dgl.load_graphs(net_file)[0][0]
        # LINE trains on symmetric graphs, so mirror every edge.
        self.G = make_undirected(self.G)
        print("Finish reading graph")

        self.num_nodes = self.G.number_of_nodes()

        start = time.time()
        # Pre-draw all edge indices, then split them evenly per process.
        seeds = np.random.choice(np.arange(self.G.number_of_edges()),
                                 self.num_samples,
                                 replace=True)  # edge index
        self.seeds = torch.split(torch.LongTensor(seeds),
                                 int(np.ceil(self.num_samples / self.num_procs)),
                                 0)
        end = time.time()
        t = end - start
        print("generate %d samples in %.2fs" % (len(seeds), t))

        # negative table for true negative sampling
        self.valid_nodes = find_connected_nodes(self.G)
        if not fast_neg:
            # Build a degree^0.75-proportional table (word2vec-style) by
            # repeating each node id proportionally to its weight.
            node_degree = self.G.out_degrees(self.valid_nodes).numpy()
            node_degree = np.power(node_degree, 0.75)
            node_degree /= np.sum(node_degree)
            # NOTE(review): np.int / np.long below are removed in NumPy>=1.24;
            # int / np.int64 would be the forward-compatible spellings.
            node_degree = np.array(node_degree * 1e8, dtype=np.int)
            self.neg_table = []

            for idx, node in enumerate(self.valid_nodes):
                self.neg_table += [node] * node_degree[idx]
            self.neg_table_size = len(self.neg_table)
            self.neg_table = np.array(self.neg_table, dtype=np.long)
            del node_degree

    def create_sampler(self, i):
        """ create the edge sampler for worker i """
        return EdgeSampler(self.G, self.seeds[i])

    def save_mapping(self, map_file):
        # NOTE(review): self.node2id is never assigned in this class, so this
        # method would raise AttributeError if called — verify against callers.
        with open(map_file, "wb") as f:
            pickle.dump(self.node2id, f)
class EdgeSampler(object):
    """Indexable view over the edges of `G` for batch sampling."""
    def __init__(self, G, seeds):
        self.G = G
        self.seeds = seeds
        # Materialize edges as a (num_edges, 2) tensor of (src, dst) pairs.
        u, v = self.G.edges()
        self.edges = torch.cat((u.unsqueeze(0), v.unsqueeze(0)), 0).t()

    def sample(self, seeds):
        """ seeds torch.LongTensor : a batch of indices of edges """
        return self.edges[torch.LongTensor(seeds)]
import numpy as np
import scipy.sparse as sp
import pickle
import torch
from torch.utils.data import DataLoader
from dgl.data.utils import download, _get_dgl_url, get_download_dir, extract_archive
import random
import time
import dgl
def ReadTxtNet(file_path="", undirected=True):
if file_path == 'youtube' or file_path == 'blog':
name = file_path
dir = get_download_dir()
zip_file_path='{}/{}.zip'.format(dir, name)
download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
extract_archive(zip_file_path,
'{}/{}'.format(dir, name))
file_path = "{}/{}/{}-net.txt".format(dir, name, name)
node2id = {}
id2node = {}
cid = 0
src = []
dst = []
weight = []
net = {}
with open(file_path, "r") as f:
for line in f.readlines():
tup = list(map(int, line.strip().split(" ")))
assert len(tup) in [2, 3], "The format of network file is unrecognizable."
if len(tup) == 3:
n1, n2, w = tup
elif len(tup) == 2:
n1, n2 = tup
w = 1
if n1 not in node2id:
node2id[n1] = cid
id2node[cid] = n1
cid += 1
if n2 not in node2id:
node2id[n2] = cid
id2node[cid] = n2
cid += 1
n1 = node2id[n1]
n2 = node2id[n2]
if n1 not in net:
net[n1] = {n2: w}
src.append(n1)
dst.append(n2)
weight.append(w)
elif n2 not in net[n1]:
net[n1][n2] = w
src.append(n1)
dst.append(n2)
weight.append(w)
if undirected:
if n2 not in net:
net[n2] = {n1: w}
src.append(n2)
dst.append(n1)
weight.append(w)
elif n1 not in net[n2]:
net[n2][n1] = w
src.append(n2)
dst.append(n1)
weight.append(w)
print("node num: %d" % len(net))
print("edge num: %d" % len(src))
assert max(net.keys()) == len(net) - 1, "error reading net, quit"
sm = sp.coo_matrix(
(np.array(weight), (src, dst)),
dtype=np.float32)
return net, node2id, id2node, sm
def net2graph(net_sm):
start = time.time()
G = dgl.DGLGraph(net_sm)
end = time.time()
t = end - start
print("Building DGLGraph in %.2fs" % t)
return G
def make_undirected(G):
G.add_edges(G.edges()[1], G.edges()[0])
return G
def find_connected_nodes(G):
nodes = torch.nonzero(G.out_degrees()).squeeze(-1)
return nodes
class LineDataset:
    """Edge-sampling dataset used to train LINE-style embeddings.

    Loads a graph from a saved DGL graph file (or from an OGB dataset),
    makes it undirected, and pre-draws ``num_samples`` random edge ids
    that are split evenly across ``num_procs`` workers.
    """
    def __init__(self,
        net_file,
        batch_size,
        num_samples,
        negative=5,
        gpus=None,
        fast_neg=True,
        ogbl_name="",
        load_from_ogbl=False,
        ogbn_name="",
        load_from_ogbn=False,
        ):
        """
        Parameters
        ----------
        net_file : str
            Path of a graph saved with dgl (ignored when loading from OGB).
        batch_size : int
            Number of edges per training batch.
        num_samples : int
            Total number of edge samples to draw (with replacement).
        negative : int
            Number of negative samples per positive edge.
        gpus : list of int, optional
            Device ids, one worker per entry. Defaults to [0].
        fast_neg : bool
            When False, build a degree**0.75 negative-sampling table.
        ogbl_name, ogbn_name : str
            Names of the OGB link/node dataset to load, if requested.
        load_from_ogbl, load_from_ogbn : bool
            Select the OGB loading path (single GPU only).
        """
        if gpus is None:
            # Default kept out of the signature to avoid a shared mutable default.
            gpus = [0]
        self.batch_size = batch_size
        self.negative = negative
        self.num_samples = num_samples
        self.num_procs = len(gpus)
        self.fast_neg = fast_neg
        if load_from_ogbl:
            assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
            from load_dataset import load_from_ogbl_with_name
            self.G = load_from_ogbl_with_name(ogbl_name)
        elif load_from_ogbn:
            # Fixed copy-paste in the message: this branch loads a *node*-property dataset.
            assert len(gpus) == 1, "ogb.nodeproppred is not compatible with multi-gpu training."
            from load_dataset import load_from_ogbn_with_name
            self.G = load_from_ogbn_with_name(ogbn_name)
        else:
            self.G = dgl.load_graphs(net_file)[0][0]
        self.G = make_undirected(self.G)
        print("Finish reading graph")
        self.num_nodes = self.G.number_of_nodes()
        start = time.time()
        # Pre-draw every edge id used during training, then split the draws
        # evenly across the workers.
        seeds = np.random.choice(np.arange(self.G.number_of_edges()),
            self.num_samples,
            replace=True)
        self.seeds = torch.split(torch.LongTensor(seeds),
            int(np.ceil(self.num_samples / self.num_procs)),
            0)
        t = time.time() - start
        print("generate %d samples in %.2fs" % (len(seeds), t))
        self.valid_nodes = find_connected_nodes(self.G)
        if not fast_neg:
            # Negative-sampling table with entries proportional to degree**0.75.
            node_degree = self.G.out_degrees(self.valid_nodes).numpy()
            node_degree = np.power(node_degree, 0.75)
            node_degree /= np.sum(node_degree)
            # dtype=int replaces np.int/np.long, aliases of the builtin int
            # that were removed in NumPy 1.24; behavior is unchanged.
            node_degree = np.array(node_degree * 1e8, dtype=int)
            self.neg_table = []
            for idx, node in enumerate(self.valid_nodes):
                self.neg_table += [node] * node_degree[idx]
            self.neg_table_size = len(self.neg_table)
            self.neg_table = np.array(self.neg_table, dtype=int)
            del node_degree

    def create_sampler(self, i):
        """Return an EdgeSampler over the i-th worker's share of the seeds."""
        return EdgeSampler(self.G, self.seeds[i])

    def save_mapping(self, map_file):
        """Pickle the node-id mapping to ``map_file``.

        NOTE(review): ``self.node2id`` is never assigned anywhere in this
        class, so calling this raises AttributeError — confirm where the
        mapping is supposed to come from.
        """
        with open(map_file, "wb") as f:
            pickle.dump(self.node2id, f)
class EdgeSampler(object):
    """Indexes into a graph's (src, dst) edge list by seed position."""

    def __init__(self, G, seeds):
        self.G = G
        self.seeds = seeds
        src, dst = self.G.edges()
        # Shape (num_edges, 2): one (src, dst) pair per row.
        self.edges = torch.stack((src, dst), dim=0).t()

    def sample(self, seeds):
        """Return the edge pairs stored at the given positions."""
        index = torch.LongTensor(seeds)
        return self.edges[index]
7901a89ee3d3c11e49347b9e169696120963f78e | 1,884 | py | Python | meraki/api/mg_dhcp_settings.py | NoFliesOnYou/dashboard-api-python | 3185d0e8a9a38eba9127ac640dcbb02444e7adf2 | [
"MIT"
] | null | null | null | meraki/api/mg_dhcp_settings.py | NoFliesOnYou/dashboard-api-python | 3185d0e8a9a38eba9127ac640dcbb02444e7adf2 | [
"MIT"
] | 3 | 2020-11-08T08:50:59.000Z | 2021-12-13T20:47:15.000Z | flask/meraki/api/mg_dhcp_settings.py | cyberdevnet/mer-hacker | a7dddd03c5b02a2f8c84d711b69868d2b94f1f99 | [
"MIT"
] | null | null | null | class MGDHCPSettings(object):
    def __init__(self, session):
        """Keep a reference to the REST session used by all endpoint calls."""
        super(MGDHCPSettings, self).__init__()
        self._session = session
def getNetworkCellularGatewaySettingsDhcp(self, networkId: str):
"""
**List common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp
- networkId (string)
"""
metadata = {
'tags': ['MG DHCP settings'],
'operation': 'getNetworkCellularGatewaySettingsDhcp',
}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
return self._session.get(metadata, resource)
def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs):
"""
**Update common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp
- networkId (string)
- dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.
- dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.
- dnsCustomNameservers (array): list of fixed IP representing the the DNS Name servers when the mode is 'custom'
"""
kwargs.update(locals())
metadata = {
'tags': ['MG DHCP settings'],
'operation': 'updateNetworkCellularGatewaySettingsDhcp',
}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
| 40.956522 | 166 | 0.636943 | class MGDHCPSettings(object):
    def __init__(self, session):
        """Keep a reference to the REST session used by all endpoint calls."""
        super(MGDHCPSettings, self).__init__()
        self._session = session
    def getNetworkCellularGatewaySettingsDhcp(self, networkId: str):
        """List the common DHCP settings of the MGs in a network.

        GET /networks/{networkId}/cellularGateway/settings/dhcp

        - networkId (string)
        """
        metadata = {
            'tags': ['MG DHCP settings'],
            'operation': 'getNetworkCellularGatewaySettingsDhcp',
        }
        resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
        return self._session.get(metadata, resource)
    def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs):
        """Update the common DHCP settings of the MGs in a network.

        PUT /networks/{networkId}/cellularGateway/settings/dhcp

        - networkId (string)
        - dhcpLeaseTime (string): lease time for all MGs of the network
        - dnsNameservers (string): DNS name servers mode
        - dnsCustomNameservers (array): fixed DNS IPs when the mode is 'custom'
        """
        # Fold networkId (via locals()) into kwargs, then keep only the
        # whitelisted body fields.
        kwargs.update(locals())
        metadata = {
            'tags': ['MG DHCP settings'],
            'operation': 'updateNetworkCellularGatewaySettingsDhcp',
        }
        resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
        body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers']
        payload = {k: v for (k, v) in kwargs.items() if k in body_params}
        return self._session.put(metadata, resource, payload)
| true | true |
7901a98d4fe4226ee2b868d6b15ef9b15f874051 | 8,738 | py | Python | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/application_update_parameters.py | 16pierre/azure-sdk-for-python | 1505d348c6660c1d5a39630522a059a2e3e38839 | [
"MIT"
] | null | null | null | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/application_update_parameters.py | 16pierre/azure-sdk-for-python | 1505d348c6660c1d5a39630522a059a2e3e38839 | [
"MIT"
] | null | null | null | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/application_update_parameters.py | 16pierre/azure-sdk-for-python | 1505d348c6660c1d5a39630522a059a2e3e38839 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_base import ApplicationBase
class ApplicationUpdateParameters(ApplicationBase):
    """Request parameters for updating a new application.

    :param allow_guests_sign_in: A property on the application to indicate if
     the application accepts other IDPs or not or partially accepts.
    :type allow_guests_sign_in: bool
    :param allow_passthrough_users: Indicates that the application supports
     pass through users who have no presence in the resource tenant.
    :type allow_passthrough_users: bool
    :param app_logo_url: The url for the application logo image stored in a
     CDN.
    :type app_logo_url: str
    :param app_roles: The collection of application roles that an application
     may declare. These roles can be assigned to users, groups or service
     principals.
    :type app_roles: list[~azure.graphrbac.models.AppRole]
    :param app_permissions: The application permissions.
    :type app_permissions: list[str]
    :param available_to_other_tenants: Whether the application is available to
     other tenants.
    :type available_to_other_tenants: bool
    :param error_url: A URL provided by the author of the application to
     report errors when using the application.
    :type error_url: str
    :param group_membership_claims: Configures the groups claim issued in a
     user or OAuth 2.0 access token that the app expects. Possible values
     include: 'None', 'SecurityGroup', 'All'
    :type group_membership_claims: str or
     ~azure.graphrbac.models.GroupMembershipClaimTypes
    :param homepage: The home page of the application.
    :type homepage: str
    :param informational_urls: URLs with more information about the
     application.
    :type informational_urls: ~azure.graphrbac.models.InformationalUrl
    :param is_device_only_auth_supported: Specifies whether this application
     supports device authentication without a user. The default is false.
    :type is_device_only_auth_supported: bool
    :param key_credentials: A collection of KeyCredential objects.
    :type key_credentials: list[~azure.graphrbac.models.KeyCredential]
    :param known_client_applications: Client applications that are tied to
     this resource application. Consent to any of the known client applications
     will result in implicit consent to the resource application through a
     combined consent dialog (showing the OAuth permission scopes required by
     the client and the resource).
    :type known_client_applications: list[str]
    :param logout_url: the url of the logout page
    :type logout_url: str
    :param oauth2_allow_implicit_flow: Whether to allow implicit grant flow
     for OAuth2
    :type oauth2_allow_implicit_flow: bool
    :param oauth2_allow_url_path_matching: Specifies whether during a token
     Request Azure AD will allow path matching of the redirect URI against the
     applications collection of replyURLs. The default is false.
    :type oauth2_allow_url_path_matching: bool
    :param oauth2_permissions: The collection of OAuth 2.0 permission scopes
     that the web API (resource) application exposes to client applications.
     These permission scopes may be granted to client applications during
     consent.
    :type oauth2_permissions: list[~azure.graphrbac.models.OAuth2Permission]
    :param oauth2_require_post_response: Specifies whether, as part of OAuth
     2.0 token requests, Azure AD will allow POST requests, as opposed to GET
     requests. The default is false, which specifies that only GET requests
     will be allowed.
    :type oauth2_require_post_response: bool
    :param org_restrictions: A list of tenants allowed to access application.
    :type org_restrictions: list[str]
    :param optional_claims:
    :type optional_claims: ~azure.graphrbac.models.OptionalClaims
    :param password_credentials: A collection of PasswordCredential objects
    :type password_credentials:
     list[~azure.graphrbac.models.PasswordCredential]
    :param pre_authorized_applications: list of pre-authorized applications.
    :type pre_authorized_applications:
     list[~azure.graphrbac.models.PreAuthorizedApplication]
    :param public_client: Specifies whether this application is a public
     client (such as an installed application running on a mobile device).
     Default is false.
    :type public_client: bool
    :param publisher_domain: Reliable domain which can be used to identify an
     application.
    :type publisher_domain: str
    :param reply_urls: A collection of reply URLs for the application.
    :type reply_urls: list[str]
    :param required_resource_access: Specifies resources that this application
     requires access to and the set of OAuth permission scopes and application
     roles that it needs under each of those resources. This pre-configuration
     of required resource access drives the consent experience.
    :type required_resource_access:
     list[~azure.graphrbac.models.RequiredResourceAccess]
    :param saml_metadata_url: The URL to the SAML metadata for the
     application.
    :type saml_metadata_url: str
    :param sign_in_audience: Audience for signing in to the application
     (AzureADMyOrganization, AzureADAllOrganizations,
     AzureADAndMicrosoftAccounts).
    :type sign_in_audience: str
    :param www_homepage: The primary Web page.
    :type www_homepage: str
    :param display_name: The display name of the application.
    :type display_name: str
    :param identifier_uris: A collection of URIs for the application.
    :type identifier_uris: list[str]
    """

    # Maps each Python attribute to its JSON wire name and serialized type.
    _attribute_map = {
        'allow_guests_sign_in': {'key': 'allowGuestsSignIn', 'type': 'bool'},
        'allow_passthrough_users': {'key': 'allowPassthroughUsers', 'type': 'bool'},
        'app_logo_url': {'key': 'appLogoUrl', 'type': 'str'},
        'app_roles': {'key': 'appRoles', 'type': '[AppRole]'},
        'app_permissions': {'key': 'appPermissions', 'type': '[str]'},
        'available_to_other_tenants': {'key': 'availableToOtherTenants', 'type': 'bool'},
        'error_url': {'key': 'errorUrl', 'type': 'str'},
        'group_membership_claims': {'key': 'groupMembershipClaims', 'type': 'str'},
        'homepage': {'key': 'homepage', 'type': 'str'},
        'informational_urls': {'key': 'informationalUrls', 'type': 'InformationalUrl'},
        'is_device_only_auth_supported': {'key': 'isDeviceOnlyAuthSupported', 'type': 'bool'},
        'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
        'known_client_applications': {'key': 'knownClientApplications', 'type': '[str]'},
        'logout_url': {'key': 'logoutUrl', 'type': 'str'},
        'oauth2_allow_implicit_flow': {'key': 'oauth2AllowImplicitFlow', 'type': 'bool'},
        'oauth2_allow_url_path_matching': {'key': 'oauth2AllowUrlPathMatching', 'type': 'bool'},
        'oauth2_permissions': {'key': 'oauth2Permissions', 'type': '[OAuth2Permission]'},
        'oauth2_require_post_response': {'key': 'oauth2RequirePostResponse', 'type': 'bool'},
        'org_restrictions': {'key': 'orgRestrictions', 'type': '[str]'},
        'optional_claims': {'key': 'optionalClaims', 'type': 'OptionalClaims'},
        'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
        'pre_authorized_applications': {'key': 'preAuthorizedApplications', 'type': '[PreAuthorizedApplication]'},
        'public_client': {'key': 'publicClient', 'type': 'bool'},
        'publisher_domain': {'key': 'publisherDomain', 'type': 'str'},
        'reply_urls': {'key': 'replyUrls', 'type': '[str]'},
        'required_resource_access': {'key': 'requiredResourceAccess', 'type': '[RequiredResourceAccess]'},
        'saml_metadata_url': {'key': 'samlMetadataUrl', 'type': 'str'},
        'sign_in_audience': {'key': 'signInAudience', 'type': 'str'},
        'www_homepage': {'key': 'wwwHomepage', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'identifier_uris': {'key': 'identifierUris', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        # Shared attributes are forwarded to ApplicationBase.__init__; only
        # the two update-specific fields are set here.
        super(ApplicationUpdateParameters, self).__init__(**kwargs)
        self.display_name = kwargs.get('display_name', None)
        self.identifier_uris = kwargs.get('identifier_uris', None)
| 55.656051 | 114 | 0.707027 |
from .application_base import ApplicationBase
class ApplicationUpdateParameters(ApplicationBase):
    """Request parameters for updating an application.

    Only ``display_name`` and ``identifier_uris`` are assigned here; all
    other keyword arguments are forwarded to ``ApplicationBase.__init__``.
    """

    # Maps each Python attribute to its JSON wire name and serialized type.
    _attribute_map = {
        'allow_guests_sign_in': {'key': 'allowGuestsSignIn', 'type': 'bool'},
        'allow_passthrough_users': {'key': 'allowPassthroughUsers', 'type': 'bool'},
        'app_logo_url': {'key': 'appLogoUrl', 'type': 'str'},
        'app_roles': {'key': 'appRoles', 'type': '[AppRole]'},
        'app_permissions': {'key': 'appPermissions', 'type': '[str]'},
        'available_to_other_tenants': {'key': 'availableToOtherTenants', 'type': 'bool'},
        'error_url': {'key': 'errorUrl', 'type': 'str'},
        'group_membership_claims': {'key': 'groupMembershipClaims', 'type': 'str'},
        'homepage': {'key': 'homepage', 'type': 'str'},
        'informational_urls': {'key': 'informationalUrls', 'type': 'InformationalUrl'},
        'is_device_only_auth_supported': {'key': 'isDeviceOnlyAuthSupported', 'type': 'bool'},
        'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
        'known_client_applications': {'key': 'knownClientApplications', 'type': '[str]'},
        'logout_url': {'key': 'logoutUrl', 'type': 'str'},
        'oauth2_allow_implicit_flow': {'key': 'oauth2AllowImplicitFlow', 'type': 'bool'},
        'oauth2_allow_url_path_matching': {'key': 'oauth2AllowUrlPathMatching', 'type': 'bool'},
        'oauth2_permissions': {'key': 'oauth2Permissions', 'type': '[OAuth2Permission]'},
        'oauth2_require_post_response': {'key': 'oauth2RequirePostResponse', 'type': 'bool'},
        'org_restrictions': {'key': 'orgRestrictions', 'type': '[str]'},
        'optional_claims': {'key': 'optionalClaims', 'type': 'OptionalClaims'},
        'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
        'pre_authorized_applications': {'key': 'preAuthorizedApplications', 'type': '[PreAuthorizedApplication]'},
        'public_client': {'key': 'publicClient', 'type': 'bool'},
        'publisher_domain': {'key': 'publisherDomain', 'type': 'str'},
        'reply_urls': {'key': 'replyUrls', 'type': '[str]'},
        'required_resource_access': {'key': 'requiredResourceAccess', 'type': '[RequiredResourceAccess]'},
        'saml_metadata_url': {'key': 'samlMetadataUrl', 'type': 'str'},
        'sign_in_audience': {'key': 'signInAudience', 'type': 'str'},
        'www_homepage': {'key': 'wwwHomepage', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'identifier_uris': {'key': 'identifierUris', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        """Accept all application attributes as keyword arguments."""
        super(ApplicationUpdateParameters, self).__init__(**kwargs)
        self.display_name = kwargs.get('display_name', None)
        self.identifier_uris = kwargs.get('identifier_uris', None)
| true | true |
7901aaade44c85abaf27653db1e6058e1609af80 | 7,148 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/scaleway_ip.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/scaleway_ip.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/scaleway_ip.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Scaleway IP management module
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: scaleway_ip
short_description: Scaleway IP management module
author: Remy Leone (@remyleone)
description:
- This module manages IP on Scaleway account
U(https://developer.scaleway.com)
extends_documentation_fragment:
- community.general.scaleway
options:
state:
type: str
description:
- Indicate desired state of the IP.
default: present
choices:
- present
- absent
organization:
type: str
description:
- Scaleway organization identifier
required: true
region:
type: str
description:
- Scaleway region to use (for example par1).
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
- par2
- EMEA-FR-PAR2
- waw1
- EMEA-PL-WAW1
id:
type: str
description:
- id of the Scaleway IP (UUID)
server:
type: str
description:
- id of the server you want to attach an IP to.
- To unattach an IP don't specify this option
reverse:
type: str
description:
- Reverse to assign to the IP
'''
EXAMPLES = '''
- name: Create an IP
community.general.scaleway_ip:
organization: '{{ scw_org }}'
state: present
region: par1
register: ip_creation_task
- name: Make sure IP deleted
community.general.scaleway_ip:
id: '{{ ip_creation_task.scaleway_ip.id }}'
state: absent
region: par1
'''
RETURN = '''
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"ips": [
{
"organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
"reverse": null,
"id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
"server": {
"id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
"name": "ansible_tuto-1"
},
"address": "212.47.232.136"
}
]
}
'''
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
from ansible.module_utils.basic import AnsibleModule
def ip_attributes_should_be_changed(api, target_ip, wished_ip):
    """Diff target_ip against wished_ip and return the PATCH payload.

    Only the "reverse" and "server" fields are compared; an empty dict
    means the IP already matches the wished state.
    """
    patch_payload = {}

    if wished_ip["reverse"] != target_ip["reverse"]:
        patch_payload["reverse"] = wished_ip["reverse"]

    # Attach: the IP currently has no server but one is wished.
    if target_ip["server"] is None and wished_ip["server"]:
        patch_payload["server"] = wished_ip["server"]

    # Detach / migrate: only meaningful when the current server entry is a
    # mapping carrying an "id"; any other shape is silently skipped, which
    # mirrors the original try/except behaviour.
    try:
        current_id = target_ip["server"]["id"]
    except (TypeError, KeyError):
        return patch_payload

    if current_id and wished_ip["server"] is None:
        patch_payload["server"] = wished_ip["server"]
    if current_id != wished_ip["server"]:
        patch_payload["server"] = wished_ip["server"]

    return patch_payload
def payload_from_wished_ip(wished_ip):
    """Build the POST body: every non-None field except the read-only id."""
    payload = {}
    for key, value in wished_ip.items():
        if key != 'id' and value is not None:
            payload[key] = value
    return payload
def present_strategy(api, wished_ip):
    """Ensure the wished IP exists with the wished attributes.

    Returns a (changed, result_dict) tuple; calls api.module.fail_json on
    any API error. Creates the IP when its id is unknown, otherwise patches
    the differing attributes (if any).
    """
    changed = False
    response = api.get('ips')
    if not response.ok:
        api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
            response.status_code, response.json['message']))
    ips_list = response.json["ips"]
    ip_lookup = dict((ip["id"], ip)
                     for ip in ips_list)
    if wished_ip["id"] not in ip_lookup.keys():
        changed = True
        if api.module.check_mode:
            return changed, {"status": "An IP would be created."}
        # Create IP
        creation_response = api.post('/ips',
                                     data=payload_from_wished_ip(wished_ip))
        if not creation_response.ok:
            msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
                                                               creation_response.json['message'],
                                                               creation_response.json)
            api.module.fail_json(msg=msg)
        return changed, creation_response.json["ip"]
    # IP already exists: patch only the attributes that differ.
    target_ip = ip_lookup[wished_ip["id"]]
    patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
    if not patch_payload:
        return changed, target_ip
    changed = True
    if api.module.check_mode:
        return changed, {"status": "IP attributes would be changed."}
    ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
                                  data=patch_payload)
    if not ip_patch_response.ok:
        api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
            ip_patch_response.status_code, ip_patch_response.json['message']))
    return changed, ip_patch_response.json["ip"]
def absent_strategy(api, wished_ip):
    """Ensure the wished IP is absent; return (changed, result_dict)."""
    changed = False
    response = api.get('ips')
    # Check the HTTP status *before* reading response.json["ips"]: an error
    # payload has no "ips" key, so the original order raised KeyError here
    # instead of reaching fail_json (present_strategy already checks first).
    if not response.ok:
        api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
            response.status_code, response.json['message']))
    ips_list = response.json["ips"]
    ip_lookup = dict((ip["id"], ip)
                     for ip in ips_list)
    if wished_ip["id"] not in ip_lookup.keys():
        # Already absent: nothing to do.
        return changed, {}
    changed = True
    if api.module.check_mode:
        return changed, {"status": "IP would be destroyed"}
    response = api.delete('/ips/' + wished_ip["id"])
    if not response.ok:
        api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
            response.status_code, response.json))
    return changed, response.json
def core(module):
    """Build the wished-IP spec, pick the strategy matching `state`, and exit."""
    params = module.params
    wished_ip = {
        "organization": params['organization'],
        "reverse": params["reverse"],
        "id": params["id"],
        "server": params["server"]
    }
    params['api_url'] = SCALEWAY_LOCATION[params["region"]]["api_endpoint"]
    api = Scaleway(module=module)
    strategy = absent_strategy if params["state"] == "absent" else present_strategy
    changed, summary = strategy(api=api, wished_ip=wished_ip)
    module.exit_json(changed=changed, scaleway_ip=summary)
def main():
    """Module entry point: declare the argument spec and hand off to core()."""
    spec = scaleway_argument_spec()
    spec.update(
        state=dict(default='present', choices=['absent', 'present']),
        organization=dict(required=True),
        server=dict(),
        reverse=dict(),
        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
        id=dict(),
    )
    core(AnsibleModule(argument_spec=spec, supports_check_mode=True))


if __name__ == '__main__':
    main()
| 27.178707 | 131 | 0.615837 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: scaleway_ip
short_description: Scaleway IP management module
author: Remy Leone (@remyleone)
description:
- This module manages IP on Scaleway account
U(https://developer.scaleway.com)
extends_documentation_fragment:
- community.general.scaleway
options:
state:
type: str
description:
- Indicate desired state of the IP.
default: present
choices:
- present
- absent
organization:
type: str
description:
- Scaleway organization identifier
required: true
region:
type: str
description:
- Scaleway region to use (for example par1).
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
- par2
- EMEA-FR-PAR2
- waw1
- EMEA-PL-WAW1
id:
type: str
description:
- id of the Scaleway IP (UUID)
server:
type: str
description:
- id of the server you want to attach an IP to.
- To unattach an IP don't specify this option
reverse:
type: str
description:
- Reverse to assign to the IP
'''
EXAMPLES = '''
- name: Create an IP
community.general.scaleway_ip:
organization: '{{ scw_org }}'
state: present
region: par1
register: ip_creation_task
- name: Make sure IP deleted
community.general.scaleway_ip:
id: '{{ ip_creation_task.scaleway_ip.id }}'
state: absent
region: par1
'''
RETURN = '''
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"ips": [
{
"organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
"reverse": null,
"id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
"server": {
"id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
"name": "ansible_tuto-1"
},
"address": "212.47.232.136"
}
]
}
'''
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
from ansible.module_utils.basic import AnsibleModule
def ip_attributes_should_be_changed(api, target_ip, wished_ip):
    """Return the PATCH payload needed to bring target_ip in line with wished_ip.

    Compares the "reverse" and "server" fields; an empty dict means no
    change is required. The try/except blocks skip the server checks when
    the current server entry is not a mapping with an "id".
    """
    patch_payload = {}
    if target_ip["reverse"] != wished_ip["reverse"]:
        patch_payload["reverse"] = wished_ip["reverse"]
    # IP is assigned to a server
    if target_ip["server"] is None and wished_ip["server"]:
        patch_payload["server"] = wished_ip["server"]
    # IP is unassigned to a server
    try:
        if target_ip["server"]["id"] and wished_ip["server"] is None:
            patch_payload["server"] = wished_ip["server"]
    except (TypeError, KeyError):
        pass
    # IP is migrated between 2 different servers
    try:
        if target_ip["server"]["id"] != wished_ip["server"]:
            patch_payload["server"] = wished_ip["server"]
    except (TypeError, KeyError):
        pass
    return patch_payload
def payload_from_wished_ip(wished_ip):
    """Build the POST body: every non-None field except the read-only id."""
    return dict(
        (k, v)
        for k, v in wished_ip.items()
        if k != 'id' and v is not None
    )
def present_strategy(api, wished_ip):
    """Ensure the wished IP exists with the wished attributes.

    Returns a (changed, result_dict) tuple; calls api.module.fail_json on
    any API error. Creates the IP when its id is unknown, otherwise patches
    the differing attributes (if any).
    """
    changed = False
    response = api.get('ips')
    if not response.ok:
        api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
            response.status_code, response.json['message']))
    ips_list = response.json["ips"]
    ip_lookup = dict((ip["id"], ip)
                     for ip in ips_list)
    if wished_ip["id"] not in ip_lookup.keys():
        changed = True
        if api.module.check_mode:
            return changed, {"status": "An IP would be created."}
        # Create IP
        creation_response = api.post('/ips',
                                     data=payload_from_wished_ip(wished_ip))
        if not creation_response.ok:
            msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
                                                               creation_response.json['message'],
                                                               creation_response.json)
            api.module.fail_json(msg=msg)
        return changed, creation_response.json["ip"]
    # IP already exists: patch only the attributes that differ.
    target_ip = ip_lookup[wished_ip["id"]]
    patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
    if not patch_payload:
        return changed, target_ip
    changed = True
    if api.module.check_mode:
        return changed, {"status": "IP attributes would be changed."}
    ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
                                  data=patch_payload)
    if not ip_patch_response.ok:
        api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
            ip_patch_response.status_code, ip_patch_response.json['message']))
    return changed, ip_patch_response.json["ip"]
def absent_strategy(api, wished_ip):
    """Ensure the wished IP is absent; return (changed, result_dict)."""
    changed = False
    response = api.get('ips')
    # Check the HTTP status *before* reading response.json["ips"]: an error
    # payload has no "ips" key, so the original order raised KeyError here
    # instead of reaching fail_json (present_strategy already checks first).
    if not response.ok:
        api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
            response.status_code, response.json['message']))
    ips_list = response.json["ips"]
    ip_lookup = dict((ip["id"], ip)
                     for ip in ips_list)
    if wished_ip["id"] not in ip_lookup.keys():
        # Already absent: nothing to do.
        return changed, {}
    changed = True
    if api.module.check_mode:
        return changed, {"status": "IP would be destroyed"}
    response = api.delete('/ips/' + wished_ip["id"])
    if not response.ok:
        api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
            response.status_code, response.json))
    return changed, response.json
def core(module):
    """Build the wished-IP spec, pick the strategy matching `state`, and exit."""
    wished_ip = {
        "organization": module.params['organization'],
        "reverse": module.params["reverse"],
        "id": module.params["id"],
        "server": module.params["server"]
    }
    # Resolve the regional API endpoint before constructing the client.
    region = module.params["region"]
    module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
    api = Scaleway(module=module)
    if module.params["state"] == "absent":
        changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
    else:
        changed, summary = present_strategy(api=api, wished_ip=wished_ip)
    module.exit_json(changed=changed, scaleway_ip=summary)
def main():
    """Module entry point: declare the argument spec and hand off to core()."""
    argument_spec = scaleway_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['absent', 'present']),
        organization=dict(required=True),
        server=dict(),
        reverse=dict(),
        region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
        id=dict()
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    core(module)
if __name__ == '__main__':
    main()
| true | true |
7901aca03203d142adecf88e42823f69d90c6575 | 52,847 | py | Python | egs/wsj/s5/steps/cleanup/internal/segment_ctm_edits.py | HunterJiang/kaldi | 3fe38c82fa0936f7a4ea3347a54e9e00fb2471a8 | [
"Apache-2.0"
] | 319 | 2016-10-24T23:08:04.000Z | 2022-03-08T02:36:51.000Z | egs/wsj/s5/steps/cleanup/internal/segment_ctm_edits.py | zwcih/kaldi-ctc | 2c47b99f5efba22cb3989eed1a7757bf5d9927ce | [
"Apache-2.0"
] | 18 | 2017-01-12T12:08:07.000Z | 2020-06-18T07:37:20.000Z | egs/wsj/s5/steps/cleanup/internal/segment_ctm_edits.py | zwcih/kaldi-ctc | 2c47b99f5efba22cb3989eed1a7757bf5d9927ce | [
"Apache-2.0"
] | 87 | 2016-10-25T04:39:48.000Z | 2021-12-24T07:47:31.000Z | #!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import sys, operator, argparse, os
from collections import defaultdict
# This script reads 'ctm-edits' file format that is produced by get_ctm_edits.py
# and modified by modify_ctm_edits.py and taint_ctm_edits.py Its function is to
# produce a segmentation and text from the ctm-edits input.
# The ctm-edits file format that this script expects is as follows
# <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit> ['tainted']
# [note: file-id is really utterance-id at this point].
parser = argparse.ArgumentParser(
    description = "This program produces segmentation and text information "
    "based on reading ctm-edits input format which is produced by "
    "steps/cleanup/internal/get_ctm_edits.py, steps/cleanup/internal/modify_ctm_edits.py and "
    "steps/cleanup/internal/taint_ctm_edits.py.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

parser.add_argument("--min-segment-length", type = float, default = 0.5,
                    help = "Minimum allowed segment length (in seconds) for any "
                    "segment; shorter segments than this will be discarded.")
parser.add_argument("--min-new-segment-length", type = float, default = 1.0,
                    help = "Minimum allowed segment length (in seconds) for newly "
                    "created segments (i.e. not identical to the input utterances). "
                    "Expected to be >= --min-segment-length.")
parser.add_argument("--frame-length", type = float, default = 0.01,
                    help = "This only affects rounding of the output times; they will "
                    "be constrained to multiples of this value.")
parser.add_argument("--max-tainted-length", type = float, default = 0.05,
                    help = "Maximum allowed length of any 'tainted' line. Note: "
                    "'tainted' lines may only appear at the boundary of a "
                    "segment")
parser.add_argument("--max-edge-silence-length", type = float, default = 0.5,
                    help = "Maximum allowed length of silence if it appears at the "
                    "edge of a segment (will be truncated). This rule is "
                    "relaxed if such truncation would take a segment below "
                    "the --min-segment-length or --min-new-segment-length.")
parser.add_argument("--max-edge-non-scored-length", type = float, default = 0.5,
                    help = "Maximum allowed length of a non-scored word (noise, cough, etc.) "
                    "if it appears at the edge of a segment (will be truncated). "
                    "This rule is relaxed if such truncation would take a "
                    "segment below the --min-segment-length.")
parser.add_argument("--max-internal-silence-length", type = float, default = 2.0,
                    help = "Maximum allowed length of silence if it appears inside a segment "
                    "(will cause the segment to be split).")
parser.add_argument("--max-internal-non-scored-length", type = float, default = 2.0,
                    help = "Maximum allowed length of a non-scored word (noise, etc.) if "
                    "it appears inside a segment (will cause the segment to be "
                    "split). Note: reference words which are real words but OOV "
                    "are not included in this category.")
parser.add_argument("--unk-padding", type = float, default = 0.05,
                    help = "Amount of padding with <unk> that we do if a segment boundary is "
                    "next to errors (ins, del, sub). That is, we add this amount of "
                    "time to the segment and add the <unk> word to cover the acoustics. "
                    "If nonzero, the --oov-symbol-file option must be supplied.")
parser.add_argument("--max-junk-proportion", type = float, default = 0.1,
                    help = "Maximum proportion of the time of the segment that may "
                    "consist of potentially bad data, in which we include 'tainted' lines of "
                    "the ctm-edits input and unk-padding.")
# Bug fix: this option was declared with type=str, but its value is compared
# numerically against an integer word count in Segment.MergeWithSegment();
# in Python 3 that comparison raises a TypeError as soon as the option is
# supplied on the command line.  It must be an int.
parser.add_argument("--max-deleted-words-kept-when-merging", type = int, default = 1,
                    help = "When merging segments that are found to be overlapping or "
                    "adjacent after all other processing, keep in the transcript the "
                    "reference words that were deleted between the segments [if any] "
                    "as long as there were no more than this many reference words. "
                    "Setting this to zero will mean that any reference words that "
                    "were deleted between the segments we're about to reattach will "
                    "not appear in the generated transcript (so we'll match the hyp).")
parser.add_argument("--oov-symbol-file", type = str, default = None,
                    help = "Filename of file such as data/lang/oov.txt which contains "
                    "the text form of the OOV word, normally '<unk>'. Supplied as "
                    "a file to avoid complications with escaping. Necessary if "
                    "the --unk-padding option has a nonzero value (which it does "
                    "by default.")
parser.add_argument("--ctm-edits-out", type = str,
                    help = "Filename to output an extended version of the ctm-edits format "
                    "with segment start and end points noted. This file is intended to be "
                    "read by humans; there are currently no scripts that will read it.")
parser.add_argument("--word-stats-out", type = str,
                    help = "Filename for output of word-level stats, of the form "
                    "'<word> <bad-proportion> <total-count-in-ref>', e.g. 'hello 0.12 12408', "
                    "where the <bad-proportion> is the proportion of the time that this "
                    "reference word does not make it into a segment. It can help reveal words "
                    "that have problematic pronunciations or are associated with "
                    "transcription errors.")

parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
                    help="Filename of file containing a list of non-scored words, "
                    "one per line. See steps/cleanup/internal/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
                    help = "Filename of input ctm-edits file. "
                    "Use /dev/stdin for standard input.")
parser.add_argument("text_out", metavar = "<text-out>",
                    help = "Filename of output text file (same format as data/train/text, i.e. "
                    "<new-utterance-id> <word1> <word2> ... <wordN>")
parser.add_argument("segments_out", metavar = "<segments-out>",
                    help = "Filename of output segments. This has the same format as data/train/segments, "
                    "but instead of <recording-id>, the second field is the old utterance-id, i.e "
                    "<new-utterance-id> <old-utterance-id> <start-time> <end-time>")

args = parser.parse_args()
def IsTainted(split_line_of_utt):
    """Return True if this split ctm-edits line carries the optional 9th
    field 'tainted' (see taint_ctm_edits.py), else False."""
    try:
        return split_line_of_utt[8] == 'tainted'
    except IndexError:
        return False
# This function returns a list of pairs (start-index, end-index) representing
# the cores of segments (so if a pair is (s, e), then the core of a segment
# would span (s, s+1, ... e-1).
#
# By the 'core of a segment', we mean a sequence of ctm-edits lines including at
# least one 'cor' line and a contiguous sequence of other lines of the type
# 'cor', 'fix' and 'sil' that must be not tainted. The segment core excludes
# any tainted lines at the edge of a segment, which will be added later.
#
# We only initiate segments when it contains something correct and not realized
# as unk (i.e. ref==hyp); and we extend it with anything that is 'sil' or 'fix'
# or 'cor' that is not tainted. Contiguous regions of 'true' in the resulting
# boolean array will then become the cores of prototype segments, and we'll add
# any adjacent tainted words (or parts of them).
def ComputeSegmentCores(split_lines_of_utt):
    """Return a list of (start-index, end-index) pairs giving the cores of
    prototype segments; a pair (s, e) covers lines s, s+1, ... e-1.

    A core is seeded by lines that are correctly decoded and not realized as
    unk (edit type 'cor' with hyp == ref), and then grown in both directions
    over adjacent untainted 'cor'/'sil'/'fix' lines.  Tainted lines are never
    part of a core; they may be attached to segments later."""
    num_lines = len(split_lines_of_utt)
    in_core = [False] * num_lines

    # Seed: 'cor' lines where the hypothesis word (field 4) matches the
    # reference word (field 6).
    for i in range(num_lines):
        line = split_lines_of_utt[i]
        if line[7] == 'cor' and line[4] == line[6]:
            in_core[i] = True

    def extendable(i):
        # A core may absorb an adjacent line only if it is untainted and of
        # edit type 'cor', 'sil' or 'fix'.  (This check is equivalent to
        # `not IsTainted(line) and line[7] in (...)`.)
        line = split_lines_of_utt[i]
        tainted = len(line) > 8 and line[8] == 'tainted'
        return (not tainted) and line[7] in ('cor', 'sil', 'fix')

    # Grow each core rightwards as far as possible...
    for i in range(1, num_lines):
        if in_core[i - 1] and not in_core[i] and extendable(i):
            in_core[i] = True
    # ... and leftwards.
    for i in reversed(range(0, num_lines - 1)):
        if in_core[i + 1] and not in_core[i] and extendable(i):
            in_core[i] = True

    # Collect the contiguous runs of True as (start, end) pairs, end exclusive.
    segment_ranges = []
    run_start = None
    for i in range(num_lines):
        if in_core[i]:
            if run_start is None:
                run_start = i
        elif run_start is not None:
            segment_ranges.append((run_start, i))
            run_start = None
    if run_start is not None:
        segment_ranges.append((run_start, num_lines))
    return segment_ranges
class Segment:
    """Represents one candidate output segment: the range of ctm-edits lines
    [start_index, end_index) of a single utterance, plus optional unk-padding
    at the edges and the proportion of the first/last lines' durations that
    we keep.  The Possibly*() methods implement the successive stages of
    segment post-processing driven by GetSegmentsForUtterance()."""
    def __init__(self, split_lines_of_utt, start_index, end_index, debug_str = None):
        self.split_lines_of_utt = split_lines_of_utt
        # start_index is the index of the first line that appears in this
        # segment, and end_index is one past the last line. This does not
        # include unk-padding.
        self.start_index = start_index
        self.end_index = end_index
        # If the following values are nonzero, then when we create the segment
        # we will add <unk> at the start and end of the segment [representing
        # partial words], with this amount of additional audio.
        self.start_unk_padding = 0.0
        self.end_unk_padding = 0.0
        # debug_str keeps track of the 'core' of the segment.
        if debug_str == None:
            debug_str = 'core-start={0},core-end={1}'.format(start_index,end_index)
        self.debug_str = debug_str
        # This gives the proportion of the time of the first line in the segment
        # that we keep. Usually 1.0 but may be less if we've trimmed away some
        # proportion of the time.
        self.start_keep_proportion = 1.0
        # This gives the proportion of the time of the last line in the segment
        # that we keep. Usually 1.0 but may be less if we've trimmed away some
        # proportion of the time.
        self.end_keep_proportion = 1.0

    # This is stage 1 of segment processing (after creating the boundaries of the
    # core of the segment, which is done outside of this class).
    #
    # This function may reduce start_index and/or increase end_index by
    # including a single adjacent 'tainted' line from the ctm-edits file. This
    # is only done if the lines at the boundaries of the segment are currently
    # real non-silence words and not non-scored words. The idea is that we
    # probably don't want to start or end the segment right at the boundary of a
    # real word, we want to add some kind of padding.
    def PossiblyAddTaintedLines(self):
        global non_scored_words
        split_lines_of_utt = self.split_lines_of_utt
        # we're iterating over the segment (start, end)
        for b in [False, True]:
            if b:
                boundary_index = self.end_index - 1
                adjacent_index = self.end_index
            else:
                boundary_index = self.start_index
                adjacent_index = self.start_index - 1
            if adjacent_index >= 0 and adjacent_index < len(split_lines_of_utt):
                # only consider merging the adjacent word into the segment if we're not
                # at a segment boundary.
                adjacent_line_is_tainted = IsTainted(split_lines_of_utt[adjacent_index])
                # if the adjacent line wasn't tainted, then there must have been
                # another stronger reason why we didn't include it in the core
                # of the segment (probably that it was an ins, del or sub), so
                # there is no point considering it.
                if adjacent_line_is_tainted:
                    boundary_edit_type = split_lines_of_utt[boundary_index][7]
                    # Bug fix: this previously read field [7] (the edit type)
                    # rather than field [6] (the reference word), so the
                    # non-scored-word check below could never fire (an edit
                    # type such as 'cor' is never in non_scored_words).
                    boundary_ref_word = split_lines_of_utt[boundary_index][6]
                    # we only add the tainted line to the segment if the word at
                    # the boundary was a non-silence word that was correctly
                    # decoded and not fixed [see modify_ctm_edits.py.]
                    if boundary_edit_type == 'cor' and \
                       not boundary_ref_word in non_scored_words:
                        # Add the adjacent tainted line to the segment.
                        if b:
                            self.end_index += 1
                        else:
                            self.start_index -= 1

    # This is stage 2 of segment processing.
    # This function will split a segment into multiple pieces if any of the
    # internal [non-boundary] silences or non-scored words are longer
    # than the allowed values --max-internal-silence-length and
    # --max-internal-non-scored-length. This function returns a
    # list of segments. In the normal case (where there is no splitting)
    # it just returns an array with a single element 'self'.
    def PossiblySplitSegment(self):
        global non_scored_words, args
        # make sure the segment hasn't been processed more than we expect.
        assert self.start_unk_padding == 0.0 and self.end_unk_padding == 0.0 and \
            self.start_keep_proportion == 1.0 and self.end_keep_proportion == 1.0
        segments = []  # the answer
        cur_start_index = self.start_index
        cur_start_is_split = False
        # only consider splitting at non-boundary lines. [we'd just truncate
        # the boundary lines.]
        for index_to_split_at in range(cur_start_index + 1, self.end_index - 1):
            this_split_line = self.split_lines_of_utt[index_to_split_at]
            this_duration = float(this_split_line[3])
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            if (this_edit_type == 'sil' and this_duration > args.max_internal_silence_length) or \
               (this_ref_word in non_scored_words and this_duration > args.max_internal_non_scored_length):
                # We split this segment at this index, dividing the word in two
                # [later on, in PossiblyTruncateBoundaries, it may be further
                # truncated.]
                # Note: we use 'index_to_split_at + 1' because the Segment constructor
                # takes an 'end-index' which is interpreted as one past the end.
                new_segment = Segment(self.split_lines_of_utt, cur_start_index,
                                      index_to_split_at + 1, self.debug_str)
                if cur_start_is_split:
                    new_segment.start_keep_proportion = 0.5
                new_segment.end_keep_proportion = 0.5
                cur_start_is_split = True
                cur_start_index = index_to_split_at
                segments.append(new_segment)
        if len(segments) == 0:  # We did not split.
            segments.append(self)
        else:
            # We did split. Add the very last segment.
            new_segment = Segment(self.split_lines_of_utt, cur_start_index,
                                  self.end_index, self.debug_str)
            assert cur_start_is_split
            new_segment.start_keep_proportion = 0.5
            segments.append(new_segment)
        return segments

    # This is stage 3 of segment processing. It will truncate the silences and
    # non-scored words at the segment boundaries if they are longer than the
    # --max-edge-silence-length and --max-edge-non-scored-length respectively
    # (and to the extent that this wouldn't take us below the
    # --min-segment-length or --min-new-segment-length).
    def PossiblyTruncateBoundaries(self):
        for b in [True, False]:
            if b:
                this_index = self.start_index
            else:
                this_index = self.end_index - 1
            this_split_line = self.split_lines_of_utt[this_index]
            truncated_duration = None
            this_duration = float(this_split_line[3])
            this_edit = this_split_line[7]
            this_ref_word = this_split_line[6]
            if this_edit == 'sil' and \
               this_duration > args.max_edge_silence_length:
                truncated_duration = args.max_edge_silence_length
            elif this_ref_word in non_scored_words and \
                 this_duration > args.max_edge_non_scored_length:
                truncated_duration = args.max_edge_non_scored_length
            if truncated_duration != None:
                keep_proportion = truncated_duration / this_duration
                if b:
                    self.start_keep_proportion = keep_proportion
                else:
                    self.end_keep_proportion = keep_proportion

    # This relaxes the segment-boundary truncation of
    # PossiblyTruncateBoundaries(), if it would take us below
    # min-new-segment-length or min-segment-length. Note: this does not relax
    # the boundary truncation for a particular boundary (start or end) if that
    # boundary corresponds to a 'tainted' line of the ctm (because it's
    # dangerous to include too much 'tainted' audio).
    def RelaxBoundaryTruncation(self):
        # this should be called before adding unk padding.
        assert self.start_unk_padding == self.end_unk_padding == 0.0
        if self.start_keep_proportion == self.end_keep_proportion == 1.0:
            return  # nothing to do there was no truncation.
        length_cutoff = max(args.min_new_segment_length, args.min_segment_length)
        length_with_truncation = self.Length()
        if length_with_truncation >= length_cutoff:
            return  # Nothing to do.
        orig_start_keep_proportion = self.start_keep_proportion
        orig_end_keep_proportion = self.end_keep_proportion
        if not IsTainted(self.split_lines_of_utt[self.start_index]):
            self.start_keep_proportion = 1.0
        if not IsTainted(self.split_lines_of_utt[self.end_index - 1]):
            self.end_keep_proportion = 1.0
        length_with_relaxed_boundaries = self.Length()
        if length_with_relaxed_boundaries <= length_cutoff:
            # Completely undo the truncation [to the extent allowed by the
            # presence of tainted lines at the start/end] if, even without
            # truncation, we'd be below the length cutoff. This segment may be
            # removed later on (but it may not, if removing truncation makes us
            # identical to the input utterance, and the length is between
            # min_segment_length min_new_segment_length).
            return
        # Next, compute an interpolation constant a such that the
        # {start,end}_keep_proportion values will equal a *
        # [values-computed-by-PossiblyTruncateBoundaries()] + (1-a) * [completely-relaxed-values].
        # we're solving the equation:
        # length_cutoff = a * length_with_truncation + (1-a) * length_with_relaxed_boundaries
        # -> length_cutoff - length_with_relaxed_boundaries =
        #        a * (length_with_truncation - length_with_relaxed_boundaries)
        # -> a = (length_cutoff - length_with_relaxed_boundaries) / (length_with_truncation - length_with_relaxed_boundaries)
        a = (length_cutoff - length_with_relaxed_boundaries) / \
            (length_with_truncation - length_with_relaxed_boundaries)
        if a < 0.0 or a > 1.0:
            print("segment_ctm_edits.py: bad 'a' value = {0}".format(a), file = sys.stderr)
            return
        self.start_keep_proportion = \
            a * orig_start_keep_proportion + (1-a) * self.start_keep_proportion
        self.end_keep_proportion = \
            a * orig_end_keep_proportion + (1-a) * self.end_keep_proportion
        if not abs(self.Length() - length_cutoff) < 0.01:
            print("segment_ctm_edits.py: possible problem relaxing boundary "
                  "truncation, length is {0} vs {1}".format(self.Length(), length_cutoff),
                  file = sys.stderr)

    # This is stage 4 of segment processing.
    # This function may set start_unk_padding and end_unk_padding to nonzero
    # values. This is done if the current boundary words are real, scored
    # words and we're not next to the beginning or end of the utterance.
    def PossiblyAddUnkPadding(self):
        for b in [True, False]:
            if b:
                this_index = self.start_index
            else:
                this_index = self.end_index - 1
            this_split_line = self.split_lines_of_utt[this_index]
            this_start_time = float(this_split_line[2])
            this_ref_word = this_split_line[6]
            this_edit = this_split_line[7]
            if this_edit == 'cor' and not this_ref_word in non_scored_words:
                # we can consider adding unk-padding.
                if b:   # start of utterance.
                    unk_padding = args.unk_padding
                    if unk_padding > this_start_time:  # close to beginning of file
                        unk_padding = this_start_time
                    # If we could add less than half of the specified
                    # unk-padding, don't add any (because when we add
                    # unk-padding we add the unknown-word symbol '<unk>', and if
                    # there isn't enough space to traverse the HMM we don't want
                    # to do it at all.
                    if unk_padding < 0.5 * args.unk_padding:
                        unk_padding = 0.0
                    self.start_unk_padding = unk_padding
                else:   # end of utterance.
                    this_end_time = this_start_time + float(this_split_line[3])
                    last_line = self.split_lines_of_utt[-1]
                    utterance_end_time = float(last_line[2]) + float(last_line[3])
                    max_allowable_padding = utterance_end_time - this_end_time
                    assert max_allowable_padding > -0.01
                    unk_padding = args.unk_padding
                    if unk_padding > max_allowable_padding:
                        unk_padding = max_allowable_padding
                    # If we could add less than half of the specified
                    # unk-padding, don't add any (because when we add
                    # unk-padding we add the unknown-word symbol '<unk>', and if
                    # there isn't enough space to traverse the HMM we don't want
                    # to do it at all.
                    if unk_padding < 0.5 * args.unk_padding:
                        unk_padding = 0.0
                    self.end_unk_padding = unk_padding

    # This function will merge the segment in 'other' with the segment
    # in 'self'. It is only to be called when 'self' and 'other' are from
    # the same utterance, 'other' is after 'self' in time order (based on
    # the original segment cores), and self.EndTime() >= other.StartTime().
    # Note: in this situation there will normally be deleted words
    # between the two segments. What this program does with the deleted
    # words depends on '--max-deleted-words-kept-when-merging'. If there
    # were any inserted words in the transcript (less likely), this
    # program will keep the reference.
    def MergeWithSegment(self, other):
        assert self.EndTime() >= other.StartTime() and \
            self.StartTime() < other.EndTime() and \
            self.split_lines_of_utt is other.split_lines_of_utt
        orig_self_end_index = self.end_index
        self.debug_str = "({0}/merged-with/{1})".format(self.debug_str, other.debug_str)
        # everything that relates to the end of this segment gets copied
        # from 'other'.
        self.end_index = other.end_index
        self.end_unk_padding = other.end_unk_padding
        self.end_keep_proportion = other.end_keep_proportion
        # The next thing we have to do is to go over any lines of the ctm that
        # appear between 'self' and 'other', or are shared between both (this
        # would only happen for tainted silence or non-scored-word segments),
        # and decide what to do with them. We'll keep the reference for any
        # substitutions or insertions (which anyway are unlikely to appear
        # in these merged segments). Note: most of this happens in self.Text(),
        # but at this point we need to decide whether to mark any deletions
        # as 'discard-this-word'.
        first_index_of_overlap = min(orig_self_end_index - 1, other.start_index)
        last_index_of_overlap = max(orig_self_end_index - 1, other.start_index)
        num_deleted_words = 0
        for i in range(first_index_of_overlap, last_index_of_overlap + 1):
            edit_type = self.split_lines_of_utt[i][7]
            if edit_type == 'del':
                num_deleted_words += 1
        if num_deleted_words > args.max_deleted_words_kept_when_merging:
            for i in range(first_index_of_overlap, last_index_of_overlap + 1):
                if self.split_lines_of_utt[i][7] == 'del':
                    self.split_lines_of_utt[i].append('do-not-include-in-text')

    # Returns the start time of the segment (within the enclosing utterance).
    # This is before any rounding.
    def StartTime(self):
        first_line = self.split_lines_of_utt[self.start_index]
        first_line_start = float(first_line[2])
        first_line_duration = float(first_line[3])
        first_line_end = first_line_start + first_line_duration
        return first_line_end - self.start_unk_padding \
            - (first_line_duration * self.start_keep_proportion)

    # Returns some string-valued information about 'this' that is useful for debugging.
    def DebugInfo(self):
        return 'start=%d,end=%d,unk-padding=%.2f,%.2f,keep-proportion=%.2f,%.2f,' % \
            (self.start_index, self.end_index, self.start_unk_padding,
             self.end_unk_padding, self.start_keep_proportion, self.end_keep_proportion) + \
            self.debug_str

    # Returns the end time of the segment (within the enclosing utterance).
    # [comment fixed: this previously said "start time".]
    def EndTime(self):
        last_line = self.split_lines_of_utt[self.end_index - 1]
        last_line_start = float(last_line[2])
        last_line_duration = float(last_line[3])
        return last_line_start + (last_line_duration * self.end_keep_proportion) \
            + self.end_unk_padding

    # Returns the segment length in seconds.
    def Length(self):
        return self.EndTime() - self.StartTime()

    def IsWholeUtterance(self):
        # returns true if this segment corresponds to the whole utterance that
        # it's a part of (i.e. its start/end time are zero and the end-time of
        # the last segment.
        last_line_of_utt = self.split_lines_of_utt[-1]
        last_line_end_time = float(last_line_of_utt[2]) + float(last_line_of_utt[3])
        return abs(self.StartTime() - 0.0) < 0.001 and \
            abs(self.EndTime() - last_line_end_time) < 0.001

    # Returns the proportion of the duration of this segment that consists of
    # unk-padding and tainted lines of input (will be between 0.0 and 1.0).
    def JunkProportion(self):
        # Note: only the first and last lines could possibly be tainted as
        # that's how we create the segments; and if either or both are tainted
        # the utterance must contain other lines, so double-counting is not a
        # problem.
        junk_duration = self.start_unk_padding + self.end_unk_padding
        first_split_line = self.split_lines_of_utt[self.start_index]
        if IsTainted(first_split_line):
            first_duration = float(first_split_line[3])
            junk_duration += first_duration * self.start_keep_proportion
        last_split_line = self.split_lines_of_utt[self.end_index - 1]
        if IsTainted(last_split_line):
            last_duration = float(last_split_line[3])
            junk_duration += last_duration * self.end_keep_proportion
        return junk_duration / self.Length()

    # This function will remove something from the beginning of the
    # segment if it's possible to cleanly lop off a bit that contains
    # more junk, as a proportion of its length, than 'args.junk_proportion'.
    # Junk is defined as unk-padding and/or tainted segments.
    # It considers as a potential split point, the first silence
    # segment or non-tainted non-scored-word segment in the
    # utterance. See also TruncateEndForJunkProportion
    def PossiblyTruncateStartForJunkProportion(self):
        begin_junk_duration = self.start_unk_padding
        first_split_line = self.split_lines_of_utt[self.start_index]
        if IsTainted(first_split_line):
            first_duration = float(first_split_line[3])
            begin_junk_duration += first_duration * self.start_keep_proportion
        if begin_junk_duration == 0.0:
            # nothing to do.
            return
        candidate_start_index = None
        # the following iterates over all lines internal to the utterance.
        for i in range(self.start_index + 1, self.end_index - 1):
            this_split_line = self.split_lines_of_utt[i]
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            # We'll consider splitting on silence and on non-scored words.
            # (i.e. making the silence or non-scored word the left boundary of
            # the new utterance and discarding the piece to the left of that).
            if this_edit_type == 'sil' or \
               (this_edit_type == 'cor' and this_ref_word in non_scored_words):
                candidate_start_index = i
                candidate_start_time = float(this_split_line[2])
                break  # Consider only the first potential truncation.
        if candidate_start_index == None:
            return  # Nothing to do as there is no place to split.
        candidate_removed_piece_duration = candidate_start_time - self.StartTime()
        if begin_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
            return  # Nothing to do as the candidate piece to remove has too
                    # little junk.
        # OK, remove the piece.
        self.start_index = candidate_start_index
        self.start_unk_padding = 0.0
        self.start_keep_proportion = 1.0
        self.debug_str += ',truncated-start-for-junk'

    # This is like PossiblyTruncateStartForJunkProportion(), but
    # acts on the end of the segment; see comments there.
    def PossiblyTruncateEndForJunkProportion(self):
        end_junk_duration = self.end_unk_padding
        last_split_line = self.split_lines_of_utt[self.end_index - 1]
        if IsTainted(last_split_line):
            last_duration = float(last_split_line[3])
            end_junk_duration += last_duration * self.end_keep_proportion
        if end_junk_duration == 0.0:
            # nothing to do.
            return
        candidate_end_index = None
        # the following iterates over all lines internal to the utterance
        # (starting from the end).
        for i in reversed(range(self.start_index + 1, self.end_index - 1)):
            this_split_line = self.split_lines_of_utt[i]
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            # We'll consider splitting on silence and on non-scored words.
            # (i.e. making the silence or non-scored word the right boundary of
            # the new utterance and discarding the piece to the right of that).
            if this_edit_type == 'sil' or \
               (this_edit_type == 'cor' and this_ref_word in non_scored_words):
                candidate_end_index = i + 1  # note: end-indexes are one past the last.
                candidate_end_time = float(this_split_line[2]) + float(this_split_line[3])
                break  # Consider only the latest potential truncation.
        if candidate_end_index == None:
            return  # Nothing to do as there is no place to split.
        candidate_removed_piece_duration = self.EndTime() - candidate_end_time
        if end_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
            return  # Nothing to do as the candidate piece to remove has too
                    # little junk.
        # OK, remove the piece.
        self.end_index = candidate_end_index
        self.end_unk_padding = 0.0
        self.end_keep_proportion = 1.0
        self.debug_str += ',truncated-end-for-junk'

    # this will return true if there is at least one word in the utterance
    # that's a scored word (not a non-scored word) and not an OOV word that's
    # realized as unk. This becomes a filter on keeping segments.
    def ContainsAtLeastOneScoredNonOovWord(self):
        global non_scored_words
        for i in range(self.start_index, self.end_index):
            this_split_line = self.split_lines_of_utt[i]
            this_hyp_word = this_split_line[4]
            this_ref_word = this_split_line[6]
            this_edit = this_split_line[7]
            if this_edit == 'cor' and not this_ref_word in non_scored_words \
               and this_ref_word == this_hyp_word:
                return True
        return False

    # Returns the text corresponding to this utterance, as a string.
    def Text(self):
        global oov_symbol
        text_array = []
        if self.start_unk_padding != 0.0:
            text_array.append(oov_symbol)
        for i in range(self.start_index, self.end_index):
            this_split_line = self.split_lines_of_utt[i]
            this_edit = this_split_line[7]
            this_ref_word = this_split_line[6]
            if this_ref_word != '<eps>' and this_split_line[-1] != 'do-not-include-in-text':
                text_array.append(this_ref_word)
        if self.end_unk_padding != 0.0:
            text_array.append(oov_symbol)
        return ' '.join(text_array)
# Here, 'text' will be something that indicates the stage of processing,
# e.g. 'Stage 0: segment cores', 'Stage 1: add tainted lines',
#, etc.
def AccumulateSegmentStats(segment_list, text):
    """Accumulate per-stage statistics: bump the segment count and total
    segment duration recorded under the stage label 'text' (e.g.
    'Stage 0: segment cores')."""
    global segment_total_length, num_segments
    num_segments[text] += len(segment_list)
    for seg in segment_list:
        segment_total_length[text] += seg.Length()
def PrintSegmentStats():
    """Print, to stderr, a per-stage summary of segment counts and of the
    retained proportion of the original audio (with the delta vs. the
    previous stage)."""
    global segment_total_length, num_segments, \
        num_utterances, num_utterances_without_segments, \
        total_length_of_utterances

    print('Number of utterances is %d, of which %.2f%% had no segments after '
          'all processing; total length of data in original utterances (in seconds) '
          'was %d' % (num_utterances,
                      num_utterances_without_segments * 100.0 / num_utterances,
                      total_length_of_utterances),
          file = sys.stderr)

    prev_key = None
    for key in sorted(segment_total_length.keys()):
        if prev_key is not None:
            # change relative to the previous stage, as a percentage of the
            # original total audio length.
            delta_percentage = '[%+.2f%%]' % (
                (segment_total_length[key] - segment_total_length[prev_key])
                * 100.0 / total_length_of_utterances)
        print('At %s, num-segments is %d, total length %.2f%% of original total %s' % (
            key, num_segments[key],
            segment_total_length[key] * 100.0 / total_length_of_utterances,
            delta_percentage if prev_key is not None else ''),
            file = sys.stderr)
        prev_key = key
# This function creates the segments for an utterance as a list
# of class Segment.
# It returns a 2-tuple (list-of-segments, list-of-deleted-segments)
# where the deleted segments are only useful for diagnostic printing.
# Note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
def GetSegmentsForUtterance(split_lines_of_utt):
    """Create the segments for one utterance, as a list of class Segment.

    Takes the split ctm-edits lines of a single utterance and runs the full
    segmentation pipeline: find segment cores, attach tainted boundary lines,
    split on long internal silences/non-scored words, truncate and relax
    boundaries, add unk-padding, filter by length, junk proportion and
    word content, and finally merge overlapping/touching segments.
    Returns a 2-tuple (segments, deleted_segments); the deleted segments are
    only useful for diagnostic printing.  Also updates the global
    utterance-level statistics.
    """
    global num_utterances, num_utterances_without_segments, total_length_of_utterances
    num_utterances += 1
    segment_ranges = ComputeSegmentCores(split_lines_of_utt)
    utterance_end_time = float(split_lines_of_utt[-1][2]) + float(split_lines_of_utt[-1][3])
    total_length_of_utterances += utterance_end_time
    segments = [ Segment(split_lines_of_utt, x[0], x[1])
                 for x in segment_ranges ]
    AccumulateSegmentStats(segments, 'stage 0 [segment cores]')
    for segment in segments:
        segment.PossiblyAddTaintedLines()
    AccumulateSegmentStats(segments, 'stage 1 [add tainted lines]')
    new_segments = []
    for s in segments:
        new_segments += s.PossiblySplitSegment()
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 2 [split segments]')
    for s in segments:
        s.PossiblyTruncateBoundaries()
    AccumulateSegmentStats(segments, 'stage 3 [truncate boundaries]')
    for s in segments:
        s.RelaxBoundaryTruncation()
    AccumulateSegmentStats(segments, 'stage 4 [relax boundary truncation]')
    for s in segments:
        s.PossiblyAddUnkPadding()
    AccumulateSegmentStats(segments, 'stage 5 [unk-padding]')
    deleted_segments = []
    new_segments = []
    for s in segments:
        # the 0.999 allows for roundoff error.
        if (not s.IsWholeUtterance() and s.Length() < 0.999 * args.min_new_segment_length):
            s.debug_str += '[deleted-because-of--min-new-segment-length]'
            deleted_segments.append(s)
        else:
            new_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 6 [remove new segments under --min-new-segment-length')
    new_segments = []
    for s in segments:
        # the 0.999 allows for roundoff error.
        if s.Length() < 0.999 * args.min_segment_length:
            s.debug_str += '[deleted-because-of--min-segment-length]'
            deleted_segments.append(s)
        else:
            new_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 7 [remove segments under --min-segment-length')
    for s in segments:
        s.PossiblyTruncateStartForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 8 [truncate segment-starts for --max-junk-proportion')
    for s in segments:
        s.PossiblyTruncateEndForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 9 [truncate segment-ends for --max-junk-proportion')
    new_segments = []
    for s in segments:
        if s.ContainsAtLeastOneScoredNonOovWord():
            new_segments.append(s)
        else:
            s.debug_str += '[deleted-because-no-scored-non-oov-words]'
            deleted_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 10 [remove segments without scored,non-OOV words]')
    new_segments = []
    for s in segments:
        j = s.JunkProportion()
        if j <= args.max_junk_proportion:
            new_segments.append(s)
        else:
            s.debug_str += '[deleted-because-junk-proportion={0}]'.format(j)
            deleted_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 11 [remove segments with junk exceeding --max-junk-proportion]')
    new_segments = []
    if len(segments) > 0:
        new_segments.append(segments[0])
        for i in range(1, len(segments)):
            if new_segments[-1].EndTime() >= segments[i].StartTime():
                new_segments[-1].MergeWithSegment(segments[i])
            else:
                new_segments.append(segments[i])
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 12 [merge overlapping or touching segments]')
    for i in range(len(segments) - 1):
        if segments[i].EndTime() > segments[i+1].StartTime():
            # this just adds something to --ctm-edits-out output
            segments[i+1].debug_str += ",overlaps-previous-segment"
    if len(segments) == 0:
        num_utterances_without_segments += 1
    return (segments, deleted_segments)
# this prints a number with a certain number of digits after
# the point, while removing trailing zeros.
def FloatToString(f):
    """Print a float with roughly 6 digits after the decimal point while
    letting '%g' formatting strip any trailing zeros."""
    # Start from 6 significant digits and add one more for each power of
    # ten in the integer part, so the precision after the point stays put.
    precision = 6
    magnitude = abs(f)
    while magnitude > 1.0:
        magnitude *= 0.1
        precision += 1
    return ('%.{0}g'.format(precision)) % f
# Gives time in string form as an exact multiple of the frame-length, e.g. 0.01
# (after rounding).
def TimeToString(time, frame_length):
    """Render 'time' as a string that is an exact multiple of
    'frame_length' (e.g. 0.01) after rounding.

    FloatToString removes trailing zeros, so e.g. 0.01 is printed as
    0.01 and not 0.0099999999999999; doing that directly would require
    assuming frame_length is of the form 10^-n, which we avoid.
    """
    num_frames = round(time / frame_length)
    assert num_frames >= 0
    return FloatToString(num_frames * frame_length)
def WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
                              old_utterance_name, segments):
    """Write one 'text' line and one 'segments' line per segment.

    Split utterances are named <old-utterance-id>-1, <old-utterance-id>-2,
    etc.  The text line has the form '<new-utterance-id> <text>'; the
    segments line has the form
    '<new-utterance-id> <old-utterance-id> <start-time> <end-time>'.
    """
    for idx, segment in enumerate(segments):
        new_utterance_name = '{0}-{1}'.format(old_utterance_name, idx + 1)
        # e.g. 'foo-bar-1 hello this is dan'
        print(new_utterance_name, segment.Text(), file = text_output_handle)
        # e.g. 'foo-bar-1 foo-bar 5.1 7.2'
        print(new_utterance_name, old_utterance_name,
              TimeToString(segment.StartTime(), args.frame_length),
              TimeToString(segment.EndTime(), args.frame_length),
              file = segments_output_handle)
# Note, this is destructive of 'segments_for_utterance', but it won't matter.
def PrintDebugInfoForUtterance(ctm_edits_out_handle,
                               split_lines_of_cur_utterance,
                               segments_for_utterance,
                               deleted_segments_for_utterance):
    """Write the utterance's ctm-edits lines to the --ctm-edits-out file,
    annotated with where each kept or deleted segment starts and ends.
    This output is intended for human inspection only.  Mutates both
    split_lines_of_cur_utterance (appends fields) and its own local
    event list, which is harmless since the caller discards them.
    """
    # info_to_print will be list of 2-tuples (time, 'start-segment-n'|'end-segment-n')
    # representing the start or end times of segments.
    info_to_print = []
    for n in range(len(segments_for_utterance)):
        segment = segments_for_utterance[n]
        start_string = 'start-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
        info_to_print.append( (segment.StartTime(), start_string) )
        end_string = 'end-segment-' + str(n+1)
        info_to_print.append( (segment.EndTime(), end_string) )
    # for segments that were deleted we print info like start-deleted-segment-1, and
    # otherwise similar info to segments that were retained.
    for n in range(len(deleted_segments_for_utterance)):
        segment = deleted_segments_for_utterance[n]
        start_string = 'start-deleted-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
        info_to_print.append( (segment.StartTime(), start_string) )
        end_string = 'end-deleted-segment-' + str(n+1)
        info_to_print.append( (segment.EndTime(), end_string) )
    # sort the events by time (tuples compare lexicographically, time first).
    info_to_print = sorted(info_to_print)
    for i in range(len(split_lines_of_cur_utterance)):
        split_line=split_lines_of_cur_utterance[i]
        split_line[0] += '[' + str(i) + ']' # add an index like [0], [1], to
                                            # the utterance-id so we can easily
                                            # look up segment indexes.
        start_time = float(split_line[2])
        end_time = start_time + float(split_line[3])
        split_line_copy = list(split_line)
        # attach to this ctm line every segment-boundary event that falls
        # at or before this line's end time, consuming events in order.
        while len(info_to_print) > 0 and info_to_print[0][0] <= end_time:
            (segment_start, string) = info_to_print[0]
            # shift the first element off of info_to_print.
            info_to_print = info_to_print[1:]
            # add a field like 'start-segment1[...]=3.21' to what we're about to print.
            split_line_copy.append(string + "=" + TimeToString(segment_start, args.frame_length))
        print(' '.join(split_line_copy), file = ctm_edits_out_handle)
# This accumulates word-level stats about, for each reference word, with what
# probability it will end up in the core of a segment. Words with low
# probabilities of being in segments will generally be associated with some kind
# of error (there is a higher probability of having a wrong lexicon entry).
def AccWordStatsForUtterance(split_lines_of_utt,
                             segments_for_utterance):
    """Accumulate per-word stats into the global 'word_count_pair' map,
    which maps each reference word to a list
    [total-count, count-not-within-segments].  Words with a high
    proportion of out-of-segment occurrences are often associated with
    lexicon or transcription errors.
    """
    global word_count_pair
    # Mark which ctm lines are covered by at least one segment.
    covered = [ False ] * len(split_lines_of_utt)
    for segment in segments_for_utterance:
        covered[segment.start_index:segment.end_index] = \
            [ True ] * (segment.end_index - segment.start_index)
    for line_index, split_line in enumerate(split_lines_of_utt):
        ref_word = split_line[6]
        if ref_word == '<eps>':
            continue
        word_count_pair[ref_word][0] += 1
        if not covered[line_index]:
            word_count_pair[ref_word][1] += 1
def PrintWordStats(word_stats_out):
    """Write per-word exclusion statistics to the file 'word_stats_out':
    one line per word of the form '<word> <badness> <total-count>',
    sorted so the most problematic words come first.  Reads the global
    'word_count_pair' accumulated by AccWordStatsForUtterance().
    Exits with an error message on any file-I/O failure.
    """
    try:
        f = open(word_stats_out, 'w')
    except:
        sys.exit("segment_ctm_edits.py: error opening word-stats file --word-stats-out={0} "
                 "for writing".format(word_stats_out))
    global word_count_pair
    # Sort from most to least problematic. We want to give more prominence to
    # words that are most frequently not in segments, but also to high-count
    # words. Define badness = pair[1] / pair[0], and total_count = pair[0],
    # where 'pair' is a value of word_count_pair. We'll reverse sort on
    # badness^3 * total_count = pair[1]^3 / pair[0]^2.
    for key, pair in sorted(word_count_pair.items(),
                            key = lambda item: (item[1][1] ** 3) * 1.0 / (item[1][0] ** 2),
                            reverse = True):
        badness = pair[1] * 1.0 / pair[0]
        total_count = pair[0]
        print(key, badness, total_count, file = f)
    try:
        f.close()
    except:
        sys.exit("segment_ctm_edits.py: error closing file --word-stats-out={0} "
                 "(full disk?)".format(word_stats_out))
    print("segment_ctm_edits.py: please see the file {0} for word-level statistics "
          "saying how frequently each word was excluded for a segment; format is "
          "<word> <proportion-of-time-excluded> <total-count>. Particularly "
          "problematic words appear near the top of the file.".format(word_stats_out),
          file = sys.stderr)
def ProcessData():
    """Top-level driver: read the ctm-edits input line by line, group the
    lines per utterance (input is assumed grouped by utterance-id in
    field 0), compute segments for each utterance via
    GetSegmentsForUtterance(), and write the 'text', 'segments' and
    (optionally) --ctm-edits-out outputs.

    NOTE(review): the error messages below say 'modify_ctm_edits.py'
    although this script appears to be segment_ctm_edits.py -- looks
    like a copy-paste leftover; confirm before changing user-visible
    text.
    """
    try:
        f_in = open(args.ctm_edits_in)
    except:
        sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
                 "file {0}".format(args.ctm_edits_in))
    try:
        text_output_handle = open(args.text_out, 'w')
    except:
        sys.exit("modify_ctm_edits.py: error opening text output "
                 "file {0}".format(args.text_out))
    try:
        segments_output_handle = open(args.segments_out, 'w')
    except:
        sys.exit("modify_ctm_edits.py: error opening segments output "
                 "file {0}".format(args.text_out))
    if args.ctm_edits_out != None:
        try:
            ctm_edits_output_handle = open(args.ctm_edits_out, 'w')
        except:
            sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
                     "file {0}".format(args.ctm_edits_out))
    # Most of what we're doing in the lines below is splitting the input lines
    # and grouping them per utterance, before giving them to ProcessUtterance()
    # and then printing the modified lines.
    first_line = f_in.readline()
    if first_line == '':
        sys.exit("modify_ctm_edits.py: empty input")
    split_pending_line = first_line.split()
    if len(split_pending_line) == 0:
        sys.exit("modify_ctm_edits.py: bad input line " + first_line)
    cur_utterance = split_pending_line[0]
    split_lines_of_cur_utterance = []
    # Loop invariant: split_pending_line holds the next unconsumed input
    # line (empty list at EOF); split_lines_of_cur_utterance holds the
    # lines of cur_utterance accumulated so far.
    while True:
        if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
            # cur_utterance is complete: segment it and write its outputs.
            (segments_for_utterance,
             deleted_segments_for_utterance) = GetSegmentsForUtterance(split_lines_of_cur_utterance)
            AccWordStatsForUtterance(split_lines_of_cur_utterance, segments_for_utterance)
            WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
                                      cur_utterance, segments_for_utterance)
            if args.ctm_edits_out != None:
                PrintDebugInfoForUtterance(ctm_edits_output_handle,
                                           split_lines_of_cur_utterance,
                                           segments_for_utterance,
                                           deleted_segments_for_utterance)
            split_lines_of_cur_utterance = []
            if len(split_pending_line) == 0:
                break
            else:
                cur_utterance = split_pending_line[0]
        split_lines_of_cur_utterance.append(split_pending_line)
        next_line = f_in.readline()
        split_pending_line = next_line.split()
        if len(split_pending_line) == 0:
            # an empty split is only legitimate at end-of-file.
            if next_line != '':
                sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
    try:
        text_output_handle.close()
        segments_output_handle.close()
        if args.ctm_edits_out != None:
            ctm_edits_output_handle.close()
    except:
        sys.exit("modify_ctm_edits.py: error closing one or more outputs "
                 "(broken pipe or full disk?)")
def ReadNonScoredWords(non_scored_words_file):
    """Read the non-scored-words file (one word per line) into the global
    set 'non_scored_words'.

    Exits with an error message if the file cannot be opened or if any
    line does not contain exactly one word.
    """
    global non_scored_words
    try:
        f = open(non_scored_words_file)
    except:
        sys.exit("modify_ctm_edits.py: error opening file: "
                 "--non-scored-words=" + non_scored_words_file)
    # iterate the file directly instead of materializing readlines();
    # also check the already-split list rather than re-splitting the line.
    for line in f:
        a = line.split()
        if len(a) != 1:
            sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
                     "file {0}: {1}".format(non_scored_words_file, line))
        non_scored_words.add(a[0])
    f.close()
# ---- main script: set up globals, then run the pipeline. ----
# global set of words that are not scored (noise, cough, etc.), read from
# the first positional argument.
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
# oov_symbol is the text form of the OOV word (normally '<unk>'), read
# from --oov-symbol-file; required whenever --unk-padding is nonzero.
oov_symbol = None
if args.oov_symbol_file != None:
    try:
        with open(args.oov_symbol_file) as f:
            line = f.readline()
            assert len(line.split()) == 1
            oov_symbol = line.split()[0]
            # the file must contain exactly one line.
            assert f.readline() == ''
    except Exception as e:
        sys.exit("segment_ctm_edits.py: error reading file --oov-symbol-file=" +
                 args.oov_symbol_file + ", error is: " + str(e))
elif args.unk_padding != 0.0:
    sys.exit("segment_ctm_edits.py: if the --unk-padding option is nonzero (which "
             "it is by default, the --oov-symbol-file option must be supplied.")
# segment_total_length and num_segments are maps from
# 'stage' strings; see AccumulateSegmentStats for details.
segment_total_length = defaultdict(int)
num_segments = defaultdict(int)
# the lambda expression below is an anonymous function that takes no arguments
# and returns the new list [0, 0].
word_count_pair = defaultdict(lambda: [0, 0])
# utterance-level counters updated by GetSegmentsForUtterance().
num_utterances = 0
num_utterances_without_segments = 0
total_length_of_utterances = 0
ProcessData()
PrintSegmentStats()
if args.word_stats_out != None:
    PrintWordStats(args.word_stats_out)
if args.ctm_edits_out != None:
    print("segment_ctm_edits.py: detailed utterance-level debug information "
          "is in " + args.ctm_edits_out, file = sys.stderr)
| 51.010618 | 125 | 0.641058 |
from __future__ import print_function
import sys, operator, argparse, os
from collections import defaultdict
# Command-line interface.
parser = argparse.ArgumentParser(
    description = "This program produces segmentation and text information "
    "based on reading ctm-edits input format which is produced by "
    "steps/cleanup/internal/get_ctm_edits.py, steps/cleanup/internal/modify_ctm_edits.py and "
    "steps/cleanup/internal/taint_ctm_edits.py.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--min-segment-length", type = float, default = 0.5,
                    help = "Minimum allowed segment length (in seconds) for any "
                    "segment; shorter segments than this will be discarded.")
parser.add_argument("--min-new-segment-length", type = float, default = 1.0,
                    help = "Minimum allowed segment length (in seconds) for newly "
                    "created segments (i.e. not identical to the input utterances). "
                    "Expected to be >= --min-segment-length.")
parser.add_argument("--frame-length", type = float, default = 0.01,
                    help = "This only affects rounding of the output times; they will "
                    "be constrained to multiples of this value.")
parser.add_argument("--max-tainted-length", type = float, default = 0.05,
                    help = "Maximum allowed length of any 'tainted' line. Note: "
                    "'tainted' lines may only appear at the boundary of a "
                    "segment")
parser.add_argument("--max-edge-silence-length", type = float, default = 0.5,
                    help = "Maximum allowed length of silence if it appears at the "
                    "edge of a segment (will be truncated). This rule is "
                    "relaxed if such truncation would take a segment below "
                    "the --min-segment-length or --min-new-segment-length.")
parser.add_argument("--max-edge-non-scored-length", type = float, default = 0.5,
                    help = "Maximum allowed length of a non-scored word (noise, cough, etc.) "
                    "if it appears at the edge of a segment (will be truncated). "
                    "This rule is relaxed if such truncation would take a "
                    "segment below the --min-segment-length.")
parser.add_argument("--max-internal-silence-length", type = float, default = 2.0,
                    help = "Maximum allowed length of silence if it appears inside a segment "
                    "(will cause the segment to be split).")
parser.add_argument("--max-internal-non-scored-length", type = float, default = 2.0,
                    help = "Maximum allowed length of a non-scored word (noise, etc.) if "
                    "it appears inside a segment (will cause the segment to be "
                    "split). Note: reference words which are real words but OOV "
                    "are not included in this category.")
parser.add_argument("--unk-padding", type = float, default = 0.05,
                    help = "Amount of padding with <unk> that we do if a segment boundary is "
                    "next to errors (ins, del, sub). That is, we add this amount of "
                    "time to the segment and add the <unk> word to cover the acoustics. "
                    "If nonzero, the --oov-symbol-file option must be supplied.")
parser.add_argument("--max-junk-proportion", type = float, default = 0.1,
                    help = "Maximum proportion of the time of the segment that may "
                    "consist of potentially bad data, in which we include 'tainted' lines of "
                    "the ctm-edits input and unk-padding.")
# Bug fix: this option was declared with type = str, but its value is
# compared numerically against an int word count in MergeWithSegment();
# with type = str, supplying the option on the command line makes that
# comparison lexicographic on Python 2 and a TypeError on Python 3.
parser.add_argument("--max-deleted-words-kept-when-merging", type = int, default = 1,
                    help = "When merging segments that are found to be overlapping or "
                    "adjacent after all other processing, keep in the transcript the "
                    "reference words that were deleted between the segments [if any] "
                    "as long as there were no more than this many reference words. "
                    "Setting this to zero will mean that any reference words that "
                    "were deleted between the segments we're about to reattach will "
                    "not appear in the generated transcript (so we'll match the hyp).")
parser.add_argument("--oov-symbol-file", type = str, default = None,
                    help = "Filename of file such as data/lang/oov.txt which contains "
                    "the text form of the OOV word, normally '<unk>'. Supplied as "
                    "a file to avoid complications with escaping. Necessary if "
                    "the --unk-padding option has a nonzero value (which it does "
                    "by default.")
parser.add_argument("--ctm-edits-out", type = str,
                    help = "Filename to output an extended version of the ctm-edits format "
                    "with segment start and end points noted. This file is intended to be "
                    "read by humans; there are currently no scripts that will read it.")
parser.add_argument("--word-stats-out", type = str,
                    help = "Filename for output of word-level stats, of the form "
                    "'<word> <bad-proportion> <total-count-in-ref>', e.g. 'hello 0.12 12408', "
                    "where the <bad-proportion> is the proportion of the time that this "
                    "reference word does not make it into a segment. It can help reveal words "
                    "that have problematic pronunciations or are associated with "
                    "transcription errors.")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
                    help="Filename of file containing a list of non-scored words, "
                    "one per line. See steps/cleanup/internal/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
                    help = "Filename of input ctm-edits file. "
                    "Use /dev/stdin for standard input.")
parser.add_argument("text_out", metavar = "<text-out>",
                    help = "Filename of output text file (same format as data/train/text, i.e. "
                    "<new-utterance-id> <word1> <word2> ... <wordN>")
parser.add_argument("segments_out", metavar = "<segments-out>",
                    help = "Filename of output segments. This has the same format as data/train/segments, "
                    "but instead of <recording-id>, the second field is the old utterance-id, i.e "
                    "<new-utterance-id> <old-utterance-id> <start-time> <end-time>")
args = parser.parse_args()
def IsTainted(split_line_of_utt):
    """True if this split ctm-edits line carries the optional 9th field
    'tainted' (added by taint_ctm_edits.py)."""
    fields = split_line_of_utt
    return len(fields) >= 9 and fields[8] == 'tainted'
# Computes the candidate segment 'cores' of an utterance; a later stage may
# extend them to include any adjacent tainted words (or parts of them).
def ComputeSegmentCores(split_lines_of_utt):
    """Compute the 'core' regions of segments for one utterance.

    A core starts from maximal runs of correct lines (edit type 'cor'
    with hypothesis word equal to reference word) and is extended over
    adjacent untainted 'cor'/'sil'/'fix' lines.  Returns a list of
    half-open (start, end) index pairs into split_lines_of_utt.
    """
    num_lines = len(split_lines_of_utt)

    def is_tainted(line):
        # inline equivalent of IsTainted(): optional 9th field 'tainted'.
        return len(line) > 8 and line[8] == 'tainted'

    def is_extendable(line):
        # a core may absorb adjacent untainted correct/silence/fix lines.
        return (not is_tainted(line)) and line[7] in ('cor', 'sil', 'fix')

    in_core = [line[7] == 'cor' and line[4] == line[6]
               for line in split_lines_of_utt]
    # extend each proto-segment forwards as far as we can:
    for i in range(1, num_lines):
        if in_core[i-1] and not in_core[i] and \
           is_extendable(split_lines_of_utt[i]):
            in_core[i] = True
    # extend each proto-segment backwards as far as we can:
    for i in reversed(range(0, num_lines - 1)):
        if in_core[i+1] and not in_core[i] and \
           is_extendable(split_lines_of_utt[i]):
            in_core[i] = True
    # convert the boolean mask into half-open (start, end) ranges.
    segment_ranges = []
    run_start = None
    for i in range(num_lines):
        if in_core[i]:
            if run_start is None:
                run_start = i
        elif run_start is not None:
            segment_ranges.append((run_start, i))
            run_start = None
    if run_start is not None:
        segment_ranges.append((run_start, num_lines))
    return segment_ranges
class Segment:
    """One proposed output segment of an utterance.

    A Segment is a contiguous range [start_index, end_index) of lines of
    the split ctm-edits input of one utterance, plus optional <unk>
    padding at either end and per-boundary 'keep proportions' that allow
    the first and last lines to be partially truncated.  The Possibly*
    methods implement successive processing stages that adjust these
    boundaries; they read the module-level 'args', 'non_scored_words'
    and 'oov_symbol' globals and the module-level IsTainted() helper.
    """
    def __init__(self, split_lines_of_utt, start_index, end_index, debug_str = None):
        self.split_lines_of_utt = split_lines_of_utt
        # start_index is the index of the first line that appears in this
        # segment, and end_index is one past the last line.  This does not
        # include unk-padding.
        self.start_index = start_index
        self.end_index = end_index
        # If nonzero, when we create the segment we will add <unk> at its
        # start/end [representing partial words], covering this much
        # additional audio (in seconds).
        self.start_unk_padding = 0.0
        self.end_unk_padding = 0.0
        # debug_str keeps track of the 'core' of the segment.
        if debug_str == None:
            debug_str = 'core-start={0},core-end={1}'.format(start_index,end_index)
        self.debug_str = debug_str
        # Proportion of the duration of the first/last line that we keep.
        # Usually 1.0, but may be less if we've trimmed away some of it.
        self.start_keep_proportion = 1.0
        self.end_keep_proportion = 1.0

    # Stage 1 of segment processing (the core boundaries themselves are
    # created outside this class).  May reduce start_index and/or increase
    # end_index by including a single adjacent 'tainted' line from the
    # ctm-edits file, but only when the line currently at the boundary is
    # a correctly-decoded scored word: we probably don't want to start or
    # end the segment right at the boundary of such a word.
    def PossiblyAddTaintedLines(self):
        global non_scored_words
        split_lines_of_utt = self.split_lines_of_utt
        for b in [False, True]:
            if b:
                boundary_index = self.end_index - 1
                adjacent_index = self.end_index
            else:
                boundary_index = self.start_index
                adjacent_index = self.start_index - 1
            if adjacent_index >= 0 and adjacent_index < len(split_lines_of_utt):
                # only consider absorbing the adjacent line if it is
                # tainted (otherwise there was a stronger reason why it
                # was not included in the core).
                adjacent_line_is_tainted = IsTainted(split_lines_of_utt[adjacent_index])
                if adjacent_line_is_tainted:
                    boundary_edit_type = split_lines_of_utt[boundary_index][7]
                    # Bug fix: field 4 is the hypothesis word; the original
                    # read field 7 (the edit type), which can never be in
                    # non_scored_words, so the guard below was a no-op.
                    boundary_hyp_word = split_lines_of_utt[boundary_index][4]
                    if boundary_edit_type == 'cor' and \
                       not boundary_hyp_word in non_scored_words:
                        if b:
                            self.end_index += 1
                        else:
                            self.start_index -= 1

    # Stage 2: splits the segment wherever it contains internal silence or
    # a non-scored word whose duration exceeds
    # --max-internal-silence-length / --max-internal-non-scored-length.
    # Returns the list of resulting segments (possibly just [self]).
    def PossiblySplitSegment(self):
        global non_scored_words, args
        assert self.start_unk_padding == 0.0 and self.end_unk_padding == 0.0 and \
               self.start_keep_proportion == 1.0 and self.end_keep_proportion == 1.0
        segments = []  # the answer
        cur_start_index = self.start_index
        cur_start_is_split = False
        # only consider splitting at non-boundary lines [at boundary lines
        # we would just truncate].
        for index_to_split_at in range(cur_start_index + 1, self.end_index - 1):
            this_split_line = self.split_lines_of_utt[index_to_split_at]
            this_duration = float(this_split_line[3])
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            if (this_edit_type == 'sil' and this_duration > args.max_internal_silence_length) or \
               (this_ref_word in non_scored_words and this_duration > args.max_internal_non_scored_length):
                # We split in the middle of the line we're splitting on, so
                # each side keeps half of its duration.
                new_segment = Segment(self.split_lines_of_utt, cur_start_index,
                                      index_to_split_at + 1, self.debug_str)
                if cur_start_is_split:
                    new_segment.start_keep_proportion = 0.5
                new_segment.end_keep_proportion = 0.5
                cur_start_is_split = True
                cur_start_index = index_to_split_at
                segments.append(new_segment)
        if len(segments) == 0:
            # We did not split.
            segments.append(self)
        else:
            # The tail piece after the last split point.
            new_segment = Segment(self.split_lines_of_utt, cur_start_index,
                                  self.end_index, self.debug_str)
            assert cur_start_is_split
            new_segment.start_keep_proportion = 0.5
            segments.append(new_segment)
        return segments

    # Stage 3: truncates overly-long silence or non-scored words at the
    # segment boundaries, per --max-edge-silence-length and
    # --max-edge-non-scored-length, by reducing the keep-proportion of the
    # boundary line.  This may later be relaxed if the segment would fall
    # below --min-segment-length or --min-new-segment-length.
    def PossiblyTruncateBoundaries(self):
        for b in [True, False]:
            if b:
                this_index = self.start_index
            else:
                this_index = self.end_index - 1
            this_split_line = self.split_lines_of_utt[this_index]
            truncated_duration = None
            this_duration = float(this_split_line[3])
            this_edit = this_split_line[7]
            this_ref_word = this_split_line[6]
            if this_edit == 'sil' and \
               this_duration > args.max_edge_silence_length:
                truncated_duration = args.max_edge_silence_length
            elif this_ref_word in non_scored_words and \
                 this_duration > args.max_edge_non_scored_length:
                truncated_duration = args.max_edge_non_scored_length
            if truncated_duration != None:
                keep_proportion = truncated_duration / this_duration
                if b:
                    self.start_keep_proportion = keep_proportion
                else:
                    self.end_keep_proportion = keep_proportion

    # Stage 4: relaxes the boundary truncation done by
    # PossiblyTruncateBoundaries() if it would take us below
    # min-new-segment-length or min-segment-length.  Truncation at a
    # boundary whose line is 'tainted' is never relaxed.
    def RelaxBoundaryTruncation(self):
        # this should be called before adding unk-padding.
        assert self.start_unk_padding == self.end_unk_padding == 0.0
        if self.start_keep_proportion == self.end_keep_proportion == 1.0:
            return  # nothing to relax, there was no truncation.
        length_cutoff = max(args.min_new_segment_length, args.min_segment_length)
        length_with_truncation = self.Length()
        if length_with_truncation >= length_cutoff:
            return  # the truncated segment is already long enough.
        orig_start_keep_proportion = self.start_keep_proportion
        orig_end_keep_proportion = self.end_keep_proportion
        if not IsTainted(self.split_lines_of_utt[self.start_index]):
            self.start_keep_proportion = 1.0
        if not IsTainted(self.split_lines_of_utt[self.end_index - 1]):
            self.end_keep_proportion = 1.0
        length_with_relaxed_boundaries = self.Length()
        if length_with_relaxed_boundaries <= length_cutoff:
            # Fully relaxing the truncation still doesn't make the segment
            # exceed the cutoff; keep the relaxed boundaries.  (The segment
            # may still be removed later on.)
            return
        # Compute an interpolation constant a such that
        # a * [truncated keep-proportions] + (1-a) * [fully-relaxed values]
        # gives a segment of exactly length_cutoff.
        a = (length_cutoff - length_with_relaxed_boundaries) / \
            (length_with_truncation - length_with_relaxed_boundaries)
        if a < 0.0 or a > 1.0:
            print("segment_ctm_edits.py: bad 'a' value = {0}".format(a), file = sys.stderr)
            return
        self.start_keep_proportion = \
            a * orig_start_keep_proportion + (1-a) * self.start_keep_proportion
        self.end_keep_proportion = \
            a * orig_end_keep_proportion + (1-a) * self.end_keep_proportion
        if not abs(self.Length() - length_cutoff) < 0.01:
            print("segment_ctm_edits.py: possible problem relaxing boundary "
                  "truncation, length is {0} vs {1}".format(self.Length(), length_cutoff),
                  file = sys.stderr)

    # Stage 5: adds unk-padding (extra audio covered by the <unk> symbol)
    # at boundaries whose line is a correctly-decoded scored word, per
    # --unk-padding, limited by the start/end of the original utterance.
    def PossiblyAddUnkPadding(self):
        for b in [True, False]:
            if b:
                this_index = self.start_index
            else:
                this_index = self.end_index - 1
            this_split_line = self.split_lines_of_utt[this_index]
            this_start_time = float(this_split_line[2])
            this_ref_word = this_split_line[6]
            this_edit = this_split_line[7]
            if this_edit == 'cor' and not this_ref_word in non_scored_words:
                # this boundary line is a real scored word, so
                # we can consider adding unk-padding.
                if b: # start of utterance.
                    unk_padding = args.unk_padding
                    if unk_padding > this_start_time: # close to beginning of file
                        unk_padding = this_start_time
                    # If we could add less than half of the specified
                    # unk-padding, don't add any (because when we add
                    # unk-padding we add the unknown-word symbol '<unk>',
                    # and if there isn't enough space to traverse the HMM
                    # we don't want to do it at all).
                    if unk_padding < 0.5 * args.unk_padding:
                        unk_padding = 0.0
                    self.start_unk_padding = unk_padding
                else: # end of utterance.
                    this_end_time = this_start_time + float(this_split_line[3])
                    last_line = self.split_lines_of_utt[-1]
                    utterance_end_time = float(last_line[2]) + float(last_line[3])
                    max_allowable_padding = utterance_end_time - this_end_time
                    assert max_allowable_padding > -0.01
                    unk_padding = args.unk_padding
                    if unk_padding > max_allowable_padding:
                        unk_padding = max_allowable_padding
                    # As above: if we could add less than half of the
                    # specified unk-padding, don't add any.
                    if unk_padding < 0.5 * args.unk_padding:
                        unk_padding = 0.0
                    self.end_unk_padding = unk_padding

    # Merges the segment 'other' into 'self'.  Only to be called when both
    # come from the same utterance, 'other' follows 'self' in time order
    # (based on the original segment cores), and
    # self.EndTime() >= other.StartTime().  Reference words deleted
    # between the two segments are kept in the transcript only if there
    # are no more than --max-deleted-words-kept-when-merging of them.
    def MergeWithSegment(self, other):
        assert self.EndTime() >= other.StartTime() and \
               self.StartTime() < other.EndTime() and \
               self.split_lines_of_utt is other.split_lines_of_utt
        orig_self_end_index = self.end_index
        self.debug_str = "({0}/merged-with/{1})".format(self.debug_str, other.debug_str)
        # everything that relates to the end of this segment gets copied
        # from 'other'.
        self.end_index = other.end_index
        self.end_unk_padding = other.end_unk_padding
        self.end_keep_proportion = other.end_keep_proportion
        # Next, decide what to do with the ctm lines that appear between
        # 'self' and 'other', or are shared between both (the latter can
        # only happen for tainted silence or non-scored-word lines): count
        # the deleted words, and if there are too many, mark them so that
        # Text() will omit them.
        first_index_of_overlap = min(orig_self_end_index - 1, other.start_index)
        last_index_of_overlap = max(orig_self_end_index - 1, other.start_index)
        num_deleted_words = 0
        for i in range(first_index_of_overlap, last_index_of_overlap + 1):
            edit_type = self.split_lines_of_utt[i][7]
            if edit_type == 'del':
                num_deleted_words += 1
        if num_deleted_words > args.max_deleted_words_kept_when_merging:
            for i in range(first_index_of_overlap, last_index_of_overlap + 1):
                if self.split_lines_of_utt[i][7] == 'del':
                    self.split_lines_of_utt[i].append('do-not-include-in-text')

    # Returns the start time of the segment in seconds, accounting for
    # unk-padding and partial truncation of the first line.
    def StartTime(self):
        first_line = self.split_lines_of_utt[self.start_index]
        first_line_start = float(first_line[2])
        first_line_duration = float(first_line[3])
        first_line_end = first_line_start + first_line_duration
        return first_line_end - self.start_unk_padding \
               - (first_line_duration * self.start_keep_proportion)

    # Returns a human-readable summary of this segment's state, used in
    # the --ctm-edits-out debug output.
    def DebugInfo(self):
        return 'start=%d,end=%d,unk-padding=%.2f,%.2f,keep-proportion=%.2f,%.2f,' % \
               (self.start_index, self.end_index, self.start_unk_padding,
                self.end_unk_padding, self.start_keep_proportion, self.end_keep_proportion) + \
               self.debug_str

    # Returns the end time of the segment in seconds, accounting for
    # unk-padding and partial truncation of the last line.
    def EndTime(self):
        last_line = self.split_lines_of_utt[self.end_index - 1]
        last_line_start = float(last_line[2])
        last_line_duration = float(last_line[3])
        return last_line_start + (last_line_duration * self.end_keep_proportion) \
               + self.end_unk_padding

    # Returns the duration of the segment in seconds.
    def Length(self):
        return self.EndTime() - self.StartTime()

    # True if this segment covers the entire original utterance (to within
    # 1 ms at each end).
    def IsWholeUtterance(self):
        last_line_of_utt = self.split_lines_of_utt[-1]
        last_line_end_time = float(last_line_of_utt[2]) + float(last_line_of_utt[3])
        return abs(self.StartTime() - 0.0) < 0.001 and \
               abs(self.EndTime() - last_line_end_time) < 0.001

    # Returns the proportion of the duration of this segment that consists
    # of unk-padding and tainted lines of input (will be between 0.0 and 1.0).
    def JunkProportion(self):
        # Note: only the first and last lines could possibly be tainted as
        # that's how the segments were created.
        junk_duration = self.start_unk_padding + self.end_unk_padding
        first_split_line = self.split_lines_of_utt[self.start_index]
        if IsTainted(first_split_line):
            first_duration = float(first_split_line[3])
            junk_duration += first_duration * self.start_keep_proportion
        last_split_line = self.split_lines_of_utt[self.end_index - 1]
        if IsTainted(last_split_line):
            last_duration = float(last_split_line[3])
            junk_duration += last_duration * self.end_keep_proportion
        return junk_duration / self.Length()

    # Possibly truncates the start of the segment if the leading junk
    # (unk-padding and/or a tainted first line), as a proportion of the
    # piece that would be removed, reaches --max-junk-proportion.  The
    # candidate split point is the first internal silence line or
    # non-scored-word line.  See also PossiblyTruncateEndForJunkProportion.
    def PossiblyTruncateStartForJunkProportion(self):
        begin_junk_duration = self.start_unk_padding
        first_split_line = self.split_lines_of_utt[self.start_index]
        if IsTainted(first_split_line):
            first_duration = float(first_split_line[3])
            begin_junk_duration += first_duration * self.start_keep_proportion
        if begin_junk_duration == 0.0:
            # nothing to do.
            return
        candidate_start_index = None
        # the following iterates over all lines internal to the utterance.
        for i in range(self.start_index + 1, self.end_index - 1):
            this_split_line = self.split_lines_of_utt[i]
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            # We'll consider splitting on silence and on non-scored words.
            if this_edit_type == 'sil' or \
               (this_edit_type == 'cor' and this_ref_word in non_scored_words):
                candidate_start_index = i
                candidate_start_time = float(this_split_line[2])
                break  # Consider only the earliest potential truncation.
        if candidate_start_index == None:
            return  # Nothing to do as there is no place to split.
        candidate_removed_piece_duration = candidate_start_time - self.StartTime()
        if begin_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
            return  # Nothing to do as the candidate piece to remove has
                    # too little junk.
        # OK, remove the piece.
        self.start_index = candidate_start_index
        self.start_unk_padding = 0.0
        self.start_keep_proportion = 1.0
        self.debug_str += ',truncated-start-for-junk'

    # Mirror image of PossiblyTruncateStartForJunkProportion(): possibly
    # truncates the end of the segment if the trailing junk reaches
    # --max-junk-proportion of the piece that would be removed.
    def PossiblyTruncateEndForJunkProportion(self):
        end_junk_duration = self.end_unk_padding
        last_split_line = self.split_lines_of_utt[self.end_index - 1]
        if IsTainted(last_split_line):
            last_duration = float(last_split_line[3])
            end_junk_duration += last_duration * self.end_keep_proportion
        if end_junk_duration == 0.0:
            # nothing to do.
            return
        candidate_end_index = None
        for i in reversed(range(self.start_index + 1, self.end_index - 1)):
            this_split_line = self.split_lines_of_utt[i]
            this_edit_type = this_split_line[7]
            this_ref_word = this_split_line[6]
            # We'll consider splitting on silence and on non-scored words
            # (i.e. making the silence or non-scored word the right
            # boundary of the new utterance and discarding the piece to
            # the right of that).
            if this_edit_type == 'sil' or \
               (this_edit_type == 'cor' and this_ref_word in non_scored_words):
                candidate_end_index = i + 1  # note: end-indexes are one past the last.
                candidate_end_time = float(this_split_line[2]) + float(this_split_line[3])
                break  # Consider only the latest potential truncation.
        if candidate_end_index == None:
            return  # Nothing to do as there is no place to split.
        candidate_removed_piece_duration = self.EndTime() - candidate_end_time
        if end_junk_duration / candidate_removed_piece_duration < args.max_junk_proportion:
            return  # Nothing to do as the candidate piece to remove has too
                    # little junk.
        # OK, remove the piece.
        self.end_index = candidate_end_index
        self.end_unk_padding = 0.0
        self.end_keep_proportion = 1.0
        self.debug_str += ',truncated-end-for-junk'

    # Returns true if there is at least one word in the segment that is a
    # scored word (not a non-scored word) and correctly decoded (so not an
    # OOV word realized as unk).  This becomes a filter on keeping segments.
    def ContainsAtLeastOneScoredNonOovWord(self):
        global non_scored_words
        for i in range(self.start_index, self.end_index):
            this_split_line = self.split_lines_of_utt[i]
            this_hyp_word = this_split_line[4]
            this_ref_word = this_split_line[6]
            this_edit = this_split_line[7]
            if this_edit == 'cor' and not this_ref_word in non_scored_words \
               and this_ref_word == this_hyp_word:
                return True
        return False

    # Returns the transcript of this segment as a string, including any
    # <unk> introduced by unk-padding and excluding <eps> lines and words
    # marked 'do-not-include-in-text' by MergeWithSegment().
    def Text(self):
        global oov_symbol
        text_array = []
        if self.start_unk_padding != 0.0:
            text_array.append(oov_symbol)
        for i in range(self.start_index, self.end_index):
            this_split_line = self.split_lines_of_utt[i]
            this_ref_word = this_split_line[6]
            if this_ref_word != '<eps>' and this_split_line[-1] != 'do-not-include-in-text':
                text_array.append(this_ref_word)
        if self.end_unk_padding != 0.0:
            text_array.append(oov_symbol)
        return ' '.join(text_array)
# Here, 'text' will be something that indicates the stage of processing,
# e.g. 'Stage 0: segment cores', 'Stage 1: add tainted lines',
#, etc.
def AccumulateSegmentStats(segment_list, text):
    """Fold one utterance's segments into the global per-stage counters;
    'text' names the processing stage (e.g. 'stage 0 [segment cores]')."""
    global segment_total_length, num_segments
    # Guard against the empty list so that no defaultdict key is created
    # for a stage that contributed nothing for this utterance.
    if segment_list:
        num_segments[text] += len(segment_list)
        segment_total_length[text] += sum(seg.Length() for seg in segment_list)
def PrintSegmentStats():
    """Print accumulated diagnostics to stderr: overall utterance counts,
    then for each processing stage the number of segments kept and the
    retained proportion of the original audio (plus the change relative
    to the previous stage)."""
    global segment_total_length, num_segments, \
           num_utterances, num_utterances_without_segments, \
           total_length_of_utterances

    print('Number of utterances is %d, of which %.2f%% had no segments after '
          'all processing; total length of data in original utterances (in seconds) '
          'was %d' % (num_utterances,
                      num_utterances_without_segments * 100.0 / num_utterances,
                      total_length_of_utterances),
          file = sys.stderr)

    # Stage names sort in processing order because they start with
    # 'stage 0', 'stage 1', etc.
    keys = sorted(segment_total_length.keys())
    for i in range(len(keys)):
        key = keys[i]
        if i > 0:
            # Change in retained proportion relative to the previous stage;
            # only defined (and only printed) when there is a previous stage.
            delta_percentage = '[%+.2f%%]' % ((segment_total_length[key] - segment_total_length[keys[i-1]])
                                              * 100.0 / total_length_of_utterances)
        print('At %s, num-segments is %d, total length %.2f%% of original total %s' % (
                key, num_segments[key],
                segment_total_length[key] * 100.0 / total_length_of_utterances,
                delta_percentage if i > 0 else ''),
              file = sys.stderr)
# This function creates the segments for an utterance as a list
# of class Segment.
# It returns a 2-tuple (list-of-segments, list-of-deleted-segments)
# where the deleted segments are only useful for diagnostic printing.
# Note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
def GetSegmentsForUtterance(split_lines_of_utt):
    """Create the segments for one utterance.

    Runs the full multi-stage segmentation pipeline (core computation,
    tainted-line extension, splitting, boundary truncation/relaxation,
    unk-padding, several filters, junk truncation, and merging), calling
    AccumulateSegmentStats after each stage for diagnostics.

    Returns a 2-tuple (list-of-segments, list-of-deleted-segments); the
    deleted segments are only useful for diagnostic printing.
    """
    global num_utterances, num_utterances_without_segments, total_length_of_utterances

    num_utterances += 1

    segment_ranges = ComputeSegmentCores(split_lines_of_utt)

    # End time of the utterance = start + duration of its last ctm line.
    utterance_end_time = float(split_lines_of_utt[-1][2]) + float(split_lines_of_utt[-1][3])
    total_length_of_utterances += utterance_end_time

    segments = [ Segment(split_lines_of_utt, x[0], x[1])
                 for x in segment_ranges ]

    AccumulateSegmentStats(segments, 'stage 0 [segment cores]')
    for segment in segments:
        segment.PossiblyAddTaintedLines()
    AccumulateSegmentStats(segments, 'stage 1 [add tainted lines]')
    new_segments = []
    for s in segments:
        new_segments += s.PossiblySplitSegment()
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 2 [split segments]')
    for s in segments:
        s.PossiblyTruncateBoundaries()
    AccumulateSegmentStats(segments, 'stage 3 [truncate boundaries]')
    for s in segments:
        s.RelaxBoundaryTruncation()
    AccumulateSegmentStats(segments, 'stage 4 [relax boundary truncation]')
    for s in segments:
        s.PossiblyAddUnkPadding()
    AccumulateSegmentStats(segments, 'stage 5 [unk-padding]')

    # Filter out newly created (non-whole-utterance) segments that are
    # shorter than --min-new-segment-length.
    deleted_segments = []
    new_segments = []
    for s in segments:
        # the 0.999 allows for roundoff error.
        if (not s.IsWholeUtterance() and s.Length() < 0.999 * args.min_new_segment_length):
            s.debug_str += '[deleted-because-of--min-new-segment-length]'
            deleted_segments.append(s)
        else:
            new_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 6 [remove new segments under --min-new-segment-length')
    # Filter out any segment shorter than --min-segment-length.
    new_segments = []
    for s in segments:
        # the 0.999 allows for roundoff error.
        if s.Length() < 0.999 * args.min_segment_length:
            s.debug_str += '[deleted-because-of--min-segment-length]'
            deleted_segments.append(s)
        else:
            new_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 7 [remove segments under --min-segment-length')

    for s in segments:
        s.PossiblyTruncateStartForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 8 [truncate segment-starts for --max-junk-proportion')
    for s in segments:
        s.PossiblyTruncateEndForJunkProportion()
    AccumulateSegmentStats(segments, 'stage 9 [truncate segment-ends for --max-junk-proportion')

    # Keep only segments containing at least one scored, non-OOV word.
    new_segments = []
    for s in segments:
        if s.ContainsAtLeastOneScoredNonOovWord():
            new_segments.append(s)
        else:
            s.debug_str += '[deleted-because-no-scored-non-oov-words]'
            deleted_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 10 [remove segments without scored,non-OOV words]')

    # Remove segments whose overall junk proportion is still too high.
    new_segments = []
    for s in segments:
        j = s.JunkProportion()
        if j <= args.max_junk_proportion:
            new_segments.append(s)
        else:
            s.debug_str += '[deleted-because-junk-proportion={0}]'.format(j)
            deleted_segments.append(s)
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 11 [remove segments with junk exceeding --max-junk-proportion]')

    # Merge segments that overlap or touch in time (segments are in
    # time order at this point).
    new_segments = []
    if len(segments) > 0:
        new_segments.append(segments[0])
        for i in range(1, len(segments)):
            if new_segments[-1].EndTime() >= segments[i].StartTime():
                new_segments[-1].MergeWithSegment(segments[i])
            else:
                new_segments.append(segments[i])
    segments = new_segments
    AccumulateSegmentStats(segments, 'stage 12 [merge overlapping or touching segments]')

    for i in range(len(segments) - 1):
        if segments[i].EndTime() > segments[i+1].StartTime():
            # this just adds something to --ctm-edits-out output
            segments[i+1].debug_str += ",overlaps-previous-segment"

    if len(segments) == 0:
        num_utterances_without_segments += 1

    return (segments, deleted_segments)
# this prints a number with a certain number of digits after
# the point, while removing trailing zeros.
def FloatToString(f):
    """Format 'f' keeping roughly six digits after the decimal point,
    using '%g' so trailing zeros are stripped.  The number of significant
    digits grows with the magnitude of the integer part so large values
    do not lose precision."""
    sig_digits = 6  # digits we want after the decimal point
    magnitude = abs(f)
    # Add one significant digit per decimal digit before the point.
    while magnitude > 1.0:
        magnitude *= 0.1
        sig_digits += 1
    return ('%.{0}g'.format(sig_digits)) % f
# Gives time in string form as an exact multiple of the frame-length, e.g. 0.01
# (after rounding).
def TimeToString(time, frame_length):
    """Return 'time' as a string snapped to an exact multiple of
    'frame_length' (after rounding), e.g. 0.01."""
    num_frames = round(time / frame_length)
    assert num_frames >= 0
    # FloatToString strips the trailing zeros while printing, so that
    # e.g. 0.01 is printed as 0.01 and not 0.0099999999999999.  Doing
    # this in a simple way is not really possible without assuming that
    # frame_length is of the form 10^-n, which we don't want to do.
    return FloatToString(num_frames * frame_length)
def WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
                              old_utterance_name, segments):
    """Write one line per segment to both outputs.

    The text output gets '<new-utt-id> <transcript>' and the segments
    output gets '<new-utt-id> <old-utt-id> <start-time> <end-time>'.
    New utterance ids are formed by appending '-1', '-2', ... to the
    original utterance name.
    """
    for n in range(len(segments)):
        segment = segments[n]
        new_utterance_name = old_utterance_name + "-" + str(n + 1)
        print(new_utterance_name, segment.Text(), file = text_output_handle)
        # Times are snapped to multiples of the frame length so downstream
        # tools see clean values.
        print(new_utterance_name, old_utterance_name,
              TimeToString(segment.StartTime(), args.frame_length),
              TimeToString(segment.EndTime(), args.frame_length),
              file = segments_output_handle)
def PrintDebugInfoForUtterance(ctm_edits_out_handle,
                               split_lines_of_cur_utterance,
                               segments_for_utterance,
                               deleted_segments_for_utterance):
    """Write an annotated version of the utterance's ctm-edits lines to
    the --ctm-edits-out file: each line gets an index appended to its
    utterance-id, and segment start/end markers (with debug info) are
    attached to the line within whose time span they fall.

    Note: this mutates split_lines_of_cur_utterance in place (the
    utterance-id field of each line gets an index suffix).
    """
    # info_to_print will be list of 2-tuples (time, 'start-segment-n'|'end-segment-n')
    # representing the start or end times of segments.
    info_to_print = []
    for n in range(len(segments_for_utterance)):
        segment = segments_for_utterance[n]
        start_string = 'start-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
        info_to_print.append( (segment.StartTime(), start_string) )
        end_string = 'end-segment-' + str(n+1)
        info_to_print.append( (segment.EndTime(), end_string) )
    # for segments that were deleted we print info like start-deleted-segment-1, and
    # otherwise similar info to segments that were retained.
    for n in range(len(deleted_segments_for_utterance)):
        segment = deleted_segments_for_utterance[n]
        start_string = 'start-deleted-segment-' + str(n+1) + '[' + segment.DebugInfo() + ']'
        info_to_print.append( (segment.StartTime(), start_string) )
        end_string = 'end-deleted-segment-' + str(n+1)
        info_to_print.append( (segment.EndTime(), end_string) )

    # Sort markers by time so they can be consumed in order below.
    info_to_print = sorted(info_to_print)

    for i in range(len(split_lines_of_cur_utterance)):
        split_line=split_lines_of_cur_utterance[i]
        split_line[0] += '[' + str(i) + ']' # add an index like [0], [1], to
                                            # the utterance-id so we can easily
                                            # look up segment indexes.
        start_time = float(split_line[2])
        end_time = start_time + float(split_line[3])
        split_line_copy = list(split_line)
        # Attach every pending marker whose time falls at or before the end
        # of this line.
        while len(info_to_print) > 0 and info_to_print[0][0] <= end_time:
            (segment_start, string) = info_to_print[0]
            # shift the first element off of info_to_print.
            info_to_print = info_to_print[1:]
            # add a field like 'start-segment1[...]=3.21' to what we're about to print.
            split_line_copy.append(string + "=" + TimeToString(segment_start, args.frame_length))
        print(' '.join(split_line_copy), file = ctm_edits_out_handle)
def AccWordStatsForUtterance(split_lines_of_utt,
                             segments_for_utterance):
    """Update the global word statistics for one utterance: for every
    non-epsilon reference word, count one occurrence, and additionally
    count it as removed when its ctm-edits line ended up outside all
    kept segments."""
    global word_count_pair
    # Indices of all ctm-edits lines that fall inside a kept segment.
    kept_indices = set()
    for seg in segments_for_utterance:
        kept_indices.update(range(seg.start_index, seg.end_index))
    for index, split_line in enumerate(split_lines_of_utt):
        ref_word = split_line[6]
        if ref_word == '<eps>':
            continue
        pair = word_count_pair[ref_word]
        pair[0] += 1  # total occurrences
        if index not in kept_indices:
            pair[1] += 1  # occurrences that were excluded
def PrintWordStats(word_stats_out):
    """Write per-word exclusion statistics to 'word_stats_out'.

    Each output line has the format:
        <word> <proportion-of-time-excluded> <total-count>
    ordered so that the most problematic words (frequently excluded,
    weighted by frequency) come first.  Exits with an error message if
    the file cannot be written.
    """
    try:
        f = open(word_stats_out, 'w')
    # 'except Exception' (not a bare 'except:') so KeyboardInterrupt and
    # SystemExit still propagate; include the OS error detail in the message.
    except Exception as e:
        sys.exit("segment_ctm_edits.py: error opening word-stats file --word-stats-out={0} "
                 "for writing: {1}".format(word_stats_out, e))
    global word_count_pair
    # Sort key: badness^3 * total_count = pair[1]^3 / pair[0]^2, descending.
    for key, pair in sorted(word_count_pair.items(),
                            key = lambda item: (item[1][1] ** 3) * 1.0 / (item[1][0] ** 2),
                            reverse = True):
        badness = pair[1] * 1.0 / pair[0]  # proportion of occurrences excluded
        total_count = pair[0]
        print(key, badness, total_count, file = f)
    try:
        f.close()
    except Exception as e:
        sys.exit("segment_ctm_edits.py: error closing file --word-stats-out={0} "
                 "(full disk?): {1}".format(word_stats_out, e))
    print("segment_ctm_edits.py: please see the file {0} for word-level statistics "
          "saying how frequently each word was excluded for a segment; format is "
          "<word> <proportion-of-time-excluded> <total-count>. Particularly "
          "problematic words appear near the top of the file.".format(word_stats_out),
          file = sys.stderr)
def ProcessData():
    """Main processing loop.

    Reads the ctm-edits input one line at a time, groups consecutive
    lines by utterance-id, and for each utterance computes its segments
    and writes the text, segments and (optionally) annotated ctm-edits
    outputs.  Exits with an error message on any I/O problem.
    """
    try:
        f_in = open(args.ctm_edits_in)
    # 'except Exception' rather than bare 'except:' so KeyboardInterrupt
    # and SystemExit are not swallowed (same for the handlers below).
    except Exception:
        sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
                 "file {0}".format(args.ctm_edits_in))
    try:
        text_output_handle = open(args.text_out, 'w')
    except Exception:
        sys.exit("modify_ctm_edits.py: error opening text output "
                 "file {0}".format(args.text_out))
    try:
        segments_output_handle = open(args.segments_out, 'w')
    except Exception:
        # Bug fix: the error message previously reported args.text_out here.
        sys.exit("modify_ctm_edits.py: error opening segments output "
                 "file {0}".format(args.segments_out))
    if args.ctm_edits_out != None:
        try:
            ctm_edits_output_handle = open(args.ctm_edits_out, 'w')
        except Exception:
            sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
                     "file {0}".format(args.ctm_edits_out))

    # 'split_pending_line' always holds the split fields of the next not-yet-
    # consumed input line (empty list == end of input); lines for the current
    # utterance are accumulated in split_lines_of_cur_utterance and flushed
    # whenever the utterance-id changes or the input ends.
    first_line = f_in.readline()
    if first_line == '':
        sys.exit("modify_ctm_edits.py: empty input")
    split_pending_line = first_line.split()
    if len(split_pending_line) == 0:
        sys.exit("modify_ctm_edits.py: bad input line " + first_line)
    cur_utterance = split_pending_line[0]
    split_lines_of_cur_utterance = []

    while True:
        if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
            # Flush the completed utterance.
            (segments_for_utterance,
             deleted_segments_for_utterance) = GetSegmentsForUtterance(split_lines_of_cur_utterance)
            AccWordStatsForUtterance(split_lines_of_cur_utterance, segments_for_utterance)
            WriteSegmentsForUtterance(text_output_handle, segments_output_handle,
                                      cur_utterance, segments_for_utterance)
            if args.ctm_edits_out != None:
                PrintDebugInfoForUtterance(ctm_edits_output_handle,
                                           split_lines_of_cur_utterance,
                                           segments_for_utterance,
                                           deleted_segments_for_utterance)
            split_lines_of_cur_utterance = []
            if len(split_pending_line) == 0:
                break
            else:
                cur_utterance = split_pending_line[0]

        split_lines_of_cur_utterance.append(split_pending_line)
        next_line = f_in.readline()
        split_pending_line = next_line.split()
        if len(split_pending_line) == 0:
            # An empty split is only legitimate at end-of-file.
            if next_line != '':
                sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
    try:
        text_output_handle.close()
        segments_output_handle.close()
        if args.ctm_edits_out != None:
            ctm_edits_output_handle.close()
    except Exception:
        sys.exit("modify_ctm_edits.py: error closing one or more outputs "
                 "(broken pipe or full disk?)")
def ReadNonScoredWords(non_scored_words_file):
    """Read the non-scored-words file (one word per line) into the
    global set 'non_scored_words'.  Exits with an error message if the
    file cannot be opened or a line does not contain exactly one word."""
    global non_scored_words
    try:
        f = open(non_scored_words_file)
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        sys.exit("modify_ctm_edits.py: error opening file: "
                 "--non-scored-words=" + non_scored_words_file)
    # Iterate the file directly instead of f.readlines(): no need to load
    # the whole file into memory, and split each line only once.
    for line in f:
        a = line.split()
        if len(a) != 1:
            sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
                     "file {0}: {1}".format(non_scored_words_file, line))
        non_scored_words.add(a[0])
    f.close()
# --- Top-level script flow ---

# Set of non-scored words (silence, noise tokens, etc.), read from
# --non-scored-words.
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)

# Read the OOV symbol (a single word on a single line) if supplied; it is
# required whenever --unk-padding is nonzero, since padding inserts it
# into the transcripts.
oov_symbol = None
if args.oov_symbol_file != None:
    try:
        with open(args.oov_symbol_file) as f:
            line = f.readline()
            assert len(line.split()) == 1
            oov_symbol = line.split()[0]
            assert f.readline() == ''
    except Exception as e:
        sys.exit("segment_ctm_edits.py: error reading file --oov-symbol-file=" +
                 args.oov_symbol_file + ", error is: " + str(e))
elif args.unk_padding != 0.0:
    sys.exit("segment_ctm_edits.py: if the --unk-padding option is nonzero (which "
             "it is by default, the --oov-symbol-file option must be supplied.")

# Global statistics accumulated while processing:
# per-stage totals (seconds) and segment counts...
segment_total_length = defaultdict(int)
num_segments = defaultdict(int)
# ...per-word [total-count, excluded-count] pairs...
word_count_pair = defaultdict(lambda: [0, 0])
# ...and overall utterance counters.
num_utterances = 0
num_utterances_without_segments = 0
total_length_of_utterances = 0

ProcessData()
PrintSegmentStats()
if args.word_stats_out != None:
    PrintWordStats(args.word_stats_out)
if args.ctm_edits_out != None:
    print("segment_ctm_edits.py: detailed utterance-level debug information "
          "is in " + args.ctm_edits_out, file = sys.stderr)
| true | true |
7901acf7eec972aa6135b6cc59d029e7919989aa | 12,518 | py | Python | train_semisup.py | danielvarga/vat_tf | 0b40b256922b7996558504a5d2c3556b5f9fff15 | [
"MIT"
] | null | null | null | train_semisup.py | danielvarga/vat_tf | 0b40b256922b7996558504a5d2c3556b5f9fff15 | [
"MIT"
] | null | null | null | train_semisup.py | danielvarga/vat_tf | 0b40b256922b7996558504a5d2c3556b5f9fff15 | [
"MIT"
] | null | null | null | import time
import numpy as np
import tensorflow as tf
import layers as L
import vat
FLAGS = tf.app.flags.FLAGS

# Hardware / dataset / logging configuration.
tf.app.flags.DEFINE_string('device', '/gpu:0', "device")
tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")

# Batch sizes and evaluation cadence.
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")

# Optimization schedule: Adam, with learning rate decayed linearly to zero
# (and momentum switched to mom2) after epoch_decay_start.
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial leanring rate")
tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate")
tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start")

# Training objective: supervised baseline, VAT, or VAT + entropy minimization.
tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}")


# Select the dataset-specific input pipelines at import time.
if FLAGS.dataset == 'cifar10':
    from cifar10 import inputs, unlabeled_inputs
elif FLAGS.dataset == 'svhn':
    from svhn import inputs, unlabeled_inputs
else:
    raise NotImplementedError

# Number of examples used for each evaluation pass.
NUM_EVAL_EXAMPLES = 5000
def build_training_graph(x, y, ul_x, ul_u, lr, mom):
    """Build the training graph.

    Args:
        x, y: labeled images and labels.
        ul_x: unlabeled images (placeholder).
        ul_u: current virtual-adversarial direction for ul_x (placeholder).
        lr, mom: scalar placeholders for learning rate and Adam beta1.

    Returns:
        (loss, train_op, global_step, ul_u_updated) where ul_u_updated is
        the refreshed adversarial direction to be written back by the
        caller.

    NOTE(review): with --method=baseline, ul_u_updated is never assigned
    and the return statement would raise NameError -- confirm whether the
    baseline path is still used.
    """
    global_step = tf.get_variable(
        name="global_step",
        shape=[],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0.0),
        trainable=False,
    )
    logit = vat.forward(x)
    nll_loss = L.ce_loss(logit, y)
    # Reuse the variables just created by the supervised forward pass for
    # the unlabeled forward passes (batch stats are not updated there).
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        if FLAGS.method == 'vat':
            ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
            vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit)
            additional_loss = vat_loss
        elif FLAGS.method == 'vatent':
            ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
            vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit)
            ent_loss = L.entropy_y_x(ul_logit)
            additional_loss = vat_loss + ent_loss
        elif FLAGS.method == 'baseline':
            additional_loss = 0
        else:
            raise NotImplementedError
    loss = nll_loss + additional_loss
    opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
    tvars = tf.trainable_variables()
    grads_and_vars = opt.compute_gradients(loss, tvars)
    train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
    return loss, train_op, global_step, ul_u_updated
def build_eval_graph(x, y, ul_x, ul_u):
    """Build the evaluation graph, reusing the already-created variables.

    Returns a dict of scalar loss tensors: 'NLL', 'Acc' (computed on the
    labeled inputs x, y) and 'VAT_loss' (computed on ul_x with direction
    ul_u).  All forward passes run with is_training=False and frozen
    batch statistics.

    NOTE(review): unlike in training, virtual_adversarial_loss is called
    here with is_training=False and its result is used as a single value,
    not a (loss, updated-direction) pair -- confirm in vat.py.
    """
    losses = {}
    logit = vat.forward(x, is_training=False, update_batch_stats=False)
    nll_loss = L.ce_loss(logit, y)
    losses['NLL'] = nll_loss
    acc = L.accuracy(logit, y)
    losses['Acc'] = acc
    scope = tf.get_variable_scope()
    scope.reuse_variables()
    # at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=False)
    # losses['AT_loss'] = at_loss
    ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
    vat_loss = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit, is_training=False)
    losses['VAT_loss'] = vat_loss
    return losses
def main(_):
    """Train the semi-supervised VAT model.

    Builds training and eval graphs, then runs the training loop: each
    step feeds a (currently truncated, fixed) batch of unlabeled images
    together with their current adversarial directions ul_u, and writes
    the updated directions back into ul_u_np so they persist across
    steps.  Periodically evaluates on train/test data and writes
    summaries/checkpoints via tf.train.Supervisor.
    """
    # NOTE(review): FLAGS.epsilon and FLAGS.top_bn are not defined in this
    # file -- presumably defined in vat.py; confirm.
    print(FLAGS.epsilon, FLAGS.top_bn)
    np.random.seed(seed=FLAGS.seed)
    tf.set_random_seed(np.random.randint(1234))
    with tf.Graph().as_default() as g:
        with tf.device("/cpu:0"):
            images, labels = inputs(batch_size=FLAGS.batch_size,
                                    train=True,
                                    validation=FLAGS.validation,
                                    shuffle=True)
            # Unlabeled batches are fed manually through this placeholder so
            # that the adversarial direction ul_u can be kept in sync with
            # them (the original queue-based pipeline is kept below,
            # commented out as a dead string).
            ul_images = tf.placeholder(shape=images.shape, dtype=tf.float32)
            '''unlabeled_inputs(batch_size=FLAGS.ul_batch_size,
                                           validation=FLAGS.validation,
                                           shuffle=True)'''

            images_eval_train, labels_eval_train = inputs(batch_size=FLAGS.eval_batch_size,
                                                          train=True,
                                                          validation=FLAGS.validation,
                                                          shuffle=True)
            ul_images_eval_train = unlabeled_inputs(batch_size=FLAGS.eval_batch_size,
                                                    validation=FLAGS.validation,
                                                    shuffle=True)

            images_eval_test, labels_eval_test = inputs(batch_size=FLAGS.eval_batch_size,
                                                        train=False,
                                                        validation=FLAGS.validation,
                                                        shuffle=True)

        def placeholder_like(x, name=None):
            # Placeholder with the same static shape as tensor x.
            return tf.placeholder(shape=x.shape, dtype=tf.float32, name=name)

        def random_sphere(shape):
            # TF op: per-example random unit vectors on the L2 sphere.
            n = tf.random_normal(shape=shape, dtype=tf.float32)
            n = tf.reshape(n, shape=(int(shape[0]), -1))
            n = tf.nn.l2_normalize(n, dim=1)
            n = tf.reshape(n, shape)
            return n

        def random_sphere_numpy(shape):
            # NumPy twin of random_sphere: per-example unit vectors.
            n = np.random.normal(size=shape)
            proj_shape = tuple([n.shape[0]] + [1 for _ in range(len(shape) - 1)])
            return n / np.linalg.norm(n.reshape((n.shape[0], -1)), axis=1).reshape(proj_shape)

        print(ul_images.shape)
        # ul_u = random_sphere(ul_images.shape)
        # ul_u_eval_train = random_sphere(ul_images_eval_train.shape)
        # ul_u_eval_test = random_sphere(images_eval_test.shape)
        ul_u = placeholder_like(ul_images, "ul_u")
        ul_u_eval_train = placeholder_like(ul_images_eval_train, "ul_u_eval_train")
        ul_u_eval_test = placeholder_like(images_eval_test, "ul_u_eval_test")

        with tf.device(FLAGS.device):
            lr = tf.placeholder(tf.float32, shape=[], name="learning_rate")
            mom = tf.placeholder(tf.float32, shape=[], name="momentum")
            with tf.variable_scope("CNN") as scope:
                # Build training graph
                loss, train_op, global_step, ul_u_updated = build_training_graph(
                    images, labels, ul_images, ul_u, lr, mom)
                scope.reuse_variables()
                # Build eval graph
                losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train, ul_u_eval_train)
                # NOTE(review): images_eval_test is passed as the unlabeled
                # input here (not ul_images_eval_train) -- see the warning
                # printed in the eval loop below.
                losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test, ul_u_eval_test)

        init_op = tf.global_variables_initializer()

        if not FLAGS.log_dir:
            logdir = None
            writer_train = None
            writer_test = None
        else:
            logdir = FLAGS.log_dir
            writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g)
            writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g)

        saver = tf.train.Saver(tf.global_variables())
        sv = tf.train.Supervisor(
            is_chief=True,
            logdir=logdir,
            init_op=init_op,
            init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1},
            saver=saver,
            global_step=global_step,
            summary_op=None,
            summary_writer=None,
            save_model_secs=150, recovery_wait_secs=0)

        # The unlabeled pool and its per-example adversarial directions live
        # in host memory as NumPy arrays; the pool is deliberately truncated
        # to a single batch here (debugging/experimental setting).
        ul_images_np = np.load("train_images.npy").reshape((-1, 32, 32, 3))
        print("TRUNCATING UL DATA")
        ul_images_np = ul_images_np[:FLAGS.batch_size]
        ul_u_np = random_sphere_numpy(ul_images_np.shape)
        print(ul_images_np.shape, ul_u_np.shape)

        print("Training...")
        with sv.managed_session() as sess:
            for ep in range(FLAGS.num_epochs):
                if sv.should_stop():
                    break

                # Linear learning-rate decay (and momentum switch to mom2)
                # after epoch_decay_start.
                if ep < FLAGS.epoch_decay_start:
                    feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1}
                else:
                    decayed_lr = ((FLAGS.num_epochs - ep) / float(
                        FLAGS.num_epochs - FLAGS.epoch_decay_start)) * FLAGS.learning_rate
                    feed_dict = {lr: decayed_lr, mom: FLAGS.mom2}

                sum_loss = 0
                start = time.time()
                for i in range(FLAGS.num_iter_per_epoch):
                    picked = range(FLAGS.batch_size) # np.random.choice(len(ul_images_np), size=FLAGS.batch_size, replace=False)
                    feed_dict[ul_images] = ul_images_np[picked]
                    feed_dict[ul_u] = ul_u_np[picked]
                    ul_u_updated_np, _, batch_loss, _ = sess.run([ul_u_updated, train_op, loss, global_step],
                                                                 feed_dict=feed_dict)
                    delta = ul_u_updated_np - ul_u_np[picked]
                    # print("pos", ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :4])
                    # print("delta", np.linalg.norm(delta.reshape((FLAGS.batch_size, -1)), axis=1)[:4])
                    print(np.linalg.norm(ul_u_updated_np - ul_u_np[picked]), ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :3])
                    # Persist the refreshed adversarial direction across steps.
                    ul_u_np[picked] = ul_u_updated_np
                    sum_loss += batch_loss
                end = time.time()
                print("Epoch:", ep, "CE_loss_train:", sum_loss / FLAGS.num_iter_per_epoch, "elapsed_time:", end - start)

                if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs:
                    # Eval on training data
                    # NOTE(review): dict.iteritems() is Python 2 only; the
                    # same random direction is reused for all eval batches.
                    act_values_dict = {}
                    feed_dict = {ul_u_eval_train: random_sphere_numpy(ul_u_eval_train.shape)}
                    for key, _ in losses_eval_train.iteritems():
                        act_values_dict[key] = 0
                    n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size
                    for i in range(n_iter_per_epoch):
                        values = losses_eval_train.values()
                        act_values = sess.run(values, feed_dict=feed_dict)
                        for key, value in zip(act_values_dict.keys(), act_values):
                            act_values_dict[key] += value
                    summary = tf.Summary()
                    current_global_step = sess.run(global_step)
                    for key, value in act_values_dict.iteritems():
                        print("train-" + key, value / n_iter_per_epoch)
                        summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)
                    if writer_train is not None:
                        writer_train.add_summary(summary, current_global_step)

                    # Eval on test data
                    act_values_dict = {}
                    print("HOW COME THIS DOES NOT DEPEND ON ul_images_eval_train? SOMETHING'S WRONG HERE.")
                    feed_dict = {ul_u_eval_test: random_sphere_numpy(ul_u_eval_test.shape)}
                    for key, _ in losses_eval_test.iteritems():
                        act_values_dict[key] = 0
                    n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size
                    for i in range(n_iter_per_epoch):
                        values = losses_eval_test.values()
                        act_values = sess.run(values, feed_dict=feed_dict)
                        for key, value in zip(act_values_dict.keys(), act_values):
                            act_values_dict[key] += value
                    summary = tf.Summary()
                    current_global_step = sess.run(global_step)
                    for key, value in act_values_dict.iteritems():
                        print("test-" + key, value / n_iter_per_epoch)
                        summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)
                    if writer_test is not None:
                        writer_test.add_summary(summary, current_global_step)

            saver.save(sess, sv.save_path, global_step=global_step)
        sv.stop()
# tf.app.run() parses the command-line flags and then invokes main().
if __name__ == "__main__":
    tf.app.run()
| 47.778626 | 132 | 0.582202 | import time
import numpy as np
import tensorflow as tf
import layers as L
import vat
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('device', '/gpu:0', "device")
tf.app.flags.DEFINE_string('dataset', 'cifar10', "{cifar10, svhn}")
tf.app.flags.DEFINE_string('log_dir', "", "log_dir")
tf.app.flags.DEFINE_integer('seed', 1, "initial random seed")
tf.app.flags.DEFINE_bool('validation', False, "")
tf.app.flags.DEFINE_integer('batch_size', 32, "the number of examples in a batch")
tf.app.flags.DEFINE_integer('ul_batch_size', 128, "the number of unlabeled examples in a batch")
tf.app.flags.DEFINE_integer('eval_batch_size', 100, "the number of eval examples in a batch")
tf.app.flags.DEFINE_integer('eval_freq', 5, "")
tf.app.flags.DEFINE_integer('num_epochs', 120, "the number of epochs for training")
tf.app.flags.DEFINE_integer('epoch_decay_start', 80, "epoch of starting learning rate decay")
tf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, "the number of updates per epoch")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "initial leanring rate")
tf.app.flags.DEFINE_float('mom1', 0.9, "initial momentum rate")
tf.app.flags.DEFINE_float('mom2', 0.5, "momentum rate after epoch_decay_start")
tf.app.flags.DEFINE_string('method', 'vat', "{vat, vatent, baseline}")
if FLAGS.dataset == 'cifar10':
from cifar10 import inputs, unlabeled_inputs
elif FLAGS.dataset == 'svhn':
from svhn import inputs, unlabeled_inputs
else:
raise NotImplementedError
NUM_EVAL_EXAMPLES = 5000
def build_training_graph(x, y, ul_x, ul_u, lr, mom):
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
trainable=False,
)
logit = vat.forward(x)
nll_loss = L.ce_loss(logit, y)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
if FLAGS.method == 'vat':
ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit)
additional_loss = vat_loss
elif FLAGS.method == 'vatent':
ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
vat_loss, ul_u_updated = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit)
ent_loss = L.entropy_y_x(ul_logit)
additional_loss = vat_loss + ent_loss
elif FLAGS.method == 'baseline':
additional_loss = 0
else:
raise NotImplementedError
loss = nll_loss + additional_loss
opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
tvars = tf.trainable_variables()
grads_and_vars = opt.compute_gradients(loss, tvars)
train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
return loss, train_op, global_step, ul_u_updated
def build_eval_graph(x, y, ul_x, ul_u):
losses = {}
logit = vat.forward(x, is_training=False, update_batch_stats=False)
nll_loss = L.ce_loss(logit, y)
losses['NLL'] = nll_loss
acc = L.accuracy(logit, y)
losses['Acc'] = acc
scope = tf.get_variable_scope()
scope.reuse_variables()
ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
vat_loss = vat.virtual_adversarial_loss(ul_x, ul_u, ul_logit, is_training=False)
losses['VAT_loss'] = vat_loss
return losses
def main(_):
print(FLAGS.epsilon, FLAGS.top_bn)
np.random.seed(seed=FLAGS.seed)
tf.set_random_seed(np.random.randint(1234))
with tf.Graph().as_default() as g:
with tf.device("/cpu:0"):
images, labels = inputs(batch_size=FLAGS.batch_size,
train=True,
validation=FLAGS.validation,
shuffle=True)
ul_images = tf.placeholder(shape=images.shape, dtype=tf.float32)
images_eval_train, labels_eval_train = inputs(batch_size=FLAGS.eval_batch_size,
train=True,
validation=FLAGS.validation,
shuffle=True)
ul_images_eval_train = unlabeled_inputs(batch_size=FLAGS.eval_batch_size,
validation=FLAGS.validation,
shuffle=True)
images_eval_test, labels_eval_test = inputs(batch_size=FLAGS.eval_batch_size,
train=False,
validation=FLAGS.validation,
shuffle=True)
def placeholder_like(x, name=None):
return tf.placeholder(shape=x.shape, dtype=tf.float32, name=name)
def random_sphere(shape):
n = tf.random_normal(shape=shape, dtype=tf.float32)
n = tf.reshape(n, shape=(int(shape[0]), -1))
n = tf.nn.l2_normalize(n, dim=1)
n = tf.reshape(n, shape)
return n
def random_sphere_numpy(shape):
n = np.random.normal(size=shape)
proj_shape = tuple([n.shape[0]] + [1 for _ in range(len(shape) - 1)])
return n / np.linalg.norm(n.reshape((n.shape[0], -1)), axis=1).reshape(proj_shape)
print(ul_images.shape)
ul_u = placeholder_like(ul_images, "ul_u")
ul_u_eval_train = placeholder_like(ul_images_eval_train, "ul_u_eval_train")
ul_u_eval_test = placeholder_like(images_eval_test, "ul_u_eval_test")
with tf.device(FLAGS.device):
lr = tf.placeholder(tf.float32, shape=[], name="learning_rate")
mom = tf.placeholder(tf.float32, shape=[], name="momentum")
with tf.variable_scope("CNN") as scope:
loss, train_op, global_step, ul_u_updated = build_training_graph(
images, labels, ul_images, ul_u, lr, mom)
scope.reuse_variables()
losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train, ul_u_eval_train)
losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test, ul_u_eval_test)
init_op = tf.global_variables_initializer()
if not FLAGS.log_dir:
logdir = None
writer_train = None
writer_test = None
else:
logdir = FLAGS.log_dir
writer_train = tf.summary.FileWriter(FLAGS.log_dir + "/train", g)
writer_test = tf.summary.FileWriter(FLAGS.log_dir + "/test", g)
saver = tf.train.Saver(tf.global_variables())
sv = tf.train.Supervisor(
is_chief=True,
logdir=logdir,
init_op=init_op,
init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1},
saver=saver,
global_step=global_step,
summary_op=None,
summary_writer=None,
save_model_secs=150, recovery_wait_secs=0)
ul_images_np = np.load("train_images.npy").reshape((-1, 32, 32, 3))
print("TRUNCATING UL DATA")
ul_images_np = ul_images_np[:FLAGS.batch_size]
ul_u_np = random_sphere_numpy(ul_images_np.shape)
print(ul_images_np.shape, ul_u_np.shape)
print("Training...")
with sv.managed_session() as sess:
for ep in range(FLAGS.num_epochs):
if sv.should_stop():
break
if ep < FLAGS.epoch_decay_start:
feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1}
else:
decayed_lr = ((FLAGS.num_epochs - ep) / float(
FLAGS.num_epochs - FLAGS.epoch_decay_start)) * FLAGS.learning_rate
feed_dict = {lr: decayed_lr, mom: FLAGS.mom2}
sum_loss = 0
start = time.time()
for i in range(FLAGS.num_iter_per_epoch):
picked = range(FLAGS.batch_size)
feed_dict[ul_images] = ul_images_np[picked]
feed_dict[ul_u] = ul_u_np[picked]
ul_u_updated_np, _, batch_loss, _ = sess.run([ul_u_updated, train_op, loss, global_step],
feed_dict=feed_dict)
delta = ul_u_updated_np - ul_u_np[picked]
print(np.linalg.norm(ul_u_updated_np - ul_u_np[picked]), ul_u_updated_np.reshape((FLAGS.batch_size, -1))[0, :3])
ul_u_np[picked] = ul_u_updated_np
sum_loss += batch_loss
end = time.time()
print("Epoch:", ep, "CE_loss_train:", sum_loss / FLAGS.num_iter_per_epoch, "elapsed_time:", end - start)
if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs:
act_values_dict = {}
feed_dict = {ul_u_eval_train: random_sphere_numpy(ul_u_eval_train.shape)}
for key, _ in losses_eval_train.iteritems():
act_values_dict[key] = 0
n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size
for i in range(n_iter_per_epoch):
values = losses_eval_train.values()
act_values = sess.run(values, feed_dict=feed_dict)
for key, value in zip(act_values_dict.keys(), act_values):
act_values_dict[key] += value
summary = tf.Summary()
current_global_step = sess.run(global_step)
for key, value in act_values_dict.iteritems():
print("train-" + key, value / n_iter_per_epoch)
summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)
if writer_train is not None:
writer_train.add_summary(summary, current_global_step)
act_values_dict = {}
print("HOW COME THIS DOES NOT DEPEND ON ul_images_eval_train? SOMETHING'S WRONG HERE.")
feed_dict = {ul_u_eval_test: random_sphere_numpy(ul_u_eval_test.shape)}
for key, _ in losses_eval_test.iteritems():
act_values_dict[key] = 0
n_iter_per_epoch = NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size
for i in range(n_iter_per_epoch):
values = losses_eval_test.values()
act_values = sess.run(values, feed_dict=feed_dict)
for key, value in zip(act_values_dict.keys(), act_values):
act_values_dict[key] += value
summary = tf.Summary()
current_global_step = sess.run(global_step)
for key, value in act_values_dict.iteritems():
print("test-" + key, value / n_iter_per_epoch)
summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)
if writer_test is not None:
writer_test.add_summary(summary, current_global_step)
saver.save(sess, sv.save_path, global_step=global_step)
sv.stop()
if __name__ == "__main__":
tf.app.run()
| true | true |
7901adc9c188a39b0cf7d9c63de2e761bf3b34c6 | 503 | py | Python | ongabot/handler/helpcommand.py | walkerjens/telegram.ongabot | 3c4edd8ba9815c087ed18b07f3f4bc9c90701d60 | [
"MIT"
] | null | null | null | ongabot/handler/helpcommand.py | walkerjens/telegram.ongabot | 3c4edd8ba9815c087ed18b07f3f4bc9c90701d60 | [
"MIT"
] | null | null | null | ongabot/handler/helpcommand.py | walkerjens/telegram.ongabot | 3c4edd8ba9815c087ed18b07f3f4bc9c90701d60 | [
"MIT"
] | null | null | null | """This module contains the HelpCommandHandler class."""
from telegram import Update
from telegram.ext import CommandHandler, CallbackContext
import utils.helper as helper
class HelpCommandHandler(CommandHandler):
    """python-telegram-bot handler that answers the /help command.

    Registers the module-level ``callback`` function for the "help"
    command so the bot replies with the generated help text.
    """
    def __init__(self):
        # ``callback`` is resolved from module globals at instantiation
        # time; it is defined below this class in the same module.
        CommandHandler.__init__(self, "help", callback)
def callback(update: Update, _: CallbackContext):
    """Reply to the /help (or /start) command with the generated help text.

    The second (context) argument is unused, hence the ``_`` name.
    """
    help_text = helper.create_help_text()
    update.message.reply_text(help_text)
| 27.944444 | 59 | 0.747515 | from telegram import Update
from telegram.ext import CommandHandler, CallbackContext
import utils.helper as helper
class HelpCommandHandler(CommandHandler):
def __init__(self):
CommandHandler.__init__(self, "help", callback)
def callback(update: Update, _: CallbackContext):
update.message.reply_text(helper.create_help_text())
| true | true |
7901adeebc4eddb9811775a1dd8834093c7ac65d | 2,057 | py | Python | examples/pylab_examples/mri_with_eeg.py | yuvallanger/matplotlib | e0020d318a9a9685594c6bff4631f74599321459 | [
"MIT",
"BSD-3-Clause"
] | 8 | 2017-04-11T08:55:30.000Z | 2022-03-25T04:31:26.000Z | examples/pylab_examples/mri_with_eeg.py | epgauss/matplotlib | c9898ea9a30c67c579ab27cd61b68e2abae0fb0e | [
"MIT",
"BSD-3-Clause"
] | null | null | null | examples/pylab_examples/mri_with_eeg.py | epgauss/matplotlib | c9898ea9a30c67c579ab27cd61b68e2abae0fb0e | [
"MIT",
"BSD-3-Clause"
] | 14 | 2015-10-05T04:15:46.000Z | 2020-06-11T18:06:02.000Z | #!/usr/bin/env python
"""
This now uses the imshow command instead of pcolor which *is much
faster*
"""
from __future__ import division, print_function
import numpy as np
from matplotlib.pyplot import *
from matplotlib.collections import LineCollection
import matplotlib.cbook as cbook
# I use if 1 to break up the different regions of code visually
if 1: # load the data
# data are 256x256 16 bit integers
dfile = cbook.get_sample_data('s1045.ima.gz')
im = np.fromstring(dfile.read(), np.uint16).astype(float)
im.shape = 256, 256
if 1: # plot the MRI in pcolor
subplot(221)
imshow(im, cmap=cm.gray)
axis('off')
if 1: # plot the histogram of MRI intensity
subplot(222)
im = np.ravel(im)
im = im[np.nonzero(im)] # ignore the background
im = im/(2.0**15) # normalize
hist(im, 100)
xticks([-1, -.5, 0, .5, 1])
yticks([])
xlabel('intensity')
ylabel('MRI density')
if 1: # plot the EEG
# load the data
numSamples, numRows = 800,4
eegfile = cbook.get_sample_data('eeg.dat', asfileobj=False)
print('loading eeg %s' % eegfile)
data = np.fromstring(open(eegfile, 'rb').read(), float)
data.shape = numSamples, numRows
t = 10.0 * np.arange(numSamples, dtype=float)/numSamples
ticklocs = []
ax = subplot(212)
xlim(0,10)
xticks(np.arange(10))
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7 # Crowd them a bit.
y0 = dmin
y1 = (numRows-1) * dr + dmax
ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
# set the yticks to use axes coords on the y axis
ax.set_yticks(ticklocs)
ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
xlabel('time (s)')
show()
| 26.037975 | 71 | 0.618376 |
from __future__ import division, print_function
import numpy as np
from matplotlib.pyplot import *
from matplotlib.collections import LineCollection
import matplotlib.cbook as cbook
if 1:
dfile = cbook.get_sample_data('s1045.ima.gz')
im = np.fromstring(dfile.read(), np.uint16).astype(float)
im.shape = 256, 256
if 1:
subplot(221)
imshow(im, cmap=cm.gray)
axis('off')
if 1:
subplot(222)
im = np.ravel(im)
im = im[np.nonzero(im)]
im = im/(2.0**15)
hist(im, 100)
xticks([-1, -.5, 0, .5, 1])
yticks([])
xlabel('intensity')
ylabel('MRI density')
if 1:
numSamples, numRows = 800,4
eegfile = cbook.get_sample_data('eeg.dat', asfileobj=False)
print('loading eeg %s' % eegfile)
data = np.fromstring(open(eegfile, 'rb').read(), float)
data.shape = numSamples, numRows
t = 10.0 * np.arange(numSamples, dtype=float)/numSamples
ticklocs = []
ax = subplot(212)
xlim(0,10)
xticks(np.arange(10))
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7
y0 = dmin
y1 = (numRows-1) * dr + dmax
ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
ax.set_yticks(ticklocs)
ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
xlabel('time (s)')
show()
| true | true |
7901b00068d35d764431ee575b195d337b0598bd | 840 | py | Python | pyNastran/gui/matplotlib_backend.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | pyNastran/gui/matplotlib_backend.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | pyNastran/gui/matplotlib_backend.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z | """
Selects a matplotlib backend so you can run without a GUI/tkinter. Supports:
- PyQt5
- PySide2
- WX
- Tkinter
"""
from pyNastran.gui import IS_DEV
if IS_DEV:
# there is no interactive backend when testing on TravisCI
matplotlib_backend = 'Agg'
else:
# fails if using the terminal and PyQt/PySide & qtpy are installed
# how do I check if there is a terminal vs just running in command line?
#
try:
from pyNastran.gui.qt_version import qt_int
matplotlib_backend = 'Qt%iAgg' % qt_int
except ImportError:
try:
# hasn't been tested on a machine without a backend...
# default matplotlib backend
import tkinter
matplotlib_backend = 'tkAgg'
except ImportError:
# no-gui backend
matplotlib_backend = 'Agg'
| 28.965517 | 77 | 0.642857 | from pyNastran.gui import IS_DEV
if IS_DEV:
matplotlib_backend = 'Agg'
else:
try:
from pyNastran.gui.qt_version import qt_int
matplotlib_backend = 'Qt%iAgg' % qt_int
except ImportError:
try:
# default matplotlib backend
import tkinter
matplotlib_backend = 'tkAgg'
except ImportError:
# no-gui backend
matplotlib_backend = 'Agg'
| true | true |
7901b022a8a252e56a5eb0648b1a4bcfdfff373f | 110 | py | Python | ocra/download.py | mzntaka0/ocra | 037afc508ac319efbcec99a72b9b3793cecf3fc9 | [
"Apache-2.0"
] | 4 | 2018-12-27T01:43:51.000Z | 2019-08-15T03:01:15.000Z | audy/mix/speaker.py | mzntaka0/audy | e347aac79ceb783df23a51e842672aaa7b1f7514 | [
"Apache-2.0"
] | 1 | 2019-09-09T08:46:18.000Z | 2019-09-09T08:46:18.000Z | audy/mix/speaker.py | mzntaka0/audy | e347aac79ceb783df23a51e842672aaa7b1f7514 | [
"Apache-2.0"
] | 1 | 2019-11-08T13:48:51.000Z | 2019-11-08T13:48:51.000Z | # -*- coding: utf-8 -*-
"""
"""
import argparse
import os
import sys
if __name__ == '__main__':
pass
| 7.857143 | 26 | 0.572727 |
import argparse
import os
import sys
if __name__ == '__main__':
pass
| true | true |
7901b0441f6d52e9d452fb15661e518e8db12f07 | 2,615 | py | Python | brainrender_gui/widgets/add_regions.py | brainglobe/bg-brainrender-gui | 4048f789fbdc1a5d4c5c652a4f37222446c8aa2f | [
"BSD-3-Clause"
] | 7 | 2020-07-09T10:27:38.000Z | 2020-10-13T13:16:20.000Z | brainrender_gui/widgets/add_regions.py | brainglobe/bg-brainrender-gui | 4048f789fbdc1a5d4c5c652a4f37222446c8aa2f | [
"BSD-3-Clause"
] | 12 | 2020-07-31T15:03:49.000Z | 2020-12-11T08:00:20.000Z | brainrender_gui/widgets/add_regions.py | brainglobe/bg-brainrender-gui | 4048f789fbdc1a5d4c5c652a4f37222446c8aa2f | [
"BSD-3-Clause"
] | null | null | null | from qtpy.QtWidgets import QDialog, QLineEdit, QPushButton, QLabel, QVBoxLayout
from brainrender_gui.style import style, update_css
class AddRegionsWindow(QDialog):
left = 250
top = 250
width = 400
height = 300
label_msg = (
"Write the acronyms of brainregions "
+ "you wish to add.\n[as 'space' separated strings (e.g.: STN TH)]"
)
def __init__(self, main_window, palette):
"""
Creates a new window for user to input
which regions to add to scene.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets
"""
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette))
def ui(self):
"""
Define UI's elements
"""
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
# Regions
label = QLabel(self)
label.setObjectName("PopupLabel")
label.setText(self.label_msg)
self.textbox = QLineEdit(self)
# Alpha
alpha_label = QLabel(self)
alpha_label.setObjectName("PopupLabel")
alpha_label.setText("Alpha")
self.alpha_textbox = QLineEdit(self)
self.alpha_textbox.setText(str(1.0))
# Color
color_label = QLabel(self)
color_label.setObjectName("PopupLabel")
color_label.setText("Color")
self.color_textbox = QLineEdit(self)
self.color_textbox.setText("atlas")
# Create a button in the window
self.button = QPushButton("Add regions", self)
self.button.clicked.connect(self.on_click)
self.button.setObjectName("RegionsButton")
layout.addWidget(label)
layout.addWidget(self.textbox)
layout.addWidget(alpha_label)
layout.addWidget(self.alpha_textbox)
layout.addWidget(color_label)
layout.addWidget(self.color_textbox)
layout.addWidget(self.button)
self.setLayout(layout)
self.show()
def on_click(self):
"""
On click or 'Enter' get the regions
from the input and call the add_regions
method of the main window
"""
regions = self.textbox.text().split(" ")
self.main_window.add_regions(
regions, self.alpha_textbox.text(), self.color_textbox.text()
)
self.close()
| 27.526316 | 79 | 0.602677 | from qtpy.QtWidgets import QDialog, QLineEdit, QPushButton, QLabel, QVBoxLayout
from brainrender_gui.style import style, update_css
class AddRegionsWindow(QDialog):
left = 250
top = 250
width = 400
height = 300
label_msg = (
"Write the acronyms of brainregions "
+ "you wish to add.\n[as 'space' separated strings (e.g.: STN TH)]"
)
def __init__(self, main_window, palette):
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette))
def ui(self):
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
label = QLabel(self)
label.setObjectName("PopupLabel")
label.setText(self.label_msg)
self.textbox = QLineEdit(self)
alpha_label = QLabel(self)
alpha_label.setObjectName("PopupLabel")
alpha_label.setText("Alpha")
self.alpha_textbox = QLineEdit(self)
self.alpha_textbox.setText(str(1.0))
color_label = QLabel(self)
color_label.setObjectName("PopupLabel")
color_label.setText("Color")
self.color_textbox = QLineEdit(self)
self.color_textbox.setText("atlas")
self.button = QPushButton("Add regions", self)
self.button.clicked.connect(self.on_click)
self.button.setObjectName("RegionsButton")
layout.addWidget(label)
layout.addWidget(self.textbox)
layout.addWidget(alpha_label)
layout.addWidget(self.alpha_textbox)
layout.addWidget(color_label)
layout.addWidget(self.color_textbox)
layout.addWidget(self.button)
self.setLayout(layout)
self.show()
def on_click(self):
regions = self.textbox.text().split(" ")
self.main_window.add_regions(
regions, self.alpha_textbox.text(), self.color_textbox.text()
)
self.close()
| true | true |
7901b06d39b2aefc93c03f56e1b5273f667d41c6 | 1,652 | py | Python | federation/protocols/activitypub/signing.py | weex/federation | 01357aacb04b076442ce5f803a0fc65df5a74d09 | [
"BSD-3-Clause"
] | 93 | 2016-11-26T10:52:13.000Z | 2022-01-15T20:07:35.000Z | federation/protocols/activitypub/signing.py | weex/federation | 01357aacb04b076442ce5f803a0fc65df5a74d09 | [
"BSD-3-Clause"
] | 75 | 2016-10-18T10:15:44.000Z | 2019-10-05T22:16:32.000Z | federation/protocols/activitypub/signing.py | weex/federation | 01357aacb04b076442ce5f803a0fc65df5a74d09 | [
"BSD-3-Clause"
] | 9 | 2017-04-08T08:03:45.000Z | 2021-09-13T22:00:48.000Z | """
Thank you Funkwhale for inspiration on the HTTP signatures parts <3
https://funkwhale.audio/
"""
import datetime
import logging
from typing import Union
import pytz
from Crypto.PublicKey.RSA import RsaKey
from requests_http_signature import HTTPSignatureHeaderAuth
from federation.types import RequestType
from federation.utils.network import parse_http_date
from federation.utils.text import encode_if_text
logger = logging.getLogger("federation")
def get_http_authentication(private_key: RsaKey, private_key_id: str) -> HTTPSignatureHeaderAuth:
"""
Get HTTP signature authentication for a request.
"""
key = private_key.exportKey()
return HTTPSignatureHeaderAuth(
headers=["(request-target)", "user-agent", "host", "date"],
algorithm="rsa-sha256",
key=key,
key_id=private_key_id,
)
def verify_request_signature(request: RequestType, public_key: Union[str, bytes]):
"""
Verify HTTP signature in request against a public key.
"""
key = encode_if_text(public_key)
date_header = request.headers.get("Date")
if not date_header:
raise ValueError("Rquest Date header is missing")
ts = parse_http_date(date_header)
dt = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
past_delta = datetime.timedelta(hours=24)
future_delta = datetime.timedelta(seconds=30)
now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
if dt < now - past_delta or dt > now + future_delta:
raise ValueError("Request Date is too far in future or past")
HTTPSignatureHeaderAuth.verify(request, key_resolver=lambda **kwargs: key)
| 31.769231 | 97 | 0.734867 | import datetime
import logging
from typing import Union
import pytz
from Crypto.PublicKey.RSA import RsaKey
from requests_http_signature import HTTPSignatureHeaderAuth
from federation.types import RequestType
from federation.utils.network import parse_http_date
from federation.utils.text import encode_if_text
logger = logging.getLogger("federation")
def get_http_authentication(private_key: RsaKey, private_key_id: str) -> HTTPSignatureHeaderAuth:
key = private_key.exportKey()
return HTTPSignatureHeaderAuth(
headers=["(request-target)", "user-agent", "host", "date"],
algorithm="rsa-sha256",
key=key,
key_id=private_key_id,
)
def verify_request_signature(request: RequestType, public_key: Union[str, bytes]):
key = encode_if_text(public_key)
date_header = request.headers.get("Date")
if not date_header:
raise ValueError("Rquest Date header is missing")
ts = parse_http_date(date_header)
dt = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
past_delta = datetime.timedelta(hours=24)
future_delta = datetime.timedelta(seconds=30)
now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
if dt < now - past_delta or dt > now + future_delta:
raise ValueError("Request Date is too far in future or past")
HTTPSignatureHeaderAuth.verify(request, key_resolver=lambda **kwargs: key)
| true | true |
7901b4a1191898ebebf5d5a2dddf1341b901ae6e | 4,005 | py | Python | dhis2_core/src/dhis2/code_list/svcm.py | dhis2/dhis2-python-cli | d5ec976a5c04e6897756e3be14924ec74a4456fd | [
"BSD-3-Clause"
] | 7 | 2020-10-15T08:54:50.000Z | 2021-12-19T14:37:49.000Z | dhis2_core/src/dhis2/code_list/svcm.py | dhis2/dhis2-python-cli | d5ec976a5c04e6897756e3be14924ec74a4456fd | [
"BSD-3-Clause"
] | 3 | 2016-08-12T14:11:14.000Z | 2021-03-08T17:06:29.000Z | dhis2_core/src/dhis2/code_list/svcm.py | dhis2/dhis2-python-cli | d5ec976a5c04e6897756e3be14924ec74a4456fd | [
"BSD-3-Clause"
] | 4 | 2016-02-10T23:03:08.000Z | 2020-12-28T13:18:49.000Z | import json
import logging
import sys
from typing import Any, Callable, Dict, List
from dhis2.core.http import BaseHttpRequest
from dhis2.core.inventory import HostResolved, Inventory, resolve_one
from fhir.resources.bundle import Bundle
from .models.svcm import CodeList, SVCMConfig
from .svcm_resources import build_bundle
log = logging.getLogger(__name__)
def get_source(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]:
    """Build a zero-argument fetcher for the configured DHIS2 source.

    Resolves ``config.source.id`` against the inventory; only 'dhis2'
    hosts are supported (otherwise the process exits).  The returned
    callable pulls filtered optionSets and categories metadata and
    returns a ``(host, data)`` tuple.
    """
    host = resolve_one(config.source.id, inventory)

    if "dhis2" not in host.type:
        log.error("Only 'dhis2' source type is currently supported")
        sys.exit(-1)

    log.info(f"Creating source from '{host.key}' with base url '{host.baseUrl}'")

    def fn():
        # Filters shared by both metadata queries.
        filters = []

        # https://docs.dhis2.org/2.35/en/developer/html/webapi_metadata_object_filter.html
        if config.source.lastUpdated:
            filters.append(f"lastUpdated:ge:{config.source.lastUpdated}")

        # Per-object id filters are OR-joined (rootJunction=OR below).
        option_sets_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.optionSets))
        option_sets_filter.extend(filters)

        option_sets = BaseHttpRequest(host).get(
            "api/optionSets",
            params={
                "fields": "id,code,name,version,translations,options[id,code,name,translations]",
                "rootJunction": "OR",
                "filter": option_sets_filter,
                "paging": False,
            },
        )

        categories_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.categories))
        categories_filter.extend(filters)

        categories = BaseHttpRequest(host).get(
            "api/categories",
            params={
                # categoryOptions is renamed so both payloads expose "options"
                "fields": "id,code,name,translations,categoryOptions::rename(options)[id,code,name,translations]",
                "rootJunction": "OR",
                "filter": categories_filter,
                "paging": False,
            },
        )

        # Default to empty lists so transform() can iterate unconditionally.
        data = {
            "optionSets": option_sets.get("optionSets", []),
            "categories": categories.get("categories", []),
        }

        return (
            host,
            data,
        )

    return fn
def get_target(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]:
    """Build a one-argument sink for the configured target.

    Special ids: ``log://`` prints the FHIR bundle to stdout, ``null://``
    discards it.  Anything else is resolved against the inventory and
    POSTed to; 'dhis2' targets are rejected (process exits).
    """
    # NOTE(review): ``id`` shadows the ``id`` builtin; harmless here.
    id = config.target.id

    if "log://" == id:
        log.info("Creating 'log://' target")

        def target_log(data: Any):
            # data is the (host, Bundle) tuple produced by transform().
            log.info("Writing result to stdout")
            print(json.dumps(data[1].as_json(), indent=2))

        return target_log
    elif "null://" == id:
        log.info("Creating 'null://' target")

        def target_null(data: Any):
            log.info("Doing nothing with result")

        return target_null

    host = resolve_one(id, inventory)

    if "dhis2" in host.type:
        log.error("'dhis2' target type is not currently supported")
        sys.exit(-1)

    log.info(f"Creating target from '{host.key}' with base url '{host.baseUrl}'")

    def target_push(data: Any):
        # POST the serialized FHIR bundle to the host's base URL.
        payload: Bundle = data[1]
        return BaseHttpRequest(host).post("", data=payload.as_json())

    return target_push
def transform(config: SVCMConfig, data: Any):
    """Convert raw DHIS2 metadata into a ``(host, FHIR bundle)`` pair.

    Option sets and categories are both mapped onto ``CodeList`` models
    (categories are tagged with ``type="categories"``) and packed into a
    single bundle addressed by the host's base URL.
    """
    host: HostResolved = data[0]
    raw: Dict[str, Any] = data[1]

    code_lists: List[CodeList] = [
        CodeList(**option_set) for option_set in raw.get("optionSets", [])
    ]
    code_lists.extend(
        CodeList(**category, type="categories")
        for category in raw.get("categories", [])
    )

    return host, build_bundle(code_lists, host.baseUrl)
def run(config: SVCMConfig, inventory: Inventory):
    """Execute one SVCM job: fetch from source, transform, push to target.

    Args:
        config: job configuration naming the source and target hosts.
        inventory: resolved host inventory the ids are looked up in.
    """
    # Fixed a doubled quote in the start message ("...'' starting").
    log.info(f"SVCM job '{config.id}' starting")

    source = get_source(config, inventory)
    target = get_target(config, inventory)

    data = source()
    data = transform(config, data)
    data = target(data)

    # Only the HTTP push target returns a response; log:// and null://
    # return None.
    if data:
        log.info(f"Got response from target system {data}")

    log.info(f"SVCM job '{config.id}' finished")
| 28.204225 | 114 | 0.615481 | import json
import logging
import sys
from typing import Any, Callable, Dict, List
from dhis2.core.http import BaseHttpRequest
from dhis2.core.inventory import HostResolved, Inventory, resolve_one
from fhir.resources.bundle import Bundle
from .models.svcm import CodeList, SVCMConfig
from .svcm_resources import build_bundle
log = logging.getLogger(__name__)
def get_source(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]:
host = resolve_one(config.source.id, inventory)
if "dhis2" not in host.type:
log.error("Only 'dhis2' source type is currently supported")
sys.exit(-1)
log.info(f"Creating source from '{host.key}' with base url '{host.baseUrl}'")
def fn():
filters = []
if config.source.lastUpdated:
filters.append(f"lastUpdated:ge:{config.source.lastUpdated}")
option_sets_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.optionSets))
option_sets_filter.extend(filters)
option_sets = BaseHttpRequest(host).get(
"api/optionSets",
params={
"fields": "id,code,name,version,translations,options[id,code,name,translations]",
"rootJunction": "OR",
"filter": option_sets_filter,
"paging": False,
},
)
categories_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.categories))
categories_filter.extend(filters)
categories = BaseHttpRequest(host).get(
"api/categories",
params={
"fields": "id,code,name,translations,categoryOptions::rename(options)[id,code,name,translations]",
"rootJunction": "OR",
"filter": categories_filter,
"paging": False,
},
)
data = {
"optionSets": option_sets.get("optionSets", []),
"categories": categories.get("categories", []),
}
return (
host,
data,
)
return fn
def get_target(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]:
id = config.target.id
if "log://" == id:
log.info("Creating 'log://' target")
def target_log(data: Any):
log.info("Writing result to stdout")
print(json.dumps(data[1].as_json(), indent=2))
return target_log
elif "null://" == id:
log.info("Creating 'null://' target")
def target_null(data: Any):
log.info("Doing nothing with result")
return target_null
host = resolve_one(id, inventory)
if "dhis2" in host.type:
log.error("'dhis2' target type is not currently supported")
sys.exit(-1)
log.info(f"Creating target from '{host.key}' with base url '{host.baseUrl}'")
def target_push(data: Any):
payload: Bundle = data[1]
return BaseHttpRequest(host).post("", data=payload.as_json())
return target_push
def transform(config: SVCMConfig, data: Any):
host: HostResolved = data[0]
payload: Dict[str, Any] = data[1]
code_lists: List[CodeList] = []
option_sets = payload.get("optionSets", [])
categories = payload.get("categories", [])
for option_set in option_sets:
code_lists.append(CodeList(**option_set))
for category in categories:
code_lists.append(CodeList(**category, type="categories"))
return (
host,
build_bundle(code_lists, host.baseUrl),
)
def run(config: SVCMConfig, inventory: Inventory):
log.info(f"SVCM job '{config.id}'' starting")
source = get_source(config, inventory)
target = get_target(config, inventory)
data = source()
data = transform(config, data)
data = target(data)
if data:
log.info(f"Got response from target system {data}")
log.info(f"SVCM job '{config.id}' finished")
| true | true |
7901b729a1fcda59aaac394589b9db9c6d3c000a | 93 | py | Python | src/UOJ_1933 - (3425561) Accepted.py | miguelarauj1o/UOJ | eb195754829c42c3dcf1a68616e63da1386cb5a9 | [
"MIT"
] | 80 | 2015-01-07T01:18:40.000Z | 2021-05-04T15:23:18.000Z | src/UOJ_1933 - (3425561) Accepted.py | miguelarauj1o/OJ | eb195754829c42c3dcf1a68616e63da1386cb5a9 | [
"MIT"
] | 1 | 2019-01-07T01:13:32.000Z | 2019-01-07T01:13:32.000Z | src/UOJ_1933 - (3425561) Accepted.py | miguelarauj1o/OJ | eb195754829c42c3dcf1a68616e63da1386cb5a9 | [
"MIT"
] | 28 | 2015-03-05T11:53:23.000Z | 2020-07-05T15:50:42.000Z | a, b = raw_input().split()
a = int(a)
b = int(b)
if b > a:
print(b)
else:
print(a)1 | 11.625 | 27 | 0.505376 | a, b = raw_input().split()
a = int(a)
b = int(b)
if b > a:
print(b)
else:
print(a)1 | false | true |
7901b7d9d3e6f43d5bf1e3baba09c66f4e443df2 | 1,943 | py | Python | Twitter_scraping/graph_builder.py | TristanThomson/Year-3-Final-Project | 07a588ff3312040c6ff41fd170c1909357991c66 | [
"OML"
] | 2 | 2020-01-01T16:04:04.000Z | 2020-01-27T13:14:22.000Z | Twitter_scraping/graph_builder.py | TristanThomson/Year-3-Final-Project | 07a588ff3312040c6ff41fd170c1909357991c66 | [
"OML"
] | null | null | null | Twitter_scraping/graph_builder.py | TristanThomson/Year-3-Final-Project | 07a588ff3312040c6ff41fd170c1909357991c66 | [
"OML"
] | null | null | null | import os
import networkx as nx
import pandas as pd
from pathlib import Path
from Twitter_scraping.scraper_helper import RandomPicker
G = nx.DiGraph()  # directed follow graph built up by the loop below
min_list = RandomPicker().min_df["Twitter"].dropna()  # Twitter handles of minority MEPs (SYI dataset)
mep_list = RandomPicker().all_df["Twitter"].dropna()  # Twitter handles of all MEPs
rootdir = os.getcwd()  # scraped CSVs are searched for beneath the current working directory
def check_minorities():
    """Move CSV folders of minority MEPs from .../majority/ into .../minority/.

    Walks every scraped ``*.csv`` under ``rootdir``; when a file's parent
    folder is named after a minority MEP's Twitter handle but does not yet
    live under a ``minority`` directory, the file is renamed into the
    sibling ``minority`` tree.
    """
    # Build the lookup set once instead of re-mapping per file.  The
    # original indexed ``min_list["Twitter"].dropna()`` again, but
    # ``min_list`` is already that column -- the extra indexing raised a
    # KeyError.
    minority_handles = set(map(lambda x: x.lower(), min_list))
    for path in Path(rootdir).rglob('*.csv'):
        curparent = str(path.parent.name)
        # Compare lower-cased on both sides, consistent with the
        # graph-building loop at module level.
        if curparent.lower() in minority_handles and not path.parent.parent.name == "minority":
            print(curparent)
            original = str(rootdir) + "/" + str(path.parent.parent.parent.name) + "/majority/" + str(
                curparent) + "/" + str(path.name)
            new = str(rootdir) + "/" + str(path.parent.parent.parent.name) + "/minority/" + str(curparent) + "/" + str(
                path.name)
            os.rename(original, new)
# Build the directed follow graph for minority MEPs only.  Each CSV lives in
# a folder named after the account it was scraped for: "following.csv" holds
# accounts the MEP follows (edge MEP -> account); any other CSV is treated as
# followers (edge account -> MEP).  Nodes are tagged is_mep=1 when the
# username appears in the MEP handle list.
for path in Path(rootdir).rglob('*.csv'):
    curparent = str(path.parent.name)
    curfile = pd.read_csv(path, encoding='utf-8-sig')
    if curparent.lower() in map(lambda x: x.lower(), min_list):
        G.add_node(curparent, is_mep=1)
        if str(path.name) == "following.csv":
            print(path.name)
            for i in curfile["username"]:
                # NOTE(review): `i` is compared case-sensitively against the
                # lower-cased handle list -- assumes scraped usernames are
                # already lower case; confirm against the scraper output.
                if i in map(lambda x: x.lower(), mep_list):
                    G.add_node(str(i), is_mep=1)
                else:
                    G.add_node(str(i), is_mep=0)
                G.add_edge(curparent, i)
        else:
            print(path.name)
            for i in curfile["username"]:
                if i in map(lambda x: x.lower(), mep_list):
                    G.add_node(str(i), is_mep=1)
                else:
                    G.add_node(str(i), is_mep=0)
                G.add_edge(str(i), curparent)
# Persist the finished graph in GEXF format (e.g. for Gephi).
nx.write_gexf(G, "minority.gexf")
| 41.340426 | 124 | 0.58106 | import os
import networkx as nx
import pandas as pd
from pathlib import Path
from Twitter_scraping.scraper_helper import RandomPicker
G = nx.DiGraph()
min_list = RandomPicker().min_df["Twitter"].dropna()
mep_list = RandomPicker().all_df["Twitter"].dropna()
rootdir = os.getcwd()
def check_minorities():
for path in Path(rootdir).rglob('*.csv'):
curparent = str(path.parent.name)
if curparent in map(lambda x: x.lower(),min_list["Twitter"].dropna()) and not path.parent.parent.name == "minority":
print(curparent)
original = str(rootdir) + "/" + str(path.parent.parent.parent.name) + "/majority/" + str(
curparent) + "/" + str(path.name)
new = str(rootdir) + "/" + str(path.parent.parent.parent.name) + "/minority/" + str(curparent) + "/" + str(
path.name)
os.rename(original, new)
for path in Path(rootdir).rglob('*.csv'):
curparent = str(path.parent.name)
curfile = pd.read_csv(path, encoding='utf-8-sig')
if curparent.lower() in map(lambda x: x.lower(), min_list):
G.add_node(curparent, is_mep=1)
if str(path.name) == "following.csv":
print(path.name)
for i in curfile["username"]:
if i in map(lambda x: x.lower(), mep_list):
G.add_node(str(i), is_mep=1)
else:
G.add_node(str(i), is_mep=0)
G.add_edge(curparent, i)
else:
print(path.name)
for i in curfile["username"]:
if i in map(lambda x: x.lower(), mep_list):
G.add_node(str(i), is_mep=1)
else:
G.add_node(str(i), is_mep=0)
G.add_edge(str(i), curparent)
nx.write_gexf(G, "minority.gexf")
| true | true |
7901b7f2fa29741d72328bdbdbf92fc4d5c5f847 | 12,675 | py | Python | mmdet/models/backbones/res2net.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 12,377 | 2017-12-04T02:46:57.000Z | 2022-03-31T16:48:31.000Z | mmdet/models/backbones/res2net.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 1,851 | 2017-12-05T05:41:23.000Z | 2022-03-30T13:06:22.000Z | mmdet/models/backbones/res2net.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 4,198 | 2017-12-05T02:57:19.000Z | 2022-03-30T10:29:37.000Z | import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
    # Output channel multiplier, same as the standard ResNet bottleneck.
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 scales=4,
                 base_width=26,
                 base_channels=64,
                 stage_type='normal',
                 **kwargs):
        """Bottle2neck block for Res2Net.

        The single 3x3 conv of the classic bottleneck is replaced by
        ``scales - 1`` narrower 3x3 convs whose outputs are chained
        hierarchically, giving multi-scale receptive fields.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
        # Channel width of each scale branch, scaled by base_width/base_channels.
        width = int(math.floor(self.planes * (base_width / base_channels)))

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width * scales, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width * scales,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        # In a strided 'stage' block the left-over split is downsampled with
        # average pooling instead of a strided conv (see forward()).
        if stage_type == 'stage' and self.conv2_stride != 1:
            self.pool = nn.AvgPool2d(
                kernel_size=3, stride=self.conv2_stride, padding=1)

        convs = []
        bns = []
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            # Plain 3x3 convs: one per scale branch except the last split.
            for i in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.conv_cfg,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        else:
            # Deformable convs (DCN) replace the plain 3x3 branch convs.
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            for i in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.dcn,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width * scales,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # The parent bottleneck's single conv2/norm2 is replaced by the
        # multi-branch convs above, so remove the unused modules.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            # Split channels into `scales` groups and process hierarchically:
            # each branch (after the first) sees the previous branch output.
            spx = torch.split(out, self.width, 1)
            sp = self.convs[0](spx[0].contiguous())
            sp = self.relu(self.bns[0](sp))
            out = sp
            for i in range(1, self.scales - 1):
                if self.stage_type == 'stage':
                    # stage blocks do not reuse the previous branch output
                    sp = spx[i]
                else:
                    sp = sp + spx[i]
                sp = self.convs[i](sp.contiguous())
                sp = self.relu(self.bns[i](sp))
                out = torch.cat((out, sp), 1)

            # The last split passes through unchanged ('normal' or stride 1)
            # or is average-pooled to match the strided spatial size.
            if self.stage_type == 'normal' or self.conv2_stride == 1:
                out = torch.cat((out, spx[self.scales - 1]), 1)
            elif self.stage_type == 'stage':
                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            # Gradient checkpointing: trade recompute for activation memory.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
class Res2Layer(nn.Sequential):
    """Res2Layer to build Res2Net style backbone.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 scales=4,
                 base_width=26,
                 **kwargs):
        self.block = block

        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Shortcut path: average-pool for the stride, then a 1x1 conv
            # for the channel change (instead of a single strided 1x1 conv).
            downsample = nn.Sequential(
                nn.AvgPool2d(
                    kernel_size=stride,
                    stride=stride,
                    ceil_mode=True,
                    count_include_pad=False),
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=1,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1],
            )

        layers = []
        # First block changes resolution/width (stage_type='stage'); the
        # remaining blocks are plain stride-1 'normal' blocks.
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                scales=scales,
                base_width=base_width,
                stage_type='stage',
                **kwargs))
        inplanes = planes * block.expansion
        for i in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    scales=scales,
                    base_width=base_width,
                    **kwargs))
        super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.
    Args:
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
        depth (int): Depth of res2net, from {50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Res2net stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
    Example:
        >>> from mmdet.models import Res2Net
        >>> import torch
        >>> self = Res2Net(depth=50, scales=4, base_width=26)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """
    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3))
    }
    def __init__(self,
                 scales=4,
                 base_width=26,
                 style='pytorch',
                 deep_stem=True,
                 avg_down=True,
                 **kwargs):
        self.scales = scales
        self.base_width = base_width
        # NOTE: `style`, `deep_stem` and `avg_down` are accepted for signature
        # compatibility with ResNet but are forced to the values below —
        # Res2Net always uses the pytorch-style, deep-stem, avg-down variant.
        super(Res2Net, self).__init__(
            style='pytorch', deep_stem=True, avg_down=True, **kwargs)
    def make_res_layer(self, **kwargs):
        """Build a ``Res2Layer`` stage, forwarding the Res2Net-specific
        ``scales``/``base_width`` settings on top of the ResNet kwargs."""
        return Res2Layer(
            scales=self.scales,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            # Kaiming init for convs, unit init for norm layers.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        # dcn in Res2Net bottle2neck is in ModuleList
                        for n in m.convs:
                            if hasattr(n, 'conv_offset'):
                                constant_init(n.conv_offset, 0)
            if self.zero_init_residual:
                # Zero the last norm so each residual block starts as identity.
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        constant_init(m.norm3, 0)
        else:
            raise TypeError('pretrained must be a str or None')
| 36.008523 | 79 | 0.527732 | import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
    """Bottle2neck block for Res2Net.

    The parent bottleneck's single 3x3 conv is replaced by ``scales``
    feature groups of ``width`` channels each: groups ``0..scales-2`` get
    their own 3x3 conv, and in 'normal' blocks each group additionally
    receives the previous group's output before its conv (hierarchical
    residual connections). The last group is passed through unchanged
    (avg-pooled in strided 'stage' blocks).
    """
    # Output channels are `planes * expansion`, as in the parent bottleneck.
    expansion = 4
    def __init__(self,
                 inplanes,
                 planes,
                 scales=4,
                 base_width=26,
                 base_channels=64,
                 stage_type='normal',
                 **kwargs):
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
        # Channel count of each of the `scales` feature groups.
        width = int(math.floor(self.planes * (base_width / base_channels)))
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width * scales, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)
        # 1x1 conv expanding the input to `scales` groups of `width` channels.
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width * scales,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        # Strided stage-entry blocks downsample the last (conv-less) group
        # with an average pool instead of a conv.
        if stage_type == 'stage' and self.conv2_stride != 1:
            self.pool = nn.AvgPool2d(
                kernel_size=3, stride=self.conv2_stride, padding=1)
        convs = []
        bns = []
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            # One 3x3 conv + norm per feature group except the last one.
            for i in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.conv_cfg,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        else:
            # Same layout, but the 3x3 convs are built from the DCN config.
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            for i in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.dcn,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        # 1x1 conv merging the concatenated groups back to the block output.
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width * scales,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # The parent's single conv2/norm2 are replaced by the ModuleLists.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)
    def forward(self, x):
        """Forward function."""
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            # Split into `scales` groups of `width` channels each.
            spx = torch.split(out, self.width, 1)
            sp = self.convs[0](spx[0].contiguous())
            sp = self.relu(self.bns[0](sp))
            out = sp
            for i in range(1, self.scales - 1):
                # 'normal' blocks add the previous group's output before the
                # conv; stage-entry blocks cannot (spatial sizes differ).
                if self.stage_type == 'stage':
                    sp = spx[i]
                else:
                    sp = sp + spx[i]
                sp = self.convs[i](sp.contiguous())
                sp = self.relu(self.bns[i](sp))
                out = torch.cat((out, sp), 1)
            # The last group has no conv: concatenated as-is, or avg-pooled
            # in strided stage-entry blocks to match the new resolution.
            if self.stage_type == 'normal' or self.conv2_stride == 1:
                out = torch.cat((out, spx[self.scales - 1]), 1)
            elif self.stage_type == 'stage':
                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out
        # Optionally trade compute for memory with gradient checkpointing.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
class Res2Layer(nn.Sequential):
    """Res2Layer to build Res2Net style backbone.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
    """
    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 scales=4,
                 base_width=26,
                 **kwargs):
        self.block = block
        # Projection shortcut (AvgPool + 1x1 conv + norm) when the stage
        # changes the spatial size or the channel count.
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.AvgPool2d(
                    kernel_size=stride,
                    stride=stride,
                    ceil_mode=True,
                    count_include_pad=False),
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=1,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1],
            )
        layers = []
        # First block carries the stride and the downsample shortcut.
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                scales=scales,
                base_width=base_width,
                stage_type='stage',
                **kwargs))
        inplanes = planes * block.expansion
        # Remaining blocks keep resolution and channel count unchanged.
        for i in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    scales=scales,
                    base_width=base_width,
                    **kwargs))
        super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.

    A ResNet variant whose bottleneck blocks (``Bottle2neck``) split the
    3x3 conv into ``scales`` hierarchical feature groups of ``base_width``
    base channels. Supported depths are 50, 101 and 152.
    """
    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3))
    }
    def __init__(self,
                 scales=4,
                 base_width=26,
                 style='pytorch',
                 deep_stem=True,
                 avg_down=True,
                 **kwargs):
        self.scales = scales
        self.base_width = base_width
        # NOTE: `style`, `deep_stem` and `avg_down` are accepted for signature
        # compatibility with ResNet but are always forced to the values below.
        super(Res2Net, self).__init__(
            style='pytorch', deep_stem=True, avg_down=True, **kwargs)
    def make_res_layer(self, **kwargs):
        """Build a ``Res2Layer``, injecting the Res2Net-specific settings."""
        return Res2Layer(
            scales=self.scales,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            # Kaiming init for convs, unit init for norm layers.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        # dcn in Res2Net bottle2neck is in a ModuleList
                        for n in m.convs:
                            if hasattr(n, 'conv_offset'):
                                constant_init(n.conv_offset, 0)
            if self.zero_init_residual:
                # Zero the last norm so each residual block starts as identity.
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        constant_init(m.norm3, 0)
        else:
            raise TypeError('pretrained must be a str or None')
| true | true |
7901b864d323897254d20ffd6ca52e6cb5e50268 | 27,129 | py | Python | legal-api/tests/unit/services/test_authorization.py | leksmall/lear | cc7d75be830d12bfcc33b89bb2c4f34795bcd518 | [
"Apache-2.0"
] | null | null | null | legal-api/tests/unit/services/test_authorization.py | leksmall/lear | cc7d75be830d12bfcc33b89bb2c4f34795bcd518 | [
"Apache-2.0"
] | null | null | null | legal-api/tests/unit/services/test_authorization.py | leksmall/lear | cc7d75be830d12bfcc33b89bb2c4f34795bcd518 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the Authorization Services.
Test-Suite to ensure that the Authorization Service is working as expected.
"""
from http import HTTPStatus
import pytest
from flask import jsonify
from legal_api.models.business import Business
from legal_api.services.authz import BASIC_USER, COLIN_SVC_ROLE, STAFF_ROLE, authorized, get_allowed, is_allowed
from tests import integration_authorization, not_github_ci
from .utils import helper_create_jwt
def test_jwt_manager_initialized(jwt):
"""Assert that the jwt_manager is created as part of the fixtures."""
assert jwt
@not_github_ci
def test_jwt_manager_correct_test_config(app_request, jwt):
    """Assert that the test configuration for the JWT is working as expected."""
    message = 'This is a protected end-point'
    protected_route = '/fake_jwt_route'
    @app_request.route(protected_route)
    @jwt.has_one_of_roles([STAFF_ROLE])
    def get():
        return jsonify(message=message)
    # A known role must be let through; an unknown role must be rejected.
    for granted_roles, expected_status in (
            ([STAFF_ROLE], HTTPStatus.OK),
            (['SHOULD-FAIL'], HTTPStatus.UNAUTHORIZED)):
        token = helper_create_jwt(jwt, granted_roles)
        headers = {'Authorization': 'Bearer ' + token}
        rv = app_request.test_client().get(protected_route, headers=headers)
        assert rv.status_code == expected_status
TEST_AUTHZ_DATA = [
    # columns: (test name, business identifier, JWT username, JWT roles,
    #           actions granted by the mocked auth service, requested action,
    #           expected HTTP status of the probe route)
    ('staff_role',  # test name
     'CP1234567',  # business identifier
     'happy-staff',  # username
     [STAFF_ROLE],  # roles
     ['view', 'edit'],  # allowed actions
     ['edit'],  # requested action
     HTTPStatus.OK),  # expected response
    ('colin svc role', 'CP1234567', 'CP1234567', [COLIN_SVC_ROLE], ['view', 'edit'], ['edit'],
     HTTPStatus.OK),
    ('authorized_user', 'CP0001237', 'CP1234567', [BASIC_USER], ['view', 'edit'], ['edit'],
     HTTPStatus.OK),
    ('unauthorized_user', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['edit'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('missing_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, None,
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('invalid_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['scrum'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('add_comment_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['add_comment'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('court_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['court_order'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('registrars_notation_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_notation'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('registrars_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_order'],
     HTTPStatus.METHOD_NOT_ALLOWED),
]
@not_github_ci
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected',
                         TEST_AUTHZ_DATA)
def test_authorized_user(monkeypatch, app_request, jwt,
                         test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
    """Assert that the type of user authorization is correct, based on the expected outcome."""
    from requests import Response
    print(test_name)
    # mocks, the get and json calls for requests.Response, so no real call
    # is made to the auth service
    def mock_get(*args, **kwargs):  # pylint: disable=unused-argument; mocks of library methods
        resp = Response()
        resp.status_code = 200
        return resp
    def mock_json(self, **kwargs):  # pylint: disable=unused-argument; mocks of library methods
        return {'roles': allowed_actions}
    monkeypatch.setattr('requests.sessions.Session.get', mock_get)
    monkeypatch.setattr('requests.Response.json', mock_json)
    # setup: a probe route that checks authorization for the identifier
    # NOTE(review): `requested_actions` is parametrized but never used — the
    # route below always checks ['view']; confirm whether it should check the
    # parametrized action instead.
    @app_request.route('/fake_jwt_route/<string:identifier>')
    @jwt.requires_auth
    def get_fake(identifier: str):
        if not authorized(identifier, jwt, ['view']):
            return jsonify(message='failed'), HTTPStatus.METHOD_NOT_ALLOWED
        return jsonify(message='success'), HTTPStatus.OK
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}
    # test it
    rv = app_request.test_client().get(f'/fake_jwt_route/{identifier}', headers=headers)
    # check it
    assert rv.status_code == expected
TEST_INTEG_AUTHZ_DATA = [
    # columns: (test name, business identifier, JWT username, JWT roles,
    #           allowed actions (unused against the live service),
    #           requested action, expected HTTP status)
    ('staff_role',  # test name
     'CP1234567',  # business identifier
     'happy-staff',  # username
     [STAFF_ROLE],  # roles
     ['view', 'edit'],  # allowed actions
     ['edit'],  # requested action
     HTTPStatus.OK),  # expected response
    ('colin svc role', 'CP1234567', 'CP1234567', [COLIN_SVC_ROLE], ['view', 'edit'], ['edit'],
     HTTPStatus.OK),
    ('unauthorized_user', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['edit'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('missing_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, None,
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('invalid_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['scrum'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('add_comment_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['add_comment'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('court_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['court_order'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('registrars_notation_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_notation'],
     HTTPStatus.METHOD_NOT_ALLOWED),
    ('registrars_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_order'],
     HTTPStatus.METHOD_NOT_ALLOWED),
]
@integration_authorization
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected',
                         TEST_INTEG_AUTHZ_DATA)
def test_authorized_user_integ(monkeypatch, app, jwt,
                               test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
    """Assert that the type of user authorization is correct, based on the expected outcome."""
    import flask  # noqa: F401; import actually used in mock
    # setup: only the Authorization header is mocked; the real auth service
    # is called (integration test)
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}
    def mock_auth(one, two):  # pylint: disable=unused-argument; mocks of library methods
        return headers['Authorization']
    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        rv = authorized(identifier, jwt, ['view'])
    # check it: authorized() returns truthy only for the OK cases
    if expected == HTTPStatus.OK:
        assert rv
    else:
        assert not rv
def test_authorized_missing_args():
    """Assert that authorized() is falsy when any argument is missing."""
    identifier = 'a corp'
    jwt = 'fake'
    action = 'fake'
    # drop each argument in turn; authorization must never be granted
    assert not authorized(identifier, jwt, None)
    assert not authorized(identifier, None, action)
    assert not authorized(None, jwt, action)
def test_authorized_bad_url(monkeypatch, app, jwt):
    """Assert that an invalid auth service URL returns False."""
    import flask  # noqa: F401; import actually used in mock
    # setup
    identifier = 'CP1234567'
    username = 'username'
    roles = [BASIC_USER]
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}
    def mock_auth(one, two):  # pylint: disable=unused-argument; mocks of library methods
        return headers['Authorization']
    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        # temporarily point the auth service at an unreachable URL, then
        # restore the original so later tests are unaffected
        auth_svc_url = app.config['AUTH_SVC_URL']
        app.config['AUTH_SVC_URL'] = 'http://no.way.this.works/dribble'
        rv = authorized(identifier, jwt, ['view'])
        app.config['AUTH_SVC_URL'] = auth_svc_url
    assert not rv
def test_authorized_invalid_roles(monkeypatch, app, jwt):
    """Assert that an invalid role returns False."""
    import flask  # noqa: F401 ; import actually used in mock
    # setup: a JWT carrying a role the service does not recognize  noqa: I003
    identifier = 'CP1234567'
    username = 'username'
    roles = ['NONE']
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}
    def mock_auth(one, two):  # pylint: disable=unused-argument; mocks of library methods
        return headers['Authorization']
    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        rv = authorized(identifier, jwt, ['view'])
    assert not rv
@pytest.mark.parametrize(
    'test_name,state,legal_type,username,roles,expected',
    [
        # active business
        ('staff_active_cp', Business.State.ACTIVE, 'CP', 'staff', [STAFF_ROLE],
         ['annualReport', 'changeOfAddress', 'changeOfDirectors', 'correction',
          'courtOrder', 'dissolution', 'incorporationApplication',
          'specialResolution', 'registrarsNotation', 'registrarsOrder']),
        ('staff_active_bc', Business.State.ACTIVE, 'BC', 'staff', [STAFF_ROLE],
         ['alteration', 'courtOrder', 'dissolution', 'incorporationApplication',
          'transition', 'registrarsNotation', 'registrarsOrder']),
        ('staff_active_ben', Business.State.ACTIVE, 'BEN', 'staff', [STAFF_ROLE],
         ['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'conversion', 'correction',
          'courtOrder', 'dissolution', 'incorporationApplication',
          'transition', 'registrarsNotation', 'registrarsOrder']),
        ('staff_active_cc', Business.State.ACTIVE, 'CC', 'staff', [STAFF_ROLE],
         ['courtOrder', 'dissolution',
          'registrarsNotation', 'registrarsOrder']),
        ('staff_active_ulc', Business.State.ACTIVE, 'ULC', 'staff', [STAFF_ROLE],
         ['alteration', 'courtOrder', 'dissolution',
          'registrarsNotation', 'registrarsOrder']),
        ('staff_active_llc', Business.State.ACTIVE, 'LLC', 'staff', [STAFF_ROLE],
         ['courtOrder', 'dissolution',
          'registrarsNotation', 'registrarsOrder']),
        ('staff_active_sp', Business.State.ACTIVE, 'SP', 'staff', [STAFF_ROLE],
         ['changeOfRegistration', 'conversion', 'dissolution', 'registration']),
        ('staff_active_gp', Business.State.ACTIVE, 'GP', 'staff', [STAFF_ROLE],
         ['changeOfRegistration', 'conversion', 'dissolution', 'registration']),
        ('user_active_cp', Business.State.ACTIVE, 'CP', 'user', [BASIC_USER],
         ['annualReport', 'changeOfAddress', 'changeOfDirectors',
          'dissolution', 'incorporationApplication', 'specialResolution']),
        ('user_active_bc', Business.State.ACTIVE, 'BC', 'user', [BASIC_USER],
         ['alteration', 'dissolution', 'incorporationApplication', 'transition']),
        ('user_active_ben', Business.State.ACTIVE, 'BEN', 'user', [BASIC_USER],
         ['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors',
          'dissolution', 'incorporationApplication', 'transition']),
        ('user_active_cc', Business.State.ACTIVE, 'CC', 'user', [BASIC_USER], ['dissolution']),
        ('user_active_ulc', Business.State.ACTIVE, 'ULC', 'user', [BASIC_USER], ['alteration', 'dissolution']),
        ('user_active_llc', Business.State.ACTIVE, 'LLC', 'user', [BASIC_USER], ['dissolution']),
        ('user_active_sp', Business.State.ACTIVE, 'SP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']),
        ('user_active_gp', Business.State.ACTIVE, 'GP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']),
        # historical business
        ('staff_historical_cp', Business.State.HISTORICAL, 'CP', 'staff', [STAFF_ROLE],
         ['courtOrder', 'registrarsNotation', 'registrarsOrder',
          {'restoration': ['fullRestoration']}]),
        ('staff_historical_bc', Business.State.HISTORICAL, 'BC', 'staff', [STAFF_ROLE],
         ['courtOrder', 'registrarsNotation', 'registrarsOrder',
          {'restoration': ['fullRestoration', 'limitedRestoration']}]),
        ('staff_historical_ben', Business.State.HISTORICAL, 'BEN', 'staff', [STAFF_ROLE],
         ['courtOrder', 'registrarsNotation', 'registrarsOrder',
          {'restoration': ['fullRestoration', 'limitedRestoration']}]),
        ('staff_historical_cc', Business.State.HISTORICAL, 'CC', 'staff', [STAFF_ROLE],
         ['courtOrder', 'registrarsNotation', 'registrarsOrder',
          {'restoration': ['fullRestoration', 'limitedRestoration']}]),
        ('staff_historical_ulc', Business.State.HISTORICAL, 'ULC', 'staff', [STAFF_ROLE],
         ['courtOrder', 'registrarsNotation', 'registrarsOrder',
          {'restoration': ['fullRestoration', 'limitedRestoration']}]),
        ('staff_historical_llc', Business.State.HISTORICAL, 'LLC', 'staff', [STAFF_ROLE],
         ['courtOrder', 'registrarsNotation', 'registrarsOrder',
          {'restoration': ['fullRestoration', 'limitedRestoration']}]),
        ('user_historical_llc', Business.State.HISTORICAL, 'LLC', 'user', [BASIC_USER], []),
    ]
)
def test_get_allowed(monkeypatch, app, jwt, test_name, state, legal_type, username, roles, expected):
    """Assert that get allowed returns valid filings."""
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}
    def mock_auth(one, two):  # pylint: disable=unused-argument; mocks of library methods
        return headers[one]
    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        # the full list of allowed filing types must match exactly
        filing_types = get_allowed(state, legal_type, jwt)
        assert filing_types == expected
@pytest.mark.parametrize(
'test_name,state,filing_type,sub_filing_type,legal_types,username,roles,expected',
[
# active business
('staff_active_allowed', Business.State.ACTIVE, 'alteration', None,
['BC', 'BEN', 'ULC'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'alteration', None,
['CP', 'CC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'annualReport', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'annualReport', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'changeOfAddress', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'changeOfDirectors', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'correction', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'correction', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None,
['CP', 'BC', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'specialResolution', None,
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'transition', None,
['BC', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'transition', None,
['CP', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'registration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], True),
('user_active_allowed', Business.State.ACTIVE, 'alteration', None,
['BC', 'BEN', 'ULC'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'alteration', None,
['CP', 'CC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'annualReport', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None,
['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'changeOfAddress', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None,
['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'changeOfDirectors', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None,
['CP', 'BC', 'BEN'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'registration', None,
['SP', 'GP'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None,
['SP', 'GP'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'specialResolution', None,
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'transition', None,
['CP', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
# historical business
('staff_historical', Business.State.HISTORICAL, 'alteration', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'annualReport', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfAddress', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfDirectors', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'incorporationApplication', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['CP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'specialResolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'transition', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'registration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfRegistration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], False),
('user_historical', Business.State.HISTORICAL, 'alteration', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'annualReport', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfAddress', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfDirectors', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP', 'SP', 'GP'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'incorporationApplication', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'specialResolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'transition', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registration', None,
['SP', 'GP'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfRegistration', None,
['SP', 'GP'], 'user', [BASIC_USER], False),
]
)
def test_is_allowed(monkeypatch, app, jwt, test_name, state, filing_type, sub_filing_type,
                    legal_types, username, roles, expected):
    """Assert that is_allowed gives the expected verdict for every legal type."""
    bearer = helper_create_jwt(jwt, roles=roles, username=username)
    auth_headers = {'Authorization': 'Bearer ' + bearer}

    def fake_header_get(key, default):  # pylint: disable=unused-argument; mocks of library methods
        return auth_headers[key]

    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', fake_header_get)
        for legal_type in legal_types:
            verdict = is_allowed(state, filing_type, legal_type, jwt, sub_filing_type)
            assert verdict == expected
| 48.531306 | 135 | 0.635925 |
from http import HTTPStatus
import pytest
from flask import jsonify
from legal_api.models.business import Business
from legal_api.services.authz import BASIC_USER, COLIN_SVC_ROLE, STAFF_ROLE, authorized, get_allowed, is_allowed
from tests import integration_authorization, not_github_ci
from .utils import helper_create_jwt
def test_jwt_manager_initialized(jwt):
    """Assert that the JWT manager fixture is truthy (i.e. was initialized)."""
    assert jwt
@not_github_ci
def test_jwt_manager_correct_test_config(app_request, jwt):
    """Assert that a role-protected route accepts a staff token and rejects others."""
    message = 'This is a protected end-point'
    protected_route = '/fake_jwt_route'

    # Register an endpoint reachable only with the STAFF role.
    @app_request.route(protected_route)
    @jwt.has_one_of_roles([STAFF_ROLE])
    def get():
        return jsonify(message=message)

    # A token carrying the staff role is let through ...
    token = helper_create_jwt(jwt, [STAFF_ROLE])
    headers = {'Authorization': 'Bearer ' + token}
    rv = app_request.test_client().get(protected_route, headers=headers)
    assert rv.status_code == HTTPStatus.OK

    # ... while a token with an unknown role is rejected.
    token = helper_create_jwt(jwt, ['SHOULD-FAIL'])
    headers = {'Authorization': 'Bearer ' + token}
    rv = app_request.test_client().get(protected_route, headers=headers)
    assert rv.status_code == HTTPStatus.UNAUTHORIZED
TEST_AUTHZ_DATA = [
('staff_role',
'CP1234567',
'happy-staff',
[STAFF_ROLE],
['view', 'edit'],
['edit'],
HTTPStatus.OK),
('colin svc role', 'CP1234567', 'CP1234567', [COLIN_SVC_ROLE], ['view', 'edit'], ['edit'],
HTTPStatus.OK),
('authorized_user', 'CP0001237', 'CP1234567', [BASIC_USER], ['view', 'edit'], ['edit'],
HTTPStatus.OK),
('unauthorized_user', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['edit'],
HTTPStatus.METHOD_NOT_ALLOWED),
('missing_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, None,
HTTPStatus.METHOD_NOT_ALLOWED),
('invalid_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['scrum'],
HTTPStatus.METHOD_NOT_ALLOWED),
('add_comment_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['add_comment'],
HTTPStatus.METHOD_NOT_ALLOWED),
('court_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['court_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_notation_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_notation'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
]
@not_github_ci
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected',
                         TEST_AUTHZ_DATA)
def test_authorized_user(monkeypatch, app_request, jwt,
                         test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
    """Assert authorized() yields the expected status for each TEST_AUTHZ_DATA case."""
    from requests import Response
    print(test_name)

    # Stub the auth service: every HTTP GET succeeds ...
    def mock_get(*args, **kwargs):
        resp = Response()
        resp.status_code = 200
        return resp

    # ... and its JSON payload reports the parametrized allowed actions.
    def mock_json(self, **kwargs):  # pylint: disable=unused-argument
        return {'roles': allowed_actions}
    monkeypatch.setattr('requests.sessions.Session.get', mock_get)
    monkeypatch.setattr('requests.Response.json', mock_json)

    # Protected endpoint that mirrors how views consult authorized().
    @app_request.route('/fake_jwt_route/<string:identifier>')
    @jwt.requires_auth
    def get_fake(identifier: str):
        if not authorized(identifier, jwt, ['view']):
            return jsonify(message='failed'), HTTPStatus.METHOD_NOT_ALLOWED
        return jsonify(message='success'), HTTPStatus.OK

    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}
    rv = app_request.test_client().get(f'/fake_jwt_route/{identifier}', headers=headers)
    assert rv.status_code == expected
TEST_INTEG_AUTHZ_DATA = [
('staff_role',
'CP1234567',
'happy-staff',
[STAFF_ROLE],
['view', 'edit'],
['edit'],
HTTPStatus.OK),
('colin svc role', 'CP1234567', 'CP1234567', [COLIN_SVC_ROLE], ['view', 'edit'], ['edit'],
HTTPStatus.OK),
('unauthorized_user', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['edit'],
HTTPStatus.METHOD_NOT_ALLOWED),
('missing_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, None,
HTTPStatus.METHOD_NOT_ALLOWED),
('invalid_action', 'CP1234567', 'Not-Match-Identifier', [BASIC_USER], None, ['scrum'],
HTTPStatus.METHOD_NOT_ALLOWED),
('add_comment_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['add_comment'],
HTTPStatus.METHOD_NOT_ALLOWED),
('court_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['court_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_notation_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_notation'],
HTTPStatus.METHOD_NOT_ALLOWED),
('registrars_order_not_allowed', 'CP0001237', 'CP1234567', [BASIC_USER], None, ['registrars_order'],
HTTPStatus.METHOD_NOT_ALLOWED),
]
@integration_authorization
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected',
                         TEST_INTEG_AUTHZ_DATA)
def test_authorized_user_integ(monkeypatch, app, jwt,
                               test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
    """Integration test: assert authorized() for each TEST_INTEG_AUTHZ_DATA case."""
    import flask
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}

    # Always hand back this test's bearer token, whatever header is asked for.
    def mock_auth(one, two):  # pylint: disable=unused-argument
        return headers['Authorization']

    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        rv = authorized(identifier, jwt, ['view'])

    # HTTPStatus.OK marks the cases expected to be authorized.
    if expected == HTTPStatus.OK:
        assert rv
    else:
        assert not rv
def test_authorized_missing_args():
    """Assert that authorized() is falsy whenever any argument is missing."""
    identifier = 'a corp'
    jwt = 'fake'
    action = 'fake'
    for args in ((identifier, jwt, None),
                 (identifier, None, action),
                 (None, jwt, action)):
        assert not authorized(*args)
def test_authorized_bad_url(monkeypatch, app, jwt):
    """Assert that authorized() is falsy when the auth service URL is unreachable."""
    import flask
    identifier = 'CP1234567'
    username = 'username'
    roles = [BASIC_USER]
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}

    def mock_auth(one, two):  # pylint: disable=unused-argument
        return headers['Authorization']

    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        # Point the app at a bogus auth endpoint, then restore the original.
        auth_svc_url = app.config['AUTH_SVC_URL']
        app.config['AUTH_SVC_URL'] = 'http://no.way.this.works/dribble'
        rv = authorized(identifier, jwt, ['view'])
        app.config['AUTH_SVC_URL'] = auth_svc_url
    assert not rv
def test_authorized_invalid_roles(monkeypatch, app, jwt):
    """Assert that authorized() is falsy for a token with no recognized roles."""
    import flask
    identifier = 'CP1234567'
    username = 'username'
    roles = ['NONE']
    token = helper_create_jwt(jwt, roles=roles, username=username)
    headers = {'Authorization': 'Bearer ' + token}

    def mock_auth(one, two):  # pylint: disable=unused-argument
        return headers['Authorization']

    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', mock_auth)
        rv = authorized(identifier, jwt, ['view'])
    assert not rv
@pytest.mark.parametrize(
'test_name,state,legal_type,username,roles,expected',
[
('staff_active_cp', Business.State.ACTIVE, 'CP', 'staff', [STAFF_ROLE],
['annualReport', 'changeOfAddress', 'changeOfDirectors', 'correction',
'courtOrder', 'dissolution', 'incorporationApplication',
'specialResolution', 'registrarsNotation', 'registrarsOrder']),
('staff_active_bc', Business.State.ACTIVE, 'BC', 'staff', [STAFF_ROLE],
['alteration', 'courtOrder', 'dissolution', 'incorporationApplication',
'transition', 'registrarsNotation', 'registrarsOrder']),
('staff_active_ben', Business.State.ACTIVE, 'BEN', 'staff', [STAFF_ROLE],
['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'conversion', 'correction',
'courtOrder', 'dissolution', 'incorporationApplication',
'transition', 'registrarsNotation', 'registrarsOrder']),
('staff_active_cc', Business.State.ACTIVE, 'CC', 'staff', [STAFF_ROLE],
['courtOrder', 'dissolution',
'registrarsNotation', 'registrarsOrder']),
('staff_active_ulc', Business.State.ACTIVE, 'ULC', 'staff', [STAFF_ROLE],
['alteration', 'courtOrder', 'dissolution',
'registrarsNotation', 'registrarsOrder']),
('staff_active_llc', Business.State.ACTIVE, 'LLC', 'staff', [STAFF_ROLE],
['courtOrder', 'dissolution',
'registrarsNotation', 'registrarsOrder']),
('staff_active_sp', Business.State.ACTIVE, 'SP', 'staff', [STAFF_ROLE],
['changeOfRegistration', 'conversion', 'dissolution', 'registration']),
('staff_active_gp', Business.State.ACTIVE, 'GP', 'staff', [STAFF_ROLE],
['changeOfRegistration', 'conversion', 'dissolution', 'registration']),
('user_active_cp', Business.State.ACTIVE, 'CP', 'user', [BASIC_USER],
['annualReport', 'changeOfAddress', 'changeOfDirectors',
'dissolution', 'incorporationApplication', 'specialResolution']),
('user_active_bc', Business.State.ACTIVE, 'BC', 'user', [BASIC_USER],
['alteration', 'dissolution', 'incorporationApplication', 'transition']),
('user_active_ben', Business.State.ACTIVE, 'BEN', 'user', [BASIC_USER],
['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors',
'dissolution', 'incorporationApplication', 'transition']),
('user_active_cc', Business.State.ACTIVE, 'CC', 'user', [BASIC_USER], ['dissolution']),
('user_active_ulc', Business.State.ACTIVE, 'ULC', 'user', [BASIC_USER], ['alteration', 'dissolution']),
('user_active_llc', Business.State.ACTIVE, 'LLC', 'user', [BASIC_USER], ['dissolution']),
('user_active_sp', Business.State.ACTIVE, 'SP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']),
('user_active_gp', Business.State.ACTIVE, 'GP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']),
('staff_historical_cp', Business.State.HISTORICAL, 'CP', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration']}]),
('staff_historical_bc', Business.State.HISTORICAL, 'BC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_ben', Business.State.HISTORICAL, 'BEN', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_cc', Business.State.HISTORICAL, 'CC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_ulc', Business.State.HISTORICAL, 'ULC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('staff_historical_llc', Business.State.HISTORICAL, 'LLC', 'staff', [STAFF_ROLE],
['courtOrder', 'registrarsNotation', 'registrarsOrder',
{'restoration': ['fullRestoration', 'limitedRestoration']}]),
('user_historical_llc', Business.State.HISTORICAL, 'LLC', 'user', [BASIC_USER], []),
]
)
def test_get_allowed(monkeypatch, app, jwt, test_name, state, legal_type, username, roles, expected):
    """Assert that get_allowed returns the valid filings for the parametrized case."""
    bearer = helper_create_jwt(jwt, roles=roles, username=username)
    auth_headers = {'Authorization': 'Bearer ' + bearer}

    def fake_header_get(key, default):  # pylint: disable=unused-argument; stand-in for request.headers.get
        return auth_headers[key]

    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', fake_header_get)
        assert get_allowed(state, legal_type, jwt) == expected
@pytest.mark.parametrize(
'test_name,state,filing_type,sub_filing_type,legal_types,username,roles,expected',
[
('staff_active_allowed', Business.State.ACTIVE, 'alteration', None,
['BC', 'BEN', 'ULC'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'alteration', None,
['CP', 'CC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'annualReport', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'annualReport', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'changeOfAddress', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'changeOfDirectors', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'correction', None,
['CP', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'correction', None,
['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None,
['CP', 'BC', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'specialResolution', None,
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'transition', None,
['BC', 'BEN'], 'staff', [STAFF_ROLE], True),
('staff_active', Business.State.ACTIVE, 'transition', None,
['CP', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_active_allowed', Business.State.ACTIVE, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'registration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], True),
('staff_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], True),
('user_active_allowed', Business.State.ACTIVE, 'alteration', None,
['BC', 'BEN', 'ULC'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'alteration', None,
['CP', 'CC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'annualReport', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None,
['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'changeOfAddress', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None,
['CP', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'changeOfDirectors', None,
['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None,
['CP', 'BC', 'BEN'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'registration', None,
['SP', 'GP'], 'user', [BASIC_USER], True),
('user_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None,
['SP', 'GP'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'specialResolution', None,
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'user', [BASIC_USER], True),
('user_active', Business.State.ACTIVE, 'transition', None,
['CP', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_active', Business.State.ACTIVE, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('staff_historical', Business.State.HISTORICAL, 'alteration', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'annualReport', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfAddress', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfDirectors', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'incorporationApplication', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['CP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'specialResolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'transition', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False),
('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True),
('staff_historical', Business.State.HISTORICAL, 'registration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], False),
('staff_historical', Business.State.HISTORICAL, 'changeOfRegistration', None,
['SP', 'GP'], 'staff', [STAFF_ROLE], False),
('user_historical', Business.State.HISTORICAL, 'alteration', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'annualReport', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfAddress', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfDirectors', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'correction', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'courtOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'dissolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP', 'SP', 'GP'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'incorporationApplication', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'restoration', 'fullRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration',
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'specialResolution', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'transition', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registrarsNotation', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registrarsOrder', None,
['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'registration', None,
['SP', 'GP'], 'user', [BASIC_USER], False),
('user_historical', Business.State.HISTORICAL, 'changeOfRegistration', None,
['SP', 'GP'], 'user', [BASIC_USER], False),
]
)
def test_is_allowed(monkeypatch, app, jwt, test_name, state, filing_type, sub_filing_type,
                    legal_types, username, roles, expected):
    """Assert that is_allowed gives the expected verdict for every legal type."""
    bearer = helper_create_jwt(jwt, roles=roles, username=username)
    auth_headers = {'Authorization': 'Bearer ' + bearer}

    def fake_header_get(key, default):  # pylint: disable=unused-argument; stand-in for request.headers.get
        return auth_headers[key]

    with app.test_request_context():
        monkeypatch.setattr('flask.request.headers.get', fake_header_get)
        for legal_type in legal_types:
            verdict = is_allowed(state, filing_type, legal_type, jwt, sub_filing_type)
            assert verdict == expected
| true | true |
7901b8fe19a02fe5084433c0ffdfd6efc080c6c9 | 3,118 | py | Python | nilearn/plotting/__init__.py | OliverWarrington/nilearn | d42d3b10eb543619ed4189f05b74ef2e75a92068 | [
"BSD-2-Clause"
] | 827 | 2015-01-30T23:11:42.000Z | 2022-03-29T21:21:05.000Z | nilearn/plotting/__init__.py | OliverWarrington/nilearn | d42d3b10eb543619ed4189f05b74ef2e75a92068 | [
"BSD-2-Clause"
] | 2,845 | 2015-01-04T22:14:41.000Z | 2022-03-31T20:28:09.000Z | nilearn/plotting/__init__.py | OliverWarrington/nilearn | d42d3b10eb543619ed4189f05b74ef2e75a92068 | [
"BSD-2-Clause"
] | 484 | 2015-02-03T10:58:19.000Z | 2022-03-29T21:57:16.000Z | """
Plotting code for nilearn
"""
# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux
import os
import sys
import importlib
###############################################################################
# Make sure that we don't get DISPLAY problems when running without X on
# unices
def _set_mpl_backend():
    """Pick a usable matplotlib backend at import time.

    Raises ImportError when matplotlib is missing (skipping instead when
    running under pytest), verifies the installed version, and falls back
    to the non-interactive 'Agg' backend on unices without a display.
    """
    # We are doing local imports here to avoid polluting our namespace
    try:
        import matplotlib
    except ImportError:
        if importlib.util.find_spec("pytest") is not None:
            from .._utils.testing import skip_if_running_tests
            # No need to fail when running tests
            skip_if_running_tests('matplotlib not installed')
        # Outside a test run, propagate the original ImportError.
        raise
    else:
        from ..version import (_import_module_with_version_check,
                               OPTIONAL_MATPLOTLIB_MIN_VERSION)
        # When matplotlib was successfully imported we need to check
        # that the version is greater that the minimum required one
        _import_module_with_version_check('matplotlib',
                                          OPTIONAL_MATPLOTLIB_MIN_VERSION)
        current_backend = matplotlib.get_backend().lower()
        # Notebook backends are already non-problematic; leave them alone.
        if 'inline' in current_backend or 'nbagg' in current_backend:
            return
        # Set the backend to a non-interactive one for unices without X
        # (see gh-2560)
        if (sys.platform not in ('darwin', 'win32') and
                'DISPLAY' not in os.environ):
            matplotlib.use('Agg')


# Run the backend selection as a module-import side effect.
_set_mpl_backend()
###############################################################################
from . import cm
from .img_plotting import (
plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,
plot_glass_brain, plot_connectome, plot_connectome_strength,
plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)
from .find_cuts import find_xyz_cut_coords, find_cut_slices, \
find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords
from .matrix_plotting import (plot_matrix, plot_contrast_matrix,
plot_design_matrix, plot_event)
from .html_surface import view_surf, view_img_on_surf
from .html_stat_map import view_img
from .html_connectome import view_connectome, view_markers
from .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,
plot_img_on_surf, plot_surf_contours)
__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
'plot_roi', 'plot_stat_map', 'plot_glass_brain',
'plot_markers', 'plot_connectome', 'plot_prob_atlas',
'find_xyz_cut_coords', 'find_cut_slices',
'plot_img_comparison',
'show', 'plot_matrix',
'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',
'view_surf', 'view_img_on_surf',
'view_img', 'view_connectome', 'view_markers',
'find_parcellation_cut_coords',
'find_probabilistic_atlas_cut_coords',
'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',
'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',
'plot_surf_contours']
| 42.712329 | 79 | 0.645927 |
import os
import sys
import importlib
| true | true |
7901ba540a18823e6758a809e3c64cd14f3b70b8 | 662 | py | Python | crawler_news/items.py | SecondDim/crawler-base | 21ba30a3f6a62f2eaee336331abeca04d2a4ed24 | [
"MIT"
] | 11 | 2019-12-21T14:57:17.000Z | 2021-07-15T17:32:10.000Z | crawler_news/items.py | SecondDim/crawler-base | 21ba30a3f6a62f2eaee336331abeca04d2a4ed24 | [
"MIT"
] | 6 | 2020-01-24T13:26:01.000Z | 2022-02-01T23:05:28.000Z | crawler_news/items.py | SecondDim/crawler-base | 21ba30a3f6a62f2eaee336331abeca04d2a4ed24 | [
"MIT"
] | 3 | 2020-02-28T06:07:20.000Z | 2021-01-07T09:58:47.000Z | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class CrawlerNewsItem(scrapy.Item):
    """Container for one scraped news article.

    Field comments note the intended value type; 'list json' marks values
    stored as JSON-serializable lists.
    """

    url = scrapy.Field()  # str
    article_from = scrapy.Field()  # str
    article_type = scrapy.Field()  # str
    title = scrapy.Field()  # str
    publish_date = scrapy.Field()  # str
    authors = scrapy.Field()  # list json
    tags = scrapy.Field()  # list json
    text = scrapy.Field()  # list json
    text_html = scrapy.Field()  # str
    images = scrapy.Field()  # list json
    video = scrapy.Field()  # list json
    links = scrapy.Field()  # list json
| 27.583333 | 53 | 0.651057 |
import scrapy
class CrawlerNewsItem(scrapy.Item):
    """Container for one scraped news article.

    Field comments note the intended value type; 'list json' marks values
    stored as JSON-serializable lists.
    """

    url = scrapy.Field()  # str
    article_from = scrapy.Field()  # str
    article_type = scrapy.Field()  # str
    title = scrapy.Field()  # str
    publish_date = scrapy.Field()  # str
    authors = scrapy.Field()  # list json
    tags = scrapy.Field()  # list json
    text = scrapy.Field()  # list json
    text_html = scrapy.Field()  # str
    images = scrapy.Field()  # list json
    video = scrapy.Field()  # list json
    links = scrapy.Field()  # list json
| true | true |
7901bc9f1d31af5d50513e0c15bc57162093e24e | 26,892 | py | Python | falcon/inspect.py | hzdwang/falcon-1 | 1df2c0b7f21de773b3de70ea44af26f225c1887c | [
"Apache-2.0"
] | 2 | 2020-12-09T04:13:18.000Z | 2020-12-09T04:13:22.000Z | falcon/inspect.py | hzdwang/falcon-1 | 1df2c0b7f21de773b3de70ea44af26f225c1887c | [
"Apache-2.0"
] | null | null | null | falcon/inspect.py | hzdwang/falcon-1 | 1df2c0b7f21de773b3de70ea44af26f225c1887c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 by Federico Caselli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inspect utilities for falcon applications."""
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
    """Inspects an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        AppInfo: The information regarding the application. Call
        :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
        representation.
    """
    # Gather every facet of the app and bundle them into a single report.
    return AppInfo(
        inspect_routes(app),
        inspect_middlewares(app),
        inspect_static_routes(app),
        inspect_sinks(app),
        inspect_error_handlers(app),
        app._ASGI,
    )
def inspect_routes(app: App) -> 'List[RouteInfo]':
    """Inspects the routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[RouteInfo]: A list of route descriptions for the application.

    Raises:
        TypeError: If no inspect function is registered for the app's
            router class.
    """
    router = app._router

    # Look up the inspect function registered for this router class.
    handler = _supported_routers.get(type(router))
    if handler is not None:
        return handler(router)

    raise TypeError(
        'Unsupported router class {}. Use "register_router" '
        'to register a function that can inspect the router '
        'used by the provided application'.format(type(router))
    )
def register_router(router_class):
    """Register a function to inspect a particular router.

    This decorator registers a new function for a custom router
    class, so that it can be inspected with the function
    :func:`.inspect_routes`.

    An inspection function takes the router instance used by the
    application and returns a list of :class:`.RouteInfo`. Eg::

        @register_router(MyRouterClass)
        def inspect_my_router(router):
            return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]

    Args:
        router_class (Type): The router class to register. If
            already registered an error will be raised.
    """

    def decorator(fn):
        # Guard against two inspectors claiming the same router class.
        if router_class in _supported_routers:
            raise ValueError(
                'Another function is already registered'
                ' for the router {}'.format(router_class)
            )
        _supported_routers[router_class] = fn
        return fn

    return decorator
# router inspection registry
_supported_routers = {} # type: Dict[Type, Callable]
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
    """Inspects the static routes of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[StaticRouteInfo]: A list of static routes that have
        been added to the application.
    """
    # Each registered entry is a (StaticRoute, ..., ...) tuple; only the
    # route object itself is needed for the report.
    return [
        StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
        for sr, _, _ in app._static_routes
    ]
def inspect_sinks(app: App) -> 'List[SinkInfo]':
    """Inspects the sinks of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[SinkInfo]: A list of sinks used by the application.
    """

    def describe(prefix, sink):
        # Resolve where the sink callable is defined and its display name.
        source_info, name = _get_source_info_and_name(sink)
        return SinkInfo(prefix.pattern, name, source_info)

    return [describe(prefix, sink) for prefix, sink, _ in app._sinks]
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
    """Inspects the error handlers of an application.

    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.

    Returns:
        List[ErrorHandlerInfo]: A list of error handlers used by the
        application.
    """
    handlers = []
    for exception_type, handler in app._error_handlers.items():
        # Resolve where the handler is defined and whether it is one of
        # Falcon's own default handlers.
        source_info, name = _get_source_info_and_name(handler)
        handlers.append(
            ErrorHandlerInfo(
                exception_type.__name__, name, source_info, _is_internal(handler)
            )
        )
    return handlers
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
    """Inspects the middleware components of an application.
    Args:
        app (falcon.App): The application to inspect. Works with both
            :class:`falcon.App` and :class:`falcon.asgi.App`.
    Returns:
        MiddlewareInfo: Information about the app's middleware components.
    """
    # First pass: prepare all middleware at once. prepare_middleware returns
    # one stack per processing phase; each stack holds bound methods.
    types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
    type_infos = []
    for stack in types_:
        current = []
        for method in stack:
            _, name = _get_source_info_and_name(method)
            # The methods are bound, so __self__ is the middleware instance;
            # use its class for display purposes.
            cls = type(method.__self__)
            _, cls_name = _get_source_info_and_name(cls)
            current.append(MiddlewareTreeItemInfo(name, cls_name))
        type_infos.append(current)
    middlewareTree = MiddlewareTreeInfo(*type_infos)
    middlewareClasses = []
    names = 'Process request', 'Process resource', 'Process response'
    for m in app._unprepared_middleware:
        # Second pass: prepare each component in isolation to find out which
        # of the three phase methods it actually implements.
        fns = app_helpers.prepare_middleware([m], True, app._ASGI)
        class_source_info, cls_name = _get_source_info_and_name(type(m))
        methods = []
        for method, name in zip(fns, names):
            # method is an empty tuple when the component does not
            # implement the corresponding phase.
            if method:
                real_func = method[0]
                source_info = _get_source_info(real_func)
                methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
        m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
        middlewareClasses.append(m_info)
    return MiddlewareInfo(
        middlewareTree, middlewareClasses, app._independent_middleware
    )
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
    """Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
    Default route inspector for CompiledRouter.
    Args:
        router (CompiledRouter): The router to inspect.
    Returns:
        List[RouteInfo]: A list of :class:`~.RouteInfo`.
    """
    def _traverse(roots, parent):
        # Depth-first walk over the router's node tree; accumulates results
        # into the ``routes`` list from the enclosing scope.
        for root in roots:
            path = parent + '/' + root.raw_segment
            if root.resource is not None:
                methods = []
                if root.method_map:
                    for method, func in root.method_map.items():
                        # Responders may be wrapped in functools.partial;
                        # unwrap to inspect the underlying function.
                        if isinstance(func, partial):
                            real_func = func.func
                        else:
                            real_func = func
                        source_info = _get_source_info(real_func)
                        internal = _is_internal(real_func)
                        method_info = RouteMethodInfo(
                            method, source_info, real_func.__name__, internal
                        )
                        methods.append(method_info)
                source_info, class_name = _get_source_info_and_name(root.resource)
                route_info = RouteInfo(path, class_name, source_info, methods)
                routes.append(route_info)
            if root.children:
                _traverse(root.children, path)
    routes = []  # type: List[RouteInfo]
    _traverse(router._roots, '')
    return routes
# ------------------------------------------------------------------------
# Inspection classes
# ------------------------------------------------------------------------
class _Traversable:
    """Base class for the inspection info types.

    Provides the ``__visit_name__`` hook consumed by :class:`InspectVisitor`
    subclasses and a common string-rendering entry point.
    """

    __visit_name__ = 'N/A'

    def to_string(self, verbose=False, internal=False) -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal route methods
                and error handlers added by the framework. Defaults to
                ``False``.

        Returns:
            str: string representation of this class.
        """
        visitor = StringVisitor(verbose, internal)
        return visitor.process(self)

    def __repr__(self):
        return self.to_string()
class RouteMethodInfo(_Traversable):
    """Describes a responder method.

    Args:
        method (str): The HTTP method of this responder.
        source_info (str): The source path of this function.
        function_name (str): Name of the function.
        internal (bool): Whether or not this was a default responder added
            by the framework.

    Attributes:
        suffix (str): The suffix of this route function. This is set to an empty
            string when the function has no suffix.
    """

    __visit_name__ = 'route_method'

    def __init__(
        self, method: str, source_info: str, function_name: str, internal: bool
    ):
        self.method = method
        self.source_info = source_info
        self.function_name = function_name
        self.internal = internal
        # Internal falcon responder names do not start with "on" and never
        # carry a suffix; for ``on_<verb>_<suffix>`` names, everything past
        # the second underscore is the suffix.
        if not function_name.startswith('on'):
            self.suffix = ''
        else:
            self.suffix = '_'.join(function_name.split('_')[2:])
class RouteInfo(_Traversable):
    """Describes a route.

    Args:
        path (str): The path of this route.
        class_name (str): The class name of the responder of this route.
        source_info (str): The source path where this responder was defined.
        methods (List[RouteMethodInfo]): List of methods defined in the route.
    """

    __visit_name__ = 'route'

    def __init__(
        self,
        path: str,
        class_name: str,
        source_info: str,
        methods: List[RouteMethodInfo],
    ):
        self.methods = methods
        self.source_info = source_info
        self.class_name = class_name
        self.path = path
class StaticRouteInfo(_Traversable):
    """Describes a static route.
    Args:
        prefix (str): The prefix of the static route.
        directory (str): The directory for the static route.
        fallback_filename (str or None): Fallback filename to serve.
    """
    __visit_name__ = 'static_route'
    def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
        self.prefix = prefix
        self.directory = directory
        self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
    """Describes a sink.

    Args:
        prefix (str): The prefix of the sink.
        name (str): The name of the sink function or class.
        source_info (str): The source path where this sink was defined.
    """

    __visit_name__ = 'sink'

    def __init__(self, prefix: str, name: str, source_info: str):
        self.source_info = source_info
        self.name = name
        self.prefix = prefix
class ErrorHandlerInfo(_Traversable):
    """Describes an error handler.
    Args:
        error (str): The name of the error type.
        name (str): The name of the handler.
        source_info (str): The source path where this error handler was defined.
        internal (bool): Whether or not this is a default error handler added by
            the framework.
    """
    __visit_name__ = 'error_handler'
    def __init__(self, error: str, name: str, source_info: str, internal: bool):
        self.error = error
        self.name = name
        self.source_info = source_info
        self.internal = internal
class MiddlewareMethodInfo(_Traversable):
    """Describes a middleware method.

    Args:
        function_name (str): Name of the method.
        source_info (str): The source path of the method.
    """

    __visit_name__ = 'middleware_method'

    def __init__(self, function_name: str, source_info: str):
        self.source_info = source_info
        self.function_name = function_name
        # Duck-type compatibility with RouteMethodInfo so that
        # ``_filter_internal`` can process middleware methods too.
        self.internal = False
class MiddlewareClassInfo(_Traversable):
    """Describes a middleware class.

    Args:
        name (str): The name of the middleware class.
        source_info (str): The source path where the middleware was defined.
        methods (List[MiddlewareMethodInfo]): List of method defined by the middleware class.
    """

    __visit_name__ = 'middleware_class'

    def __init__(self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]):
        self.methods = methods
        self.source_info = source_info
        self.name = name
class MiddlewareTreeItemInfo(_Traversable):
    """Describes a middleware tree entry.

    Args:
        name (str): The name of the method.
        class_name (str): The class name of the method.
    """

    __visit_name__ = 'middleware_tree_item'

    # Arrow glyph per middleware phase, used when rendering the tree
    # (see StringVisitor.visit_middleware_tree_item).
    _symbols = {
        'process_request': '→',
        'process_resource': '↣',
        'process_response': '↢',
    }

    def __init__(self, name: str, class_name: str):
        self.class_name = class_name
        self.name = name
class MiddlewareTreeInfo(_Traversable):
    """Describes the middleware methods used by the app.

    Args:
        request (List[MiddlewareTreeItemInfo]): The `process_request` methods.
        resource (List[MiddlewareTreeItemInfo]): The `process_resource` methods.
        response (List[MiddlewareTreeItemInfo]): The `process_response` methods.
    """

    __visit_name__ = 'middleware_tree'

    def __init__(
        self,
        request: List[MiddlewareTreeItemInfo],
        resource: List[MiddlewareTreeItemInfo],
        response: List[MiddlewareTreeItemInfo],
    ):
        self.response = response
        self.resource = resource
        self.request = request
class MiddlewareInfo(_Traversable):
    """Describes the middleware of the app.
    Args:
        middleware_tree (MiddlewareTreeInfo): The middleware tree of the app.
        middleware_classes (List[MiddlewareClassInfo]): The middleware classes of the app.
        independent (bool): Whether or not the middleware components are executed
            independently.
    Attributes:
        independent_text (str): Text created from the `independent` arg.
    """
    __visit_name__ = 'middleware'
    def __init__(
        self,
        middleware_tree: MiddlewareTreeInfo,
        middleware_classes: List[MiddlewareClassInfo],
        independent: bool,
    ):
        self.middleware_tree = middleware_tree
        self.middleware_classes = middleware_classes
        self.independent = independent
        # Pre-rendered description consumed by StringVisitor.visit_app.
        if independent:
            self.independent_text = 'Middleware are independent'
        else:
            self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
    """Describes an application.

    Args:
        routes (List[RouteInfo]): The routes of the application.
        middleware (MiddlewareInfo): The middleware information in the application.
        static_routes (List[StaticRouteInfo]): The static routes of this application.
        sinks (List[SinkInfo]): The sinks of this application.
        error_handlers (List[ErrorHandlerInfo]): The error handlers of this application.
        asgi (bool): Whether or not this is an ASGI application.
    """

    __visit_name__ = 'app'

    def __init__(
        self,
        routes: List[RouteInfo],
        middleware: MiddlewareInfo,
        static_routes: List[StaticRouteInfo],
        sinks: List[SinkInfo],
        error_handlers: List[ErrorHandlerInfo],
        asgi: bool,
    ):
        self.asgi = asgi
        self.error_handlers = error_handlers
        self.sinks = sinks
        self.static_routes = static_routes
        self.middleware = middleware
        self.routes = routes

    def to_string(self, verbose=False, internal=False, name='') -> str:
        """Return a string representation of this class.

        Args:
            verbose (bool, optional): Adds more information. Defaults to False.
            internal (bool, optional): Also include internal falcon route methods
                and error handlers. Defaults to ``False``.
            name (str, optional): The name of the application, to be output at the
                beginning of the text. Defaults to ``'Falcon App'``.

        Returns:
            str: A string representation of the application.
        """
        visitor = StringVisitor(verbose, internal, name)
        return visitor.process(self)
# ------------------------------------------------------------------------
# Visitor classes
# ------------------------------------------------------------------------
class InspectVisitor:
    """Base visitor class that implements the `process` method.

    Subclasses must implement ``visit_<name>`` methods for each supported class.
    """

    def process(self, instance: _Traversable):
        """Process the instance, by calling the appropriate visit method.

        Uses the `__visit_name__` attribute of the `instance` to obtain the
        method to use.

        Args:
            instance (_Traversable): The instance to process.
        """
        visit_name = 'visit_{}'.format(instance.__visit_name__)
        try:
            # NOTE: the call stays inside the try block on purpose: an
            # AttributeError raised while visiting is also reported as an
            # unsupported type.
            return getattr(self, visit_name)(instance)
        except AttributeError as e:
            msg = 'This visitor does not support {}'.format(type(instance))
            raise RuntimeError(msg) from e
class StringVisitor(InspectVisitor):
    """Visitor that returns a string representation of the info class.
    This is used automatically by calling ``to_string()`` on the info class.
    It can also be used directly by calling ``StringVisitor.process(info_instance)``.
    Args:
        verbose (bool, optional): Adds more information. Defaults to ``False``.
        internal (bool, optional): Also include internal route methods
            and error handlers added by the framework. Defaults to ``False``.
        name (str, optional): The name of the application, to be output at the
            beginning of the text. Defaults to ``'Falcon App'``.
    """
    def __init__(self, verbose=False, internal=False, name=''):
        self.verbose = verbose
        self.internal = internal
        self.name = name
        # Current indentation level (number of spaces); mutated while walking.
        self.indent = 0
    @property
    def tab(self):
        """Get the current tabulation."""
        return ' ' * self.indent
    def visit_route_method(self, route_method: RouteMethodInfo) -> str:
        """Visit a RouteMethodInfo instance. Usually called by `process`."""
        text = '{0.method} - {0.function_name}'.format(route_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(route_method)
        return text
    def _methods_to_string(self, methods: List):
        """Return a string from the list of methods."""
        tab = self.tab + ' ' * 3
        methods = _filter_internal(methods, self.internal)
        if not methods:
            return ''
        text_list = [self.process(m) for m in methods]
        # Tree-style rendering: branch glyph for all but the last entry,
        # leaf glyph for the final one.
        method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
        method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
        return '\n'.join(method_text)
    def visit_route(self, route: RouteInfo) -> str:
        """Visit a RouteInfo instance. Usually called by `process`."""
        text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
        if self.verbose:
            text += ' ({0.source_info})'.format(route)
        method_text = self._methods_to_string(route.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_static_route(self, static_route: StaticRouteInfo) -> str:
        """Visit a StaticRouteInfo instance. Usually called by `process`."""
        text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
        if static_route.fallback_filename:
            text += ' [{0.fallback_filename}]'.format(static_route)
        return text
    def visit_sink(self, sink: SinkInfo) -> str:
        """Visit a SinkInfo instance. Usually called by `process`."""
        text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
        if self.verbose:
            text += ' ({0.source_info})'.format(sink)
        return text
    def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
        """Visit an ErrorHandlerInfo instance. Usually called by `process`."""
        text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
        if self.verbose:
            text += ' ({0.source_info})'.format(error_handler)
        return text
    def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
        """Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
        text = '{0.function_name}'.format(middleware_method)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_method)
        return text
    def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
        """Visit a MiddlewareClassInfo instance. Usually called by `process`."""
        text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
        if self.verbose:
            text += ' ({0.source_info})'.format(middleware_class)
        method_text = self._methods_to_string(middleware_class.methods)
        if not method_text:
            return text
        return '{}:\n{}'.format(text, method_text)
    def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
        """Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
        symbol = mti._symbols.get(mti.name, '→')
        return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
    def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
        """Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
        before = len(m_tree.request) + len(m_tree.resource)
        after = len(m_tree.response)
        if before + after == 0:
            return ''
        # Each middleware nesting level indents by two spaces.
        each = 2
        initial = self.indent
        # If there are more response entries than request+resource entries,
        # start out more indented so the deepest entry still lines up.
        if after > before:
            self.indent += each * (after - before)
        text = []
        for r in m_tree.request:
            text.append(self.process(r))
            self.indent += each
        if text:
            text.append('')
        for r in m_tree.resource:
            text.append(self.process(r))
            self.indent += each
        if m_tree.resource or not text:
            text.append('')
        self.indent += each
        text.append('{}├── Process route responder'.format(self.tab))
        self.indent -= each
        if m_tree.response:
            text.append('')
        # Response handlers unwind the indentation built up above.
        for r in m_tree.response:
            self.indent -= each
            text.append(self.process(r))
        # Restore the indentation level we started from.
        self.indent = initial
        return '\n'.join(text)
    def visit_middleware(self, middleware: MiddlewareInfo) -> str:
        """Visit a MiddlewareInfo instance. Usually called by `process`."""
        text = self.process(middleware.middleware_tree)
        if self.verbose:
            self.indent += 4
            m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
            self.indent -= 4
            if m_text:
                text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
        return text
    def visit_app(self, app: AppInfo) -> str:
        """Visit a AppInfo instance. Usually called by `process`."""
        type_ = 'ASGI' if app.asgi else 'WSGI'
        self.indent = 4
        text = '{} ({})'.format(self.name or 'Falcon App', type_)
        if app.routes:
            routes = '\n'.join(self.process(r) for r in app.routes)
            text += '\n• Routes:\n{}'.format(routes)
        middleware_text = self.process(app.middleware)
        if middleware_text:
            text += '\n• Middleware ({}):\n{}'.format(
                app.middleware.independent_text, middleware_text
            )
        if app.static_routes:
            static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
            text += '\n• Static routes:\n{}'.format(static_routes)
        if app.sinks:
            sinks = '\n'.join(self.process(s) for s in app.sinks)
            text += '\n• Sinks:\n{}'.format(sinks)
        errors = _filter_internal(app.error_handlers, self.internal)
        if errors:
            errs = '\n'.join(self.process(e) for e in errors)
            text += '\n• Error handlers:\n{}'.format(errs)
        return text
# ------------------------------------------------------------------------
# Helpers functions
# ------------------------------------------------------------------------
def _get_source_info(obj, default='[unknown file]'):
    """Try to get the definition file and line of obj.

    Return default on error.
    """
    try:
        src_file = inspect.getsourcefile(obj)
        _, line_number = inspect.findsource(obj)
    except Exception:
        # NOTE(vytas): If Falcon is cythonized, all default responders coming
        # from cythonized modules appear as built-in functions and raise a
        # TypeError when trying to locate the source file.
        return default
    return '{}:{}'.format(src_file, line_number)
def _get_source_info_and_name(obj):
    """Attempt to get the definition file and line of obj and its name."""
    # NOTE(caselit): class instances have no retrievable source; fall back
    # to the source of their class.
    source_info = _get_source_info(obj, None) or _get_source_info(type(obj))
    # Prefer the object's own __name__, then the name of its class.
    name = getattr(obj, '__name__', None)
    if name is None:
        name = getattr(type(obj), '__name__', '[unknown]')
    return source_info, name
def _is_internal(obj):
    """Check if the module of the object is a falcon module."""
    module = inspect.getmodule(obj)
    # Objects with no module (e.g. builtins, plain values) are not internal.
    return bool(module) and module.__name__.startswith('falcon.')
def _filter_internal(iterable, return_internal):
    """Filter the internal elements of an iterable."""
    if return_internal:
        # Internal entries requested: hand back the iterable untouched.
        return iterable
    return [element for element in iterable if not element.internal]
| 34.040506 | 93 | 0.622564 |
from functools import partial
import inspect
from typing import Callable, Dict, List, Optional, Type
from falcon import App, app_helpers
from falcon.routing import CompiledRouter
def inspect_app(app: App) -> 'AppInfo':
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
def inspect_routes(app: App) -> 'List[RouteInfo]':
router = app._router
inspect_function = _supported_routers.get(type(router))
if inspect_function is None:
raise TypeError(
'Unsupported router class {}. Use "register_router" '
'to register a function that can inspect the router '
'used by the provided application'.format(type(router))
)
return inspect_function(router)
def register_router(router_class):
def wraps(fn):
if router_class in _supported_routers:
raise ValueError(
'Another function is already registered'
' for the router {}'.format(router_class)
)
_supported_routers[router_class] = fn
return fn
return wraps
_supported_routers = {}
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
routes = []
for sr, _, _ in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
def inspect_sinks(app: App) -> 'List[SinkInfo]':
sinks = []
for prefix, sink, _ in app._sinks:
source_info, name = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
errors = []
for exc, fn in app._error_handlers.items():
source_info, name = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
_, name = _get_source_info_and_name(method)
cls = type(method.__self__)
_, cls_name = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = 'Process request', 'Process resource', 'Process response'
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
class_source_info, cls_name = _get_source_info_and_name(type(m))
methods = []
for method, name in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(
middlewareTree, middlewareClasses, app._independent_middleware
)
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
def _traverse(roots, parent):
for root in roots:
path = parent + '/' + root.raw_segment
if root.resource is not None:
methods = []
if root.method_map:
for method, func in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(
method, source_info, real_func.__name__, internal
)
methods.append(method_info)
source_info, class_name = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = []
_traverse(router._roots, '')
return routes
class _Traversable:
__visit_name__ = 'N/A'
def to_string(self, verbose=False, internal=False) -> str:
return StringVisitor(verbose, internal).process(self)
def __repr__(self):
return self.to_string()
class RouteMethodInfo(_Traversable):
__visit_name__ = 'route_method'
def __init__(
self, method: str, source_info: str, function_name: str, internal: bool
):
self.method = method
self.source_info = source_info
self.function_name = function_name
self.internal = internal
if function_name.startswith('on'):
self.suffix = '_'.join(function_name.split('_')[2:])
else:
self.suffix = ''
class RouteInfo(_Traversable):
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
class StaticRouteInfo(_Traversable):
__visit_name__ = 'static_route'
def __init__(self, prefix: str, directory: str, fallback_filename: Optional[str]):
self.prefix = prefix
self.directory = directory
self.fallback_filename = fallback_filename
class SinkInfo(_Traversable):
__visit_name__ = 'sink'
def __init__(self, prefix: str, name: str, source_info: str):
self.prefix = prefix
self.name = name
self.source_info = source_info
class ErrorHandlerInfo(_Traversable):
__visit_name__ = 'error_handler'
def __init__(self, error: str, name: str, source_info: str, internal: bool):
self.error = error
self.name = name
self.source_info = source_info
self.internal = internal
class MiddlewareMethodInfo(_Traversable):
__visit_name__ = 'middleware_method'
def __init__(self, function_name: str, source_info: str):
self.function_name = function_name
self.source_info = source_info
self.internal = False
class MiddlewareClassInfo(_Traversable):
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: List[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
class MiddlewareTreeItemInfo(_Traversable):
__visit_name__ = 'middleware_tree_item'
_symbols = {
'process_request': '→',
'process_resource': '↣',
'process_response': '↢',
}
def __init__(self, name: str, class_name: str):
self.name = name
self.class_name = class_name
class MiddlewareTreeInfo(_Traversable):
__visit_name__ = 'middleware_tree'
def __init__(
self,
request: List[MiddlewareTreeItemInfo],
resource: List[MiddlewareTreeItemInfo],
response: List[MiddlewareTreeItemInfo],
):
self.request = request
self.resource = resource
self.response = response
class MiddlewareInfo(_Traversable):
__visit_name__ = 'middleware'
def __init__(
self,
middleware_tree: MiddlewareTreeInfo,
middleware_classes: List[MiddlewareClassInfo],
independent: bool,
):
self.middleware_tree = middleware_tree
self.middleware_classes = middleware_classes
self.independent = independent
if independent:
self.independent_text = 'Middleware are independent'
else:
self.independent_text = 'Middleware are dependent'
class AppInfo(_Traversable):
__visit_name__ = 'app'
def __init__(
self,
routes: List[RouteInfo],
middleware: MiddlewareInfo,
static_routes: List[StaticRouteInfo],
sinks: List[SinkInfo],
error_handlers: List[ErrorHandlerInfo],
asgi: bool,
):
self.routes = routes
self.middleware = middleware
self.static_routes = static_routes
self.sinks = sinks
self.error_handlers = error_handlers
self.asgi = asgi
def to_string(self, verbose=False, internal=False, name='') -> str:
return StringVisitor(verbose, internal, name).process(self)
class InspectVisitor:
def process(self, instance: _Traversable):
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError(
'This visitor does not support {}'.format(type(instance))
) from e
class StringVisitor(InspectVisitor):
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
before = len(m_tree.request) + len(m_tree.resource)
after = len(m_tree.response)
if before + after == 0:
return ''
each = 2
initial = self.indent
if after > before:
self.indent += each * (after - before)
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if m_tree.resource or not text:
text.append('')
self.indent += each
text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
self.indent -= 4
if m_text:
text += '\n{}- Middlewares classes:\n{}'.format(self.tab, m_text)
return text
def visit_app(self, app: AppInfo) -> str:
type_ = 'ASGI' if app.asgi else 'WSGI'
self.indent = 4
text = '{} ({})'.format(self.name or 'Falcon App', type_)
if app.routes:
routes = '\n'.join(self.process(r) for r in app.routes)
text += '\n• Routes:\n{}'.format(routes)
middleware_text = self.process(app.middleware)
if middleware_text:
text += '\n• Middleware ({}):\n{}'.format(
app.middleware.independent_text, middleware_text
)
if app.static_routes:
static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
text += '\n• Static routes:\n{}'.format(static_routes)
if app.sinks:
sinks = '\n'.join(self.process(s) for s in app.sinks)
text += '\n• Sinks:\n{}'.format(sinks)
errors = _filter_internal(app.error_handlers, self.internal)
if errors:
errs = '\n'.join(self.process(e) for e in errors)
text += '\n• Error handlers:\n{}'.format(errs)
return text
def _get_source_info(obj, default='[unknown file]'):
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
source_info = default
return source_info
def _get_source_info_and_name(obj):
source_info = _get_source_info(obj, None)
if source_info is None:
source_info = _get_source_info(type(obj))
name = getattr(obj, '__name__', None)
if name is None:
name = getattr(type(obj), '__name__', '[unknown]')
return source_info, name
def _is_internal(obj):
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
def _filter_internal(iterable, return_internal):
if return_internal:
return iterable
return [el for el in iterable if not el.internal]
| true | true |
7901bd5b4b172d6f3e1c4b47ac8b79bb97033ac2 | 6,202 | py | Python | tpdatasrc/tpgamefiles/rules/char_class/class016_sorcerer.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | null | null | null | tpdatasrc/tpgamefiles/rules/char_class/class016_sorcerer.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | null | null | null | tpdatasrc/tpgamefiles/rules/char_class/class016_sorcerer.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | null | null | null | from toee import *
import char_class_utils
import char_editor
###################################################
def GetConditionName():  # used by API
    """Return the name of the condition associated with this class."""
    return "Sorcerer"
# def GetSpellCasterConditionName():
# return "Sorcerer Spellcasting"
def GetCategory():
    """Return the class-selection category this class is listed under."""
    return "Core 3.5 Ed Classes"
def GetClassDefinitionFlags():
    """Return the class definition flags (base class and core class)."""
    return CDF_BaseClass | CDF_CoreClass
def GetClassHelpTopic():
return "TAG_SORCERERS"
# Engine stat enum identifying levels taken in this class.
classEnum = stat_level_sorcerer

###################################################

# Feats granted automatically, keyed by the class level at which they arrive.
class_feats = {
	1: (feat_simple_weapon_proficiency, feat_call_familiar)
}

# Skills that count as class skills for the sorcerer.
class_skills = (skill_alchemy, skill_bluff, skill_concentration, skill_craft, skill_knowledge_arcana, skill_profession, skill_spellcraft)

# Spell slots per day, keyed by class level.
# Tuple index = spell level (see the #lvl ruler below); value = casts/day.
spells_per_day = {
	1: (5, 3),
	2: (6, 4),
	3: (6, 5),
	4: (6, 6, 3),
	5: (6, 6, 4),
	6: (6, 6, 5, 3),
	7: (6, 6, 6, 4),
	8: (6, 6, 6, 5, 3),
	9: (6, 6, 6, 6, 4),
	10: (6, 6, 6, 6, 5, 3),
	11: (6, 6, 6, 6, 6, 4),
	12: (6, 6, 6, 6, 6, 5, 3),
	13: (6, 6, 6, 6, 6, 6, 4),
	14: (6, 6, 6, 6, 6, 6, 5, 3),
	15: (6, 6, 6, 6, 6, 6, 6, 4),
	16: (6, 6, 6, 6, 6, 6, 6, 5, 3),
	17: (6, 6, 6, 6, 6, 6, 6, 6, 4),
	18: (6, 6, 6, 6, 6, 6, 6, 6, 5, 3),
	19: (6, 6, 6, 6, 6, 6, 6, 6, 6, 4),
	20: (6, 6, 6, 6, 6, 6, 6, 6, 6, 6)
#lvl    0  1  2  3  4  5  6  7  8  9
}

# Total spells known, keyed by class level; tuple index = spell level.
# Consumed by GetSpellsKnownAddedCount in InitSpellSelection below.
spells_known = {
	1: (4, 2),
	2: (5, 2),
	3: (5, 3),
	4: (6, 3, 1),
	5: (6, 4, 2),
	6: (7, 4, 2, 1),
	7: (7, 5, 3, 2),
	8: (8, 5, 3, 2, 1),
	9: (8, 5, 4, 3, 2),
	10: (9, 5, 4, 3, 2, 1),
	11: (9, 5, 5, 4, 3, 2),
	12: (9, 5, 5, 4, 3, 2, 1),
	13: (9, 5, 5, 4, 4, 3, 2),
	14: (9, 5, 5, 4, 4, 3, 2, 1),
	15: (9, 5, 5, 4, 4, 4, 3, 2),
	16: (9, 5, 5, 4, 4, 4, 3, 2, 1),
	17: (9, 5, 5, 4, 4, 4, 3, 3, 2),
	18: (9, 5, 5, 4, 4, 4, 3, 3, 2, 1),
	19: (9, 5, 5, 4, 4, 4, 3, 3, 3, 2),
	20: (9, 5, 5, 4, 4, 4, 3, 3, 3, 3)
#lvl    0  1  2  3  4  5  6  7  8  9
}
def GetHitDieType():
	# d4 hit die.
	return 4

def GetSkillPtsPerLevel():
	# Skill points gained per level (before Intelligence modifier).
	return 2

def GetBabProgression():
	# Non-martial (slow) base attack bonus progression.
	return base_attack_bonus_type_non_martial

def IsFortSaveFavored():
	return 0

def IsRefSaveFavored():
	return 0

def IsWillSaveFavored():
	# Will is the sorcerer's only favored (good) save.
	return 1
# Spell casting
def GetSpellListType():
	return spell_list_type_arcane

def GetSpellSourceType():
	return spell_source_type_arcane

def GetSpellReadyingType():
	# Innate readying: spells are cast spontaneously, not prepared.
	return spell_readying_innate

def GetSpellsPerDay():
	# Table defined above; keyed by class level.
	return spells_per_day

# Caster level advances at every class level, 1 through 20.
caster_levels = range(1, 21)

def GetCasterLevels():
	return caster_levels

def GetSpellDeterminingStat():
	# Charisma is the sorcerer's casting stat.
	return stat_charisma
def IsClassSkill(skillEnum):
	# Delegate the class-skill lookup to the shared helper, using this
	# class's skill tuple defined above.
	return char_class_utils.IsClassSkill(class_skills, skillEnum)

def IsClassFeat(featEnum):
	# Delegate the class-feat lookup to the shared helper.
	return char_class_utils.IsClassFeat(class_feats, featEnum)

def GetClassFeats():
	# Level -> automatically granted feats mapping (defined above).
	return class_feats
def IsAlignmentCompatible( alignment):
	# Sorcerers may be of any alignment.
	return 1

def ObjMeetsPrereqs( obj ):
	# Qualifies only with a base Charisma above 10.
	if obj.stat_base_get(stat_charisma) > 10:
		return 1
	return 0

## Levelup callbacks
def IsSelectingSpellsOnLevelup( obj ):
	# Sorcerers pick known spells at every level-up.
	return 1
def InitSpellSelection( obj, classLvlNew = -1, classLvlIncrement = 1):
	"""Populate the level-up UI spell lists for the sorcerer.

	obj               - the critter being levelled up
	classLvlNew       - sorcerer level being attained; <= 0 means current + 1
	classLvlIncrement - class levels gained at once (normally 1)
	Returns 0.
	"""
	classLvl = obj.stat_level_get(classEnum)
	if classLvlNew <= 0:
		classLvlNew = classLvl + 1
	maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew ) # this regards spell list extension by stuff like Mystic Theurge

	# Available Spells
	spAvail = char_editor.get_learnable_spells(obj, classEnum, maxSpellLvl)
	# add spell level labels (one per castable spell level)
	for p in range(0,maxSpellLvl+1):
		spAvail.append(char_editor.KnownSpellInfo(spell_label_level_0 + p, 0, classEnum))
	spAvail.sort()
	char_editor.append_available_spells(spAvail)

	# newly taken class: fixed starting package
	if classLvlNew == 1:
		spEnums = []
		spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0, 0, classEnum)) # add "Level 0" label
		for p in range(0,4): # 4 cantrips
			spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0, 3, classEnum))
		spEnums.append(char_editor.KnownSpellInfo(spell_label_level_1, 0, classEnum)) # add "Level 1" label
		for p in range(0,2): # 2 level 1 spells
			spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_1, 3, classEnum))
		char_editor.append_spell_enums(spEnums)
		return 0

	# Incrementing class level
	spellListLvl = obj.stat_level_get(stat_spell_list_level, classEnum) + classLvlIncrement # the effective level for getting the number of spells known
	spEnums = char_editor.get_known_class_spells(obj, classEnum) # get all spells known for this class
	for spellLvl in range(0, maxSpellLvl+1):
		spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0 + spellLvl, 0, classEnum)) # add label
		# add spells; count comes from the spells_known table for this level
		newSpellsKnownCount = char_class_utils.GetSpellsKnownAddedCount( spells_known , spellListLvl, spellLvl)
		print "new num spells for spell level " + str(spellLvl) + ": " + str(newSpellsKnownCount)
		for q in range(0, newSpellsKnownCount):
			spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0 + spellLvl, 3, classEnum))

	# Spell replacement: at every even effective level from 4 on, one known
	# spell of a level at least two below the maximum may be swapped out.
	isReplacing = 0
	if spellListLvl >= 4 and (spellListLvl % 2) == 0: # spell replacement
		isReplacing = 1
	if char_editor.get_class_code() != classEnum: #grant this benefit only for strict levelup (also to prevent some headache...)
		isReplacing = 0

	if isReplacing == 0:
		spEnums.sort()
		char_editor.append_spell_enums(spEnums)
		return 0

	# mark as replaceable
	for p in range(0,len(spEnums)):
		spEnum = spEnums[p].spell_enum
		if spell_vacant <= spEnum <= spell_label_level_9: # skip placeholders and labels
			continue
		if spell_new_slot_lvl_0 <= spEnum <= spell_new_slot_lvl_9: # skip not-yet-chosen new slots
			continue
		if char_editor.get_spell_level(spEnum, classEnum) <= maxSpellLvl-2:
			spEnums[p].spell_status = 1 # marked as replaceable
	spEnums.sort()
	char_editor.append_spell_enums(spEnums)
	return 0
def LevelupCheckSpells( obj ):
	"""Return 1 when every selectable spell slot has been filled, else 0."""
	classLvlNext = obj.stat_level_get(classEnum) + 1
	maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNext )
	for spInfo in char_editor.get_spell_enums():
		if spInfo.spell_enum != spell_vacant:
			continue
		# Tolerate unfilled cantrip slots once level-4 spells are available
		# (works around the cantrip slots causing problems at high levels).
		if maxSpellLvl >= 4 and spInfo.spell_level == 0:
			continue
		return 0
	return 1
def LevelupSpellsFinalize( obj, classLvlNew = -1 ):
	"""Commit the spell choices made during level-up."""
	# spell_known_add weeds out duplicates and the label/vacant placeholders.
	char_editor.spell_known_add(char_editor.get_spell_enums())
	return
import char_class_utils
import char_editor
lIncrement = 1):
classLvl = obj.stat_level_get(classEnum)
if classLvlNew <= 0:
classLvlNew = classLvl + 1
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew )
spAvail = char_editor.get_learnable_spells(obj, classEnum, maxSpellLvl)
for p in range(0,maxSpellLvl+1):
spAvail.append(char_editor.KnownSpellInfo(spell_label_level_0 + p, 0, classEnum))
spAvail.sort()
char_editor.append_available_spells(spAvail)
if classLvlNew == 1:
spEnums = []
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0, 0, classEnum))
for p in range(0,4):
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0, 3, classEnum))
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_1, 0, classEnum))
for p in range(0,2):
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_1, 3, classEnum))
char_editor.append_spell_enums(spEnums)
return 0
spellListLvl = obj.stat_level_get(stat_spell_list_level, classEnum) + classLvlIncrement
spEnums = char_editor.get_known_class_spells(obj, classEnum)
for spellLvl in range(0, maxSpellLvl+1):
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0 + spellLvl, 0, classEnum))
newSpellsKnownCount = char_class_utils.GetSpellsKnownAddedCount( spells_known , spellListLvl, spellLvl)
print "new num spells for spell level " + str(spellLvl) + ": " + str(newSpellsKnownCount)
for q in range(0, newSpellsKnownCount):
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0 + spellLvl, 3, classEnum))
isReplacing = 0
if spellListLvl >= 4 and (spellListLvl % 2) == 0:
isReplacing = 1
if char_editor.get_class_code() != classEnum:
isReplacing = 0
if isReplacing == 0:
spEnums.sort()
char_editor.append_spell_enums(spEnums)
return 0
for p in range(0,len(spEnums)):
spEnum = spEnums[p].spell_enum
if spell_vacant <= spEnum <= spell_label_level_9:
continue
if spell_new_slot_lvl_0 <= spEnum <= spell_new_slot_lvl_9:
continue
if char_editor.get_spell_level(spEnum, classEnum) <= maxSpellLvl-2:
spEnums[p].spell_status = 1
spEnums.sort()
char_editor.append_spell_enums(spEnums)
return 0
def LevelupCheckSpells( obj ):
classLvl = obj.stat_level_get(classEnum)
classLvlNew = classLvl + 1
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew )
spell_enums = char_editor.get_spell_enums()
for spInfo in spell_enums:
if spInfo.spell_enum == spell_vacant:
if maxSpellLvl >= 4 and spInfo.spell_level == 0:
continue
return 0
return 1
def LevelupSpellsFinalize( obj, classLvlNew = -1 ):
spEnums = char_editor.get_spell_enums()
char_editor.spell_known_add(spEnums)
return | false | true |
7901bf183b68ae45dd4aeedf3308e89b5a443829 | 2,790 | py | Python | query_CNFUN.py | CNBP/RCAPI | 5d7bb0e3bad0928529e84f404830de90c6c03143 | [
"MIT"
] | null | null | null | query_CNFUN.py | CNBP/RCAPI | 5d7bb0e3bad0928529e84f404830de90c6c03143 | [
"MIT"
] | null | null | null | query_CNFUN.py | CNBP/RCAPI | 5d7bb0e3bad0928529e84f404830de90c6c03143 | [
"MIT"
] | null | null | null | import sys
from typing import List, Union

from redcap import Project  # note this is from PyCap.redcap

from query_common import ProjectMixins, filter_records
"""
This class of functions are responsible of retrieving relevant data structures from the CNFUN tables
"""
class CNFUN_project(ProjectMixins):
    """
    Access wrapper around the CNFUN REDCap project.

    Relationship notes:
    One baby can have many admissions CaseIDs.
    One hospital record can have many CaseIDs.
    One baby has only one hospital record number.
    """

    def __init__(
        self, Token, URL, get_all_field=False,
    ):
        """
        Create a PyCap project handle and preload the record-linking fields.

        :param Token: REDCap API token for the CNFUN project.
        :param URL: REDCap API endpoint URL.
        :param get_all_field: when True, export every field of every record
            instead of only the key ID fields.
        """
        self.project = Project(URL, Token)

        if get_all_field:
            # Full export is expensive; only do it when explicitly requested
            # (previously the key-ID fetch ran first and was thrown away).
            self.data = self.project.export_records()
        else:
            # Key ID fields linking CNFUN records to CNN patients; loading
            # only these keeps the initial download small.
            fields_keyid = ["patientID", "cf_p_cnnpatientui"]
            self.data = self.get_fields(fields_keyid)

    def filter_with_CNNPatientUI(self, CNNPatientUI: Union[str, List[str]]):
        """
        Keep only the preloaded records whose cf_p_cnnpatientui matches.

        :param CNNPatientUI: a single CNNPatientUI or a list of them (batch).
        :return: list of matching record dicts.
        """
        # Listify a bare string so batch and single lookups share one path.
        if isinstance(CNNPatientUI, str):
            CNNPatientUI = [CNNPatientUI]
        return filter_records(self.data, "cf_p_cnnpatientui", CNNPatientUI)

    def get_PatientID_with_CNNPatientUI(self, CNNPatientUI: Union[str, List[str]]):
        """
        Map CNNPatientUI value(s) to CNFUN PatientIDs.

        PatientID has a 1:1 correspondence with CNNPatientUI, which is the
        same as PatientUI from the CNN Baby table.

        :param CNNPatientUI: a single CNNPatientUI or a list of them.
        :return: list of patientid values from the matching records.
        """
        if isinstance(CNNPatientUI, str):
            CNNPatientUI = [CNNPatientUI]
        list_filtered_dict = self.filter_with_CNNPatientUI(CNNPatientUI)
        return [case["patientid"] for case in list_filtered_dict]

    def get_records_CNFUN(self, PatientID: Union[str, List[str]]):
        """
        Export the full CNFUN records for the given PatientID(s).

        :param PatientID: a single PatientID or a list of them.
        :return: the exported records.
        """
        if isinstance(PatientID, str):
            PatientID = [PatientID]
        return self.project.export_records(records=PatientID)
| 32.44186 | 110 | 0.650538 | import sys
from query_common import filter_records, ProjectMixins
from redcap import Project
from typing import List
class CNFUN_project(ProjectMixins):
def __init__(
self, Token, URL, get_all_field=False,
):
self.project = Project(URL, Token)
# These are very important ID fields from the
fields_keyid = ["patientID", "cf_p_cnnpatientui"]
# For now, make sure to onyl get the data related to these key ids to reduce load time
self.data = self.get_fields(fields_keyid)
# if specified, get all the records.
if get_all_field:
self.data = self.project.export_records()
def filter_with_CNNPatientUI(self, CNNPatientUI: str or List[str]):
list_filtered = None
filtered_field = "cf_p_cnnpatientui"
# Handling when babyIDs is string instead of list (allowing batch function).
if type(CNNPatientUI) is str:
CNNPatientUI = [CNNPatientUI]
list_filtered = filter_records(self.data, filtered_field, CNNPatientUI)
return list_filtered
def get_PatientID_with_CNNPatientUI(self, CNNPatientUI: str or List[str]):
# Listify the CNNPatientUI
if type(CNNPatientUI) is str:
CNNPatientUI = [CNNPatientUI]
# Filter with the information
list_filtered_dict = self.filter_with_CNNPatientUI(CNNPatientUI)
# Aggregate the list_PatientID
list_PatientID = []
for case in list_filtered_dict:
list_PatientID.append(case["patientid"])
return list_PatientID
def get_records_CNFUN(self, PatientID: str or List[str]):
if type(PatientID) is str:
PatientID = [PatientID]
cases_data = self.project.export_records(records=PatientID)
return cases_data
| true | true |
7901bf4e043fcf1e2f4bfc4e4937b25cd22a1088 | 758 | py | Python | src/engine/main.py | libercapital/dados_publicos_cnpj_receita_federal | a02f98ebb1e5aa64539cc371d94ba78a49647214 | [
"MIT"
] | 7 | 2022-02-04T22:02:01.000Z | 2022-03-08T22:55:29.000Z | src/engine/main.py | libercapital/dados_publicos_cnpj_receita_federal | a02f98ebb1e5aa64539cc371d94ba78a49647214 | [
"MIT"
] | 3 | 2022-02-04T22:48:01.000Z | 2022-02-10T01:53:00.000Z | src/engine/main.py | libercapital/dados_publicos_cnpj_receita_federal | a02f98ebb1e5aa64539cc371d94ba78a49647214 | [
"MIT"
] | 1 | 2022-03-18T17:07:18.000Z | 2022-03-18T17:07:18.000Z | from src.engine.company_root import CompanyRoot
from src.engine.company_root_simples import CompanyRootSimples
from src.engine.partners import Partners
from src.engine.company import Company
from src.engine.company_tax_regime import CompanyTaxRegime
from src.engine.ref_date import main as engine_ref_date
from src.io.get_last_ref_date import main as get_last_ref_date
def main(ref_date=None):
ref_date = ref_date or get_last_ref_date()
CompanyRoot(ref_date=ref_date).execute()
Partners(ref_date=ref_date).execute()
CompanyRootSimples(ref_date=ref_date).execute()
CompanyTaxRegime(ref_date=ref_date).execute()
Company(ref_date=ref_date).execute()
engine_ref_date()
if __name__ == '__main__':
main()
| 32.956522 | 63 | 0.777045 | from src.engine.company_root import CompanyRoot
from src.engine.company_root_simples import CompanyRootSimples
from src.engine.partners import Partners
from src.engine.company import Company
from src.engine.company_tax_regime import CompanyTaxRegime
from src.engine.ref_date import main as engine_ref_date
from src.io.get_last_ref_date import main as get_last_ref_date
def main(ref_date=None):
ref_date = ref_date or get_last_ref_date()
CompanyRoot(ref_date=ref_date).execute()
Partners(ref_date=ref_date).execute()
CompanyRootSimples(ref_date=ref_date).execute()
CompanyTaxRegime(ref_date=ref_date).execute()
Company(ref_date=ref_date).execute()
engine_ref_date()
if __name__ == '__main__':
main()
| true | true |
7901bfee3778dd08118d7ec1c5e0e0d9e7c93415 | 1,004 | py | Python | alipay/aop/api/response/AlipayBossFncSettleSettlementbillCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayBossFncSettleSettlementbillCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayBossFncSettleSettlementbillCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SettlementbillOpenApiDTO import SettlementbillOpenApiDTO
class AlipayBossFncSettleSettlementbillCreateResponse(AlipayResponse):
    """Response model for the alipay.boss.fnc.settle.settlementbill.create API."""

    def __init__(self):
        super(AlipayBossFncSettleSettlementbillCreateResponse, self).__init__()
        # Settlement-bill payload returned by the gateway; filled in by
        # parse_response_content().
        self._result_set = None

    @property
    def result_set(self):
        # SettlementbillOpenApiDTO instance, or None before a response is parsed.
        return self._result_set

    @result_set.setter
    def result_set(self, value):
        # Accept either a ready-made DTO or the raw dict decoded from the
        # gateway JSON, converting the latter into a DTO.
        if isinstance(value, SettlementbillOpenApiDTO):
            self._result_set = value
        else:
            self._result_set = SettlementbillOpenApiDTO.from_alipay_dict(value)

    def parse_response_content(self, response_content):
        # Let the base class decode the common envelope, then pick out the
        # field specific to this response type.
        response = super(AlipayBossFncSettleSettlementbillCreateResponse, self).parse_response_content(response_content)
        if 'result_set' in response:
            self.result_set = response['result_set']
| 33.466667 | 120 | 0.74004 |
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SettlementbillOpenApiDTO import SettlementbillOpenApiDTO
class AlipayBossFncSettleSettlementbillCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayBossFncSettleSettlementbillCreateResponse, self).__init__()
self._result_set = None
@property
def result_set(self):
return self._result_set
@result_set.setter
def result_set(self, value):
if isinstance(value, SettlementbillOpenApiDTO):
self._result_set = value
else:
self._result_set = SettlementbillOpenApiDTO.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayBossFncSettleSettlementbillCreateResponse, self).parse_response_content(response_content)
if 'result_set' in response:
self.result_set = response['result_set']
| true | true |
7901c0b5cba908fba613856a95ce492e8e7595c5 | 910 | py | Python | medium/Q105_ConstructBinaryTreeFromPreorderAndInorderTraversal.py | Kaciras/leetcode | d203aecd1afe1af13a0384a9c657c8424aab322d | [
"MIT"
] | null | null | null | medium/Q105_ConstructBinaryTreeFromPreorderAndInorderTraversal.py | Kaciras/leetcode | d203aecd1afe1af13a0384a9c657c8424aab322d | [
"MIT"
] | null | null | null | medium/Q105_ConstructBinaryTreeFromPreorderAndInorderTraversal.py | Kaciras/leetcode | d203aecd1afe1af13a0384a9c657c8424aab322d | [
"MIT"
] | null | null | null | from utils import TreeNode, binary_tree
class Solution:
def __init__(self):
self.index = 0 # 利用[中序遍历左边元素数量 = 左子树节点总数]可以省掉这个计数的字段
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if not preorder:
return None
def build_node(lo, hi):
node = TreeNode(preorder[self.index])
self.index += 1
j = inorder.index(node.val, lo, hi) # 有些解法生成字典加快这步,但这会增大空间复杂度
if self.index < len(preorder) and preorder[self.index] in inorder[lo:j]:
node.left = build_node(lo, j)
if self.index < len(preorder) and preorder[self.index] in inorder[j + 1:hi]:
node.right = build_node(j + 1, hi)
return node
return build_node(0, len(preorder))
if __name__ == '__main__':
x = Solution().buildTree([1, 2, 4, 6, 5, 7, 8, 3, 9], [4, 6, 2, 7, 5, 8, 1, 9, 3])
x = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
| 26 | 83 | 0.642857 | from utils import TreeNode, binary_tree
class Solution:
def __init__(self):
self.index = 0
def buildTree(self, preorder, inorder):
if not preorder:
return None
def build_node(lo, hi):
node = TreeNode(preorder[self.index])
self.index += 1
j = inorder.index(node.val, lo, hi)
if self.index < len(preorder) and preorder[self.index] in inorder[lo:j]:
node.left = build_node(lo, j)
if self.index < len(preorder) and preorder[self.index] in inorder[j + 1:hi]:
node.right = build_node(j + 1, hi)
return node
return build_node(0, len(preorder))
if __name__ == '__main__':
x = Solution().buildTree([1, 2, 4, 6, 5, 7, 8, 3, 9], [4, 6, 2, 7, 5, 8, 1, 9, 3])
x = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
| true | true |
7901c1a4e85e018c419102609aa51b2d41092f36 | 1,187 | py | Python | scripts/batchAnnotator.py | PRIDE-Cluster/cluster-result-importer | 354150e2ea527bcc1d3398f75ebbeb346a4c3dc7 | [
"Apache-2.0"
] | null | null | null | scripts/batchAnnotator.py | PRIDE-Cluster/cluster-result-importer | 354150e2ea527bcc1d3398f75ebbeb346a4c3dc7 | [
"Apache-2.0"
] | 1 | 2015-02-09T16:35:54.000Z | 2015-02-09T16:37:51.000Z | scripts/batchAnnotator.py | PRIDE-Cluster/cluster-result-importer | 354150e2ea527bcc1d3398f75ebbeb346a4c3dc7 | [
"Apache-2.0"
] | null | null | null | import sys
from subprocess import Popen
import cx_Oracle
root_directory = sys.argv[1]
def main(directory):
public_projects = get_public_project_accessions()
for project_accession in public_projects:
Popen(['./runAnnotator.sh', directory, str(project_accession)])
# get all the project references from pride archive
def get_public_project_accessions():
accessions = list()
archive_cursor = connect_archive()
archive_cursor.execute(
"select accession from project where (submission_type='PRIDE' or submission_type='COMPLETE') and is_public = 1")
projects = archive_cursor.fetchall()
for project in projects:
accessions.append(project[0])
archive_cursor.close()
return accessions
# connect to pride archive database
def connect_archive():
# connect to archive database
archive_db = cx_Oracle.connect(
"${pride.repo.db.user}/${pride.repo.db.password}@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=ora-vm-032.ebi.ac.uk)(PORT=1531))(CONNECT_DATA=(SERVICE_NAME=PRIDEPRO)))")
# Create an cursor object for archive database
return archive_db.cursor()
if __name__ == '__main__':
main(root_directory) | 28.261905 | 173 | 0.730413 | import sys
from subprocess import Popen
import cx_Oracle
root_directory = sys.argv[1]
def main(directory):
public_projects = get_public_project_accessions()
for project_accession in public_projects:
Popen(['./runAnnotator.sh', directory, str(project_accession)])
def get_public_project_accessions():
accessions = list()
archive_cursor = connect_archive()
archive_cursor.execute(
"select accession from project where (submission_type='PRIDE' or submission_type='COMPLETE') and is_public = 1")
projects = archive_cursor.fetchall()
for project in projects:
accessions.append(project[0])
archive_cursor.close()
return accessions
def connect_archive():
archive_db = cx_Oracle.connect(
"${pride.repo.db.user}/${pride.repo.db.password}@(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=ora-vm-032.ebi.ac.uk)(PORT=1531))(CONNECT_DATA=(SERVICE_NAME=PRIDEPRO)))")
return archive_db.cursor()
if __name__ == '__main__':
main(root_directory) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.