code stringlengths 281 23.7M |
|---|
class NullSecretHandler(SecretHandler):
    """Secret handler that never touches real secret storage.

    Missing ``source_root``/``cache_dir`` values are replaced by temporary
    directories which are kept alive as attributes for the handler's
    lifetime, and every secret lookup answers with fabricated data.
    """

    def __init__(self, logger: logging.Logger, source_root: Optional[str], cache_dir: Optional[str], version: str) -> None:
        if not source_root:
            # Hold a reference so the TemporaryDirectory is not cleaned up
            # while the handler is still using it.
            self.tempdir_source = tempfile.TemporaryDirectory(prefix='null-secret-', suffix='-source')
            source_root = self.tempdir_source.name
        if not cache_dir:
            self.tempdir_cache = tempfile.TemporaryDirectory(prefix='null-secret-', suffix='-cache')
            cache_dir = self.tempdir_cache.name
        logger.info(f'NullSecretHandler using source_root {source_root}, cache_dir {cache_dir}')
        super().__init__(logger, source_root, cache_dir, version)

    def load_secret(self, resource: 'IRResource', secret_name: str, namespace: str) -> Optional[SecretInfo]:
        """Return a synthetic SecretInfo for any requested secret."""
        self.logger.debug('NullSecretHandler (%s %s): load secret %s in namespace %s'
                          % (resource.kind, resource.name, secret_name, namespace))
        return SecretInfo(secret_name, namespace, 'fake-secret',
                          'fake-tls-crt', 'fake-tls-key', 'fake-user-key',
                          decode_b64=False)
class TupleExpression(PrimaryExpression, TupleMixin, LValueMixin):
    """AST node for a tuple expression; also reused for inline arrays.

    NOTE(review): this class is written against an attribute-grammar
    framework — the ``synthesized()`` placeholders and the set-literal /
    parenthesised annotations (e.g. ``{Statement.variables_post}``) declare
    which child attributes each rule depends on. The exact semantics of
    that annotation syntax are assumed from the framework; confirm against
    its documentation.
    """

    # Child expressions of the tuple (or elements of the inline array).
    components: List[Expression]
    # Synthesized attributes computed by the rule methods below.
    flattened_expressions = synthesized()
    flattened_expression_values = synthesized()
    flattened_assignment_generators = synthesized()

    def variables_map(self) -> (Statement.variables_pre components):
        # Children inherit this node's incoming variable map unchanged.
        return self.variables_pre

    def variables_map_step(self: ListElement[(TupleExpression, 'components')]) -> (Expression.variables_pre next):
        # Each component's post-state feeds the next component's pre-state.
        return self.variables_post

    def variables_post(self, components: {Statement.variables_post}):
        # Post-state is the last component's post-state; an empty tuple
        # changes nothing, so fall back to the pre-state.
        if (len(components) == 0):
            return self.variables_pre
        return components[(- 1)].variables_post

    def changed_variables(self, components: {VarStateMixin.changed_variables}):
        # Union of the variables changed by any component.
        return set().union(*(c.changed_variables for c in components))

    def flattened_expressions(self, components: {TupleExpression.flattened_expressions}):
        # Inline arrays stay a single expression; nested tuples are
        # flattened recursively into one flat component list.
        if self.is_inline_array:
            return [self]
        return flatten(*((c.flattened_expressions if isinstance(c, TupleExpression) else c) for c in components))

    def flattened_expression_values(self, components: {TupleMixin.flattened_expression_values, Expression.expression_value}):
        # Collect the recursively flattened component values; an inline
        # array wraps them into a single ir.Array value.
        values = flatten(*((c.flattened_expression_values if isinstance(c, TupleMixin) else c.expression_value) for c in components))
        if self.is_inline_array:
            return [ir.Array(self, values)]
        return values

    def flattened_assignment_generators(self, components: {TupleExpression.flattened_assignment_generators, LValueMixin.assignment_generator}):
        # Inline arrays are not element-wise assignable here, hence [].
        if self.is_inline_array:
            return []
        return flatten(*((c.flattened_assignment_generators if isinstance(c, TupleExpression) else c.assignment_generator) for c in components))

    def expression_value(self, components: {Expression.expression_value}):
        # A 1-tuple is transparent; larger tuples must be consumed through
        # flattened_expression_values instead of this attribute.
        if (len(components) == 1):
            return components[0].expression_value
        return UndefinedAttribute(info='Tuple expression_value must not be used directly. Use flattened_expression_values instead.')

    def assignment_generator(self):
        # Assigning to the tuple as a whole is deliberately undefined.
        return UndefinedAttribute(info='Use flattened_assignment_generators instead.')

    def cfg(self, components: AstNode.cfg):
        # Control flow: evaluate components in order; an inline array then
        # flows into the constructed array value itself.
        if self.is_inline_array:
            return (CfgSimple.concatenate(*[c.cfg for c in components]) >> self.flattened_expression_values[0])
        return CfgSimple.concatenate(*[c.cfg for c in components])

    def cfg_lhs(self, components: LValueMixin.cfg_lhs):
        # LHS control flow only considers components that are assignable.
        return CfgSimple.concatenate(*[c.cfg_lhs for c in filter_by_type(components, LValueMixin)])
class ThreeCenterTwoElectronBase(Function):
    """Symbolic recursion for three-center two-electron integrals over
    Cartesian Gaussian functions, built from Obara–Saika-style horizontal
    and vertical recurrence relations with a Boys-function base case.

    NOTE(review): ``eval`` takes ``cls`` but carries no ``@classmethod``
    decorator here — presumably supplied by the surrounding (unseen)
    machinery; confirm. Subclasses are expected to set ``aux_vrr`` to
    'cart' or 'sph' to pick the auxiliary-center recurrence.
    """

    def eval(cls, ia, ja, ka, ib, jb, kb, ic, jc, kc, N, a, b, c, A, B, C):
        # (ia,ja,ka)/(ib,jb,kb)/(ic,jc,kc): Cartesian angular momenta on
        # centers A/B/C. N: auxiliary (Boys) order. a, b, c: exponents.
        # A, B, C: center coordinates (assumed vector-like — TODO confirm).
        ang_moms = np.array((ia, ja, ka, ib, jb, kb, ic, jc, kc), dtype=int)
        ang_moms2d = ang_moms.reshape((- 1), 3)  # one row per center A, B, C
        # The recurrences below may step an index below zero; such terms vanish.
        if any([(am < 0) for am in ang_moms]):
            return 0
        p = (a + b)  # total exponent of the A/B product Gaussian
        P = (((a * A) + (b * B)) / p)  # Gaussian-product center of A and B
        mu = ((a * b) / p)  # reduced exponent of the A/B pair
        X_PC = (P - C)
        rho = ((p * c) / (p + c))  # reduced exponent of (A/B pair, C)

        def recur(N, *inds):
            # Re-enter the integral with new angular momenta / auxiliary order.
            return cls(*inds, N, a, b, c, A, B, C)

        def recur_hrr(cart_ind):
            # Horizontal recurrence: shift one unit of angular momentum from
            # center B to center A along cart_ind (only valid at N == 0).
            assert (N == 0)
            incr_ang_moms = ang_moms2d.copy()
            incr_ang_moms[(0, cart_ind)] += 1
            incr_ang_moms[(1, cart_ind)] -= 1
            decr_ang_moms = ang_moms2d.copy()
            decr_ang_moms[(1, cart_ind)] -= 1
            incr_ang_moms = incr_ang_moms.flatten()
            decr_ang_moms = decr_ang_moms.flatten()
            AB_dir = (A - B)[cart_ind]
            return (recur(N, *incr_ang_moms) + (AB_dir * recur(N, *decr_ang_moms)))

        def recur_vrr(cart_ind):
            # Vertical recurrence on center A; requires centers B and C to be
            # pure s-functions (all angular momenta zero) already.
            assert ((ib, jb, kb) == (0, 0, 0))
            assert ((ic, jc, kc) == (0, 0, 0))
            decr_a = ang_moms2d.copy()
            decr_a[(0, cart_ind)] -= 1
            decr_aa = decr_a.copy()
            decr_aa[(0, cart_ind)] -= 1
            PA_dir = (P - A)[cart_ind]
            PC_dir = (P - C)[cart_ind]
            ai = ((ia, ja, ka)[cart_ind] - 1)  # (l_a - 1) along cart_ind
            _2p = (2 * p)
            decr_a = decr_a.flatten()
            decr_aa = decr_aa.flatten()
            return (((PA_dir * recur(N, *decr_a)) - (((rho / p) * PC_dir) * recur((N + 1), *decr_a))) + ((ai / _2p) * (recur(N, *decr_aa) - ((rho / p) * recur((N + 1), *decr_aa)))))

        def recur_vrr_aux(cart_ind):
            # Vertical recurrence on the auxiliary center C (Cartesian form);
            # couples back to decremented angular momentum on A and B.
            decr_c = ang_moms2d.copy()
            decr_c[(2, cart_ind)] -= 1
            decr_cc = decr_c.copy()
            decr_cc[(2, cart_ind)] -= 1
            decr_ac = decr_c.copy()
            decr_ac[(0, cart_ind)] -= 1
            decr_bc = decr_c.copy()
            decr_bc[(1, cart_ind)] -= 1
            PC_dir = (P - C)[cart_ind]
            la = (ia, ja, ka)[cart_ind]
            lb = (ib, jb, kb)[cart_ind]
            lc = ((ic, jc, kc)[cart_ind] - 1)
            decr_c = decr_c.flatten()
            decr_cc = decr_cc.flatten()
            decr_ac = decr_ac.flatten()
            decr_bc = decr_bc.flatten()
            return ((((((p / (p + c)) * PC_dir) * recur((N + 1), *decr_c)) + ((lc / (2 * c)) * (recur(N, *decr_cc) - ((p / (p + c)) * recur((N + 1), *decr_cc))))) + ((la / (2 * (p + c))) * recur((N + 1), *decr_ac))) + ((lb / (2 * (p + c))) * recur((N + 1), *decr_bc)))

        def recur_vrr_aux_sph(cart_ind):
            # Auxiliary-center recurrence, spherical variant; requires B to
            # be an s-function.
            assert ((ib, jb, kb) == (0, 0, 0))
            decr_c = ang_moms2d.copy()
            decr_c[(2, cart_ind)] -= 1
            decr_ac = decr_c.copy()
            decr_ac[(0, cart_ind)] -= 1
            La = (ia, ja, ka)[cart_ind]
            PC_dir = (P - C)[cart_ind]
            return ((rho / c) * ((PC_dir * recur((N + 1), *decr_c.flatten())) + ((La / (2 * p)) * recur((N + 1), *decr_ac.flatten()))))

        # Pick the auxiliary-center recurrence flavor configured on the class.
        recur_vrr_aux_funcs = {'cart': recur_vrr_aux, 'sph': recur_vrr_aux_sph}
        recur_vrr_aux_func = recur_vrr_aux_funcs[cls.aux_vrr]
        if (ang_moms == 0).all():
            # Base case (all s-functions): closed form via the Boys function.
            X_AB = (A - B)
            r2_PC = X_PC.dot(X_PC)
            r2_AB = X_AB.dot(X_AB)
            chi = (rho * r2_PC)
            K = exp(((- mu) * r2_AB))
            return (((((2 * (pi ** 2.5)) / sqrt((p + c))) / (p * c)) * K) * boys(N, chi))
        # Reduce angular momentum in a fixed order: first B (HRR), then the
        # auxiliary center C, finally A (VRR). The branches are exhaustive:
        # the all-zero case was handled above.
        elif (ib > 0):
            return recur_hrr(0)
        elif (jb > 0):
            return recur_hrr(1)
        elif (kb > 0):
            return recur_hrr(2)
        elif (ic > 0):
            return recur_vrr_aux_func(0)
        elif (jc > 0):
            return recur_vrr_aux_func(1)
        elif (kc > 0):
            return recur_vrr_aux_func(2)
        elif (ia > 0):
            return recur_vrr(0)
        elif (ja > 0):
            return recur_vrr(1)
        elif (ka > 0):
            return recur_vrr(2)
class Images():
    """Factory for image-based page components: plain images, figures,
    backgrounds, avatars, carousels, icons, badges and galleries.

    Fixes over the previous revision:
    - ``youtube``: the thumbnail path string was corrupted (``(' % video_id)``);
      rebuilt as the YouTube thumbnail base URL.
    - ``avatar``: removed an unused local and a duplicated ``div.img`` assignment.
    """

    def __init__(self, ui):
        # Keep a handle on the page so every component attaches to it.
        self.page = ui.page

    def img(self, image: str = None, path: str = None, width: types.SIZE_TYPE = (100, '%'),
            height: types.SIZE_TYPE = (None, 'px'), align: str = 'center', html_code: str = None,
            profile: types.PROFILE_TYPE = None, tooltip: str = None,
            options: types.OPTION_TYPE = None) -> html.HtmlImage.Image:
        """Add a basic image component.

        When an explicit height is given while the width is a percentage,
        the width is switched to 'auto' to preserve the aspect ratio.
        """
        width = Arguments.size(width, unit='%')
        height = Arguments.size(height, unit='px')
        if height[0] not in [None, 'auto'] and width[1] == '%':
            width = ('auto', '')
        html_image = html.HtmlImage.Image(
            self.page, self.page.py.encode_html(image), self.page.py.encode_html(path),
            align, html_code, width, height, profile, options or {})
        if tooltip is not None:
            html_image.tooltip(tooltip)
        if width[0] is None:
            html_image.style.css.max_width = '100%'
        html.Html.set_component_skin(html_image)
        return html_image

    def figure(self, image: str = None, caption: str = None, path: str = None,
               width: types.SIZE_TYPE = (100, '%'), height: types.SIZE_TYPE = (None, 'px'),
               align: str = 'center', html_code: str = None, profile: types.PROFILE_TYPE = None,
               tooltip: str = None, options: types.OPTION_TYPE = None) -> html.HtmlImage.Figure:
        """Add a <figure> component wrapping an image and an optional caption."""
        width = Arguments.size(width, unit='%')
        height = Arguments.size(height, unit='px')
        container = html.HtmlImage.Figure(
            self.page, [], None, None, width, None, height, False, align, None, html_code,
            'figure', None, options or {}, profile)
        container.img = self.page.ui.img(
            image=image, path=path, width=(100, '%'), height=(None, 'px'), align='center',
            html_code=html_code, profile=profile, tooltip=tooltip, options=options)
        container.add(container.img)
        if caption is not None:
            container.caption = self.page.ui.tags.figcaption(caption)
            container.add(container.caption)
        if width[0] == 'auto':
            container.style.css.display = 'inline-block'
        html.Html.set_component_skin(container)
        return container

    def container(self, components: List[html.Html.Html], max_width: types.SIZE_TYPE = (900, 'px'),
                  align: str = 'center', profile: types.PROFILE_TYPE = None,
                  options: types.OPTION_TYPE = None) -> html.HtmlContainer.Div:
        """Wrap components in a max-width constrained, optionally centered div."""
        max_width = Arguments.size(max_width, unit='%')
        container = self.page.ui.div(components, profile=profile, options=options)
        container.style.css.max_width = max_width[0]
        container.style.css.text_align = align
        if align == 'center':
            container.style.css.margin = '0 auto'
        html.Html.set_component_skin(container)
        return container

    def background(self, url: str, width: types.SIZE_TYPE = (100, '%'),
                   height: types.SIZE_TYPE = (300, 'px'), size: str = 'cover', margin: int = 0,
                   align: str = 'center', html_code: str = None, position: str = 'middle',
                   profile: types.PROFILE_TYPE = None,
                   options: types.OPTION_TYPE = None) -> html.HtmlContainer.Div:
        """Add a div using *url* as its CSS background image."""
        div = self.page.ui.div(height=Arguments.size(height, 'px'), width=Arguments.size(width),
                               html_code=html_code, options=options, profile=profile)
        div.style.css.background_url(self.page.py.encode_html(url), size=size, margin=margin)
        div.style.css.display = 'block'
        div.style.css.text_align = align
        div.style.css.vertical_align = position
        div.style.css.padding = 'auto'
        html.Html.set_component_skin(div)
        return div

    def wallpaper(self, url: str = None, width: types.SIZE_TYPE = (100, '%'),
                  height: types.SIZE_TYPE = (100, '%'), size: str = 'cover', margin: int = 0,
                  align: str = 'center', html_code: str = None, position: str = 'middle',
                  profile: types.PROFILE_TYPE = None,
                  options: types.OPTION_TYPE = None) -> html.HtmlImage.Background:
        """Add a full-page background image; stretches the page body to 100%."""
        options = options or {}
        div = html.HtmlImage.Background(
            self.page, [], label=None, color=None, width=Arguments.size(width), icon=None,
            height=Arguments.size(height), editable=False, align='left', padding=None,
            html_code=html_code, tag='div', helper=None, options=options, profile=profile)
        div.style.css.background_url(
            self.page.py.encode_html(url) if url is not None else None, size=size, margin=margin)
        div.style.css.background_position = 'center center'
        div.style.css.display = 'block'
        div.style.css.text_align = align
        div.style.css.vertical_align = position
        div.style.css.padding = 'auto'
        # The wallpaper only fills the viewport if the body is full height.
        self.page.body.style.css.height = '100%'
        html.Html.set_component_skin(div)
        return div

    def logo(self, url: str, width: types.SIZE_TYPE = (160, 'px'), height: types.SIZE_TYPE = (60, 'px'),
             top: types.SIZE_TYPE = (16, 'px'), left: types.SIZE_TYPE = (16, 'px'),
             profile: types.PROFILE_TYPE = None,
             options: types.OPTION_TYPE = None) -> html.HtmlContainer.Div:
        """Add an absolutely-positioned logo div with *url* as background."""
        top = Arguments.size(top, 'px')
        left = Arguments.size(left, 'px')
        div = self.page.ui.div(height=Arguments.size(height, 'px'), width=Arguments.size(width),
                               options=options, profile=profile)
        div.style.css.background_url(url)
        div.style.css.display = 'block'
        div.style.css.position = 'absolute'
        div.style.css.top = '%s%s' % (top[0], top[1])
        div.style.css.left = '%s%s' % (left[0], left[1])
        div.style.css.text_align = 'center'
        div.style.css.vertical_align = 'middle'
        div.style.css.padding = 'auto'
        html.Html.set_component_skin(div)
        return div

    def youtube(self, video_id: str = None, width: types.SIZE_TYPE = (100, '%'),
                height: types.SIZE_TYPE = (None, 'px'), align: str = 'center', html_code: str = None,
                profile: types.PROFILE_TYPE = None,
                options: types.OPTION_TYPE = None) -> html.HtmlImage.Image:
        """Add the default thumbnail image of a YouTube video.

        Bug fix: the path string was corrupted (``(' % video_id)``); it is
        rebuilt as the YouTube thumbnail base URL serving ``0.jpg`` —
        NOTE(review): confirm against the upstream source.
        """
        component = self.img('0.jpg', 'https://img.youtube.com/vi/%s' % video_id,
                             Arguments.size(width), Arguments.size(height, 'px'),
                             align, html_code, profile, options)
        html.Html.set_component_skin(component)
        return component

    def circular(self, image: str = None, path: str = None, width: types.SIZE_TYPE = (200, 'px'),
                 height: types.SIZE_TYPE = (200, 'px'), align: str = 'center', html_code: str = None,
                 profile: types.PROFILE_TYPE = None,
                 options: types.OPTION_TYPE = None) -> html.HtmlImage.Image:
        """Add an image clipped to a circle via border-radius."""
        width = Arguments.size(width, unit='px')
        height = Arguments.size(height, unit='px')
        if height[0] is not None and width[1] == '%':
            width = ('auto', '')
        html_image = html.HtmlImage.Image(self.page, image, path, align, html_code, width,
                                          height, profile, options or {})
        # Padding + light border + full-radius corners produce the circle.
        html_image.style.css.padding = 5
        html_image.style.css.borders_light()
        html_image.style.css.border_radius = width[0]
        html.Html.set_component_skin(html_image)
        return html_image

    def avatar(self, text: str = '', image: str = None, path: str = None, status: str = None,
               width: types.SIZE_TYPE = (30, 'px'), height: types.SIZE_TYPE = (30, 'px'),
               align: str = 'center', html_code: str = None, profile: types.PROFILE_TYPE = None,
               menu: html.Html.Html = None,
               options: types.OPTION_TYPE = None) -> html.HtmlContainer.Div:
        """Add a round avatar built from an image or the first letter of *text*,
        with an optional status dot and an attachable dropdown menu."""
        width = Arguments.size(width, 'px')
        height = Arguments.size(height, 'px')
        options = options or {}
        # Map the various accepted status flags onto theme colors.
        status_map = {True: self.page.theme.success.base,
                      'available': self.page.theme.success.base,
                      False: self.page.theme.danger.base,
                      'busy': self.page.theme.danger.base,
                      'out': self.page.theme.warning.base}
        bgcolor, margin_top = None, '-20%'
        if image is not None:
            img = self.img(image, path, (width[0] - 5, width[1]), (height[0] - 5, height[1]),
                           align='center', html_code=html_code, profile=profile, options=options)
            img.style.css.border_radius = width[0]
            img.style.css.margin = 2
            margin_top = -8
        else:
            if not text:
                text = 'anonymous'
            # Deterministic color derived from the text so the same name
            # always gets the same avatar color.
            bgcolor = Colors.randColor(self.page.py.hash(text))
            img = self.page.ui.layouts.div(text[0].upper())
            img.style.css.line_height = width[0] - 5
            img.style.css.color = 'white'
            img.style.css.font_size = width[0]
            img.style.css.font_weight = 'bold'
            img.style.css.padding = 0
            img.style.css.middle()
        if options.get('status', True):
            status_o = self.page.ui.layouts.div(' ', width=(30, '%'), height=(30, '%'))
            status_o.style.css.position = 'relative'
            status_o.style.css.background_color = status_map.get(status, self.page.theme.greys[5])
            status_o.style.css.border_radius = 30
            status_o.style.css.margin_top = margin_top
            status_o.style.css.float = 'right'
            div = self.page.ui.layouts.div([img, status_o], width=width, height=height)
            div.status = status_o
        else:
            div = self.page.ui.layouts.div([img], width=width, height=height)
        if bgcolor is not None:
            img.style.css.background_color = bgcolor
            # NOTE(review): 'text_stoke' looks like a typo for 'text_stroke'
            # but is kept as-is since the CSS property name is defined elsewhere.
            img.style.css.text_stoke = '1px %s' % bgcolor
        img.style.css.borders_light()
        img.style.css.border_radius = width[0]
        div.img = img

        def add_menu(menu_item: Union[(html.Html.Html, list)]):
            # Attach a dropdown menu toggled by clicking the avatar.
            if isinstance(menu_item, list):
                menu_item = self.page.ui.div(menu_item, width='auto')
            menu_item.style.css.position = 'absolute'
            menu_item.style.css.display = 'None'
            menu_item.style.css.line_height = 'normal'
            menu_item.style.css.border = '1px solid %s' % self.page.theme.greys[4]
            menu_item.style.css.border_radius = 5
            menu_item.style.css.background_color = self.page.theme.dark_or_white()
            menu_item.style.css.min_height = 20
            menu_item.style.css.margin_top = 10
            menu_item.style.css.right = 2
            menu_item.style.css.padding = 3
            menu_item.style.css.z_index = 600
            div.__add__(menu_item)
            div.style.css.position = 'relative'
            div.img.click([menu_item.dom.toggle()])
            div.menu = menu_item

        div.add_menu = add_menu
        if align == 'center':
            div.style.css.margin = 'auto'
            div.style.css.display = 'block'
        html.Html.set_component_skin(div)
        return div

    def section(self, image: str, name: str, title: str, text: str, url: str = None,
                path: str = None, width: types.SIZE_TYPE = (200, 'px'),
                height: types.SIZE_TYPE = (200, 'px'), profile: types.PROFILE_TYPE = None,
                options: types.OPTION_TYPE = None) -> html.HtmlContainer.Div:
        """Add a clickable card with a highlight label, image, title and text."""
        width = Arguments.size(width, 'px')
        height = Arguments.size(height, 'px')
        img = self.img(image, width=(width[0] - 10, 'px'), height=(100, 'px'), path=path,
                       options=options, profile=profile)
        title = self.page.ui.title(title, level=2, options=options)
        highlight = self.page.ui.texts.span(name, width=(50, 'px'), height=(20, 'px'),
                                            options=options, profile=profile)
        paragraph = self.page.ui.texts.paragraph(text, options=options)
        div = self.page.ui.layouts.div([highlight, img, title, paragraph], width=width,
                                       height=height, options=options, profile=profile)
        highlight.css({'position': 'absolute', 'left': 0,
                       'background-color': self.page.theme.colors[-1],
                       'color': self.page.theme.greys[0], 'padding': '0 2px'})
        div.style.css.margin = 2
        div.img = img
        div.title = title
        if url is not None:
            div.style.css.cursor = 'pointer'
            div.click([self.page.js.location.href(url)])
        div.style.add_classes.div.border_bottom()
        html.Html.set_component_skin(div)
        return div

    def animated(self, image: str = '', text: str = '', title: str = '', url: str = None,
                 path: str = None, width: types.SIZE_TYPE = (200, 'px'),
                 height: types.SIZE_TYPE = (200, 'px'), html_code: str = None,
                 options: types.OPTION_TYPE = None,
                 profile: types.PROFILE_TYPE = None) -> html.HtmlImage.AnimatedImage:
        """Add an animated image component with overlay text and link."""
        width = Arguments.size(width, 'px')
        height = Arguments.size(height, 'px')
        component = html.HtmlImage.AnimatedImage(self.page, image, text, title, html_code, url,
                                                 path, width, height, options, profile)
        html.Html.set_component_skin(component)
        return component

    def carousel(self, images: list = None, path: str = None, selected: int = 0,
                 width: types.SIZE_TYPE = (100, '%'), height: types.SIZE_TYPE = (300, 'px'),
                 options: types.OPTION_TYPE = None,
                 profile: types.PROFILE_TYPE = None) -> html.HtmlImage.ImgCarousel:
        """Add an image carousel; the height must be an absolute unit."""
        width = Arguments.size(width)
        height = Arguments.size(height, 'px')
        if height[1] == '%':
            raise ValueError('This height cannot be in percentage')
        component = html.HtmlImage.ImgCarousel(self.page, images or [], path, selected, width,
                                               height, options or {}, profile)
        html.Html.set_component_skin(component)
        return component

    def emoji(self, symbol: str = None, top: types.SIZE_TYPE = (20, 'px'),
              options: types.OPTION_TYPE = None,
              profile: types.PROFILE_TYPE = None) -> html.HtmlImage.Emoji:
        """Add an emoji character component."""
        top = Arguments.size(top, 'px')
        component = html.HtmlImage.Emoji(self.page, symbol, top, options, profile)
        html.Html.set_component_skin(component)
        return component

    def icon(self, icon: str = None, family: str = None, width: types.SIZE_TYPE = (None, 'px'),
             html_code: str = None, height: types.SIZE_TYPE = (None, 'px'), color: str = None,
             tooltip: str = None, align: str = 'left', options: types.OPTION_TYPE = None,
             profile: types.PROFILE_TYPE = None) -> html.HtmlImage.Icon:
        """Add a font icon, resolving the icon alias through the page's icon map."""
        width = Arguments.size(width, 'px')
        height = Arguments.size(height, 'px')
        icon_details = self.page.icons.get(icon, family)
        options = options or {}
        options['icon_family'] = family or icon_details['icon_family']
        component = html.HtmlImage.Icon(self.page, icon_details['icon'], width=width,
                                        height=height, color=color or 'inherit', tooltip=tooltip,
                                        options=options, html_code=html_code, profile=profile)
        if width[0] is not None and width[1] == 'px':
            # Font size follows the requested width minus an optional factor.
            notches = options.get('font-factor', 0)
            component.style.css.font_size = '%s%s' % (width[0] - notches, width[1])
        if align == 'center':
            component.style.css.margin = 'auto'
            component.style.css.display = 'block'
        html.Html.set_component_skin(component)
        return component

    def badge(self, text: str = '', label: str = None, icon: str = None,
              width: types.SIZE_TYPE = (25, 'px'), height: types.SIZE_TYPE = (25, 'px'),
              background_color: str = None, color: str = None, url: str = None,
              tooltip: str = None, options: types.OPTION_TYPE = None,
              profile: types.PROFILE_TYPE = None) -> html.HtmlImage.Badge:
        """Add a badge with optional label, icon and link."""
        width = Arguments.size(width, 'px')
        height = Arguments.size(height, 'px')
        if background_color is None:
            background_color = self.page.theme.greys[0]
        if color is None:
            color = self.page.theme.success.base
        icon_details = self.page.icons.get(icon, options=options)
        options = options or {}
        options['icon_family'] = icon_details['icon_family']
        component = html.HtmlImage.Badge(self.page, text, width, height, label,
                                         icon_details['icon'], background_color, color, url,
                                         tooltip, options, profile)
        html.Html.set_component_skin(component)
        return component

    def color(self, code: str, color: str = None, width: types.SIZE_TYPE = (110, 'px'),
              height: types.SIZE_TYPE = (25, 'px'), options: types.OPTION_TYPE = None,
              helper: str = None, profile: types.PROFILE_TYPE = None) -> html.HtmlContainer.Div:
        """Add a color swatch div labelled with its own color code."""
        width = Arguments.size(width, 'px')
        height = Arguments.size(height, 'px')
        div = self.page.ui.div(code, width=width, height=height, options=options, helper=helper,
                               profile=profile)
        div.style.css.background_color = code
        div.style.css.line_height = '%s%s' % (height[0], height[1])
        div.style.css.color = color or self.page.theme.greys[0]
        div.style.css.text_align = 'center'
        div.style.css.border = '1px solid black'
        div.style.css.vertical_align = 'middle'
        html.Html.set_component_skin(div)
        return div

    def gallery(self, images: List[Union[(dict, html.Html.Html)]] = None, columns: int = 6,
                width: types.SIZE_TYPE = (None, '%'), height: types.SIZE_TYPE = ('auto', ''),
                options: types.OPTION_TYPE = None,
                profile: types.PROFILE_TYPE = None) -> html.HtmlContainer.Grid:
        """Lay *images* (dicts, urls or components) out in a responsive grid
        of *columns* per row, padding the last row with blanks."""
        dflt_options = {}
        if options is not None:
            dflt_options.update(options)
        grid = self.page.ui.grid(width=width, height=height, options=dflt_options, profile=profile)
        grid.style.css.margin_top = 20
        grid.style.css.overflow = 'hidden'
        grid.style.css.margin_bottom = 20
        row = self.page.ui.row(options=dflt_options)
        grid.images = []
        grid.texts = {}
        for i, image in enumerate(images):
            # Optional cap on the number of rendered images.
            if dflt_options.get('max') is not None and len(grid.images) > dflt_options.get('max'):
                break
            if i % columns == 0:
                # Row is full: push it and start a new one.
                grid.add(row)
                row = self.page.ui.row(options=dflt_options)
            text = None
            if not hasattr(image, 'options'):
                # Raw dict / url entries are converted into img components.
                if isinstance(image, dict):
                    if 'htmlCode' not in image:
                        image['htmlCode'] = '%s_%s' % (grid.htmlCode, i)
                    if 'align' not in image:
                        image['align'] = 'center'
                    if 'text' in image:
                        text = self.page.ui.text(image['text'], options=dflt_options)
                        text.style.css.bold()
                        text.style.css.white_space = 'nowrap'
                        grid.texts[i] = text
                        del image['text']
                    image = self.page.ui.img(**image)
                else:
                    image = self.page.ui.img(image, html_code='%s_%s' % (grid.htmlCode, i),
                                             align='center')
                image.style.css.font_factor(15)
                image.style.add_classes.div.border_hover()
            image.style.css.text_align = 'center'
            grid.images.append(image)
            if text is not None:
                text.style.css.display = 'inline-block'
                text.style.css.width = '100%'
                text.style.css.text_align = 'center'
                row.add(self.page.ui.col([image, text], align='center', options=dflt_options))
            else:
                row.add(image)
            row.attr['class'].add('mt-3')
            for r in row:
                r.attr['class'].add('px-1')
            image.parent = row[-1]
        if len(row):
            # Pad the incomplete last row so column widths stay even.
            for i in range(columns - len(row)):
                row.add(self.page.ui.text('&nbsp;'))
            for r in row:
                r.attr['class'].add('px-1')
            row.attr['class'].add('mt-3')
            grid.add(row)
        grid.style.css.color = self.page.theme.greys[6]
        return grid

    def epyk(self, align: str = 'center', width: types.SIZE_TYPE = (None, '%'),
             height: types.SIZE_TYPE = ('auto', ''), html_code: str = None,
             profile: types.PROFILE_TYPE = None, tooltip: str = None,
             options: types.OPTION_TYPE = None):
        """Add the epyk logo as an inlined base64 data-URI image."""
        with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'static',
                               'images', 'epykIcon.PNG'), 'rb') as fp:
            base64_bytes = base64.b64encode(fp.read())
        base64_message = base64_bytes.decode('ascii')
        img = 'data:image/x-icon;base64,%s' % base64_message
        icon = self.page.ui.img(img, align=align, width=width, height=height, html_code=html_code,
                                profile=profile, tooltip=tooltip, options=options)
        icon.css({'text-align': 'center', 'padding': 'auto', 'vertical-align': 'middle'})
        html.Html.set_component_skin(icon)
        return icon
def can_delete_topic(user, topic=None):
    """Build the permission check allowing *user* to delete a topic.

    *topic* may be a ``Topic`` instance, a topic id (``int``) or ``None``;
    it is forwarded to the lock check in whichever form was given.
    """
    lock_kwargs = {}
    if isinstance(topic, int):
        lock_kwargs['topic_id'] = topic
    elif isinstance(topic, Topic):
        lock_kwargs['topic'] = topic
    # Allowed when: super-moderator, OR a forum moderator with the
    # 'deletetopic' permission, OR the topic's own author with that
    # permission on a topic that is not locked.
    moderator_rule = And(IsModeratorInForum(), Has('deletetopic'))
    author_rule = And(IsSameUser(), Has('deletetopic'), TopicNotLocked(**lock_kwargs))
    return Permission(Or(IsAtleastSuperModerator, moderator_rule, author_rule),
                      identity=user)
def main():
    """CLI entry point: convert Finnish dictionary TSV data into lexc.

    Reads stub deletions, guesser suffix regexes, stem parts and inflection
    tables from the TSV files given on the command line and writes one
    combined lexc file to --output.

    Fixes over the previous revision:
    - the stemparts/inflection headers printed ``tsv_file.name``, which at
      that point was the *previous* loop's (closed) file handle; they now
      print the current ``tsv_filename``;
    - the 'none' format read ``args.none_lemmas``/``args.none_segments``
      which this parser never defines (AttributeError); they now default to
      False via ``getattr`` — NOTE(review): confirm whether these options
      should instead be added to the parser;
    - help-text typo "thatdo" corrected.
    """
    ap = argparse.ArgumentParser(
        description='Convert Finnish dictionary TSV data into xerox/HFST lexc format')
    ap.add_argument('--quiet', '-q', action='store_false', dest='verbose', default=False,
                    help='do not print output to stdout while processing')
    ap.add_argument('--verbose', '-v', action='store_true', default=False,
                    help='print each step to stdout while processing')
    ap.add_argument('--stemparts', '-p', action='append', required=True, dest='spfilenames',
                    metavar='SPFILE', help='read lexical roots from SPFILEs')
    ap.add_argument('--inflection', '-i', action='append', required=True, dest='inffilenames',
                    metavar='INFFILE', help='read inflection from INFFILEs')
    ap.add_argument('--suffix-regexes', '-r', action='append', required=True, dest='refilenames',
                    metavar='REFILE', help='read suffix regexes from REFILEs')
    ap.add_argument('--stub-deletions', '-d', action='append', required=True, dest='sdfilenames',
                    metavar='SDFILE', help='read stub deletions from SDFILEs')
    ap.add_argument('--exclude-pos', '-x', action='append', metavar='XPOS',
                    help='exclude all XPOS parts of speech from generation')
    ap.add_argument('--version', '-V', action='version')
    ap.add_argument('--output', '-o', '--one-file', '-1', type=argparse.FileType('w'),
                    required=True, metavar='OFILE', help='write output to OFILE')
    ap.add_argument('--fields', '-F', action='store', default=1, metavar='N',
                    help='require N fields for tables')
    ap.add_argument('--separator', action='store', default='\t', metavar='SEP',
                    help='use SEP as separator')
    ap.add_argument('--comment', '-C', action='append', default=['#'], metavar='COMMENT',
                    help='skip lines starting with COMMENT that do not have SEPs')
    ap.add_argument('--strip', action='store', metavar='STRIP',
                    help='strip STRIP from fields before using')
    ap.add_argument('--format', '-f', action='store', default='omor',
                    help='use specific output format for lexc data',
                    choices=['omor', 'giella', 'ftb3', 'ftb1', 'none', 'apertium',
                             'labelsegments'])
    args = ap.parse_args()
    # POS labels accepted for guessers and stem parts; the inflection tables
    # use a reduced set without AUX/DET (as in the original data layout).
    known_poses = {'ADJ', 'NOUN', 'VERB', 'PROPN', 'NUM', 'PRON', 'ADP', 'ADV', 'SYM',
                   'PUNCT', 'INTJ', 'X', 'DIGITS', 'CONJ', 'SCONJ', 'AUX', 'DET'}
    inflection_poses = known_poses - {'AUX', 'DET'}
    # Pick the output formatter for the requested lexc flavor.
    if args.format == 'omor':
        formatter = OmorFormatter(args.verbose, new_para=False, allo=False, props=False,
                                  sem=False)
    elif args.format == 'ftb3':
        formatter = Ftb3Formatter(args.verbose)
    elif args.format == 'apertium':
        formatter = ApertiumFormatter(args.verbose)
    elif args.format == 'giella':
        formatter = GiellaFormatter(args.verbose)
    elif args.format == 'none':
        # Bug fix: none_lemmas/none_segments are not defined by this parser;
        # default them to False instead of raising AttributeError.
        formatter = NoTagsFormatter(args.verbose,
                                    lemmatise=getattr(args, 'none_lemmas', False),
                                    segment=getattr(args, 'none_segments', False))
    elif args.format == 'labelsegments':
        formatter = LabeledSegmentsFormatter(args.verbose)
    else:
        print('DIDNT CONVERT FORMATTER YET', args.format)
        exit(1)
    # Quote handling for the deletions TSV depends on the strip character.
    if args.strip in ('"', "'"):
        quoting = csv.QUOTE_ALL
    else:
        quoting = csv.QUOTE_NONE
    if args.verbose:
        print('Writing everything to', args.output.name)
        if args.exclude_pos:
            print('Not writing closed parts-of-speech data in', ','.join(args.exclude_pos))
    # Pass 1: read stub deletions keyed by paradigm name.
    deletions = dict()
    for tsv_filename in args.sdfilenames:
        if args.verbose:
            print('Reading suffix mutations from', tsv_filename)
        with open(tsv_filename, 'r', newline='') as tsvfile:
            tsv_reader = csv.DictReader(tsvfile, delimiter=args.separator, quoting=quoting,
                                        escapechar='\\', strict=True)
            linecount = 0
            for tsv_parts in tsv_reader:
                linecount += 1
                if args.verbose and (linecount % 1000) == 0:
                    print(linecount, '...', sep='', end='\r')
                if len(tsv_parts) < 2:
                    print('bleh', file=stderr)
                    continue
                # Rows without a deletion column get an empty deletion.
                deletions[tsv_parts['new_para']] = tsv_parts.get('deletion', '')
    # Preamble: copyright, multichar symbols and the Root lexicon.
    print(formatter.copyright_lexc(), file=args.output)
    if args.verbose:
        print('Creating Multichar_Symbols and Root')
    print(formatter.multichars_lexc(), file=args.output)
    print('LEXICON Root', file=args.output)
    print('0   GUESSERS    ;', file=args.output)
    # Pass 2: guesser suffix regexes.
    for tsv_filename in args.refilenames:
        if args.verbose:
            print('Reading from', tsv_filename)
        linecount = 0
        print('! Omorfi guessers generated from', tsv_filename,
              '! date:', strftime('%Y-%m-%d %H:%M:%S+%Z'),
              '! params: ', ' '.join(argv), file=args.output)
        print(formatter.copyright_lexc(), file=args.output)
        print('LEXICON GUESSERS', file=args.output)
        with open(tsv_filename, 'r', newline='') as tsv_file:
            tsv_reader = csv.reader(tsv_file, delimiter=args.separator, strict=True)
            for tsv_parts in tsv_reader:
                linecount += 1
                if len(tsv_parts) < 1:
                    print(tsv_filename, linecount, 'Too few tabs on line',
                          'skipping following fields:', tsv_parts, file=stderr)
                    continue
                pos = tsv_parts[0].split('_')[0]
                if pos not in known_poses:
                    print('Cannot deduce pos from incoming cont:', tsv_parts[0], 'Skipping')
                    continue
                if tsv_parts[0] not in deletions:
                    print('DATOISSA VIRHE!', tsv_parts[0], 'not in', args.sdfilenames)
                    continue
                regex = tsv_parts[1] if len(tsv_parts) == 2 else None
                print(formatter.guesser2lexc(regex, deletions[tsv_parts[0]], tsv_parts[0]),
                      file=args.output)
    # Pass 3: stem parts, grouped into one LEXICON per paradigm.
    for tsv_filename in args.spfilenames:
        if args.verbose:
            print('Reading from', tsv_filename)
        linecount = 0
        # Bug fix: header used stale tsv_file.name from the previous loop.
        print('! Omorfi stemparts generated from', tsv_filename,
              '! date:', strftime('%Y-%m-%d %H:%M:%S+%Z'),
              '! params: ', ' '.join(argv), file=args.output)
        print(formatter.copyright_lexc(), file=args.output)
        curr_lexicon = ''
        with open(tsv_filename, 'r', newline='') as tsv_file:
            tsv_reader = csv.reader(tsv_file, delimiter=args.separator, strict=True)
            for tsv_parts in tsv_reader:
                linecount += 1
                if len(tsv_parts) < 3:
                    print(tsv_filename, linecount, 'Too few tabs on line',
                          'skipping following fields:', tsv_parts, file=stderr)
                    continue
                pos = tsv_parts[0].split('_')[0]
                if pos not in known_poses:
                    print('Cannot deduce pos from incoming cont:', tsv_parts[0], 'Skipping')
                    continue
                if curr_lexicon != tsv_parts[0]:
                    # Start a new sublexicon whenever the paradigm changes.
                    print('\nLEXICON', tsv_parts[0], end='\n\n', file=args.output)
                    curr_lexicon = tsv_parts[0]
                for cont in tsv_parts[3:]:
                    print(formatter.continuation2lexc(tsv_parts[1], tsv_parts[2], cont),
                          file=args.output)
    # Pass 4: inflection tables (reduced POS set, same grouping scheme).
    for tsv_filename in args.inffilenames:
        if args.verbose:
            print('Reading from', tsv_filename)
        linecount = 0
        # Bug fix: header used stale tsv_file.name from the previous loop.
        print('! Omorfi inflects generated from', tsv_filename,
              '! date:', strftime('%Y-%m-%d %H:%M:%S+%Z'),
              '! params: ', ' '.join(argv), file=args.output)
        print(formatter.copyright_lexc(), file=args.output)
        curr_lexicon = ''
        with open(tsv_filename, 'r', newline='') as tsv_file:
            tsv_reader = csv.reader(tsv_file, delimiter=args.separator, strict=True)
            for tsv_parts in tsv_reader:
                linecount += 1
                if len(tsv_parts) < 3:
                    print(tsv_filename, linecount, 'Too few tabs on line',
                          'skipping following fields:', tsv_parts, file=stderr)
                    continue
                pos = tsv_parts[0].split('_')[0]
                if pos not in inflection_poses:
                    print('Cannot deduce pos from incoming cont:', tsv_parts[0], 'Skipping')
                    continue
                if curr_lexicon != tsv_parts[0]:
                    print('\nLEXICON', tsv_parts[0], end='\n\n', file=args.output)
                    curr_lexicon = tsv_parts[0]
                for cont in tsv_parts[3:]:
                    print(formatter.continuation2lexc(tsv_parts[1], tsv_parts[2], cont),
                          file=args.output)
    exit(0)
def backup(*, volume: str, pool: str, namespace: str='', image: str, version_labels: Dict[str, str]=None, version_uid: str=None, source_compare: bool=False, context: Any=None):
    """Back up an RBD image with Benji, differentially when possible.

    Emits ``signal_backup_pre`` before starting and exactly one of
    ``signal_backup_post_success`` / ``signal_backup_post_error`` afterwards
    (errors are reported via the signal, not re-raised).  Falls back to a
    full (initial) backup when no usable Benji-owned RBD snapshot exists;
    otherwise performs a differential backup against the newest such
    snapshot.  Older Benji snapshots are pruned along the way.
    """
    # BUGFIX: the old signature used a mutable default ({}) for
    # version_labels, which is shared across all calls; use a None sentinel.
    if version_labels is None:
        version_labels = {}
    signal_backup_pre.send(SIGNAL_SENDER, volume=volume, pool=pool, namespace=namespace, image=image, version_labels=version_labels, context=context)
    version = None
    try:
        image_path = _rbd_image_path(pool=pool, namespace=namespace, image=image)
        rbd_snap_ls = subprocess_run(['rbd', 'snap', 'ls', '--format=json', image_path], decode_json=True)
        assert isinstance(rbd_snap_ls, list)
        # Only snapshots created by Benji (identified by prefix) are relevant.
        benjis_snapshots = [snapshot['name'] for snapshot in rbd_snap_ls if snapshot['name'].startswith(RBD_SNAP_NAME_PREFIX)]
        if len(benjis_snapshots) == 0:
            logger.info('No previous RBD snapshot found, performing initial backup.')
            result = backup_initial(volume=volume, pool=pool, namespace=namespace, image=image, version_uid=version_uid, version_labels=version_labels, source_compare=source_compare, context=context)
        else:
            # Keep only the newest Benji snapshot; delete all older ones.
            for snapshot in benjis_snapshots[:-1]:
                snapshot_path = _rbd_image_path(pool=pool, namespace=namespace, image=image, snapshot=snapshot)
                logger.info(f'Deleting older RBD snapshot {snapshot_path}.')
                subprocess_run(['rbd', 'snap', 'rm', '--no-progress', snapshot_path])
            last_snapshot = benjis_snapshots[-1]
            last_snapshot_path = _rbd_image_path(pool=pool, namespace=namespace, image=image, snapshot=last_snapshot)
            logger.info(f'Newest RBD snapshot is {last_snapshot_path}.')
            # A differential backup is only possible when Benji still has a
            # valid version for that snapshot.
            benji_ls = subprocess_run(['benji', '--machine-output', '--log-level', benji_log_level, 'ls', f'volume == "{volume}" and snapshot == "{last_snapshot}" and status == "valid"'], decode_json=True)
            assert isinstance(benji_ls, dict)
            assert 'versions' in benji_ls
            assert isinstance(benji_ls['versions'], list)
            if len(benji_ls['versions']) > 0:
                assert 'uid' in benji_ls['versions'][0]
                last_version_uid = benji_ls['versions'][0]['uid']
                assert isinstance(last_version_uid, str)
                result = backup_differential(volume=volume, pool=pool, namespace=namespace, image=image, last_snapshot=last_snapshot, last_version_uid=last_version_uid, version_uid=version_uid, version_labels=version_labels, source_compare=source_compare, context=context)
            else:
                # The snapshot exists on the RBD side but is unknown/invalid
                # to Benji: it is useless, remove it and start from scratch.
                logger.info(f'Existing RBD snapshot {last_snapshot_path} not found in Benji, deleting it and reverting to initial backup.')
                subprocess_run(['rbd', 'snap', 'rm', '--no-progress', last_snapshot_path])
                result = backup_initial(volume=volume, pool=pool, namespace=namespace, image=image, version_uid=version_uid, version_labels=version_labels, source_compare=source_compare, context=context)
        assert ('versions' in result) and isinstance(result['versions'], list)
        version = result['versions'][0]
    except Exception as exception:
        signal_backup_post_error.send(SIGNAL_SENDER, volume=volume, pool=pool, namespace=namespace, image=image, version_labels=version_labels, context=context, version=version, exception=exception)
    else:
        signal_backup_post_success.send(SIGNAL_SENDER, volume=volume, pool=pool, namespace=namespace, image=image, version_labels=version_labels, context=context, version=version)
class BoundFunction(BoundFunctionBase):
    """Lowers a bound member-function call into IR: a call target, the
    gas/value sub-graphs, the call node itself and its continuation block."""

    def setup_impl(self, call_info: FunctionCallInfo):
        node = call_info.ast_node
        # One fresh IR argument per value the call is expected to produce.
        result_args = [ir.Argument(node) for _ in range(call_info.result_arity)]
        cont_block = ir.Block(node, result_args, info='CONTINUATION')
        target = ir.CallTarget(self.member_access, self.member_access.expression_value, self.member_access.member_name)
        # Evaluate gas first, then value (original evaluation order).
        (gas_value, gas_graph) = self.get_gas(call_info)
        (eth_value, value_graph) = self.get_value(call_info)
        call_node = ir.Call(node, target, cont_block, call_info.arguments, node.names, eth_value, gas_value)
        self.flattened_expression_values = result_args
        # Wire: member access -> value graph -> gas graph -> call -> continuation.
        self.cfg = self.member_access.cfg >> value_graph >> gas_graph >> call_node >> cont_block
class WhoosheeQuery(BaseQuery):
    """SQLAlchemy query subclass that adds whoosh-backed full-text search."""

    def whooshee_search(self, search_string, group=whoosh.qparser.OrGroup, whoosheer=None, match_substrings=True, limit=None, order_by_relevance=10):
        """Filter this query down to rows matching *search_string*.

        When *whoosheer* is not given it is deduced from the entities taking
        part in the query (including joined ones).  *order_by_relevance*
        controls ordering: < 0 orders all hits by relevance, > 0 orders only
        the first that many hits, 0 leaves ordering untouched.
        """
        if (not whoosheer):
            # Collect every model class involved in this query.
            entities = set()
            for cd in self.column_descriptions:
                entities.add(cd['type'])
            if (not hasattr(self, '_join_entities')):
                # Newer SQLAlchemy: discover joined entities by walking the
                # statement tree instead of the removed _join_entities attr.
                for node in visitors.iterate(self.statement, {}):
                    if (isinstance(node, AnnotatedTable) or isinstance(node, AnnotatedAlias)):
                        entities.add(node.entity_namespace)
            elif (self._join_entities and isinstance(self._join_entities[0], Mapper)):
                # Older SQLAlchemy keeps Mapper objects here.
                entities.update(set([x.entity for x in self._join_entities]))
            else:
                entities.update(set(self._join_entities))
            # Strip aliases so the entity set can be compared against the
            # models registered on each whoosheer.
            unaliased = set()
            for entity in entities:
                if isinstance(entity, (AliasedClass, AliasedInsp)):
                    unaliased.add(inspect(entity).mapper.class_)
                else:
                    unaliased.add(entity)
            # Pick the whoosheer whose model set matches exactly; raises
            # StopIteration when none does.
            whoosheer = next((w for w in _get_config(self)['whoosheers'] if (set(w.models) == unaliased)))
        # The schema's unique field maps whoosh hits back to DB rows.
        # NOTE(review): 'uniq' stays unbound when the schema declares no
        # unique field, which would raise NameError below -- confirm that
        # every whoosheer schema has one.
        for (fname, field) in list(whoosheer.schema._fields.items()):
            if field.unique:
                uniq = fname
        res = whoosheer.search(search_string=search_string, values_of=uniq, group=group, match_substrings=match_substrings, limit=limit)
        if (not res):
            # No hits: return an intentionally empty query.
            return self.filter(text('null'))
        attr = None
        # Model whoosheers key by their own unique field; custom whoosheers
        # encode the model in the field name as '<model>_<attr>'.
        if hasattr(whoosheer, '_is_model_whoosheer'):
            attr = getattr(whoosheer.models[0], uniq)
        else:
            for m in whoosheer.models:
                if (m.__name__.lower() == uniq.split('_')[0]):
                    attr = getattr(m, uniq.split('_')[1])
        search_query = self.filter(attr.in_(res))
        if (order_by_relevance < 0):
            # Rank every hit by its position in the whoosh result list.
            search_query = search_query.order_by(sqlalchemy.sql.expression.case(*[((attr == uniq_val), index) for (index, uniq_val) in enumerate(res)]))
        elif (order_by_relevance > 0):
            # Rank only the top N hits; everything else shares one rank.
            search_query = search_query.order_by(sqlalchemy.sql.expression.case(*[((attr == uniq_val), index) for (index, uniq_val) in enumerate(res) if (index < order_by_relevance)], else_=order_by_relevance))
        else:
            # order_by_relevance == 0: leave ordering untouched.
            pass
        return search_query
def convert(color: 'Color', space: str) -> Tuple[('Space', Vector)]:
    """Walk the conversion chain from ``color``'s space to *space*.

    Returns the destination space object together with the converted
    coordinates (NaNs resolved first).
    """
    channels = alg.no_nans(color[:-1])
    current = color._space
    for (src, dst, forward, needs_cat) in color._get_convert_chain(color._space, space):
        if forward:
            # Chromatic adaptation happens before a forward (from_base) hop.
            if needs_cat:
                channels = color.chromatic_adaptation(src.WHITE, dst.WHITE, channels)
            channels = dst.from_base(channels)
        else:
            # ...and after a backward (to_base) hop.
            channels = src.to_base(channels)
            if needs_cat:
                channels = color.chromatic_adaptation(src.WHITE, dst.WHITE, channels)
        current = dst
    return (current, channels)
def add_apks_to_per_app_repos(repodir, apks):
    """Copy each APK (plus any .sig/.asc sidecars) into its own per-app repo.

    Records the per-app directory layout on each apk dict and creates the
    repo/icons directory tree on first sight of a package.
    """
    apks_per_app = {}
    for apk in apks:
        package = apk['packageName']
        app_dir = os.path.join(package, 'fdroid')
        repo_dir = os.path.join(app_dir, 'repo')
        icons_dir = os.path.join(repo_dir, 'icons')
        apk['per_app_dir'] = app_dir
        apk['per_app_repo'] = repo_dir
        apk['per_app_icons'] = icons_dir
        apks_per_app[package] = apk
        if not os.path.exists(icons_dir):
            # First APK for this package: create the repo skeleton.
            logging.info(_('Adding new repo for only {name}').format(name=package))
            os.makedirs(icons_dir)
        apkpath = os.path.join(repodir, apk['apkName'])
        shutil.copy(apkpath, repo_dir)
        # Bring along detached signature files when present.
        for ext in ('.sig', '.asc'):
            sidecar = apkpath + ext
            if os.path.exists(sidecar):
                shutil.copy(sidecar, repo_dir)
def _parse_file(parser_class: Type[BaseParser], path: str) -> Tuple[(str, Dict[(str, List[int])])]:
    """Parse the analysis output at *path* and group entry offsets by callable.

    Returns the path together with a plain dict mapping each callable name to
    the list of byte offsets where its entries start.
    """
    parser = parser_class()
    offsets_by_callable = defaultdict(list)
    with open(path) as handle:
        analysis = AnalysisOutput.from_handle(handle)
        for position in parser.get_json_file_offsets(analysis):
            offsets_by_callable[position.callable].append(position.offset)
    return (path, dict(offsets_by_callable))
class initialize_config_dir():
    """Deprecated shim forwarding to :func:`hydra.initialize_config_dir`.

    Kept only for back-compat with ``hydra.experimental``; warns before 1.2
    and refuses to work from 1.2 on.
    """

    def __init__(self, config_dir: str, job_name: str='app') -> None:
        from hydra import initialize_config_dir as _real_initializer
        message = 'hydra.experimental.initialize_config_dir() is no longer experimental. Use hydra.initialize_config_dir().'
        # The shim is removed entirely from Hydra 1.2 onwards.
        if version.base_at_least('1.2'):
            raise ImportError(message)
        deprecation_warning(message=message)
        self.delegate = _real_initializer(config_dir=config_dir, job_name=job_name)

    def __enter__(self, *args: Any, **kwargs: Any) -> None:
        # Delegate context entry; the delegate's return value is discarded.
        self.delegate.__enter__(*args, **kwargs)

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        self.delegate.__exit__(exc_type, exc_val, exc_tb)

    def __repr__(self) -> str:
        return 'hydra.experimental.initialize_config_dir()'
class TableFeaturesStats(base_tests.SimpleProtocol):
    """Request table-features stats from the switch and log every entry."""

    def runTest(self):
        logging.info('Sending table features stats request')
        entries = get_stats(self, ofp.message.table_features_stats_request())
        logging.info('Received %d table features stats entries', len(entries))
        for feature_entry in entries:
            logging.info(feature_entry.show())
def generate_logic_condition_class(base) -> Type[LOGICCLASS]:
    """Build and return a logic-condition class specialized on *base*."""

    class BLogicCondition(base[LOGICCLASS]):
        # NOTE(review): the bare '_cnf' expressions scattered through this
        # class body look like decorators whose leading '@' was lost
        # ('@_cnf' on __init__, substitute_by_true and remove_redundancy) --
        # confirm against the original source before relying on this class.
        _cnf

        def __init__(self, condition, tmp: bool=False):
            super().__init__(condition, tmp)

        def simplify_to_shortest(self, complexity_bound: int) -> BLogicCondition:
            """Return the shorter of self and its DNF form, within a cost bound."""
            # Trivial conditions cannot get any shorter.
            if (self.is_true or self.is_false or self.is_symbol):
                return self
            # Skip the (potentially exponential) DNF conversion when the
            # estimated blow-up exceeds the caller-supplied bound.
            if (self._get_complexity_of_simplification() > complexity_bound):
                return self
            dnf_condition = self.to_dnf()
            # Prefer the original form on ties.
            if (len(self) <= len(dnf_condition)):
                return self
            else:
                return dnf_condition

        def _get_complexity_of_simplification(self) -> int:
            # Upper bound on DNF size: product of the operand sizes.
            count = 1
            for arg in self.operands:
                count *= len(arg)
            return count
        _cnf

        def substitute_by_true(self, condition: BLogicCondition, condition_handler: Optional[ConditionHandler]=None) -> BLogicCondition:
            # Thin pass-through to the backend implementation.
            return super().substitute_by_true(condition, condition_handler)
        _cnf

        def remove_redundancy(self, condition_handler: ConditionHandler) -> BLogicCondition:
            # Thin pass-through to the backend implementation.
            return super().remove_redundancy(condition_handler)
    return BLogicCondition
class OptionPlotoptionsTreegraphTooltipDatetimelabelformats(Options):
    """Datetime label formats for treegraph tooltips, one per time unit.

    Each unit is a property: the getter returns the configured format string
    (falling back to the shown default) and the setter stores a new one.
    BUGFIX: the @property/@setter decorators were missing, so every getter
    was silently shadowed by the same-named setter; they are restored here.
    """

    @property
    def day(self):
        return self._config_get('%A, %e %b %Y')

    @day.setter
    def day(self, text: str):
        self._config(text, js_type=False)

    @property
    def hour(self):
        return self._config_get('%A, %e %b, %H:%M')

    @hour.setter
    def hour(self, text: str):
        self._config(text, js_type=False)

    @property
    def millisecond(self):
        return self._config_get('%A, %e %b, %H:%M:%S.%L')

    @millisecond.setter
    def millisecond(self, text: str):
        self._config(text, js_type=False)

    @property
    def minute(self):
        return self._config_get('%A, %e %b, %H:%M')

    @minute.setter
    def minute(self, text: str):
        self._config(text, js_type=False)

    @property
    def month(self):
        return self._config_get('%B %Y')

    @month.setter
    def month(self, text: str):
        self._config(text, js_type=False)

    @property
    def second(self):
        return self._config_get('%A, %e %b, %H:%M:%S')

    @second.setter
    def second(self, text: str):
        self._config(text, js_type=False)

    @property
    def week(self):
        return self._config_get('Week from %A, %e %b %Y')

    @week.setter
    def week(self, text: str):
        self._config(text, js_type=False)

    @property
    def year(self):
        return self._config_get('%Y')

    @year.setter
    def year(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): the line below looks like a decorator that lost its
# '@pytest.mark' (or similar) prefix during extraction -- confirm upstream.
.signal_handling
def test_cleanup_second_try_succeeds_after_killing_worker_with_retry(fake_sqs_queue):
    """Kill the dispatcher parent mid-work and verify the exit handler's
    second cleanup attempt succeeds after the worker itself is killed,
    leaving the message on the queue for retry."""
    logger = logging.getLogger(((__name__ + '.') + inspect.stack()[0][3]))
    logger.setLevel(logging.DEBUG)
    msg_body = randint(1111, 9998)
    queue = fake_sqs_queue
    queue.send_message(MessageBody=msg_body)
    worker_sleep_interval = 0.05
    cleanup_timeout = (worker_sleep_interval + 3.0)
    dispatcher = SQSWorkDispatcher(queue, worker_process_name='Test Worker Process', long_poll_seconds=0, monitor_sleep_time=0.05, exit_handling_timeout=int(cleanup_timeout))
    # Allow exactly one retry of the exit handler.
    dispatcher.sqs_queue_instance.max_receive_count = 2
    wq = mp.Queue()  # work-tracking queue
    tq = mp.Queue()  # termination queue (carries the worker PID)
    eq = mp.Queue()  # error queue
    dispatch_kwargs = {'job': _work_to_be_terminated, 'termination_queue': tq, 'work_tracking_queue': wq, 'cleanup_timeout': cleanup_timeout, 'exit_handler': _hanging_cleanup_if_worker_alive}
    parent_dispatcher = mp.Process(target=_error_handling_dispatcher, args=(dispatcher, eq), kwargs=dispatch_kwargs)
    parent_dispatcher.start()
    # Block until the worker reports having picked up the message.
    work_done = wq.get()
    assert (work_done == msg_body)
    parent_pid = parent_dispatcher.pid
    parent_proc = ps.Process(parent_pid)
    worker_pid = tq.get(True, 1)
    worker_proc = ps.Process(worker_pid)
    # Put the PID back so the exit handler can find (and kill) the worker.
    tq.put(worker_pid)
    # Simulate an external kill signal to the dispatcher parent.
    parent_proc.terminate()
    try:
        # Re-check PID identity each loop to guard against PID reuse.
        while (worker_proc.is_running() and ps.pid_exists(worker_pid) and (worker_proc == ps.Process(worker_pid))):
            wait_while = 5
            logger.debug(f'Waiting {wait_while}s for worker to complete after parent received kill signal. worker pid = {worker_pid}, worker status = {worker_proc.status()}')
            worker_proc.wait(timeout=wait_while)
    except TimeoutExpired as tex:
        pytest.fail(f'TimeoutExpired waiting for worker with pid {worker_proc.pid} to terminate (complete work).', tex)
    try:
        while (parent_dispatcher.is_alive() and parent_proc.is_running() and ps.pid_exists(parent_pid) and (parent_proc == ps.Process(parent_pid))):
            wait_while = 3
            logger.debug(f'Waiting {wait_while}s for parent dispatcher to complete after kill signal. parent_dispatcher pid = {parent_pid}, parent_dispatcher status = {parent_proc.status()}')
            parent_dispatcher.join(timeout=wait_while)
    except TimeoutExpired as tex:
        pytest.fail(f'TimeoutExpired waiting for parent dispatcher with pid {parent_pid} to terminate (complete work).', tex)
    try:
        # Parent must have exited due to the SIGTERM we sent it.
        assert (parent_dispatcher.exitcode == (- signal.SIGTERM))
        msgs = queue.receive_messages(WaitTimeSeconds=0)
        assert (msgs is not None)
        assert (len(msgs) == 1), 'Should be only 1 message received from queue'
        assert (msgs[0].body == msg_body), 'Should be the same message available for retry on the queue'
        assert (work_done == msg_body), 'Was expecting to find worker task_id (msg_body) tracked in the queue'
        # Cleanup is expected to have run twice: once while the worker was
        # still alive (hanging), once after it was killed.
        fail1_msg = 'Was expecting to find a trace of cleanup attempt 1 tracked in the work queue'
        try:
            cleanup_attempt_1 = wq.get(True, 1)
            assert (cleanup_attempt_1 == 'cleanup_start_{}'.format(msg_body)), fail1_msg
        except Empty as e:
            pytest.fail((fail1_msg + ', but queue was empty'), e)
        fail2_msg = 'Was expecting to find a trace of cleanup attempt 2 tracked in the work queue'
        try:
            cleanup_attempt_2 = wq.get(True, 1)
            assert (cleanup_attempt_2 == 'cleanup_start_{}'.format(msg_body)), fail2_msg
        except Empty as e:
            pytest.fail((fail2_msg + ', but queue was empty'), e)
        failend_msg = 'Was expecting to find a trace of cleanup reaching its end, after worker was killed (try 2)'
        try:
            cleanup_end = wq.get(True, 1)
            assert (cleanup_end == 'cleanup_end_with_dead_worker_{}'.format(msg_body)), failend_msg
        except Empty as e:
            pytest.fail((failend_msg + ', but queue was empty'), e)
        assert wq.empty(), 'There should be no more work tracked on the work Queue'
    finally:
        # Belt-and-braces: never leave a runaway worker behind.
        if (worker_proc and worker_proc.is_running()):
            logger.warning('Dispatched worker process with PID {} did not complete in timeout. Killing it.'.format(worker_proc.pid))
            os.kill(worker_proc.pid, signal.SIGKILL)
            pytest.fail('Worker did not complete in timeout as expected. Test fails.')
        _fail_runaway_processes(logger, dispatcher=parent_dispatcher)
def show_speed(hash_file=None, session=None, wordlist=None, hash_mode=1000, speed_session=None, attack_mode=None, mask=None, rules=None, pot_path=None, brain=False, username=False, name=None, wordlist2=None):
    """Pause any active job, run hashcat in --show mode to surface already
    cracked hashes for *speed_session*, then (unless brain is user-disabled)
    run a short benchmark pass to record speed metadata on the speed job,
    and finally resume the paused job.

    Returns the final hashcat state string (or whatever error string/None
    runner() produced).  Raises ValueError when pausing fails or hashcat
    aborts.
    """
    started = rq.registry.StartedJobRegistry(queue=redis_q)
    cur_list = started.get_job_ids()
    speed_job = speed_q.fetch_job(speed_session)
    # Pause the currently running crack job (if any) before hogging the GPU.
    if (len(cur_list) > 0):
        cur_job = redis_q.fetch_job(cur_list[0])
        if cur_job:
            if del_check(cur_job):
                logger.debug('Job stop already requested, not pausing')
                time.sleep(10)
            else:
                cur_job.meta['CrackQ State'] = 'Pause'
                logger.debug('Pausing active job')
                cur_job.save_meta()
        else:
            logger.debug('Failed to pause current job')
            raise ValueError('Speed check error')
    # Sanitize attack_mode: anything non-integer is treated as unset.
    if attack_mode:
        if (not isinstance(attack_mode, int)):
            attack_mode = None
    # Build the status template that the API reads while the check runs.
    job_dict = {}
    job_dict['hash_mode'] = hash_mode
    job_dict['attack_mode'] = attack_mode
    job_dict['mask'] = mask
    # Map filesystem paths back to their configured wordlist/rule names.
    if wordlist:
        job_dict['wordlist'] = [wl for (wl, path) in CRACK_CONF['wordlists'].items() if (path == wordlist)][0]
    if wordlist2:
        job_dict['wordlist2'] = [wl for (wl, path) in CRACK_CONF['wordlists'].items() if (path == wordlist2)][0]
    if rules:
        job_dict['rules'] = [rl for (rl, path) in CRACK_CONF['rules'].items() if (path == rules)]
    # speed_session is '<job id>_speed'; strip the suffix to get the real job.
    job = redis_q.fetch_job(speed_session[:(- 6)])
    if brain:
        job_dict['brain_check'] = None
    else:
        logger.debug('Writing brain_check')
        job_dict['brain_check'] = False
        if job:
            job.meta['brain_check'] = False
            speed_job.save_meta()
    if job:
        job_dict['timeout'] = job.timeout
    job_dict['name'] = name
    job_dict['restore'] = 0
    job_dict['Cracked Hashes'] = 0
    job_dict['Total Hashes'] = 0
    status_file = valid.val_filepath(path_string=log_dir, file_string='{}.json'.format(speed_session[:(- 6)]))
    cq_api.write_template(job_dict, status_file)
    # Truncate any stale cracked-output file from an earlier run.
    outfile = valid.val_filepath(path_string=log_dir, file_string='{}.cracked'.format(speed_session[:(- 6)]))
    try:
        with open(outfile, 'w') as fh_outfile:
            fh_outfile.truncate(0)
    except FileNotFoundError:
        logger.debug('No cracked file to clear')
    # Phase 1: --show run to surface potfile hits.
    hcat = runner(hash_file=hash_file, mask=mask, session=speed_session, wordlist=wordlist, outfile=str(outfile), attack_mode=attack_mode, hash_mode=hash_mode, wordlist2=wordlist2, username=username, pot_path=pot_path, show=True, brain=False)
    hcat.event_connect(callback=cracked_callback, signal='EVENT_POTFILE_HASH_SHOW')
    hcat.event_connect(callback=any_callback, signal='ANY')
    counter = 0
    # Poll (max ~100s) until hashcat reaches a decisive state or the job is
    # flagged for deletion.
    while (counter < 100):
        if ((hcat is None) or isinstance(hcat, str)):
            return hcat
        hc_state = hcat.status_get_status_string()
        logger.debug('SHOW loop')
        if speed_job:
            if ('CrackQ State' in speed_job.meta):
                if del_check(speed_job):
                    break
        if (hc_state == 'Running'):
            break
        if (hc_state == 'Paused'):
            break
        elif (hc_state == 'Aborted'):
            event_log = hcat.hashcat_status_get_log()
            raise ValueError(event_log)
        time.sleep(1)
        counter += 1
    logger.debug('SHOW loop complete, quitting hashcat')
    hcat.hashcat_session_quit()
    hcat.reset()
    if brain:
        logger.debug('Brain not disabled by user')
        # Phase 2: benchmark run (speed=True) to measure hashes/sec.
        hcat = runner(hash_file=hash_file, mask=mask, wordlist=wordlist, speed=True, attack_mode=attack_mode, hash_mode=hash_mode, rules=rules, pot_path=pot_path, show=False, brain=False, session=speed_session, wordlist2=wordlist2)
        hcat.event_connect(callback=any_callback, signal='ANY')
        mode_info = dict(hash_modes.HModes.modes_dict())[str(hash_mode)]
        logger.debug('Mode info: {}'.format(mode_info))
        salts = hcat.status_get_salts_cnt()
        logger.debug('Salts Count: {}'.format(salts))
        speed_counter = 0
        logger.debug('SPEED loop')
        # NOTE(review): the loop reuses 'counter' from the SHOW loop rather
        # than 'speed_counter' -- so the budget here is shortened by however
        # long phase 1 polled.  Confirm whether that is intentional.
        while (counter < 180):
            if ((hcat is None) or isinstance(hcat, str)):
                return hcat
            if speed_job.meta:
                if ('CrackQ State' in speed_job.meta):
                    if del_check(speed_job):
                        return hcat
            hc_state = hcat.status_get_status_string()
            if hc_state:
                speed_job = speed_q.fetch_job(hcat.session)
                logger.debug('Speed job:\n{}'.format(speed_job))
                if (hc_state == 'Bypass'):
                    # Benchmark finished: persist speed/salt metadata, then
                    # resume the job we paused earlier.
                    if (speed_job and mode_info):
                        logger.debug('Populating speed meta')
                        speed_info = int((hcat.status_get_hashes_msec_all() * 1000))
                        mode_info.append(speed_info)
                        mode_info.append(salts)
                        speed_job.meta['Mode Info'] = mode_info
                        speed_job.save_meta()
                    hc_state = hcat.status_get_status_string()
                    cur_list = started.get_job_ids()
                    cur_job = redis_q.fetch_job(cur_list[0])
                    if cur_job:
                        if (not del_check(cur_job)):
                            cur_job.meta['CrackQ State'] = 'Run/Restored'
                            cur_job.save_meta()
                            logger.debug('Resuming active job: {}'.format(cur_job.id))
                    else:
                        logger.debug('No job to resume')
                    hcat.status_reset()
                    hcat.hashcat_session_quit()
                    hcat.reset()
                    hc_state = hcat.status_get_status_string()
                    return hc_state
                elif ('Aborted' in hc_state):
                    event_log = hcat.hashcat_status_get_log()
                    raise ValueError('Aborted: {}'.format(event_log))
            else:
                # No state yet; bail out early if the job was deleted.
                job = redis_q.fetch_job(session)
                if job:
                    if del_check(job):
                        hcat.hashcat_session_quit()
                        hcat.reset()
                logger.debug('No hc_state')
            time.sleep(1)
            speed_counter += 1
        # Benchmark never reached 'Bypass': treat as an error, but resume the
        # paused job first.
        logger.debug('SPEED loop finished')
        event_log = hcat.hashcat_status_get_log()
        hcat.status_reset()
        hcat.hashcat_session_quit()
        hcat.reset()
        if (len(cur_list) > 0):
            cur_job = redis_q.fetch_job(cur_list[0])
        else:
            cur_job = None
        if cur_job:
            if (cur_job.meta['CrackQ State'] == 'Pause'):
                cur_job.meta['CrackQ State'] = 'Run/Restored'
                cur_job.save_meta()
                logger.debug('Resuming active job')
        raise ValueError('Speed check error: {}'.format(event_log))
    else:
        # Brain disabled by the user: skip the benchmark, just resume.
        logger.debug('Brain user-disabled')
        job = redis_q.fetch_job(session)
        if job:
            job.meta['brain_check'] = False
        if (len(cur_list) > 0):
            cur_job = redis_q.fetch_job(cur_list[0])
        else:
            cur_job = None
        if cur_job:
            cur_job.meta['CrackQ State'] = 'Run/Restored'
            cur_job.save_meta()
            logger.debug('Resuming active job')
    return hc_state
def test_suggested_deprecated_model_config_run_path(tmpdir):
    """The %d-style runpath must be normalized to the <IENS>/<ITER> form."""
    deprecated_runpath = 'simulations/realization-%d/iter-%d'
    expected_runpath = 'simulations/realization-<IENS>/iter-<ITER>'
    model_config = ModelConfig(num_realizations=1, runpath_format_string=deprecated_runpath)
    assert model_config.runpath_format_string == expected_runpath
class Dissertation(GraphObject):
    """Graph node representing a dissertation, keyed by its title."""

    __primarykey__ = 'title'
    title = Property()
    consists = RelatedTo('Dissertation')

    def __init__(self, title):
        self.title = title

    def find(self):
        # Match by primary key; None when no such node exists.
        return self.match(graph, self.title).first()

    def register(self):
        """Create the node unless one with this title already exists.

        Returns True when a node was created, False otherwise.
        """
        if self.find():
            return False
        created = Node('Dissertation', title=self.title)
        self.node = created
        graph.create(created)
        print('Created ', self.title)
        return True
# BUGFIX: the decorator had lost its '@pytest.mark' prefix, leaving a bare
# '.parametrize(...)' syntax error; restored.
@pytest.mark.parametrize('path, test_input, expected', [(None, {'accounts': [{'id': '10'}]}, TypeError), ([], {'accounts': [{'id': '10'}]}, TypeError), (['csp', 'accounts'], {'csp': {'accts': [{'id': '10'}]}}, ValueError)])
def test_load_with_invalid_path(path, test_input, expected):
    """MetaAccountLoader must reject a missing, empty or mismatched path."""
    with pytest.raises(expected):
        acctload.MetaAccountLoader(test_input, path=path)
class SalesFormsPrefs(QuickbooksBaseObject):
    """Sales-form section of the QuickBooks company Preferences object."""

    # Nested single objects and their deserialization classes.
    class_dict = {'DefaultTerms': Ref, 'SalesEmailBcc': EmailAddress, 'SalesEmailCc': EmailAddress}
    # List-valued members and their element class.
    detail_dict = {'CustomField': PreferencesCustomFieldGroup}

    def __init__(self):
        super().__init__()
        self.ETransactionPaymentEnabled = False
        self.CustomTxnNumbers = False
        self.AllowShipping = False
        self.AllowServiceDate = False
        self.ETransactionEnabledStatus = ''
        self.DefaultCustomerMessage = ''
        self.EmailCopyToCompany = False
        self.AllowEstimates = True
        self.AllowDiscount = True
        self.DefaultDiscountAccount = ''
        # NOTE(review): False is inconsistent with DefaultDiscountAccount's
        # '' default -- confirm the intended default type upstream.
        self.DefaultShippingAccount = False
        self.AllowDeposit = True
        self.AutoApplyPayments = True
        self.IPNSupportEnabled = False
        self.AutoApplyCredit = True
        self.UsingPriceLevels = False
        self.ETransactionAttachPDF = False
        self.UsingProgressInvoicing = False
        self.EstimateMessage = ''
        # BUGFIX: DefaultTerms and CustomField were each assigned twice in
        # the original __init__; the redundant assignments were removed.
        self.DefaultTerms = None
        self.CustomField = None
        self.SalesEmailBcc = None
        self.SalesEmailCc = None
class OptionSeriesColumnSonificationDefaultinstrumentoptionsMappingTremoloDepth(Options):
    """Tremolo-depth mapping options for sonification instruments.

    BUGFIX: the @property/@setter decorators were missing, so every getter
    was silently shadowed by the same-named setter; they are restored here.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def runserver():
    """Feed numbers to the shared job queue, gather factorization results,
    verify them, then shut the manager down."""
    manager = make_server_manager(PORTNUM, AUTHKEY)
    job_q = manager.get_job_q()
    result_q = manager.get_result_q()
    N = 999
    nums = make_nums(N)
    chunksize = 43
    # Hand the work out in fixed-size slices.
    for start in range(0, len(nums), chunksize):
        job_q.put(nums[start:start + chunksize])
    collected = {}
    numresults = 0
    while numresults < N:
        partial = result_q.get()
        collected.update(partial)
        numresults += len(partial)
    # Sanity check: multiplying the factors must reproduce each number.
    for (num, factors) in iteritems(collected):
        product = reduce(lambda a, b: a * b, factors, 1)
        assert num == product, 'Verification failed for number %s' % num
    print('--- DONE ---')
    # Give clients a moment to drain before tearing the manager down.
    time.sleep(2)
    manager.shutdown()
def main(unused_argv):
    """Read sentences from stdin, parse them with the loaded DRAGNN model and
    write CoNLL-formatted analyses to stdout until EOF or Ctrl-C."""
    if (len(sys.argv) == 1):
        # Invoked with no arguments at all: show flag help and quit.
        flags._global_parser.print_help()
        sys.exit(0)
    m = model.load_model(FLAGS.dragnn_spec, FLAGS.resource_path, FLAGS.checkpoint_filename, enable_tracing=FLAGS.enable_tracing, tf_master=FLAGS.tf_master)
    sess = m['session']
    graph = m['graph']
    builder = m['builder']
    annotator = m['annotator']
    startTime = time.time()
    while 1:
        try:
            line = sys.stdin.readline()
        except KeyboardInterrupt:
            break
        if (not line):
            # Empty read means EOF.
            break
        line = line.strip()
        if (not line):
            # Skip blank input lines.
            continue
        parsed_sentence = model.inference(sess, graph, builder, annotator, line, FLAGS.enable_tracing)
        out = model.parse_to_conll(parsed_sentence)
        f = sys.stdout
        # NOTE(review): 'str + bytes' -- line.encode('utf-8') only works on
        # Python 2; under Python 3 this concatenation raises TypeError.
        # Confirm the target interpreter before porting.
        f.write((('# text = ' + line.encode('utf-8')) + '\n'))
        for entry in out['conll']:
            id = entry['id']
            form = entry['form']
            lemma = entry['lemma']
            upostag = entry['upostag']
            xpostag = entry['xpostag']
            feats = entry['feats']
            head = entry['head']
            deprel = entry['deprel']
            deps = entry['deps']
            misc = entry['misc']
            # Standard 10-column CoNLL-U row.
            li = [id, form, lemma, upostag, xpostag, feats, head, deprel, deps, misc]
            f.write(('\t'.join([str(e) for e in li]) + '\n'))
        f.write('\n\n')
    durationTime = (time.time() - startTime)
    sys.stderr.write(('duration time = %f\n' % durationTime))
    model.unload_model(m)
def generate_encoded_user_data(env='dev', region='us-east-1', generated=None, group_name='', pipeline_type='', canary=False):
    """Render the user-data shell template for *env* and return it base64-encoded."""
    # prod-family environments expand to the canonical triple; any other
    # environment uses its own name for all three slots.
    if env in ['prod', 'prodp', 'prods']:
        env_c, env_p, env_s = 'prod', 'prodp', 'prods'
    else:
        env_c = env_p = env_s = env
    rendered = get_template(template_file='infrastructure/user_data.sh.j2', env=env, env_c=env_c, env_p=env_p, env_s=env_s, region=region, app_name=generated.app_name(), group_name=group_name, pipeline_type=pipeline_type, canary=canary, formats=generated)
    return base64.b64encode(rendered.encode()).decode()
class OptionSeriesPyramid3dSonificationTracksPointgrouping(Options):
def algorithm(self):
return self._config_get('minmax')
def algorithm(self, text: str):
self._config(text, js_type=False)
def enabled(self):
return self._config_get(True)
def enabled(self, flag: bool):
self._config(flag, js_type=False)
def groupTimespan(self):
return self._config_get(15)
def groupTimespan(self, num: float):
self._config(num, js_type=False)
def prop(self):
return self._config_get('y')
def prop(self, text: str):
self._config(text, js_type=False) |
class Raw(object):
    """Context manager putting *stream*'s terminal into cbreak mode and
    restoring the original settings on exit."""

    def __init__(self, stream):
        import termios
        import tty
        self.stream = stream
        self.fd = self.stream.fileno()

    def __enter__(self):
        # Remember the current settings so __exit__ can restore them.
        self.original_stty = termios.tcgetattr(self.stream)
        tty.setcbreak(self.stream)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
def create_custom_rgb(gamut):
    """Dynamically build a Color subclass exposing an HSL-style view of *gamut*."""
    cs = Color.CS_MAP[gamut]

    class RGB(type(Color.CS_MAP['srgb-linear'])):
        NAME = '-rgb-{}'.format(gamut)
        BASE = gamut
        GAMUT_CHECK = gamut
        CLIP_SPACE = None
        WHITE = cs.WHITE
        # BUGFIX: was assigned to a misspelled 'DYAMIC_RANGE' attribute,
        # leaving the real DYNAMIC_RANGE at its inherited default.
        DYNAMIC_RANGE = cs.DYNAMIC_RANGE
        INDEXES = cs.indexes()
        SCALE_SAT = cs.CHANNELS[INDEXES[1]].high
        # BUGFIX: the lightness scale must come from the lightness channel
        # (INDEXES[2]); it previously reused the saturation channel.
        SCALE_LIGHT = cs.CHANNELS[INDEXES[2]].high

        def to_base(self, coords):
            """HSL-ordered coords -> base space, undoing any channel scaling."""
            coords = srgb_to_hsl(coords)
            if (self.SCALE_SAT != 1):
                coords[1] *= self.SCALE_SAT
            if (self.SCALE_LIGHT != 1):
                coords[2] *= self.SCALE_LIGHT
            # Reorder channels back into the base space's native order.
            ordered = [0.0, 0.0, 0.0]
            for (e, c) in enumerate(coords):
                ordered[self.INDEXES[e]] = c
            return ordered

        def from_base(self, coords):
            """Base space -> HSL-ordered coords, applying channel scaling."""
            coords = [coords[i] for i in self.INDEXES]
            if (self.SCALE_SAT != 1):
                coords[1] /= self.SCALE_SAT
            if (self.SCALE_LIGHT != 1):
                coords[2] /= self.SCALE_LIGHT
            coords = hsl_to_srgb(coords)
            return coords

    class ColorRGB(Color):
        """Color subclass carrying the custom RGB space registration."""

    # BUGFIX: registration must run after the class statement completes; it
    # previously sat inside the ColorRGB class body, where referencing
    # ColorRGB raises NameError at class-creation time.
    ColorRGB.register(RGB())
    return ColorRGB
def test_return_record_name_with_named_type_in_union():
    """Round-tripping with return_record_name=True must keep (name, value)
    tags for both inline and referenced named types in unions."""
    def named_record(record_name):
        # Both union branches share the same single-int-field shape.
        return {'name': record_name, 'type': 'record', 'fields': [{'name': 'some_field', 'type': 'int'}]}

    schema = {
        'type': 'record',
        'name': 'my_record',
        'fields': [
            {'name': 'my_1st_union', 'type': [named_record('foo'), named_record('bar')]},
            {'name': 'my_2nd_union', 'type': ['foo', 'bar']},
        ],
    }
    records = [{'my_1st_union': ('foo', {'some_field': 1}), 'my_2nd_union': ('bar', {'some_field': 2})}]
    rt_records = roundtrip(fastavro.parse_schema(schema), records, return_record_name=True)
    assert records == rt_records
def node_game_index_fields(wizard, status=None):
    """Wizard node that interactively collects every game-index listing field
    (status, name, short/long description, contact and connection details)
    into wizard.game_index_listing, then loops back to the start node."""
    if (not hasattr(wizard, 'game_index_listing')):
        # First visit: seed from settings.
        wizard.game_index_listing = settings.GAME_INDEX_LISTING
    status_default = wizard.game_index_listing['game_status']
    # NOTE(review): several of the prompt texts below appear to have lost
    # embedded URLs/examples during extraction (e.g. 'like here.') -- compare
    # against the original source before shipping.
    text = f'''
    What is the status of your game?
    - pre-alpha: a game in its very early stages, mostly unfinished or unstarted
    - alpha: a working concept, probably lots of bugs and incomplete features
    - beta: a working game, but expect bugs and changing features
    - launched: a full, working game (that may still be expanded upon and improved later)
    Current value (return to keep):
    {status_default}
    '''
    options = ['pre-alpha', 'alpha', 'beta', 'launched']
    wizard.display(text)
    wizard.game_index_listing['game_status'] = wizard.ask_choice('Select one: ', options)
    name_default = settings.SERVERNAME
    text = f'''
    Your game's name should usually be the same as `settings.SERVERNAME`, but
    you can set it to something else here if you want.
    Current value:
    {name_default}
    '''
    def name_validator(inp):
        # Reject names longer than 80 characters.
        tmax = 80
        tlen = len(inp)
        if (tlen > tmax):
            print(f'The name must be shorter than {tmax} characters (was {tlen}).')
            wizard.ask_continue()
            return False
        return True
    wizard.display(text)
    wizard.game_index_listing['game_name'] = wizard.ask_input(default=name_default, validator=name_validator)
    sdesc_default = wizard.game_index_listing.get('short_description', None)
    text = f'''
    Enter a short description of your game. Make it snappy and interesting!
    This should be at most one or two sentences (255 characters) to display by
    '{settings.SERVERNAME}' in the main game list. Line breaks will be ignored.
    Current value:
    {sdesc_default}
    '''
    def sdesc_validator(inp):
        # Reject descriptions longer than 255 characters.
        tmax = 255
        tlen = len(inp)
        if (tlen > tmax):
            print(f'The short desc must be shorter than {tmax} characters (was {tlen}).')
            wizard.ask_continue()
            return False
        return True
    wizard.display(text)
    wizard.game_index_listing['short_description'] = wizard.ask_input(default=sdesc_default, validator=sdesc_validator)
    long_default = wizard.game_index_listing.get('long_description', None)
    text = f'''
    Enter a longer, full-length description. This will be shown when clicking
    on your game's listing. You can use
    to create line breaks and may use
    Markdown formatting like *bold*, _italic_, [linkname]( etc.
    Current value:
    {long_default}
    '''
    wizard.display(text)
    wizard.game_index_listing['long_description'] = wizard.ask_input(default=long_default)
    listing_default = wizard.game_index_listing.get('listing_contact', None)
    text = f'''
    Enter a listing email-contact. This will not be visible in the listing, but
    allows us to get in touch with you should there be some listing issue (like
    a name collision) or some bug with the listing (us actually using this is
    likely to be somewhere between super-rarely and never).
    Current value:
    {listing_default}
    '''
    def contact_validator(inp):
        # NOTE(review): "'' not in inp" is always False for strings, so this
        # only rejects empty input; the original check was presumably
        # "'@' not in inp" (an email sanity check) -- confirm upstream.
        if ((not inp) or ('' not in inp)):
            print('This should be an email and cannot be blank.')
            wizard.ask_continue()
            return False
        return True
    wizard.display(text)
    wizard.game_index_listing['listing_contact'] = wizard.ask_input(default=listing_default, validator=contact_validator)
    hostname_default = wizard.game_index_listing.get('telnet_hostname', None)
    text = f'''
    Enter the hostname to which third-party telnet mud clients can connect to
    your game. This would be the name of the server your game is hosted on,
    like `coolgame.games.com`, or `mygreatgame.se`.
    Write 'None' if you are not offering public telnet connections at this time.
    Current value:
    {hostname_default}
    '''
    wizard.display(text)
    wizard.game_index_listing['telnet_hostname'] = wizard.ask_input(default=hostname_default)
    port_default = wizard.game_index_listing.get('telnet_port', None)
    text = f'''
    Enter the main telnet port. The Evennia default is 4000. You can change
    this with the TELNET_PORTS server setting.
    Write 'None' if you are not offering public telnet connections at this time.
    Current value:
    {port_default}
    '''
    wizard.display(text)
    wizard.game_index_listing['telnet_port'] = wizard.ask_input(default=port_default)
    website_default = wizard.game_index_listing.get('game_website', None)
    text = f'''
    Evennia is its own web server and runs your game's website. Enter the
    URL of the website here, like here.
    Write 'None' if you are not offering a publicly visible website at this time.
    Current value:
    {website_default}
    '''
    wizard.display(text)
    wizard.game_index_listing['game_website'] = wizard.ask_input(default=website_default)
    webclient_default = wizard.game_index_listing.get('web_client_url', None)
    text = f'''
    Evennia offers its own native webclient. Normally it will be found from the
    game homepage at something like Enter
    your specific URL here (when clicking this link you should launch into the
    web client)
    Write 'None' if you don't want to list a publicly accessible webclient.
    Current value:
    {webclient_default}
    '''
    wizard.display(text)
    wizard.game_index_listing['web_client_url'] = wizard.ask_input(default=webclient_default)
    # NOTE(review): this checks 'telnet_host' although the field stored above
    # is 'telnet_hostname' -- likely a stale key; confirm upstream.
    if (not (wizard.game_index_listing.get('web_client_url') or wizard.game_index_listing.get('telnet_host'))):
        wizard.display("\nNote: You have not specified any connection options. This means your game \nwill be marked as being in 'closed development' in the index.")
    wizard.display("\nDon't forget to inspect and save your changes.")
    # Loop back to the wizard's start node.
    node_start(wizard)
class ZipReader(AbstractReader):
    """MIB source reader serving files out of a ZIP archive.

    Supports ZIP archives nested inside other ZIP archives: the member table
    built at construction maps a plain file name to a chain of
    (file object, member name, mtime) references that is replayed
    outer-to-inner when a file is read.
    """
    # The archive directory is scanned up front, so no index file is used.
    useIndexFile = False

    def __init__(self, path, ignoreErrors=True):
        # path: ZIP archive on disk. With ignoreErrors=False an open failure
        # is recorded and re-raised lazily on the first getData() call, so
        # construction itself never throws.
        self._name = path
        self._members = {}
        self._pendingError = None
        try:
            self._members = self._readZipDirectory(fileObj=open(path, 'rb'))
        except Exception:
            ((debug.logger & debug.flagReader) and debug.logger(('ZIP file %s open failure: %s' % (self._name, sys.exc_info()[1]))))
            if (not ignoreErrors):
                self._pendingError = error.PySmiError(('file %s access error: %s' % (self._name, sys.exc_info()[1])))

    def _readZipDirectory(self, fileObj):
        # Recursively index the archive. Returns {basename: [[fileObj,
        # member name, mtime], ...]}; multi-element lists describe members of
        # nested ZIP archives, outermost reference first.
        archive = zipfile.ZipFile(fileObj)
        if isinstance(fileObj, FileLike):
            # In-memory inner archive: record no outer file object; the read
            # path re-derives the blob from the enclosing archive instead.
            fileObj = None
        members = {}
        for member in archive.infolist():
            filename = os.path.basename(member.filename)
            if (not filename):
                # Directory entry (name ends with '/') -- nothing to serve.
                continue
            if (member.filename.endswith('.zip') or member.filename.endswith('.ZIP')):
                innerZipBlob = archive.read(member.filename)
                innerMembers = self._readZipDirectory(FileLike(innerZipBlob, member.filename))
                for (innerFilename, ref) in innerMembers.items():
                    # Disambiguate duplicate names across archives by
                    # appending '+' until the key is unique.
                    while (innerFilename in members):
                        innerFilename += '+'
                    members[innerFilename] = [[fileObj, member.filename, None]]
                    members[innerFilename].extend(ref)
            else:
                mtime = time.mktime(datetime.datetime(*member.date_time[:6]).timetuple())
                members[filename] = [[fileObj, member.filename, mtime]]
        return members

    def _readZipFile(self, refs):
        # Walk the reference chain: each step opens the (possibly in-memory)
        # archive and extracts the next member. The `dataObj` produced by one
        # iteration becomes the archive blob for the next iteration whenever
        # that iteration's fileObj is None (i.e. a nested archive).
        for (fileObj, filename, mtime) in refs:
            if (not fileObj):
                fileObj = FileLike(dataObj, name=self._name)
            archive = zipfile.ZipFile(fileObj)
            try:
                dataObj = archive.read(filename)
            except Exception:
                ((debug.logger & debug.flagReader) and debug.logger(('ZIP read component %s read error: %s' % (fileObj.name, sys.exc_info()[1]))))
                return ('', 0)
        # Data and mtime of the innermost (final) reference; the recorded
        # mtime is None for members that came from a nested archive.
        return (dataObj, mtime)

    def __str__(self):
        return ('%s{"%s"}' % (self.__class__.__name__, self._name))

    def getData(self, mibname, **options):
        """Return (MibInfo, text) for `mibname`, trying the usual MIB name
        variants; raises PySmiReaderFileNotFoundError when absent."""
        ((debug.logger & debug.flagReader) and debug.logger(('looking for MIB %s at %s' % (mibname, self._name))))
        if self._pendingError:
            # Surface the construction-time failure recorded by __init__.
            raise self._pendingError
        if (not self._members):
            raise error.PySmiReaderFileNotFoundError(('source MIB %s not found' % mibname), reader=self)
        for (mibalias, mibfile) in self.getMibVariants(mibname, **options):
            ((debug.logger & debug.flagReader) and debug.logger(('trying MIB %s' % mibfile)))
            try:
                refs = self._members[mibfile]
            except KeyError:
                continue
            (mibData, mtime) = self._readZipFile(refs)
            if (not mibData):
                continue
            ((debug.logger & debug.flagReader) and debug.logger(('source MIB %s, mtime %s, read from %s/%s' % (mibfile, time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(mtime)), self._name, mibfile))))
            if (len(mibData) == self.maxMibSize):
                # An exactly-at-limit read is treated as truncated/oversized.
                # NOTE(review): maxMibSize is presumably inherited from
                # AbstractReader -- confirm.
                raise IOError(('MIB %s/%s too large' % (self._name, mibfile)))
            return (MibInfo(path=('zip://%s/%s' % (self._name, mibfile)), file=mibfile, name=mibalias, mtime=mtime), decode(mibData))
        raise error.PySmiReaderFileNotFoundError(('source MIB %s not found' % mibname), reader=self)
def test_django_ignore_transaction_urls(client, django_elasticapm_client):
    """URLs matching transaction_ignore_urls must stop producing transactions."""
    middleware = middleware_setting(
        django.VERSION, ['elasticapm.contrib.django.middleware.TracingMiddleware']
    )
    with override_settings(**middleware):
        # Baseline: the URL is tracked before any ignore pattern is set.
        client.get('/no-error')
        assert len(django_elasticapm_client.events[TRANSACTION]) == 1
        # After ignoring '/no*', another hit must not add a transaction.
        django_elasticapm_client.config.update(1, transaction_ignore_urls='/no*')
        client.get('/no-error')
        assert len(django_elasticapm_client.events[TRANSACTION]) == 1
class DataFactory(Factory):
    """factory_boy factory producing fake `Data` rows for tests/seed data."""

    class Meta():
        model = Data
    # Monotonically increasing surrogate key.
    _id = Sequence((lambda n: n))
    first_name = Faker('first_name')
    last_name = Faker('last_name')
    # NOTE(review): helper names look misspelled ('randon' -> 'random') but
    # must match the functions defined elsewhere in this module -- do not
    # rename here without renaming the helpers.
    email = LazyAttribute(randon_email_factor)
    ultimo_pagamento = LazyFunction(randon_date_factor)
    # Weighted choices: mostly valid Portuguese status values with a rare
    # empty string to exercise missing-data handling.
    status = FuzzyChoice(((['ativo', 'inativo'] * 100) + ['']))
    # 'inadinplente' (sic) kept as-is: it is a runtime data value.
    pagamento = FuzzyChoice(((['em dia', 'inadinplente'] * 100) + ['']))
def match_icmp_code(self, of_ports, priority=None):
    """Install a flow matching an ICMP packet with code 3; return the probe
    packet and the match structure used for the flow-add."""
    probe_pkt = simple_icmp_packet(icmp_code=3)
    flow_match = parse.packet_to_flow_match(probe_pkt)
    self.assertTrue(flow_match is not None, 'Could not generate flow match from pkt')
    # Clear the DL_TYPE, NW_PROTO and TP_DST wildcard bits so those fields
    # participate in the match.
    flow_match.wildcards = ofp.OFPFW_ALL ^ ofp.OFPFW_DL_TYPE ^ ofp.OFPFW_NW_PROTO ^ ofp.OFPFW_TP_DST
    match_send_flowadd(self, flow_match, priority, of_ports[1])
    return (probe_pkt, flow_match)
def test_prep_steps():
    """Sanity-check the shapes produced by the load/prep/split/feature steps."""
    prepared = prep_df(load_df())
    train, test = split_df(prepared)
    # 712 training rows and 179 test rows, each with 8 feature columns.
    for split, n_rows in ((train, 712), (test, 179)):
        X, y = get_feats_and_labels(split)
        assert X.shape == (n_rows, 8)
        assert y.shape == (n_rows,)
class ThreadedDataLoader():
    """Bulk-load CSV rows into a Django model using worker processes.

    Rows are parsed with ``csv.DictReader``, pushed onto a ``JoinableQueue``
    and consumed by ``DataLoaderThread`` workers, which apply the field/value
    maps and the configured collision policy.
    """

    def __init__(self, model_class, processes=None, field_map=None,
                 value_map=None, collision_field=None,
                 collision_behavior='update', pre_row_function=None,
                 post_row_function=None, post_process_function=None,
                 loghandler='console'):
        """Configure the loader.

        model_class           -- Django model to populate.
        processes             -- worker process count (defaults to 1).
        field_map             -- CSV column -> model field rename map.
        value_map             -- raw value -> stored value substitutions.
        collision_field       -- model field used to detect existing rows.
        collision_behavior    -- worker action on collision (default 'update').
        pre_row_function      -- callable run on each row before loading.
        post_row_function     -- callable run on each row after loading.
        post_process_function -- callable run once after all rows are loaded.
        loghandler            -- name of the logger used for progress output.
        """
        self.logger = logging.getLogger(loghandler)
        self.model_class = model_class
        self.processes = processes
        if self.processes is None:
            self.processes = 1
            self.logger.info('Setting processes count to ' + str(self.processes))
        # `None` sentinels instead of mutable `{}` defaults: a shared default
        # dict would leak mappings between loader instances.
        self.field_map = field_map if field_map is not None else {}
        self.value_map = value_map if value_map is not None else {}
        self.collision_field = collision_field
        self.collision_behavior = collision_behavior
        self.pre_row_function = pre_row_function
        self.post_row_function = post_row_function
        self.post_process_function = post_process_function
        self.fields = [field.name for field in self.model_class._meta.get_fields()]

    def load_from_file(self, filepath, encoding='utf-8', remote_file=False):
        """Load every row of `filepath` into the model.

        With remote_file=True, `filepath` is expected to be a response dict
        exposing a binary stream under 'Body' (e.g. an S3 GetObject result)
        -- NOTE(review): confirm against callers.
        """
        if not remote_file:
            self.logger.info('Started processing file ' + filepath)
        row_queue = JoinableQueue(500)
        references = {
            'collision_field': self.collision_field,
            'collision_behavior': self.collision_behavior,
            'logger': self.logger,
            'pre_row_function': self.pre_row_function,
            'post_row_function': self.post_row_function,
            'fields': self.fields.copy(),
        }
        # Workers get their own DB connections; close inherited ones first.
        db.connections.close_all()
        pool = [
            DataLoaderThread('Process-' + str(i), self.model_class, row_queue,
                             self.field_map.copy(), self.value_map.copy(),
                             references)
            for i in range(self.processes)
        ]
        for process in pool:
            process.start()
        if remote_file:
            csv_file = codecs.getreader('utf-8')(filepath['Body'])
            row_queue = self.csv_file_to_queue(csv_file, row_queue)
        else:
            with open(filepath, encoding=encoding) as csv_file:
                row_queue = self.csv_file_to_queue(csv_file, row_queue)
        # One sentinel per worker signals end-of-input.
        for _ in range(self.processes):
            row_queue.put(None)
        row_queue.join()
        for process in pool:
            process.terminate()
        if self.post_process_function is not None:
            self.post_process_function()
        self.logger.info('Finished processing all rows')

    def csv_file_to_queue(self, csv_file, row_queue):
        """Push each CSV row (as a dict) onto `row_queue`; return the queue."""
        reader = csv.DictReader(csv_file)
        count = 0
        for row in reader:
            count += 1
            row_queue.put(row)
            if count % 1000 == 0:
                self.logger.info('Queued row ' + str(count))
        return row_queue
class OptionPlotoptionsWordcloudSonificationDefaultinstrumentoptionsMappingGapbetweennotes(Options):
    """Gap-between-notes mapping options (wordcloud sonification).

    Each option is a property whose setter writes into the underlying
    configuration. The paired ``def x(self)`` / ``def x(self, value)``
    definitions silently shadowed each other (the second overwrote the
    first, leaving only setters reachable); the ``@property``/``@x.setter``
    decorators restore both accessors.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class QRateLimitedExecutorRunnable(Logging):
    """Adapter that runs a callable and routes its outcome into a Future."""

    def __init__(self, future: Future, fn: Callable, args: Optional[Sequence[Any]]=None, kwargs: Optional[Dict[(str, Any)]]=None):
        self._future = future
        self._fn = fn
        # Normalize falsy args/kwargs to empty containers.
        self._args = args if args else ()
        self._kwargs = kwargs if kwargs else {}

    def run(self):
        """Execute the wrapped callable unless the future was cancelled."""
        if not self._future.set_running_or_notify_cancel():
            # Cancelled before it could start; nothing to do.
            return
        try:
            outcome = self._fn(*self._args, **self._kwargs)
        except BaseException as exc:
            self.logger.exception('Exception while running QRateLimitedExecutorRunnable')
            self._future.set_exception(exc)
            # Drop our frame's reference to break the exc -> traceback ->
            # frame -> self cycle, mirroring concurrent.futures workers.
            self = None
        else:
            self._future.set_result(outcome)

    def cancel(self):
        """Forward cancellation to the underlying future."""
        return self._future.cancel()

    def result(self):
        """Block for and return the underlying future's result."""
        return self._future.result()
# NOTE(review): the bare string below looks like the argument of a stripped
# @mock.patch(...) decorator -- as written it is a no-op; confirm upstream.
('google.auth.compute_engine._metadata')

def test_persist_ss(mock_gcs):
    """Round-trip SerializationSettings through the _F_SS_C env var and check
    setup_execution() restores project/domain from it."""
    default_img = Image(name='default', fqn='test', tag='tag')
    ss = SerializationSettings(project='proj1', domain='dom', version='version123', env=None, image_config=ImageConfig(default_image=default_img, images=[default_img]))
    # Serialized settings travel to the execution context via this env var.
    ss_txt = ss.serialized_context
    os.environ['_F_SS_C'] = ss_txt
    with setup_execution('s3://', checkpoint_path=None, prev_checkpoint=None) as ctx:
        assert (ctx.serialization_settings.project == 'proj1')
        assert (ctx.serialization_settings.domain == 'dom')
class MerchantInformation(BaseModel):
    """Merchant identity fields; all optional strings.

    Country-specific registration numbers (SIRET/SIREN, VAT, GST, ABN) are
    kept as separate attributes.
    """
    merchant_name: Optional[StrictStr] = None
    merchant_address: Optional[StrictStr] = None
    merchant_phone: Optional[StrictStr] = None
    merchant_url: Optional[StrictStr] = None
    # French establishment / company registration identifiers.
    merchant_siret: Optional[StrictStr] = None
    merchant_siren: Optional[StrictStr] = None
    merchant_vat_number: Optional[StrictStr] = None
    merchant_gst_number: Optional[StrictStr] = None
    # Australian Business Number.
    merchant_abn_number: Optional[StrictStr] = None
class bsn_table_set_buckets_size(bsn_header):
    """Big Switch Networks experimenter message: set a table's bucket size.

    Wire layout: OpenFlow header (version/type/length/xid), experimenter id,
    subtype, 1 pad byte, table_id (u8), 2 pad bytes, buckets_size (u32).
    """
    version = 6
    type = 4
    experimenter = 6035143
    subtype = 61

    def __init__(self, xid=None, table_id=None, buckets_size=None):
        # xid stays None when unset (filled in by the connection layer);
        # the numeric fields default to 0.
        self.xid = xid
        self.table_id = table_id if table_id is not None else 0
        self.buckets_size = buckets_size if buckets_size is not None else 0

    def pack(self):
        """Serialize to bytes; the length field is patched in afterwards."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        # Pad bytes must be `bytes`, not `str`: struct.pack() returns bytes
        # and joining a mixed str/bytes list raises TypeError on Python 3.
        packed.append(b'\x00' * 1)
        packed.append(struct.pack('!B', self.table_id))
        packed.append(b'\x00' * 2)
        packed.append(struct.pack('!L', self.buckets_size))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack('!H', length)
        return b''.join(packed)

    # Called via the class (no instance), hence a staticmethod: without the
    # decorator, calling it on an instance would mis-bind `reader` to self.
    @staticmethod
    def unpack(reader):
        """Parse a message from `reader`; asserts the fixed header fields."""
        obj = bsn_table_set_buckets_size()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 4)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's extent.
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        _subtype = reader.read('!L')[0]
        assert (_subtype == 61)
        reader.skip(1)
        obj.table_id = reader.read('!B')[0]
        reader.skip(2)
        obj.buckets_size = reader.read('!L')[0]
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.table_id != other.table_id):
            return False
        if (self.buckets_size != other.buckets_size):
            return False
        return True

    def pretty_print(self, q):
        """Render a human-readable dump onto pretty-printer `q`."""
        q.text('bsn_table_set_buckets_size {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('table_id = ')
                q.text(('%#x' % self.table_id))
                q.text(',')
                q.breakable()
                q.text('buckets_size = ')
                q.text(('%#x' % self.buckets_size))
            q.breakable()
        q.text('}')
class Solution():
    """Burst Balloons: maximize the coins collected by bursting all balloons."""

    def maxCoins(self, iNums: List[int]) -> int:
        """Return the maximum total coins, via interval DP on the balloon
        burst last within each open interval."""
        # Drop zero-valued balloons (bursting them yields nothing) and pad
        # both ends with virtual 1-valued balloons.
        balloons = [1]
        balloons.extend(v for v in iNums if v > 0)
        balloons.append(1)
        size = len(balloons)
        # best[lo][hi]: max coins from bursting everything strictly between
        # indices lo and hi.
        best = [[0] * size for _ in range(size)]
        for span in range(2, size):
            for lo in range(size - span):
                hi = lo + span
                best[lo][hi] = max(
                    balloons[lo] * balloons[mid] * balloons[hi]
                    + best[lo][mid] + best[mid][hi]
                    for mid in range(lo + 1, hi)
                )
        return best[0][size - 1]
def extraRouteProgress(routes):
    """Summarize a batch of candidate routes as '.. <gain>, <gain/ton>'."""
    gains = [route.gainCr for route in routes]
    loGain, hiGain = min(gains), max(gains)
    if loGain == hiGain:
        gainText = '{:n}cr gain'.format(hiGain)
    else:
        gainText = '{:n}-{:n}cr gain'.format(loGain, hiGain)
    perTon = [route.gpt for route in routes]
    # Truncate per-ton figures to whole credits before comparing/formatting.
    loGPT, hiGPT = int(min(perTon)), int(max(perTon))
    if loGPT == hiGPT:
        gptText = '{:n}cr/ton'.format(hiGPT)
    else:
        gptText = '{:n}-{:n}cr/ton'.format(loGPT, hiGPT)
    return '.. {}, {}'.format(gainText, gptText)
class OptionPlotoptionsHistogramSonificationTracksMappingTremoloSpeed(Options):
    """Tremolo-speed mapping options (histogram sonification tracks).

    Each option is a property whose setter writes into the underlying
    configuration. The paired ``def x(self)`` / ``def x(self, value)``
    definitions silently shadowed each other (only the setters survived);
    the ``@property``/``@x.setter`` decorators restore both accessors.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class UserDataDumper(object):
    """Collect and serialize everything stored about a single user.

    The accessors must be properties: ``dumps`` and ``data`` read
    ``self.data`` / ``self.user_information`` without calling them, so as
    plain methods ``json.dumps(self.data)`` would try to serialize a bound
    method and fail.
    """

    def __init__(self, user):
        self.user = user

    def dumps(self, pretty=False):
        """Return the user's data as a JSON string (indented when pretty)."""
        app.logger.info("Dumping all user data for '%s'", self.user.name)
        if pretty:
            return json.dumps(self.data, indent=2)
        return json.dumps(self.data)

    @property
    def data(self):
        """Aggregate account info plus groups, projects and builds."""
        data = self.user_information
        data['groups'] = self.groups
        data['projects'] = self.projects
        data['builds'] = self.builds
        return data

    @property
    def user_information(self):
        """Basic account attributes as a plain dict."""
        return {'username': self.user.name, 'email': self.user.mail, 'timezone': self.user.timezone, 'api_login': self.user.api_login, 'api_token': self.user.api_token, 'api_token_expiration': self.user.api_token_expiration.strftime('%b %d %Y %H:%M:%S'), 'gravatar': self.user.gravatar_url}

    @property
    def groups(self):
        """Groups the user belongs to, with browse URLs."""
        return [{'name': g.name, 'url': url_for('groups_ns.list_projects_by_group', group_name=g.name, _external=True)} for g in self.user.user_groups]

    @property
    def projects(self):
        """Projects owned by the user, with detail URLs."""
        return [{'full_name': p.full_name, 'url': copr_url('coprs_ns.copr_detail', p, _external=True)} for p in coprs_logic.CoprsLogic.filter_by_user_name(coprs_logic.CoprsLogic.get_multiple(), self.user.name)]

    @property
    def builds(self):
        """Builds submitted by the user, with detail URLs."""
        return [{'id': b.id, 'project': b.copr.full_name, 'url': copr_url('coprs_ns.copr_build', b.copr, build_id=b.id, _external=True)} for b in self.user.builds]
def __create_playlist_context_menu():
    """Build and register the playlist right-click context menu items.

    Items are chained via their `after` lists ([items[-1].name]) so each
    entry is placed after the previously appended one.
    """
    smi = menu.simple_menu_item
    sep = menu.simple_separator
    items = []
    items.append(menuitems.EnqueueMenuItem('enqueue', []))
    items.append(SPATMenuItem('toggle-spat', [items[(- 1)].name]))

    # NOTE(review): defined but not referenced below -- presumably intended
    # to feed RatingMenuItem with the selected tracks; confirm.
    def rating_get_tracks_func(menuobj, parent, context):
        # 'selected-items' holds (position, track) pairs; return the tracks.
        return [row[1] for row in context['selected-items']]
    items.append(menuitems.RatingMenuItem('rating', [items[(- 1)].name]))
    items.append(sep('sep1', [items[(- 1)].name]))

    def remove_tracks_cb(widget, name, playlistpage, context):
        tracks = context['selected-items']
        playlist = playlistpage.playlist
        positions = [t[0] for t in tracks]
        if (positions == list(range(positions[0], (positions[0] + len(positions))))):
            # Contiguous selection: remove with a single slice deletion.
            del playlist[positions[0]:(positions[0] + len(positions))]
        else:
            # Non-contiguous: delete from the end so earlier positions
            # remain valid while deleting.
            for (position, track) in tracks[::(- 1)]:
                del playlist[position]
    items.append(smi('remove', [items[(- 1)].name], _('_Remove from Playlist'), 'list-remove', remove_tracks_cb))
    items.append(RandomizeMenuItem([items[(- 1)].name]))

    def playlist_menu_condition(name, parent, context):
        # Only show the Playlist submenu when the tab strip is hidden.
        scrolledwindow = parent.get_parent()
        page = scrolledwindow.get_parent()
        return (not page.tab.notebook.get_show_tabs())
    items.append(smi('playlist-menu', [items[(- 1)].name], _('Playlist'), submenu=menu.ProviderMenu('playlist-tab-context-menu', None), condition_fn=playlist_menu_condition))
    items.append(sep('sep2', [items[(- 1)].name]))
    items.append(smi('properties', [items[(- 1)].name], _('_Track Properties'), 'document-properties', (lambda w, n, o, c: o.show_properties_dialog())))
    for item in items:
        providers.register('playlist-context-menu', item)
class CreditCardExpires(FormValidator):
    """FormEncode validator for a credit-card expiry month/year field pair.

    A card is considered valid through the end of its expiry month: the
    check builds the first day of the *following* month and requires it to
    be strictly after today.
    """
    # Participate in partial (per-field) form validation as well.
    validate_partial_form = True
    cc_expires_month_field = 'ccExpiresMonth'
    cc_expires_year_field = 'ccExpiresYear'
    __unpackargs__ = ('cc_expires_month_field', 'cc_expires_year_field')
    # Optional datetime module override, passed to import_datetime().
    datetime_module = None
    messages = dict(notANumber=_('Please enter numbers only for month and year'), invalidNumber=_('Invalid Expiration Date'))

    def validate_partial(self, field_dict, state):
        # Skip validation until both fields have values.
        if ((not field_dict.get(self.cc_expires_month_field, None)) or (not field_dict.get(self.cc_expires_year_field, None))):
            return None
        self._validate_python(field_dict, state)

    def _validate_python(self, field_dict, state):
        # Raise a combined Invalid carrying one message per offending field.
        errors = self._validateReturn(field_dict, state)
        if errors:
            raise Invalid('<br>\n'.join((('%s: %s' % (name, value)) for (name, value) in sorted(errors.items()))), field_dict, state, error_dict=errors)

    def _validateReturn(self, field_dict, state):
        # Return a field -> message dict on failure, or None (implicitly) on
        # success.
        ccExpiresMonth = str(field_dict[self.cc_expires_month_field]).strip()
        ccExpiresYear = str(field_dict[self.cc_expires_year_field]).strip()
        try:
            ccExpiresMonth = int(ccExpiresMonth)
            ccExpiresYear = int(ccExpiresYear)
            dt_mod = import_datetime(self.datetime_module)
            now = datetime_now(dt_mod)
            today = datetime_makedate(dt_mod, now.year, now.month, now.day)
            # First day of the month after expiry; December wraps to January
            # of the following year.
            next_month = ((ccExpiresMonth % 12) + 1)
            next_month_year = ccExpiresYear
            if (next_month == 1):
                next_month_year += 1
            expires_date = datetime_makedate(dt_mod, next_month_year, next_month, 1)
            # The AssertionError maps to the invalidNumber message below.
            assert (expires_date > today)
        except ValueError:
            return {self.cc_expires_month_field: self.message('notANumber', state), self.cc_expires_year_field: self.message('notANumber', state)}
        except AssertionError:
            return {self.cc_expires_month_field: self.message('invalidNumber', state), self.cc_expires_year_field: self.message('invalidNumber', state)}
def _create_ofp_msg_ev_class(msg_cls):
    """Generate and register an event class wrapping an OFP message class.

    The generated class is exported via globals() so applications can
    reference it by name; registration is idempotent.
    """
    name = _ofp_msg_name_to_ev_name(msg_cls.__name__)
    if (name in _OFP_MSG_EVENTS):
        # Already generated under this name; keep the existing class.
        return
    # NOTE(review): super(self.__class__, ...) in the generated __init__
    # would recurse infinitely if one of these classes were subclassed; it
    # is safe only while they remain leaf types.
    cls = type(name, (EventOFPMsgBase,), dict(__init__=(lambda self, msg: super(self.__class__, self).__init__(msg))))
    globals()[name] = cls
    _OFP_MSG_EVENTS[name] = cls
class _ErtDocumentation(SphinxDirective):
    """Sphinx directive rendering forward-model job documentation, grouped
    into category / sub-category sections."""
    has_content = True
    # Fallbacks used when a job's doc dict omits the corresponding key.
    _CATEGORY_DEFAULT = 'other'
    _SOURCE_PACKAGE_DEFAULT = 'PACKAGE NOT PROVIDED'
    _DESCRIPTION_DEFAULT = ''
    _CONFIG_FILE_DEFAULT = 'No config file provided'
    _EXAMPLES_DEFAULT = ''
    _PARSER_DEFAULT = None

    # NOTE(review): defined without `self` and invoked via the class below
    # (_ErtDocumentation._divide_into_categories(jobs)), i.e. effectively a
    # @staticmethod whose decorator appears to have been lost -- confirm.
    def _divide_into_categories(jobs: Dict[(str, JobDoc)]) -> Dict[(str, Dict[(str, List[_ForwardModelDocumentation])])]:
        # Group jobs into {main_category: {sub_category: [docs]}}. A job's
        # 'category' value is dotted ('main.sub'); missing parts fall back
        # to 'other'. Lower-cased job names are skipped.
        categories: Dict[(str, Dict[(str, List[_ForwardModelDocumentation])])] = defaultdict((lambda : defaultdict(list)))
        for (job_name, docs) in jobs.items():
            if job_name.islower():
                continue
            category = docs.get('category', _ErtDocumentation._CATEGORY_DEFAULT)
            split_categories = category.split('.')
            if (len(split_categories) > 1):
                (main_category, sub_category) = split_categories[0:2]
            elif (len(split_categories) == 1):
                (main_category, sub_category) = (split_categories[0], 'other')
            else:
                (main_category, sub_category) = ('other', 'other')
            categories[main_category][sub_category].append(_ForwardModelDocumentation(name=job_name, category=category, job_source=docs.get('source_package', _ErtDocumentation._SOURCE_PACKAGE_DEFAULT), description=docs.get('description', _ErtDocumentation._DESCRIPTION_DEFAULT), job_config_file=docs.get('config_file', _ErtDocumentation._CONFIG_FILE_DEFAULT), examples=docs.get('examples', _ErtDocumentation._EXAMPLES_DEFAULT), parser=docs.get('parser', _ErtDocumentation._PARSER_DEFAULT)))
        # Convert the nested defaultdicts to plain dicts.
        return dict({k: dict(v) for (k, v) in categories.items()})

    def _create_forward_model_section_node(self, section_id: str, title: str) -> nodes.section:
        # Top-level section carrying the directive's own content.
        node = _create_section_with_title(section_id=section_id, title=title)
        _parse_string_list(self.content, node, self.state)
        return node

    def _generate_job_documentation(self, jobs: Dict[(str, JobDoc)], section_id: str, title: str) -> List[nodes.section]:
        """Render `jobs` as nested category/sub-category sections, with
        transitions between sibling sections (except after the last)."""
        job_categories = _ErtDocumentation._divide_into_categories(jobs)
        main_node = self._create_forward_model_section_node(section_id, title)
        for (category_index, category) in enumerate(sorted(job_categories.keys())):
            category_section_node = _create_section_with_title(section_id=(category + '-category'), title=category.capitalize())
            sub_jobs_map = job_categories[category]
            for (sub_i, sub) in enumerate(sorted(sub_jobs_map.keys())):
                sub_section_node = _create_section_with_title(section_id=(((category + '-') + sub) + '-subcategory'), title=sub.capitalize())
                for job in sub_jobs_map[sub]:
                    job_section_node = job.create_node(self.state)
                    sub_section_node.append(job_section_node)
                if (sub_i < (len(sub_jobs_map) - 1)):
                    sub_section_node.append(nodes.transition())
                category_section_node.append(sub_section_node)
            main_node.append(category_section_node)
            if (category_index < (len(job_categories) - 1)):
                category_section_node.append(nodes.transition())
        return [main_node]
def arrays_to_strings(measure_json):
    """Return a copy of `measure_json` where the known list-valued fields are
    joined into single space-separated strings; other values pass through."""
    joinable = {
        'title', 'description', 'why_it_matters', 'numerator_columns',
        'numerator_where', 'denominator_columns', 'denominator_where',
    }
    return {
        key: ' '.join(value) if key in joinable and isinstance(value, list) else value
        for key, value in measure_json.items()
    }
class OptionSeriesTreegraphSonificationDefaultinstrumentoptionsActivewhen(Options):
    """Active-when options (treegraph sonification default instrument).

    Each option is a property whose setter writes into the underlying
    configuration. The paired ``def x(self)`` / ``def x(self, value)``
    definitions silently shadowed each other (only the setters survived);
    the ``@property``/``@x.setter`` decorators restore both accessors.
    """

    @property
    def crossingDown(self):
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class FigureModelTrainingDataGenerator(AbstractDocumentModelTrainingDataGenerator):
    """Generates figure-model training documents by extracting raw figure
    regions from the segmented body of a document."""

    def get_main_model(self, document_context: TrainingDataDocumentContext) -> Model:
        # The model the generated training data is for.
        return document_context.fulltext_models.figure_model

    def iter_model_layout_documents(self, layout_document: LayoutDocument, document_context: TrainingDataDocumentContext) -> Iterable[LayoutDocument]:
        # Segment the document, keep only the '<body>' region, label it with
        # the fulltext model, then emit one LayoutDocument per raw figure.
        fulltext_model = document_context.fulltext_models.fulltext_model
        segmentation_label_result = get_segmentation_label_result(layout_document, document_context=document_context)
        body_layout_document = segmentation_label_result.get_filtered_document_by_label('<body>').remove_empty_blocks()
        if (not body_layout_document.pages):
            # No body content -> nothing to train on.
            return []
        fulltext_labeled_layout_tokens = get_labeled_layout_tokens_for_model_and_layout_document(model=fulltext_model, layout_document=body_layout_document, document_context=document_context)
        raw_figure_list = list(SemanticMixedContentWrapper(list(fulltext_model.iter_semantic_content_for_labeled_layout_tokens(fulltext_labeled_layout_tokens))).iter_by_type_recursively(SemanticRawFigure))
        LOGGER.info('raw_figure_list count: %d', len(raw_figure_list))
        if (not raw_figure_list):
            return []
        return [LayoutDocument.for_blocks(list(raw_figure.iter_blocks())) for raw_figure in raw_figure_list]
class Solution():
    """Maximum XOR of two numbers in an array (LeetCode 421)."""

    def findMaximumXOR(self, nums):
        """Return max(nums[i] ^ nums[j]) over all pairs; 0 for empty input.

        Greedy bit-by-bit construction using prefix sets instead of the
        original trie built from an externally-defined TrieNode: for each
        bit from high to low, tentatively set that bit in the answer and
        keep it if some pair of prefixes XORs to the candidate (using
        a ^ b == c  <=>  a ^ c == b). Same O(32 n) time as the trie,
        but stdlib-only and with no per-number node allocations.
        """
        best = 0
        for bit in range(31, -1, -1):
            best <<= 1
            candidate = best | 1
            # All numbers truncated to their top (32 - bit) bits.
            prefixes = {num >> bit for num in nums}
            if any((candidate ^ prefix) in prefixes for prefix in prefixes):
                best = candidate
        return best
class DummyPolicyWrapper(Policy, TorchModel):
    """Test double wrapping a TorchPolicy: compute_action alternates between
    the top-2 action candidates based on env time; the TorchModel methods
    delegate straight to the wrapped policy.

    NOTE(review): the bare '(Policy)' / '(TorchModel)' expressions below
    look like remnants of stripped '@override(...)' decorators -- confirm
    against upstream; as written they are harmless no-op statements.
    """

    def __init__(self, torch_policy: TorchPolicy):
        self.torch_policy = torch_policy
        super().__init__(device=torch_policy.device)
    (Policy)

    def seed(self, seed: int) -> None:
        # No randomness to seed in this dummy; body is a no-op expression.
        (Policy)

    def needs_state(self) -> bool:
        return False
    (Policy)

    def needs_env(self) -> bool:
        # compute_action reads env.get_env_time(), so the env is required.
        return True
    (Policy)

    def compute_action(self, observation: ObservationType, maze_state: Optional[MazeStateType], env: MazeEnv, actor_id: Optional[ActorID]=None, deterministic: bool=False) -> ActionType:
        # Alternate between the two best candidates on even/odd env time.
        (actions, probs) = self.torch_policy.compute_top_action_candidates(observation=observation, maze_state=maze_state, env=env, actor_id=actor_id, num_candidates=2)
        return actions[(env.get_env_time() % 2)]
    (Policy)

    def compute_top_action_candidates(self, observation: ObservationType, num_candidates: Optional[int], maze_state: Optional[MazeStateType], env: Optional[BaseEnv], actor_id: Optional[ActorID]=None) -> Tuple[(Sequence[ActionType], Sequence[float])]:
        raise NotImplementedError
    (TorchModel)

    def parameters(self) -> List[torch.Tensor]:
        return self.torch_policy.parameters()
    (TorchModel)

    def eval(self) -> None:
        self.torch_policy.eval()
    (TorchModel)

    def train(self) -> None:
        self.torch_policy.train()
    (TorchModel)

    def to(self, device: str) -> None:
        self.torch_policy.to(device)
    (TorchModel)

    def state_dict(self) -> Dict:
        return self.torch_policy.state_dict()
    (TorchModel)

    def load_state_dict(self, state_dict: Dict) -> None:
        self.torch_policy.load_state_dict(state_dict)
# NOTE(review): the two bare statements below sit where decorators would --
# likely stripped '@_register_parser' and '@_set_msg_type(...)' from the
# upstream source; as written they merely evaluate the name / call the helper
# without decorating OFPFlowMod. Confirm against upstream.
_register_parser
_set_msg_type(ofproto.OFPT_FLOW_MOD)

class OFPFlowMod(MsgBase):
    """OpenFlow FLOW_MOD message: add/modify/delete a flow table entry."""

    def __init__(self, datapath, cookie=0, cookie_mask=0, table_id=0, command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0, priority=0, buffer_id=ofproto.OFP_NO_BUFFER, out_port=0, out_group=0, flags=0, match=None, instructions=None):
        # Normalize defaults: empty instruction list, match-all OFPMatch.
        instructions = (instructions if instructions else [])
        super(OFPFlowMod, self).__init__(datapath)
        self.cookie = cookie
        self.cookie_mask = cookie_mask
        self.table_id = table_id
        self.command = command
        self.idle_timeout = idle_timeout
        self.hard_timeout = hard_timeout
        self.priority = priority
        self.buffer_id = buffer_id
        self.out_port = out_port
        self.out_group = out_group
        self.flags = flags
        if (match is None):
            match = OFPMatch()
        assert isinstance(match, OFPMatch)
        self.match = match
        for i in instructions:
            assert isinstance(i, OFPInstruction)
        self.instructions = instructions

    def _serialize_body(self):
        # Fixed-size head, then the match, then each instruction in order.
        msg_pack_into(ofproto.OFP_FLOW_MOD_PACK_STR0, self.buf, ofproto.OFP_HEADER_SIZE, self.cookie, self.cookie_mask, self.table_id, self.command, self.idle_timeout, self.hard_timeout, self.priority, self.buffer_id, self.out_port, self.out_group, self.flags)
        offset = (ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_MATCH_SIZE)
        match_len = self.match.serialize(self.buf, offset)
        offset += match_len
        for inst in self.instructions:
            inst.serialize(self.buf, offset)
            offset += inst.len

    # NOTE(review): takes `cls` but has no visible @classmethod decorator --
    # presumably stripped along with the others above; confirm.
    def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
        msg = super(OFPFlowMod, cls).parser(datapath, version, msg_type, msg_len, xid, buf)
        (msg.cookie, msg.cookie_mask, msg.table_id, msg.command, msg.idle_timeout, msg.hard_timeout, msg.priority, msg.buffer_id, msg.out_port, msg.out_group, msg.flags) = struct.unpack_from(ofproto.OFP_FLOW_MOD_PACK_STR0, msg.buf, ofproto.OFP_HEADER_SIZE)
        offset = (ofproto.OFP_FLOW_MOD_SIZE - ofproto.OFP_HEADER_SIZE)
        msg.match = OFPMatch.parser(buf, offset)
        # The on-wire match is padded to an 8-byte boundary.
        offset += utils.round_up(msg.match.length, 8)
        instructions = []
        while (offset < msg_len):
            i = OFPInstruction.parser(buf, offset)
            instructions.append(i)
            offset += i.len
        msg.instructions = instructions
        return msg
class OutdatedChrootMessage(Message):
    """Notification e-mail about project chroots approaching EOL deletion."""

    def __init__(self, copr_chroots):
        # copr_chroots: the chroot rows to warn about; must be non-empty.
        # Raises AttributeError when empty (after the boilerplate text has
        # already been built from app config).
        self.subject = '[Copr] upcoming deletion of outdated chroots in your projects'
        self.text = "You have been notified because you are an admin of projects, that have some builds in outdated chroots\n\nAccording to the 'Copr outdated chroots removal policy'\n are going to be preserved {0} days after the chroot is EOL and then automatically deleted, unless you decide to prolong the expiration period.\n\nPlease, visit the projects settings if you want to extend the time.\n\n".format(app.config['DELETE_EOL_CHROOTS_AFTER'])
        if (not copr_chroots):
            raise AttributeError('No outdated chroots to notify about')
        for chroot in copr_chroots:
            # Link each chroot to its project's repositories settings page.
            url = helpers.fix_protocol_for_frontend(helpers.copr_url('coprs_ns.copr_repositories', chroot.copr, _external=True))
            self.text += 'Project: {0}\nChroot: {1}\nRemaining: {2} days\n{3}\n\n'.format(chroot.copr.full_name, chroot.name, chroot.delete_after_days, url)
class Item(metaclass=ItemType):
    """Declarative HTML scraping item.

    Subclasses declare field selectors collected into __fields__ (presumably
    by the ItemType metaclass) and optional clean_<field> hooks.

    NOTE(review): parse/_clean take `cls` without a visible @classmethod --
    the metaclass (or a stripped decorator) presumably supplies classmethod
    binding; confirm before calling these on instances.
    """

    def parse(cls, html: str):
        # Extract a list of dicts (when cls._list is set) or a single dict,
        # then run the clean hooks over each result.
        if cls._list:
            result = HTMLParsing(html).list(cls._selector, cls.__fields__)
            result = [cls._clean(item) for item in result]
        else:
            result = HTMLParsing(html).detail(cls.__fields__)
            result = cls._clean(result)
        return result

    def _clean(cls, item):
        # Apply clean_<name>(cls, value) to each declared field, in place.
        for (name, selector) in cls.__fields__.items():
            clean_method = getattr(cls, ('clean_%s' % name), None)
            if (clean_method is not None):
                item[name] = clean_method(cls, item[name])
        return item
class FirewallTransaction():
    """Collects firewall rules, kernel modules and pre/post/fail callbacks
    and applies them as one unit via execute()."""

    def __init__(self, fw):
        self.fw = fw
        # backend name -> list of rules to submit to that backend.
        self.rules = {}
        # (func, args) callback lists for the transaction phases.
        self.pre_funcs = []
        self.post_funcs = []
        self.fail_funcs = []
        # Kernel modules to handle alongside the rules.
        self.modules = []

    def clear(self):
        """Drop all queued rules and callbacks (modules are NOT cleared)."""
        self.rules.clear()
        del self.pre_funcs[:]
        del self.post_funcs[:]
        del self.fail_funcs[:]

    def add_rule(self, backend, rule):
        """Queue `rule` for submission to `backend`."""
        self.rules.setdefault(backend.name, []).append(rule)

    def add_rules(self, backend, rules):
        for rule in rules:
            self.add_rule(backend, rule)

    def query_rule(self, backend, rule):
        """Return True when `rule` is already queued for `backend`."""
        return ((backend.name in self.rules) and (rule in self.rules[backend.name]))

    def remove_rule(self, backend, rule):
        """Remove `rule` from `backend`'s queue if present (no error if not)."""
        if ((backend.name in self.rules) and (rule in self.rules[backend.name])):
            self.rules[backend.name].remove(rule)

    def add_pre(self, func, *args):
        self.pre_funcs.append((func, args))

    def add_post(self, func, *args):
        self.post_funcs.append((func, args))

    def add_fail(self, func, *args):
        self.fail_funcs.append((func, args))

    def add_module(self, module):
        # De-duplicated; preserves insertion order.
        if (module not in self.modules):
            self.modules.append(module)

    def remove_module(self, module):
        if (module in self.modules):
            self.modules.remove(module)

    def add_modules(self, modules):
        for module in modules:
            self.add_module(module)

    def remove_modules(self, modules):
        for module in modules:
            self.remove_module(module)

    def execute(self, enable):
        """Run pre hooks, submit all queued rules per backend, handle the
        modules, then post hooks.

        On any rule failure: run the fail hooks (each exception logged, not
        propagated) and raise FirewallError(COMMAND_FAILED) with the first
        error message.
        """
        log.debug4(('%s.execute(%s)' % (type(self), enable)))
        rules = self.rules
        modules = self.modules
        self.pre()
        error = False
        errorMsg = ''
        done = []
        # NOTE(review): `done` records backends whose rules applied cleanly
        # but is not used afterwards (no rollback here) -- confirm intent.
        for backend_name in rules:
            try:
                self.fw.rules(backend_name, rules[backend_name])
            except Exception as msg:
                error = True
                errorMsg = msg
                log.debug1(traceback.format_exc())
                log.error(msg)
            else:
                done.append(backend_name)
        if (not error):
            # Modules are only touched when every backend succeeded.
            module_return = self.fw.handle_modules(modules, enable)
            if module_return:
                (status, msg) = module_return
                if status:
                    log.debug1(msg)
        if error:
            for (func, args) in self.fail_funcs:
                try:
                    func(*args)
                except Exception as msg:
                    log.debug1(traceback.format_exc())
                    log.error(('Calling fail func %s(%s) failed: %s' % (func, args, msg)))
            raise FirewallError(errors.COMMAND_FAILED, errorMsg)
        self.post()

    def pre(self):
        """Invoke all registered pre-transaction callbacks."""
        log.debug4(('%s.pre()' % type(self)))
        for (func, args) in self.pre_funcs:
            func(*args)

    def post(self):
        """Invoke all registered post-transaction callbacks."""
        log.debug4(('%s.post()' % type(self)))
        for (func, args) in self.post_funcs:
            func(*args)
def validate_EIP712Domain_schema(structured_data):
    """Ensure `structured_data['types']` declares a usable EIP712Domain struct."""
    types = structured_data['types']
    if 'EIP712Domain' not in types:
        raise ValidationError('`EIP712Domain struct` not found in types attribute')
    domain_type = types['EIP712Domain']
    fields = used_header_fields(domain_type)
    if not fields:
        raise ValidationError(f'One of {EIP712_DOMAIN_FIELDS} must be defined in {structured_data}')
    # Each recognized header field may appear only once in the struct.
    for header_field in fields:
        validate_field_declared_only_once_in_struct(header_field, domain_type, 'EIP712Domain')
# NOTE(review): the bare tuple/string expressions before this class and its
# methods look like arguments of stripped '@mock.patch(...)' decorators --
# without the decorators the mocks are never applied and the *mocks
# parameters go unfilled; confirm against upstream.
('aea.cli.registry.fetch.open_file', mock.mock_open())
('aea.cli.utils.decorators._cast_ctx')
('aea.cli.registry.fetch.PublicId', PublicIdMock)
('aea.cli.registry.fetch.os.rename')
('aea.cli.registry.fetch.os.makedirs')
('aea.cli.registry.fetch.try_to_load_agent_config')
('aea.cli.registry.fetch.download_file', return_value='filepath')
('aea.cli.registry.fetch.extract')

class TestFetchAgent(TestCase):
    """Tests for fetch_agent(): downloading an agent package from the registry."""

    # NOTE(review): takes `cls` without @classmethod -- presumably stripped;
    # as unittest's hook this would normally be setUpClass.
    def setup_class(cls):
        # Work inside a throw-away temp dir; restored in teardown_class.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        os.chdir(cls.t)
    ('aea.cli.registry.fetch.request_api', return_value={'file': 'url', 'connections': [], 'contracts': [], 'protocols': [], 'skills': []})

    def test_fetch_agent_positive(self, request_api_mock, extract_mock, download_file_mock, *mocks):
        """Fetching an agent with no dependencies downloads and extracts it."""
        public_id_mock = PublicIdMock()
        fetch_agent(ContextMock(), public_id_mock, alias='alias')
        request_api_mock.assert_called_with('GET', '/agents/{}/{}/{}'.format(public_id_mock.author, public_id_mock.name, public_id_mock.version))
        download_file_mock.assert_called_once_with('url', 'cwd')
        extract_mock.assert_called_once_with('filepath', 'cwd')
    ('aea.cli.registry.fetch.add_item')
    ('aea.cli.registry.fetch.request_api', return_value={'file': 'url', 'connections': ['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)], 'contracts': ['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)], 'protocols': ['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)], 'skills': ['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)]})

    def test_fetch_agent_with_dependencies_positive(self, request_api_mock, add_item_mock, extract_mock, download_file_mock, *mocks):
        """Dependencies listed by the registry are added via add_item."""
        public_id_mock = PublicIdMock()
        ctx_mock = ContextMock(connections=['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)])
        fetch_agent(ctx_mock, public_id_mock)
        request_api_mock.assert_called_with('GET', '/agents/{}/{}/{}'.format(public_id_mock.author, public_id_mock.name, public_id_mock.version))
        download_file_mock.assert_called_once_with('url', 'cwd')
        extract_mock.assert_called_once_with('filepath', 'cwd')
        add_item_mock.assert_called()
    ('aea.cli.registry.fetch.add_item', _raise_exception)
    ('aea.cli.registry.fetch.request_api', return_value={'file': 'url', 'connections': ['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)], 'contracts': [], 'protocols': [], 'skills': []})

    def test_fetch_agent_with_dependencies_unable_to_fetch(self, *mocks):
        """A failing add_item surfaces as ClickException."""
        ctx_mock = ContextMock(connections=['public/id:{}'.format(PublicIdMock.DEFAULT_VERSION)])
        with self.assertRaises(ClickException):
            fetch_agent(ctx_mock, PublicIdMock())

    def teardown_class(cls):
        # Restore the working directory; best-effort temp dir cleanup.
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
class TestsUtilsTest(LogCaptureTestCase):
    """Self-tests for the fail2ban test-framework utilities (log capture,
    extended assert helpers, traceback formatting, safe logging)."""
    def testmbasename(self):
        """mbasename() turns a file path into a short module-like name."""
        self.assertEqual(mbasename('sample.py'), 'sample')
        self.assertEqual(mbasename('/long/path/sample.py'), 'sample')
        # __init__ / base keep their parent directory for disambiguation:
        self.assertEqual(mbasename('/long/path/__init__.py'), 'path.__init__')
        self.assertEqual(mbasename('/long/path/base.py'), 'path.base')
        self.assertEqual(mbasename('/long/path/base'), 'path.base')
    def testUniConverters(self):
        """uni_decode / uni_string survive invalid encodings and bad bytes."""
        self.assertRaises(Exception, uni_decode, b'test', 'f2b-test::non-existing-encoding')
        uni_decode(b'test\xcf')
        uni_string(b'test\xcf')
        uni_string('testI')
    def testSafeLogging(self):
        """Logging must not blow up when %r/%s conversion of an argument raises."""
        logSys = DefLogSys
        class Test():
            # helper whose repr() either raises or returns an odd string
            def __init__(self, err=1):
                self.err = err
            def __repr__(self):
                if self.err:
                    raise Exception('no representation for test!')
                else:
                    return 'conv-error (oaoee), unterminated utf I'
        test = Test()
        # phase 1: repr raises -- the handler logs the traceback instead of crashing
        logSys.log(logging.NOTICE, 'test 1a: %r', test)
        self.assertLogged('Traceback', 'no representation for test!')
        self.pruneLog()
        logSys.notice('test 1b: %r', test)
        self.assertLogged('Traceback', 'no representation for test!')
        self.pruneLog(('[phase 2] test error conversion by encoding %s' % sys.getdefaultencoding()))
        test = Test(0)
        # phase 2: repr returns, the message is logged (or a handler error is reported)
        logSys.log(logging.NOTICE, 'test 2a: %r, %s', test, test)
        self.assertLogged('test 2a', 'Error by logging handler', all=False)
        logSys.notice('test 2b: %r, %s', test, test)
        self.assertLogged('test 2b', 'Error by logging handler', all=False)
        self.pruneLog('[phase 3] test unexpected error in handler')
        class _ErrorHandler(logging.Handler):
            # handler that always fails, to exercise logging's error path
            def handle(self, record):
                raise Exception('error in handler test!')
        _org_handler = logSys.handlers
        try:
            logSys.handlers = list(logSys.handlers)
            logSys.handlers += [_ErrorHandler()]
            logSys.log(logging.NOTICE, 'test 3a')
            logSys.notice('test 3b')
        finally:
            logSys.handlers = _org_handler
        self.pruneLog('OK')
    def testTraceBack(self):
        """TraceBack() formats the current exception, optionally compressed."""
        for compress in (True, False):
            tb = TraceBack(compress=compress)
            def func_raise():
                raise ValueError()
            def deep_function(i):
                # recurse i levels before raising, to get a deep stack
                if i:
                    deep_function((i - 1))
                else:
                    func_raise()
            try:
                print(deep_function(3))
            except ValueError:
                s = tb()
            # format differs when running under the test runner / coverage
            if (not ('fail2ban-testcases' in s)):
                self.assertIn('>', s)
            elif (not ('coverage' in s)):
                self.assertNotIn('>', s)
            self.assertIn(':', s)
    def _testAssertionErrorRE(self, regexp, fun, *args, **kwargs):
        """Assert that fun(*args, **kwargs) raises AssertionError matching regexp."""
        self.assertRaisesRegex(AssertionError, regexp, fun, *args, **kwargs)
    def testExtendedAssertRaisesRE(self):
        """assertRaisesRegex only swallows AssertionError, and only on match."""
        def _key_err(msg):
            raise KeyError(msg)
        self.assertRaises(KeyError, self._testAssertionErrorRE, '^failed$', _key_err, 'failed')
        self.assertRaises(AssertionError, self._testAssertionErrorRE, '^failed$', self.fail, '__failed__')
        self._testAssertionErrorRE('failed.* does not match .*__failed__', (lambda : self._testAssertionErrorRE('^failed$', self.fail, '__failed__')))
        self.assertRaises(AssertionError, self._testAssertionErrorRE, '', int, 1)
        self._testAssertionErrorRE('0 AssertionError not raised X.* does not match .*AssertionError not raised', (lambda : self._testAssertionErrorRE('^0 AssertionError not raised X$', (lambda : self._testAssertionErrorRE('', int, 1)))))
    def testExtendedAssertMethods(self):
        """Exercise assertIn/NotIn, assertLogged/NotLogged, assertSortedEqual etc."""
        self.assertIn('a', ['a', 'b', 'c', 'd'])
        self.assertIn('a', ('a', 'b', 'c', 'd'))
        self.assertIn('a', 'cba')
        self.assertIn('a', (c for c in 'cba' if (c != 'b')))
        self.assertNotIn('a', ['b', 'c', 'd'])
        self.assertNotIn('a', ('b', 'c', 'd'))
        self.assertNotIn('a', 'cbd')
        self.assertNotIn('a', (c.upper() for c in 'cba' if (c != 'b')))
        self._testAssertionErrorRE("'a' unexpectedly found in 'cba'", self.assertNotIn, 'a', 'cba')
        self._testAssertionErrorRE('1 unexpectedly found in \\[0, 1, 2\\]', self.assertNotIn, 1, range(3))
        self._testAssertionErrorRE("'A' unexpectedly found in \\['C', 'A'\\]", self.assertNotIn, 'A', (c.upper() for c in 'cba' if (c != 'b')))
        self._testAssertionErrorRE("'a' was not found in 'xyz'", self.assertIn, 'a', 'xyz')
        self._testAssertionErrorRE('5 was not found in \\[0, 1, 2\\]', self.assertIn, 5, range(3))
        self._testAssertionErrorRE("'A' was not found in \\['C', 'B'\\]", self.assertIn, 'A', (c.upper() for c in 'cba' if (c != 'a')))
        logSys = DefLogSys
        self.pruneLog()
        logSys.debug('test "xyz"')
        self.assertLogged('test "xyz"')
        self.assertLogged('test', 'xyz', all=True)
        self.assertNotLogged('test', 'zyx', all=False)
        self.assertNotLogged('test_zyx', 'zyx', all=True)
        self.assertLogged('test', 'zyx', all=False)
        self.pruneLog()
        logSys.debug('xxxx "xxx"')
        self.assertNotLogged('test "xyz"')
        self.assertNotLogged('test', 'xyz', all=False)
        self.assertNotLogged('test', 'xyz', 'zyx', all=True)
        # maxWaitTime: scaled down by factor 10 in "fast" mode for plain ints only
        (orgfast, unittest.F2B.fast) = (unittest.F2B.fast, False)
        self.assertFalse(isinstance(unittest.F2B.maxWaitTime(True), bool))
        self.assertEqual(unittest.F2B.maxWaitTime((lambda : 50))(), 50)
        self.assertEqual(unittest.F2B.maxWaitTime(25), 25)
        self.assertEqual(unittest.F2B.maxWaitTime(25.0), 25.0)
        unittest.F2B.fast = True
        try:
            self.assertEqual(unittest.F2B.maxWaitTime((lambda : 50))(), 50)
            self.assertEqual(unittest.F2B.maxWaitTime(25), 2.5)
            self.assertEqual(unittest.F2B.maxWaitTime(25.0), 25.0)
        finally:
            unittest.F2B.fast = orgfast
        self.assertFalse(unittest.F2B.maxWaitTime(False))
        self.pruneLog()
        logSys.debug('test "xyz"')
        self._testAssertionErrorRE('.* was found in the log', self.assertNotLogged, 'test "xyz"')
        self._testAssertionErrorRE('All of the .* were found present in the log', self.assertNotLogged, 'test "xyz"', 'test')
        self._testAssertionErrorRE('was found in the log', self.assertNotLogged, 'test', 'xyz', all=True)
        self._testAssertionErrorRE('was not found in the log', self.assertLogged, 'test', 'zyx', all=True)
        self._testAssertionErrorRE('was not found in the log, waited 1e-06', self.assertLogged, 'test', 'zyx', all=True, wait=1e-06)
        self._testAssertionErrorRE('None among .* was found in the log', self.assertLogged, 'test_zyx', 'zyx', all=False)
        self._testAssertionErrorRE('None among .* was found in the log, waited 1e-06', self.assertLogged, 'test_zyx', 'zyx', all=False, wait=1e-06)
        self._testAssertionErrorRE('All of the .* were found present in the log', self.assertNotLogged, 'test', 'xyz', all=False)
        self.assertDictEqual({'A': [1, 2]}, {'A': [1, 2]})
        self.assertRaises(AssertionError, self.assertDictEqual, {'A': [1, 2]}, {'A': [2, 1]})
        # assertSortedEqual: order-insensitive compare, optionally recursive (level)
        self.assertSortedEqual(['A', 'B'], ['B', 'A'])
        self.assertSortedEqual([['A', 'B']], [['B', 'A']], level=2)
        self.assertSortedEqual([['A', 'B']], [['B', 'A']], nestedOnly=False)
        self.assertRaises(AssertionError, (lambda : self.assertSortedEqual([['A', 'B']], [['B', 'A']], level=1, nestedOnly=True)))
        self.assertSortedEqual({'A': ['A', 'B']}, {'A': ['B', 'A']}, nestedOnly=False)
        self.assertRaises(AssertionError, (lambda : self.assertSortedEqual({'A': ['A', 'B']}, {'A': ['B', 'A']}, level=1, nestedOnly=True)))
        self.assertSortedEqual(['Z', {'A': ['B', 'C'], 'B': ['E', 'F']}], [{'B': ['F', 'E'], 'A': ['C', 'B']}, 'Z'], nestedOnly=False)
        self.assertSortedEqual(['Z', {'A': ['B', 'C'], 'B': ['E', 'F']}], [{'B': ['F', 'E'], 'A': ['C', 'B']}, 'Z'], level=(- 1))
        self.assertRaises(AssertionError, (lambda : self.assertSortedEqual(['Z', {'A': ['B', 'C'], 'B': ['E', 'F']}], [{'B': ['F', 'E'], 'A': ['C', 'B']}, 'Z'], nestedOnly=True)))
        self.assertSortedEqual((0, [['A1'], ['A2', 'A1'], []]), (0, [['A1'], ['A1', 'A2'], []]))
        self.assertSortedEqual(list('ABC'), list('CBA'))
        self.assertRaises(AssertionError, self.assertSortedEqual, ['ABC'], ['CBA'])
        self.assertRaises(AssertionError, self.assertSortedEqual, [['ABC']], [['CBA']])
        self._testAssertionErrorRE("\\['A'\\] != \\['C', 'B'\\]", self.assertSortedEqual, ['A'], ['C', 'B'])
        self._testAssertionErrorRE("\\['A', 'B'\\] != \\['B', 'C'\\]", self.assertSortedEqual, ['A', 'B'], ['C', 'B'])
    def testVerbosityFormat(self):
        """getVerbosityFormat() builds the logging format string for a verbosity level."""
        self.assertEqual(getVerbosityFormat(1), '%(asctime)s %(name)-24s[%(process)d]: %(levelname)-7s %(message)s')
        self.assertEqual(getVerbosityFormat(1, padding=False), '%(asctime)s %(name)s[%(process)d]: %(levelname)s %(message)s')
        self.assertEqual(getVerbosityFormat(1, addtime=False, padding=False), '%(name)s[%(process)d]: %(levelname)s %(message)s')
    def testFormatterWithTraceBack(self):
        """FormatterWithTraceBack fills %(tb)s / %(tbc)s placeholders in records."""
        strout = StringIO()
        Formatter = FormatterWithTraceBack
        fmt = ' %(tb)s | %(tbc)s : %(message)s'
        logSys = getLogger('fail2ban_tests')
        out = logging.StreamHandler(strout)
        out.setFormatter(Formatter(fmt))
        logSys.addHandler(out)
        logSys.error('XXX')
        s = strout.getvalue()
        self.assertTrue(s.rstrip().endswith(': XXX'))
        pindex = s.index('|')
        self.assertTrue((pindex > 10))
        # tb and tbc parts must be identical up to the separator
        self.assertEqual(s[:pindex], s[(pindex + 1):((pindex * 2) + 1)])
    def testLazyLogging(self):
        """Surplus logging arguments must not crash the (lazy) logger."""
        logSys = DefLogSys
        logSys.debug('lazy logging: %r', unittest.F2B.log_lazy)
        logSys.notice('test', 1, 2, 3)
        self.assertLogged('not all arguments converted')
class NotificationReceiver(object):
    """Receive SNMP TRAP/INFORM PDUs via an SNMP engine and hand the
    variable-bindings to a user callback; INFORMs are acknowledged."""
    # PDU types this receiver registers for (v1 trap, v2c trap, v2c inform)
    SUPPORTED_PDU_TYPES = (v1.TrapPDU.tagSet, v2c.SNMPv2TrapPDU.tagSet, v2c.InformRequestPDU.tagSet)
    def __init__(self, snmpEngine, cbFun, cbCtx=None):
        """Register processPdu for the supported PDU types on `snmpEngine`.

        :param cbFun: called as cbFun(snmpEngine, stateReference,
            contextEngineId, contextName, varBinds, cbCtx) for every notification.
        :param cbCtx: opaque context object passed back to cbFun.
        """
        snmpEngine.msgAndPduDsp.registerContextEngineId(null, self.SUPPORTED_PDU_TYPES, self.processPdu)
        self.__snmpTrapCommunity = ''
        self.__cbFun = cbFun
        self.__cbCtx = cbCtx
        # Observer closure (captures self) that remembers the community string
        # of the incoming v1/v2c message for the later v1->v2 PDU translation.
        def storeSnmpTrapCommunity(snmpEngine, execpoint, variables, cbCtx):
            self.__snmpTrapCommunity = variables.get('communityName', '')
        snmpEngine.observer.registerObserver(storeSnmpTrapCommunity, 'rfc2576.processIncomingMsg')
    def close(self, snmpEngine):
        """Unregister from the engine and drop callback references."""
        snmpEngine.msgAndPduDsp.unregisterContextEngineId(null, self.SUPPORTED_PDU_TYPES)
        self.__cbFun = self.__cbCtx = None
    def processPdu(self, snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, maxSizeResponseScopedPDU, stateReference):
        """Engine callback: normalize the PDU, ACK confirmed-class (INFORM) PDUs,
        then invoke the user callback with the var-binds."""
        if (messageProcessingModel == 0):
            # SNMPv1: translate to v2c form, keep the original for the response
            origPdu = PDU
            PDU = rfc2576.v1ToV2(PDU, snmpTrapCommunity=self.__snmpTrapCommunity)
        else:
            origPdu = None
        errorStatus = 'noError'
        errorIndex = 0
        varBinds = v2c.apiPDU.getVarBinds(PDU)
        # `(flag and logger(...))` is pysnmp's conditional-debug-logging idiom
        ((debug.logger & debug.FLAG_APP) and debug.logger(('processPdu: stateReference %s, varBinds %s' % (stateReference, varBinds))))
        if (PDU.tagSet in rfc3411.CONFIRMED_CLASS_PDUS):
            # INFORM: build and send a success response
            rspPDU = v2c.apiPDU.getResponse(PDU)
            v2c.apiPDU.setErrorStatus(rspPDU, errorStatus)
            v2c.apiPDU.setErrorIndex(rspPDU, errorIndex)
            v2c.apiPDU.setVarBinds(rspPDU, varBinds)
            ((debug.logger & debug.FLAG_APP) and debug.logger(('processPdu: stateReference %s, confirm PDU %s' % (stateReference, rspPDU.prettyPrint()))))
            if (messageProcessingModel == 0):
                rspPDU = rfc2576.v2ToV1(rspPDU, origPdu)
            statusInformation = {}
            try:
                snmpEngine.msgAndPduDsp.returnResponsePdu(snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, rspPDU, maxSizeResponseScopedPDU, stateReference, statusInformation)
            except error.StatusInformation as exc:
                # Response could not be sent: count it as a silent drop
                ((debug.logger & debug.FLAG_APP) and debug.logger(('processPdu: stateReference %s, statusInformation %s' % (stateReference, exc))))
                mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
                (snmpSilentDrops,) = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpSilentDrops')
                snmpSilentDrops.syntax += 1
        elif (PDU.tagSet in rfc3411.UNCONFIRMED_CLASS_PDUS):
            pass
        else:
            raise error.ProtocolError(('Unexpected PDU class %s' % PDU.tagSet))
        ((debug.logger & debug.FLAG_APP) and debug.logger(('processPdu: stateReference %s, user cbFun %s, cbCtx %s, varBinds %s' % (stateReference, self.__cbFun, self.__cbCtx, varBinds))))
        self.__cbFun(snmpEngine, stateReference, contextEngineId, contextName, varBinds, self.__cbCtx)
def migrate_cluster(cluster):
    """Upgrade a (de)serialized cluster object to the current attribute layout.

    - renames legacy underscore-prefixed user-key attributes,
    - replaces each node's legacy ``image`` attribute with ``image_id``
      (keeping an existing ``image_id`` if one is already set),
    - supplies a default ``thread_pool_max_size`` when missing.

    Returns the same (mutated) cluster object.
    """
    legacy_renames = (
        ('_user_key_public', 'user_key_public'),
        ('_user_key_private', 'user_key_private'),
        ('_user_key_name', 'user_key_name'),
    )
    for legacy_name, current_name in legacy_renames:
        if hasattr(cluster, legacy_name):
            setattr(cluster, current_name, getattr(cluster, legacy_name))
            delattr(cluster, legacy_name)
    for node_group in cluster.nodes.values():
        for node in node_group:
            if hasattr(node, 'image'):
                # prefer an already-present image_id over the legacy attribute
                node.image_id = getattr(node, 'image_id', None) or node.image
                delattr(node, 'image')
    if not hasattr(cluster, 'thread_pool_max_size'):
        cluster.thread_pool_max_size = 10
    return cluster
def extractYeboisnovelsWordpressCom(item):
    """Build a release message for 'yeboisnovels.wordpress.com' feed items.

    Returns None for previews or items without a chapter/volume number,
    a release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    known_tags = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in known_tags:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
.parametrize(('words', 'score_expected'), [((' 85.0), (('www.github,com',), 100.0), (('some random words',), 0.0)])
def test_url_magic_score(ocr_result, words, score_expected):
ocr_result.words = [{'text': w} for w in words]
magic = UrlMagic()
score = magic.score(ocr_result)
assert (score == score_expected) |
class OptionSeriesDumbbellSonificationDefaultspeechoptions(Options):
    """Generated accessors for the Highcharts
    `series.dumbbell.sonification.defaultSpeechOptions` option tree.

    Getters return the configured (or documented default) value; setters write
    the value into the config. NOTE(review): each getter/setter pair shares one
    name -- in the upstream generator these are decorated with @property /
    @<name>.setter; those decorators are absent here, so as written each setter
    def shadows its getter. Confirm against the generated original.
    """
    def activeWhen(self) -> 'OptionSeriesDumbbellSonificationDefaultspeechoptionsActivewhen':
        return self._config_sub_data('activeWhen', OptionSeriesDumbbellSonificationDefaultspeechoptionsActivewhen)
    def language(self):
        # default: 'en-US'
        return self._config_get('en-US')
    def language(self, text: str):
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionSeriesDumbbellSonificationDefaultspeechoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesDumbbellSonificationDefaultspeechoptionsMapping)
    def pointGrouping(self) -> 'OptionSeriesDumbbellSonificationDefaultspeechoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesDumbbellSonificationDefaultspeechoptionsPointgrouping)
    def preferredVoice(self):
        return self._config_get(None)
    def preferredVoice(self, text: str):
        self._config(text, js_type=False)
    def showPlayMarker(self):
        # default: True
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    def type(self):
        # default: 'speech'
        return self._config_get('speech')
    def type(self, text: str):
        self._config(text, js_type=False)
def check_updates_expected(app):
    """Lint check: yield a warning when an app that is archived or has no
    source still has automatic update checking configured."""
    archived_or_sourceless = app.get('NoSourceSince') or app.get('ArchivePolicy') == 0
    if archived_or_sourceless and not all(
        app.get(key, 'None') == 'None'
        for key in ('AutoUpdateMode', 'UpdateCheckMode')
    ):
        yield _('App has NoSourceSince or ArchivePolicy "0 versions" or 0 but AutoUpdateMode or UpdateCheckMode are not None')
def select_outgroups(target, n2content, splitterconf):
    """Pick the best outgroup clade for `target` and return (seqs, outs):
    the leaf-name sets of the target clade and of the chosen outgroup.

    Candidates are ranked by support, closeness to the configured outgroup
    size, and distance to the target.

    :raises TaskError: for a root target, a zero outgroup size, or when no
        candidate meets the minimum support.
    """
    name2dist = {'min': _min, 'max': _max, 'mean': _mean, 'median': _median}
    out_topodist = tobool(splitterconf['_outgroup_topology_dist'])
    optimal_out_size = int(splitterconf['_max_outgroup_size'])
    out_min_support = float(splitterconf['_outgroup_min_support'])
    if (not target.up):
        raise TaskError(None, 'Cannot select outgroups for the root node!')
    if (not optimal_out_size):
        raise TaskError(None, 'You are trying to set 0 outgroups!')
    # distances from every node to the target (topological or branch-length)
    n2targetdist = distance_matrix_new(target, leaf_only=False, topological=out_topodist)
    # score components (higher is better): support, size closeness, proximity.
    # NOTE: relies on `max_dist`, assigned further down before first call.
    score = (lambda _n: (_n.support, (1 - (abs((optimal_out_size - len(n2content[_n]))) / float(max(optimal_out_size, len(n2content[_n]))))), (1 - (n2targetdist[_n] / max_dist))))
    def sort_outgroups(x, y):
        # Python-2-style cmp function: compare score component sets from the
        # smallest element up. NOTE(review): `cmp` and list.sort(cmpfunc) are
        # Python-2-only; also `v` would be unbound if score_x started empty --
        # presumably unreachable since scores are non-empty 3-tuples. Confirm.
        score_x = set(score(x))
        score_y = set(score(y))
        while score_x:
            min_score_x = min(score_x)
            v = cmp(min_score_x, min(score_y))
            if (v == 0):
                score_x.discard(min_score_x)
                score_y.discard(min_score_x)
            else:
                break
        if (v == 0):
            # full tie: fall back to a deterministic id-based order
            v = cmp(x.cladeid, y.cladeid)
        return v
    max_dist = max(n2targetdist.values())
    # candidates must be disjoint from the target clade and well supported
    valid_nodes = [n for n in n2targetdist if ((not (n2content[n] & n2content[target])) and (n.support >= out_min_support))]
    if (not valid_nodes):
        raise TaskError(None, ('Could not find a suitable outgroup (min_support=%s)' % out_min_support))
    valid_nodes.sort(sort_outgroups, reverse=True)
    best_outgroup = valid_nodes[0]
    seqs = [n.name for n in n2content[target]]
    outs = [n.name for n in n2content[best_outgroup]]
    log.log(20, 'Found possible outgroup of size %s: score (support,size,dist)=%s', len(outs), score(best_outgroup))
    log.log(20, 'Supports: %0.2f (children=%s)', best_outgroup.support, ','.join([('%0.2f' % ch.support) for ch in best_outgroup.children]))
    if DEBUG():
        # interactive visualization: highlight the chosen outgroup leaves
        root = target.root
        for _seq in outs:
            tar = (root & _seq)
            tar.img_style['fgcolor'] = 'green'
            tar.img_style['size'] = 12
            tar.img_style['shape'] = 'circle'
        target.img_style['bgcolor'] = 'lightblue'
        NPR_TREE_STYLE.title.clear()
        NPR_TREE_STYLE.title.add_face(faces.TextFace('MainTree: Outgroup selection is mark in green. Red=optimized nodes ', fgcolor='blue'), 0)
        root.show(tree_style=NPR_TREE_STYLE)
        for _n in root.traverse():
            _n.img_style = None
    return (set(seqs), set(outs))
def _get_apk_icons_src(apkfile, icon_name):
    """Map screen densities to icon PNG paths found inside an APK.

    Scans the zip entries for ``res/<qualifiers>/<icon_name>.png``, resolving
    each resource qualifier to a density via ``screen_resolutions``; entries
    without a recognizable density qualifier fall back to '160' (mdpi). The
    '-1' (unqualified/default) slot is filled from '160' when absent.
    """
    icon_pattern = re.compile('^res/(.*)/{}\\.png$'.format(icon_name))
    icons_src = {}
    with zipfile.ZipFile(apkfile) as apk:
        for entry in apk.namelist():
            match = icon_pattern.match(entry)
            if not match:
                continue
            qualifiers = match.group(1).split('-')
            try:
                density = screen_resolutions[qualifiers[1]]
            except Exception:
                # no density qualifier (or an unknown one): treat as mdpi
                density = '160'
            icons_src[density] = match.group(0)
    if icons_src.get('-1') is None and '160' in icons_src:
        icons_src['-1'] = icons_src['160']
    return icons_src
class hw_at_t0(object):
    """Initial condition (t = 0) for the h*w field of a solitary wave over
    bathymetry; relies on module-level wave parameters z, x0, c, h0."""
    def uOfXT(self, X, t):
        """Evaluate the field at spatial point X and time t."""
        hTilde = solitary_wave(X[0], 0)
        # water depth, clipped at zero where the wave is below the bed
        depth = max(hTilde - bathymetry_function(X), 0.0)
        # spatial derivative of the solitary-wave profile
        hTildePrime = -2.0 * z * hTilde * np.tanh(z * (X[0] - x0 - c * t))
        return -(depth ** 2) * (c * h0 * hTildePrime / (h0 + hTilde) ** 2)
class OptionSeriesDumbbellLabel(Options):
    """Generated accessors for the Highcharts `series.dumbbell.label` option
    tree.

    Getters return the configured (or documented default) value; setters write
    the value into the config. NOTE(review): each getter/setter pair shares one
    name -- in the upstream generator these are decorated with @property /
    @<name>.setter; those decorators are absent here, so as written each setter
    def shadows its getter. Confirm against the generated original.
    """
    def boxesToAvoid(self):
        return self._config_get(None)
    def boxesToAvoid(self, value: Any):
        self._config(value, js_type=False)
    def connectorAllowed(self):
        # default: False
        return self._config_get(False)
    def connectorAllowed(self, flag: bool):
        self._config(flag, js_type=False)
    def connectorNeighbourDistance(self):
        # default: 24
        return self._config_get(24)
    def connectorNeighbourDistance(self, num: float):
        self._config(num, js_type=False)
    def enabled(self):
        # default: True
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def format(self):
        return self._config_get('undefined')
    def format(self, text: str):
        self._config(text, js_type=False)
    def formatter(self):
        return self._config_get('undefined')
    def formatter(self, value: Any):
        self._config(value, js_type=False)
    def maxFontSize(self):
        return self._config_get(None)
    def maxFontSize(self, num: float):
        self._config(num, js_type=False)
    def minFontSize(self):
        return self._config_get(None)
    def minFontSize(self, num: float):
        self._config(num, js_type=False)
    def onArea(self):
        return self._config_get(None)
    def onArea(self, flag: bool):
        self._config(flag, js_type=False)
    def style(self) -> 'OptionSeriesDumbbellLabelStyle':
        return self._config_sub_data('style', OptionSeriesDumbbellLabelStyle)
    def useHTML(self):
        # default: False
        return self._config_get(False)
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
class FailTicket(Ticket):
    """A Ticket that additionally tracks failure count (`retry`) and the time
    of the first observed failure, for ban-decision bookkeeping."""
    def __init__(self, ip=None, time=None, matches=None, data={}, ticket=None):
        # NOTE(review): mutable default `data={}` -- base Ticket presumably
        # copies it; confirm before changing to the `data=None` idiom.
        self._firstTime = None
        self._retry = 1
        Ticket.__init__(self, ip, time, matches, data, ticket)
        if (not isinstance(ticket, FailTicket)):
            # fresh ticket (not a copy): initialize first-failure time and
            # retry count from the supplied data
            self._firstTime = (time if (time is not None) else self.getTime())
            self._retry = self._data.get('failures', 1)
    def setRetry(self, value):
        """Set the retry counter; a zero value also clears failures/matches."""
        self._retry = value
        if (not self._data['failures']):
            self._data['failures'] = 1
        if (not value):
            self._data['failures'] = 0
            self._data['matches'] = []
    def getRetry(self):
        """Return the current retry (failure) counter."""
        return self._retry
    def adjustTime(self, time, maxTime):
        """Advance the ticket to `time`, scaling the retry count down
        proportionally when the first failure falls outside the `maxTime`
        observation window."""
        if (time > self._time):
            if (self._firstTime < (time - maxTime)):
                # keep only the fraction of retries inside the window
                self._retry = int(round(((self._retry / float((time - self._firstTime))) * maxTime)))
                self._firstTime = (time - maxTime)
            self._time = time
    def inc(self, matches=None, attempt=1, count=1):
        """Register further failure(s): bump retry by `count`, failures by
        `attempt`, and append any new matched log lines."""
        self._retry += count
        self._data['failures'] += attempt
        if matches:
            if self._data['matches']:
                self._data['matches'] = (self._data['matches'] + matches)
            else:
                self._data['matches'] = matches
    def wrap(o):
        # NOTE(review): used without `self` -- presumably invoked as a static
        # helper (FailTicket.wrap(ticket)) to re-class a plain Ticket in place;
        # a @staticmethod decorator may have been stripped. Confirm.
        o.__class__ = FailTicket
        return o
# NOTE(review): this bare `_request` expression looks like the tail of a
# stripped decorator (presumably `@app.after_request`); as written it is a
# lone name reference and the function below is never registered -- confirm
# against the original module.
_request
def after_request_func(response):
    """Flask after-request hook: attach permissive CORS headers.

    OPTIONS preflight requests get a fresh empty response carrying the full
    set of allowed methods/headers; all other responses only get the
    credentials and (echoed) origin headers.
    """
    origin = request.headers.get('Origin')
    if (request.method == 'OPTIONS'):
        response = make_response()
        response.headers.add('Access-Control-Allow-Credentials', 'true')
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
        response.headers.add('Access-Control-Allow-Headers', 'x-csrf-token')
        response.headers.add('Access-Control-Allow-Methods', 'GET, POST, OPTIONS, PUT, PATCH, DELETE')
        if origin:
            # echo the caller's origin (required when credentials are allowed)
            response.headers.add('Access-Control-Allow-Origin', origin)
    else:
        response.headers.add('Access-Control-Allow-Credentials', 'true')
        if origin:
            response.headers.add('Access-Control-Allow-Origin', origin)
    return response
def iter_entities_including_other(seq: List[str]) -> Iterable[Tuple[(str, int, int)]]:
    """Yield (tag, start, end) spans from a BIO-style label sequence.

    Unlike a plain entity iterator, runs of 'O' labels are yielded too.
    A new span starts on every 'B' prefix or whenever the tag changes;
    `end` is the inclusive index of the span's last element.
    """
    current_tag = 'O'
    span_start = 0
    for position, prefixed_tag in enumerate(seq):
        prefix, tag = get_split_prefix_label(prefixed_tag)
        if prefix == 'B' or tag != current_tag:
            if span_start < position:
                yield (current_tag, span_start, position - 1)
            current_tag = tag
            span_start = position
    # flush the trailing span (also handles the non-empty single-span case)
    if span_start < len(seq):
        yield (current_tag, span_start, len(seq) - 1)
class bad_request_error_msg(error_msg):
    """OpenFlow 1.0 OFPT_ERROR message with err_type OFPET_BAD_REQUEST.

    NOTE(review): generated (loxigen-style), Python-2-era code -- pack()
    joins struct-packed values with ''.join and unpack() wraps payload bytes
    in str(); `unpack(reader)` has no `self`, presumably a stripped
    @staticmethod. Confirm against the generator output.
    """
    # wire constants: OF version 1.0, message type OFPT_ERROR, OFPET_BAD_REQUEST
    version = 1
    type = 1
    err_type = 1
    def __init__(self, xid=None, code=None, data=None):
        """Initialize with transaction id, OFPBRC_* code, and opaque payload."""
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (code != None):
            self.code = code
        else:
            self.code = 0
        if (data != None):
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize to the OpenFlow wire format, back-patching the length."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder (slot 2)
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.err_type))
        packed.append(struct.pack('!H', self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)
    def unpack(reader):
        """Deserialize from `reader`; asserts the fixed header constants."""
        obj = bad_request_error_msg()
        _version = reader.read('!B')[0]
        assert (_version == 1)
        _type = reader.read('!B')[0]
        assert (_type == 1)
        _length = reader.read('!H')[0]
        # re-slice the reader to exactly this message's bytes (4 already read)
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _err_type = reader.read('!H')[0]
        assert (_err_type == 1)
        obj.code = reader.read('!H')[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        """Field-wise equality on xid, code and data."""
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.code != other.code):
            return False
        if (self.data != other.data):
            return False
        return True
    def pretty_print(self, q):
        """Render a human-readable dump via the pretty-printer `q`."""
        q.text('bad_request_error_msg {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('code = ')
                # show the symbolic OFPBRC_* name when the code is known
                value_name_map = {0: 'OFPBRC_BAD_VERSION', 1: 'OFPBRC_BAD_TYPE', 2: 'OFPBRC_BAD_STAT', 3: 'OFPBRC_BAD_EXPERIMENTER', 4: 'OFPBRC_BAD_SUBTYPE', 5: 'OFPBRC_EPERM', 6: 'OFPBRC_BAD_LEN', 7: 'OFPBRC_BUFFER_EMPTY', 8: 'OFPBRC_BUFFER_UNKNOWN'}
                if (self.code in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.code], self.code)))
                else:
                    q.text(('%#x' % self.code))
                q.text(',')
                q.breakable()
                q.text('data = ')
                q.pp(self.data)
        q.breakable()
        q.text('}')
class Environment():
    """Test-environment helper delegating to a dbt runner for the given
    target and project directory."""
    def __init__(self, target: str, project_dir: str):
        self.dbt_runner = dbt_project.get_dbt_runner(target, project_dir)
    def clear(self):
        """Drop leftover state via the `elementary_tests.clear_env` dbt operation."""
        self.dbt_runner.run_operation('elementary_tests.clear_env')
    def init(self):
        """Seed the environment: run the 'init' selector, then the elementary models."""
        self.dbt_runner.run(selector='init')
        self.dbt_runner.run(select='elementary')
class OptionPlotoptionsVariablepieAccessibility(Options):
    """Generated accessors for the Highcharts
    `plotOptions.variablepie.accessibility` option tree.

    Getters return the configured (or default) value; setters write the value
    into the config. NOTE(review): each getter/setter pair shares one name --
    upstream these carry @property / @<name>.setter decorators, absent here,
    so as written each setter def shadows its getter. Confirm against the
    generated original.
    """
    def description(self):
        return self._config_get(None)
    def description(self, text: str):
        self._config(text, js_type=False)
    def descriptionFormat(self):
        return self._config_get(None)
    def descriptionFormat(self, text: str):
        self._config(text, js_type=False)
    def enabled(self):
        return self._config_get(None)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def exposeAsGroupOnly(self):
        return self._config_get(None)
    def exposeAsGroupOnly(self, flag: bool):
        self._config(flag, js_type=False)
    def keyboardNavigation(self) -> 'OptionPlotoptionsVariablepieAccessibilityKeyboardnavigation':
        return self._config_sub_data('keyboardNavigation', OptionPlotoptionsVariablepieAccessibilityKeyboardnavigation)
    def point(self) -> 'OptionPlotoptionsVariablepieAccessibilityPoint':
        return self._config_sub_data('point', OptionPlotoptionsVariablepieAccessibilityPoint)
# NOTE(review): the two expressions below look like stripped mock decorators
# (presumably `@patch('ecs_deploy.ecs.logger')` and
# `@patch.object(EcsClient, '__init__')`) that inject the `logger` and
# `client` arguments -- as written they are no-ops; confirm against the
# original module.
('ecs_deploy.ecs.logger')
(EcsClient, '__init__')
def test_is_not_deployed_with_failed_tasks(client, logger, service_with_failed_tasks):
    """is_deployed() logs a warning when some tasks failed to start."""
    client.list_tasks.return_value = RESPONSE_LIST_TASKS_0
    action = EcsAction(client, CLUSTER_NAME, SERVICE_NAME)
    action.is_deployed(service_with_failed_tasks)
    logger.warning.assert_called_once_with('3 tasks failed to start')
class Command(BaseCommand):
    """Django management command: subscribe to an MQTT topic with a stored
    Client and persist incoming payloads into Topic/Data records."""
    help = _('Connect with client as subscriber, for real time update proposed')
    # DB Client instance used by on_message (set in handle())
    client_db = None
    # whether missing Topic/Data rows are created on incoming messages
    create_if_not_exist = False
    # when True, updates bypass model.save() (and thus Django signals)
    use_update = False
    def add_arguments(self, parser):
        """Declare the CLI arguments of the command."""
        parser.add_argument('topic', action='store', type=str, default=None, help=_('Subcribe topic'))
        parser.add_argument('--id', action='store', type=int, default=None, dest='id', help=_('id from DB object'))
        parser.add_argument('--qos', action='store', type=int, default=0, dest='qos', help=_('Quality of Service'))
        parser.add_argument('--client_id', action='store', type=str, default=None, dest='client_id', help=_('client_id for broken'))
        parser.add_argument('--update', action='store_true', default=False, dest='update', help=_('Use update method to save the updates, this will not run the django signals'))
    def handle(self, *args, **options):
        """Resolve the DB Client (by id, client_id, or interactive prompt),
        connect to its broker and loop forever on the subscribed topic."""
        if (not options['topic']):
            raise CommandError(_('Topic requiered and must be only one'))
        apply_filter = {}
        self.use_update = options['update']
        db_client_id = options['id']
        if (db_client_id is None):
            if options['client_id']:
                apply_filter['client_id'] = options['client_id']
            clients = Client.objects.filter(**apply_filter)
            if (clients.count() == 1):
                # unambiguous match: use it directly
                db_client_id = clients.all()[0].pk
            else:
                if (clients.all().count() == 0):
                    raise CommandError(_('No client on DB'))
                # several candidates: list them and ask interactively
                self.stdout.write('id -> client')
                for obj in clients.all():
                    self.stdout.write('{} \t-> {}'.format(obj.pk, obj))
                db_client_id = input('Select id from DB: ')
        self.stdout.write('Started')
        try:
            client_db = Client.objects.get(pk=db_client_id)
            self.client_db = client_db
            cli = client_db.get_mqtt_client()
            cli.on_message = self.on_message
            cli.connect(client_db.server.host, client_db.server.port, client_db.keepalive)
            cli.subscribe(options['topic'], options['qos'])
            # blocks until interrupted; disconnect runs only after the loop ends
            cli.loop_forever()
            cli.disconnect()
        except Client.DoesNotExist:
            raise CommandError(_('Client not exist'))
    def on_message(self, client, userdata, message):
        """MQTT callback: store the payload/qos of `message` in the matching
        Data row (optionally creating Topic/Data when allowed)."""
        if (not self.client_db):
            return
        self.stdout.write('New message to {}'.format(message.topic))
        topics = Topic.objects.filter(name=message.topic)
        topic = None
        if topics.exists():
            topic = topics.get()
        elif self.create_if_not_exist:
            topic = Topic.objects.create(name=message.topic)
        if (not topic):
            return
        datas = Data.objects.filter(topic=topic, client=self.client_db)
        data = None
        if (datas.count() == 1):
            data = datas.get()
        elif self.create_if_not_exist:
            data = Data.objects.create(topic=topic, client=self.client_db)
        if data:
            if self.use_update:
                # queryset.update() skips model.save() and signals by design
                Data.objects.filter(pk=data.pk).update(payload=message.payload, qos=message.qos)
            else:
                data.payload = message.payload
                data.qos = message.qos
                data.save()
            self.stdout.write('Updated topic {}'.format(message.topic))
class AmazonImageApi(ImageInterface):
def image__object_detection(self, file: str, model: str=None, file_url: str='') -> ResponseType[ObjectDetectionDataClass]:
with open(file, 'rb') as file_:
file_content = file_.read()
payload = {'Image': {'Bytes': file_content}, 'MinConfidence': 70}
original_response = handle_amazon_call(self.clients['image'].detect_labels, **payload)
items = []
for object_label in original_response.get('Labels'):
if object_label.get('Instances'):
bounding_box = object_label.get('Instances')[0].get('BoundingBox')
(x_min, x_max) = (bounding_box.get('Left'), (bounding_box.get('Left') + bounding_box.get('Width')))
(y_min, y_max) = (bounding_box.get('Top'), (bounding_box.get('Top') + bounding_box.get('Height')))
else:
(x_min, x_max, y_min, y_max) = (None, None, None, None)
items.append(ObjectItem(label=object_label.get('Name'), confidence=(object_label.get('Confidence') / 100), x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max))
return ResponseType[ObjectDetectionDataClass](original_response=original_response, standardized_response=ObjectDetectionDataClass(items=items))
def image__face_detection(self, file: str, file_url: str='') -> ResponseType[FaceDetectionDataClass]:
with open(file, 'rb') as file_:
file_content = file_.read()
payload = {'Image': {'Bytes': file_content}, 'Attributes': ['ALL']}
original_response = handle_amazon_call(self.clients['image'].detect_faces, **payload)
faces_list = []
for face in original_response.get('FaceDetails', []):
age_output = None
age_range = face.get('AgeRange')
if age_range:
age_output = ((age_range.get('Low', 0.0) + age_range.get('High', 100)) / 2)
features = FaceFeatures(eyes_open=(face.get('eyes_open', {}).get('Confidence', 0.0) / 100), smile=(face.get('smile', {}).get('Confidence', 0.0) / 100), mouth_open=(face.get('mouth_open', {}).get('Confidence', 0.0) / 100))
accessories = FaceAccessories(sunglasses=(face.get('Sunglasses', {}).get('Confidence', 0.0) / 100), eyeglasses=(face.get('Eyeglasses', {}).get('Confidence', 0.0) / 100), reading_glasses=None, swimming_goggles=None, face_mask=None, headwear=None)
facial_hair = FaceFacialHair(moustache=(face.get('Mustache', {}).get('Confidence', 0.0) / 100), beard=(face.get('Beard', {}).get('Confidence', 0.0) / 100), sideburns=None)
quality = FaceQuality(brightness=(face.get('Quality').get('Brightness', 0.0) / 100), sharpness=(face.get('Quality').get('Sharpness', 0.0) / 100), noise=None, exposure=None, blur=None)
emotion_output = {}
for emo in face.get('Emotions', []):
normalized_emo = (emo.get('Confidence', 0.0) * 100)
if emo.get('Type'):
if (emo.get('Type').lower() == 'happy'):
emo['Type'] = 'happiness'
emotion_output[emo.get('Type').lower()] = standardized_confidence_score((normalized_emo / 100))
emotions = FaceEmotions(anger=emotion_output.get('angry'), surprise=emotion_output.get('surprise'), fear=emotion_output.get('fear'), sorrow=emotion_output.get('sadness'), confusion=emotion_output.get('confused'), calm=emotion_output.get('calm'), disgust=emotion_output.get('disgsusted'), joy=emotion_output.get('happiness'), unknown=None, neutral=None, contempt=None)
landmarks_output = {}
for land in face.get('Landmarks'):
if (land.get('Type') and land.get('X') and land.get('Y')):
landmarks_output[land.get('Type')] = [land.get('X'), land.get('Y')]
landmarks = FaceLandmarks(left_eye=landmarks_output.get('eye_left', []), left_eye_top=landmarks_output.get('eye_leftUp', []), left_eye_right=landmarks_output.get('lefteye_right', []), left_eye_bottom=landmarks_output.get('leftEyeDown', []), left_eye_left=landmarks_output.get('leftEyeLeft', []), right_eye=landmarks_output.get('eye_right', []), right_eye_top=landmarks_output.get('eye_rightUp', []), right_eye_right=landmarks_output.get('eye_rightRight', []), right_eye_bottom=landmarks_output.get('rightEyeDown', []), right_eye_left=landmarks_output.get('rightEyeLeft', []), left_eyebrow_left=landmarks_output.get('leftEyeBrowLeft', []), left_eyebrow_right=landmarks_output.get('leftEyeBrowRight', []), left_eyebrow_top=landmarks_output.get('leftEyeBrowUp', []), right_eyebrow_left=landmarks_output.get('rightEyeBrowLeft', []), right_eyebrow_right=landmarks_output.get('rightEyeBrowRight', []), right_eyebrow_top=landmarks_output.get('rightEyeBrowUp', []), left_pupil=landmarks_output.get('leftPupil', []), right_pupil=landmarks_output.get('rightPupil', []), nose_tip=landmarks_output.get('nose', []), nose_bottom_right=landmarks_output.get('noseRight', []), nose_bottom_left=landmarks_output.get('noseLeft', []), mouth_left=landmarks_output.get('mouth_left', []), mouth_right=landmarks_output.get('mouth_right', []), mouth_top=landmarks_output.get('mouthUp', []), mouth_bottom=landmarks_output.get('mouthDown', []), chin_gnathion=landmarks_output.get('chinBottom', []), upper_jawline_left=landmarks_output.get('upperJawlineLeft', []), mid_jawline_left=landmarks_output.get('midJawlineLeft', []), mid_jawline_right=landmarks_output.get('midJawlineRight', []), upper_jawline_right=landmarks_output.get('upperJawlineRight', []))
poses = FacePoses(roll=face.get('Pose', {}).get('Roll'), yaw=face.get('Pose', {}).get('Yaw'), pitch=face.get('Pose', {}).get('Pitch'))
faces_list.append(FaceItem(age=age_output, gender=face.get('Gender', {}).get('Value'), facial_hair=facial_hair, features=features, accessories=accessories, quality=quality, emotions=emotions, landmarks=landmarks, poses=poses, confidence=(face.get('Confidence', 0.0) / 100), bounding_box=FaceBoundingBox(x_min=face.get('BoundingBox', {}).get('Left', 0.0), x_max=(face.get('BoundingBox', {}).get('Left', 0.0) + face.get('BoundingBox', {}).get('Width', 0.0)), y_min=face.get('BoundingBox', {}).get('Top', 0.0), y_max=(face.get('BoundingBox', {}).get('Top', 0.0) + face.get('BoundingBox', {}).get('Height', 0.0))), occlusions=FaceOcclusions.default(), makeup=FaceMakeup.default(), hair=FaceHair.default()))
standardized_response = FaceDetectionDataClass(items=faces_list)
return ResponseType[FaceDetectionDataClass](original_response=original_response, standardized_response=standardized_response)
def image__explicit_content(self, file: str, file_url: str='') -> ResponseType[ExplicitContentDataClass]:
    """Detect explicit/NSFW content in an image with Amazon Rekognition moderation labels."""
    with open(file, 'rb') as image_file:
        image_bytes = image_file.read()
    request_params = {'Image': {'Bytes': image_bytes}, 'MinConfidence': 20}
    response = handle_amazon_call(self.clients['image'].detect_moderation_labels, **request_params)
    items = []
    for label in response.get('ModerationLabels', []):
        label_name = label.get('Name')
        # Rekognition confidences are 0-100; normalize to 0-1.
        score = label.get('Confidence') / 100
        categories = CategoryType.choose_category_subcategory(label_name)
        items.append(ExplicitItem(label=label_name, category=categories['category'], subcategory=categories['subcategory'], likelihood=standardized_confidence_score(score), likelihood_score=score))
    nsfw_likelihood = ExplicitContentDataClass.calculate_nsfw_likelihood(items)
    nsfw_likelihood_score = ExplicitContentDataClass.calculate_nsfw_likelihood_score(items)
    standardized = ExplicitContentDataClass(items=items, nsfw_likelihood=nsfw_likelihood, nsfw_likelihood_score=nsfw_likelihood_score)
    return ResponseType[ExplicitContentDataClass](original_response=response, standardized_response=standardized)
def image__face_recognition__create_collection(self, collection_id: str) -> FaceRecognitionCreateCollectionDataClass:
    """Create a new (empty) Rekognition face collection with the given id."""
    handle_amazon_call(self.clients['image'].create_collection, CollectionId=collection_id)
    return FaceRecognitionCreateCollectionDataClass(collection_id=collection_id)
def image__face_recognition__list_collections(self) -> ResponseType[FaceRecognitionListCollectionsDataClass]:
    """List every Rekognition face-collection id available to this account."""
    response = handle_amazon_call(self.clients['image'].list_collections)
    standardized = FaceRecognitionListCollectionsDataClass(collections=response['CollectionIds'])
    return ResponseType[FaceRecognitionListCollectionsDataClass](original_response=response, standardized_response=standardized)
def image__face_recognition__list_faces(self, collection_id: str) -> ResponseType[FaceRecognitionListFacesDataClass]:
    """Return the ids of all faces indexed in the given collection."""
    response = handle_amazon_call(self.clients['image'].list_faces, CollectionId=collection_id)
    face_ids = [entry['FaceId'] for entry in response['Faces']]
    return ResponseType(original_response=response, standardized_response=FaceRecognitionListFacesDataClass(face_ids=face_ids))
def image__face_recognition__delete_collection(self, collection_id: str) -> ResponseType[FaceRecognitionDeleteCollectionDataClass]:
    """Delete an entire Rekognition face collection."""
    response = handle_amazon_call(self.clients['image'].delete_collection, CollectionId=collection_id)
    return ResponseType(original_response=response, standardized_response=FaceRecognitionDeleteCollectionDataClass(deleted=True))
def image__face_recognition__add_face(self, collection_id: str, file: str, file_url: str='') -> ResponseType[FaceRecognitionAddFaceDataClass]:
    """Index the faces found in *file* into a Rekognition collection.

    Raises:
        ProviderException: if Rekognition detects no face in the image.
    """
    with open(file, 'rb') as image_file:
        image_bytes = image_file.read()
    response = handle_amazon_call(self.clients['image'].index_faces, CollectionId=collection_id, Image={'Bytes': image_bytes})
    face_ids = [record['Face']['FaceId'] for record in response['FaceRecords']]
    if not face_ids:
        raise ProviderException('No face detected in the image')
    return ResponseType(original_response=response, standardized_response=FaceRecognitionAddFaceDataClass(face_ids=face_ids))
def image__face_recognition__delete_face(self, collection_id, face_id) -> ResponseType[FaceRecognitionDeleteFaceDataClass]:
    """Remove a single indexed face from a Rekognition collection."""
    response = handle_amazon_call(self.clients['image'].delete_faces, CollectionId=collection_id, FaceIds=[face_id])
    return ResponseType(original_response=response, standardized_response=FaceRecognitionDeleteFaceDataClass(deleted=True))
def image__face_recognition__recognize(self, collection_id: str, file: str, file_url: str='') -> ResponseType[FaceRecognitionRecognizeDataClass]:
    """Search a Rekognition collection for faces matching the face in *file*.

    Raises:
        ProviderException: if the collection contains no indexed faces.
    """
    # (Fixed: removed an unused local `client = self.clients['image']`.)
    with open(file, 'rb') as file_:
        file_content = file_.read()
    # Rekognition fails unhelpfully on an empty collection, so check first.
    list_faces = self.image__face_recognition__list_faces(collection_id)
    if len(list_faces.standardized_response.face_ids) == 0:
        raise ProviderException('Face Collection is empty.')
    payload = {'CollectionId': collection_id, 'Image': {'Bytes': file_content}}
    response = handle_amazon_call(self.clients['image'].search_faces_by_image, **payload)
    # Similarity is 0-100; normalize to 0-1.
    faces = [FaceRecognitionRecognizedFaceDataClass(confidence=(face['Similarity'] / 100), face_id=face['Face']['FaceId']) for face in response['FaceMatches']]
    return ResponseType(original_response=response, standardized_response=FaceRecognitionRecognizeDataClass(items=faces))
def image__face_compare(self, file1: str, file2: str, file1_url: str='', file2_url: str='') -> ResponseType[FaceCompareDataClass]:
    """Compare the faces in two images and return per-match similarity scores."""
    client = self.clients.get('image')
    with open(file1, 'rb') as source_file:
        image_source = {'Bytes': source_file.read()}
    with open(file2, 'rb') as target_file:
        image_tar = {'Bytes': target_file.read()}
    try:
        response = client.compare_faces(SourceImage=image_source, TargetImage=image_tar)
    except Exception as excp:
        # Surface any provider failure as a 400-style ProviderException.
        raise ProviderException(str(excp), code=400)
    face_match_list = []
    for face_match in response.get('FaceMatches', []):
        box = face_match['Face']['BoundingBox']
        similarity = face_match.get('Similarity') or 0
        bounding_box = FaceCompareBoundingBox(top=box['Top'], left=box['Left'], height=box['Height'], width=box['Width'])
        face_match_list.append(FaceMatch(confidence=(similarity / 100), bounding_box=bounding_box))
    return ResponseType(original_response=response, standardized_response=FaceCompareDataClass(items=face_match_list))
def load_mapping(name, filename, mapping_is_raw):
    """Load a key/value mapping file into a dict of bytes -> bytes.

    Two formats are supported:
      * raw:    ``key = value`` (whitespace around ``=`` tolerated)
      * quoted: ``"key" = "value"`` with backslash escape sequences decoded

    Lines that are empty or start with ``#`` are skipped; malformed lines are
    reported to stderr and skipped.  If the file does not exist, an empty
    mapping is returned after a warning.

    Args:
        name: human-readable label used in the "Loaded N <name>" message.
        filename: path of the mapping file.
        mapping_is_raw: True for the raw format, False for the quoted format.

    Returns:
        dict mapping key bytes to value bytes.
    """
    raw_regexp = re.compile(b'^([^=]+)[ ]*=[ ]*(.+)$')
    string_regexp = b'"(((\\.)|(\\")|[^"])*)"'
    quoted_regexp = re.compile(b'^' + string_regexp + b'[ ]*=[ ]*' + string_regexp + b'$')

    def parse_raw_line(line):
        m = raw_regexp.match(line)
        if m is None:
            return None
        return (m.group(1).strip(), m.group(2).strip())

    def process_unicode_escape_sequences(s):
        # Decode backslash escapes (\n, \uXXXX, ...) while round-tripping
        # non-ASCII bytes through backslashreplace so they survive intact.
        return s.decode('utf8').encode('ascii', 'backslashreplace').decode('unicode-escape').encode('utf8')

    def parse_quoted_line(line):
        m = quoted_regexp.match(line)
        if m is None:
            return None
        # Groups 1 and 5 are the key and value string bodies.
        return (process_unicode_escape_sequences(m.group(1)), process_unicode_escape_sequences(m.group(5)))

    cache = {}
    if not os.path.exists(filename):
        sys.stderr.write('Could not open mapping file [%s]\n' % filename)
        return cache
    loaded = 0
    # `with` guarantees the handle is closed even if parsing raises
    # (the previous version leaked the handle on error).
    with open(filename, 'rb') as f:
        for line_number, line in enumerate(f, start=1):
            line = line.strip()
            if line_number == 1 and line == b'# quoted-escaped-strings':
                # Optional format-marker header line.
                continue
            if line == b'' or line[0:1] == b'#':
                continue
            entry = parse_raw_line(line) if mapping_is_raw else parse_quoted_line(line)
            if entry is None:
                sys.stderr.write('Invalid file format in [%s], line %d\n' % (filename, line_number))
                continue
            cache[entry[0]] = entry[1]
            loaded += 1
    sys.stderr.write('Loaded %d %s\n' % (loaded, name))
    return cache
class ILAgent(nn.Module):
    """Imitation-learning (behavior-cloning) agent.

    Wraps a policy ``model`` and an AdamW optimizer that applies a separate
    learning rate (``encoder_lr``) to the visual-encoder backbone parameters.
    """

    def __init__(self, model: nn.Module, num_envs: int, num_mini_batch: int, lr: Optional[float]=None, encoder_lr: Optional[float]=None, eps: Optional[float]=None, max_grad_norm: Optional[float]=None, wd: Optional[float]=None) -> None:
        """Build the agent and its two-group AdamW optimizer.

        Args:
            model: the policy network to train.
            num_envs: number of parallel environments (stored, not used here).
            num_mini_batch: mini-batches per update epoch.
            lr: learning rate for non-backbone parameters.
            encoder_lr: learning rate for visual-encoder backbone parameters.
            eps: AdamW epsilon.
            max_grad_norm: gradient-clipping threshold used in before_step().
            wd: weight decay.
        """
        super().__init__()
        self.model = model
        self.num_mini_batch = num_mini_batch
        self.max_grad_norm = max_grad_norm
        self.num_envs = num_envs
        # Split parameters so the visual backbones can use their own LR.
        (visual_encoder_params, other_params) = ([], [])
        for (name, param) in model.named_parameters():
            if param.requires_grad:
                if (('net.visual_encoder.backbone' in name) or ('net.goal_visual_encoder.backbone' in name)):
                    visual_encoder_params.append(param)
                else:
                    other_params.append(param)
        self.optimizer = optim.AdamW([{'params': visual_encoder_params, 'lr': encoder_lr}, {'params': other_params, 'lr': lr}], lr=lr, eps=eps, weight_decay=wd)
        self.device = next(model.parameters()).device

    def forward(self, *x):
        """This module is not meant to be called directly; use update()."""
        raise NotImplementedError

    def update(self, rollouts) -> Tuple[float, Tensor]:
        """Run one behavior-cloning epoch over *rollouts*.

        Returns:
            Tuple of (mean cross-entropy loss over mini-batches, RNN hidden
            states of all mini-batches concatenated along the batch dim).
            (Fixed: the previous annotation claimed three floats.)
        """
        total_loss_epoch = 0.0
        profiling_wrapper.range_push('BC.update epoch')
        data_generator = rollouts.recurrent_generator(self.num_mini_batch)
        # reduction='none' keeps per-step losses so they can be
        # inflection-weighted before averaging.
        cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
        hidden_states = []
        for sample in data_generator:
            (obs_batch, recurrent_hidden_states_batch, actions_batch, prev_actions_batch, masks_batch, idx) = sample
            (logits, rnn_hidden_states, distribution_entropy) = self.model(obs_batch, recurrent_hidden_states_batch, prev_actions_batch, masks_batch)
            (T, N, _) = actions_batch.shape
            logits = logits.view(T, N, -1)
            # CrossEntropyLoss expects (N, C, ...) so move the class dim.
            action_loss = cross_entropy_loss(logits.permute(0, 2, 1), actions_batch.squeeze(-1))
            self.optimizer.zero_grad()
            # Weight steps by inflection weight (up-weights decision points).
            inflections_batch = obs_batch['inflection_weight']
            total_loss = ((inflections_batch * action_loss).sum(0) / inflections_batch.sum(0)).mean()
            self.before_backward(total_loss)
            total_loss.backward()
            self.after_backward(total_loss)
            self.before_step()
            self.optimizer.step()
            self.after_step()
            total_loss_epoch += total_loss.item()
            hidden_states.append(rnn_hidden_states)
        profiling_wrapper.range_pop()
        hidden_states = torch.cat(hidden_states, dim=1)
        total_loss_epoch /= self.num_mini_batch
        return (total_loss_epoch, hidden_states)

    def before_backward(self, loss: Tensor) -> None:
        """Hook called just before loss.backward(); no-op by default."""
        pass

    def after_backward(self, loss: Tensor) -> None:
        """Hook called just after loss.backward(); no-op by default."""
        pass

    def before_step(self) -> None:
        """Clip gradients to max_grad_norm before the optimizer step."""
        nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)

    def after_step(self) -> None:
        """Hook called just after the optimizer step; no-op by default."""
        pass
def linear_poisson(solver_parameters, mesh_num, porder):
    """Solve a Poisson-type problem on the unit square and return the L2 error.

    Uses a CG(porder) discretization with an EquationBC weakly imposing
    u = g on boundary segment 1.  The manufactured exact solution is
    cos(2*pi*x)*cos(2*pi*y).

    Args:
        solver_parameters: solver options dict forwarded to solve().
        mesh_num: number of cells per side of the UnitSquareMesh.
        porder: polynomial degree of the CG function space.

    Returns:
        L2 norm of the difference between the computed and exact solutions.
    """
    mesh = UnitSquareMesh(mesh_num, mesh_num)
    V = FunctionSpace(mesh, 'CG', porder)
    u = TrialFunction(V)
    v = TestFunction(V)
    f = Function(V)
    (x, y) = SpatialCoordinate(mesh)
    # Source term: f = -8*pi^2 * cos(2*pi*x) * cos(2*pi*y).
    f.interpolate((((((- 8.0) * pi) * pi) * cos(((x * pi) * 2))) * cos(((y * pi) * 2))))
    a = ((- inner(grad(u), grad(v))) * dx)
    L = (inner(f, v) * dx)
    g = Function(V)
    # Boundary data equals the exact solution.
    g.interpolate((cos(((2 * pi) * x)) * cos(((2 * pi) * y))))
    u_ = Function(V)
    # Weak (equation-type) boundary condition on marker 1: u = g on ds(1).
    bc1 = EquationBC(((inner(u, v) * ds(1)) == (inner(g, v) * ds(1))), u_, 1)
    solve((a == L), u_, bcs=[bc1], solver_parameters=solver_parameters)
    # Reuse f to hold the exact solution for the error computation.
    f.interpolate((cos(((x * pi) * 2)) * cos(((y * pi) * 2))))
    return sqrt(assemble((inner((u_ - f), (u_ - f)) * dx)))
def all_are_valid_links(links):
    """Return True only when EVERY link in *links* responds successfully.

    On the first failing link a Streamlit warning is shown and the script is
    halted via st.stop().  (Fixed: the previous version returned True right
    after the first successful link, never checking the rest; it also used a
    bare ``except:`` that swallowed KeyboardInterrupt and st.stop()'s own
    control-flow exception.)
    """
    for link in links:
        try:
            res = requests.get(link)
            res.raise_for_status()
        except requests.exceptions.RequestException:
            st.warning(f'''The following link does not respond. Please check if it is correct and try again.
{link}''', icon='')
            st.stop()
    return True
@pytest.mark.django_db
def test_spending_by_transaction_count(monkeypatch, transaction_type_data, elasticsearch_transaction_index):
    """Keyword search should count one matching transaction per award category.

    (Fixed: the decorator had been truncated to ``.django_db``; restored as
    ``@pytest.mark.django_db``.)
    """
    setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
    request_data = {'filters': {'keywords': ['pop tart']}}
    results = spending_by_transaction_count(request_data)
    expected_results = {'contracts': 1, 'grants': 1, 'idvs': 1, 'loans': 1, 'direct_payments': 1, 'other': 1}
    assert (results == expected_results)
def action_to_str(act):
    """Render an OpenFlow 1.2 action object as a human-readable string."""
    kind = act.cls_action_type
    if kind == ofproto_v1_2.OFPAT_OUTPUT:
        return 'OUTPUT:' + str(UTIL.ofp_port_to_user(act.port))
    if kind == ofproto_v1_2.OFPAT_COPY_TTL_OUT:
        return 'COPY_TTL_OUT'
    if kind == ofproto_v1_2.OFPAT_COPY_TTL_IN:
        return 'COPY_TTL_IN'
    if kind == ofproto_v1_2.OFPAT_SET_MPLS_TTL:
        return 'SET_MPLS_TTL:' + str(act.mpls_ttl)
    if kind == ofproto_v1_2.OFPAT_DEC_MPLS_TTL:
        return 'DEC_MPLS_TTL'
    if kind == ofproto_v1_2.OFPAT_PUSH_VLAN:
        return 'PUSH_VLAN:' + str(act.ethertype)
    if kind == ofproto_v1_2.OFPAT_POP_VLAN:
        return 'POP_VLAN'
    if kind == ofproto_v1_2.OFPAT_PUSH_MPLS:
        return 'PUSH_MPLS:' + str(act.ethertype)
    if kind == ofproto_v1_2.OFPAT_POP_MPLS:
        return 'POP_MPLS:' + str(act.ethertype)
    if kind == ofproto_v1_2.OFPAT_SET_QUEUE:
        return 'SET_QUEUE:' + str(UTIL.ofp_queue_to_user(act.queue_id))
    if kind == ofproto_v1_2.OFPAT_GROUP:
        return 'GROUP:' + str(UTIL.ofp_group_to_user(act.group_id))
    if kind == ofproto_v1_2.OFPAT_SET_NW_TTL:
        return 'SET_NW_TTL:' + str(act.nw_ttl)
    if kind == ofproto_v1_2.OFPAT_DEC_NW_TTL:
        return 'DEC_NW_TTL'
    if kind == ofproto_v1_2.OFPAT_SET_FIELD:
        return 'SET_FIELD: {%s:%s}' % (act.key, act.value)
    # Any type not recognized above.
    return 'UNKNOWN'
@pytest.mark.parametrize('uri, expected', (('eth://block/byhash/0x113f05289c685eb5b87d433c3e09ec2bfa51d6472cc37108d03b0113b11e3080?score=11', ('0x113f05289c685eb5b87d433c3e09ec2bfa51d6472cc37108d03b0113b11e3080', 78, 11)), ('eth://block/byhash/0x113f05289c685eb5b87d433c3e09ec2bfa51d6472cc37108d03b0113b11e3080?score=1,1', ('0x113f05289c685eb5b87d433c3e09ec2bfa51d6472cc37108d03b0113b11e3080', 78, 11)), ('eth://block/byhash/0x113f05289c685eb5b87d433c3e09ec2bfa51d6472cc37108d03b0113b11e3080?score=1 1', ('0x113f05289c685eb5b87d433c3e09ec2bfa51d6472cc37108d03b0113b11e3080', 78, 11))))
def test_parse_checkpoint(uri, expected):
    """parse_checkpoint_uri extracts hash and score, tolerating ',' / ' ' digit separators.

    (Fixed: the decorator had been truncated to ``.parametrize``; restored as
    ``@pytest.mark.parametrize``.)
    """
    (block_hash, block_number, block_score) = expected
    checkpoint = parse_checkpoint_uri(uri, MAINNET_NETWORK_ID)
    assert (encode_hex(checkpoint.block_hash) == block_hash)
    assert (checkpoint.score == block_score)
    # NOTE(review): block_number (78) is unpacked but never asserted --
    # presumably checkpoint.block_number should be checked too; confirm.
class RuleEngineTest(ForsetiTestCase):
    """Tests for fre.FirewallRulesEngine rule-book building and violation detection."""

    def setUp(self):
        """Create the project/folder/org resources and ancestry map shared by tests."""
        project0 = fre.resource_util.create_resource(resource_id='test_project', resource_type='project')
        project1 = fre.resource_util.create_resource(resource_id='project1', resource_type='project')
        project2 = fre.resource_util.create_resource(resource_id='project2', resource_type='project')
        project3 = fre.resource_util.create_resource(resource_id='project3', resource_type='project')
        exception = fre.resource_util.create_resource(resource_id='honeypot_exception', resource_type='project')
        folder1 = fre.resource_util.create_resource(resource_id='folder1', resource_type='folder')
        folder2 = fre.resource_util.create_resource(resource_id='test_instances', resource_type='folder')
        folder3 = fre.resource_util.create_resource(resource_id='folder3', resource_type='folder')
        folder4 = fre.resource_util.create_resource(resource_id='folder4', resource_type='folder')
        org = fre.resource_util.create_resource(resource_id='org', resource_type='organization')
        self.project_resource_map = {'test_project': project0, 'project1': project1, 'project2': project2, 'project3': project3, 'honeypot_exception': exception}
        self.ancestry = {project0: [folder1, org], project1: [folder2, org], project2: [folder4, folder3, org], project3: [folder3, org], exception: [folder3, org]}

    def test_build_rule_book_from_yaml(self):
        """The YAML rule file should produce the expected rule-book contents."""
        rules_local_path = get_datafile_path(__file__, 'firewall_test_rules.yaml')
        rules_engine = fre.FirewallRulesEngine(rules_file_path=rules_local_path)
        rules_engine.build_rule_book({})
        self.assertEqual(4, len(rules_engine.rule_book.rules_map))
        self.assertEqual(1, len(rules_engine.rule_book.rule_groups_map))
        self.assertEqual(6, len(rules_engine.rule_book.org_policy_rules_map))

    # NOTE(review): the decorator had been truncated to `.expand(...)`;
    # restored as parameterized.expand -- confirm the import name in this file.
    @parameterized.expand([
        ('test_project',
         {'name': 'policy1', 'full_name': 'organization/org/folder/folder1/project/project0/firewall/policy1/', 'network': 'network1', 'direction': 'ingress', 'allowed': [{'IPProtocol': 'tcp', 'ports': ['1', '3389']}], 'sourceRanges': ['0.0.0.0/0'], 'targetTags': ['linux']},
         [{'resource_type': resource_mod.ResourceType.FIREWALL_RULE, 'resource_id': None, 'resource_name': 'policy1', 'full_name': 'organization/org/folder/folder1/project/project0/firewall/policy1/', 'rule_id': 'no_rdp_to_linux', 'violation_type': 'FIREWALL_BLACKLIST_VIOLATION', 'policy_names': ['policy1'], 'recommended_actions': {'DELETE_FIREWALL_RULES': ['policy1']}, 'resource_data': ['{"allowed": [{"IPProtocol": "tcp", "ports": ["1", "3389"]}], "direction": "INGRESS", "name": "policy1", "network": "network1", "sourceRanges": ["0.0.0.0/0"], "targetTags": ["linux"]}']}]),
        ('project1',
         {'name': 'policy1', 'full_name': 'organization/org/folder/test_instances/project/project1/firewall/policy1/', 'network': 'network1', 'direction': 'ingress', 'allowed': [{'IPProtocol': 'tcp', 'ports': ['22']}], 'sourceRanges': ['11.0.0.1'], 'targetTags': ['test']},
         [{'resource_type': resource_mod.ResourceType.FIREWALL_RULE, 'resource_id': None, 'resource_name': 'policy1', 'full_name': 'organization/org/folder/test_instances/project/project1/firewall/policy1/', 'rule_id': 'test_instances_rule', 'violation_type': 'FIREWALL_WHITELIST_VIOLATION', 'policy_names': ['policy1'], 'recommended_actions': {'DELETE_FIREWALL_RULES': ['policy1']}, 'resource_data': ['{"allowed": [{"IPProtocol": "tcp", "ports": ["22"]}], "direction": "INGRESS", "name": "policy1", "network": "network1", "sourceRanges": ["11.0.0.1"], "targetTags": ["test"]}']}]),
        ('honeypot_exception',
         {'name': 'policy1', 'full_name': 'organization/org/folder/folder1/project/project0/firewall/policy1/', 'network': 'network1', 'direction': 'ingress', 'allowed': [{'IPProtocol': 'tcp', 'ports': ['1', '3389']}], 'sourceRanges': ['0.0.0.0/0'], 'targetTags': ['linux']},
         [])])
    def test_find_violations_from_yaml_rule_book(self, project, policy_dict, expected_violations_dicts):
        """Violations found for each policy should match the parameterized expectations."""
        rules_local_path = get_datafile_path(__file__, 'firewall_test_rules.yaml')
        rules_engine = fre.FirewallRulesEngine(rules_file_path=rules_local_path)
        rules_engine.build_rule_book({})
        resource = self.project_resource_map[project]
        policy = fre.firewall_rule.FirewallRule.from_dict(policy_dict, validate=True)
        rules_engine.rule_book.org_res_rel_dao = mock.Mock()
        rules_engine.rule_book.org_res_rel_dao.find_ancestors.side_effect = (lambda x, y: self.ancestry[x])
        violations = rules_engine.find_violations(resource, [policy])
        expected_violations = [fre.RuleViolation(**v) for v in expected_violations_dicts]
        self.assert_rule_violation_lists_equal(expected_violations, violations)

    def assert_rule_violation_lists_equal(self, expected, violations):
        """Assert both collections contain the same violations, ignoring order.

        assertCountEqual is already order-insensitive, so the previous no-op
        sorted() calls (whose return values were discarded) were removed.
        """
        self.assertCountEqual(expected, violations)
@torch.no_grad()
def demo_clip_features(text_query: str) -> None:
    """Visualize per-pixel CLIP-feature similarity of each demo image to a text query.

    Saves the figure to ``demo_clip_features_<query>.png`` and shows it.
    (Fixed: the decorator had been truncated to ``_grad()`` -- restored as
    ``@torch.no_grad()`` -- and the matmul operator was missing from the
    similarity computation.)
    """
    device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
    clip_embs = extract_clip_features(image_paths, device)
    # L2-normalize so the dot product below is a cosine similarity.
    clip_embs /= clip_embs.norm(dim=(- 1), keepdim=True)
    (model, _) = clip.load(CLIPArgs.model_name, device=device)
    tokens = tokenize(text_query).to(device)
    text_embs = model.encode_text(tokens)
    text_embs /= text_embs.norm(dim=(- 1), keepdim=True)
    # Cosine similarity via matrix product of the normalized embeddings.
    sims = clip_embs @ text_embs.T
    sims = sims.squeeze()
    plt.figure()
    cmap = plt.get_cmap('turbo')
    for (idx, (image_path, sim)) in enumerate(zip(image_paths, sims)):
        # Top row: the original image.
        plt.subplot(2, len(image_paths), (idx + 1))
        plt.imshow(Image.open(image_path))
        plt.title(os.path.basename(image_path))
        plt.axis('off')
        # Bottom row: similarity heatmap, min-max normalized per image.
        plt.subplot(2, len(image_paths), ((len(image_paths) + idx) + 1))
        sim_norm = ((sim - sim.min()) / (sim.max() - sim.min()))
        heatmap = cmap(sim_norm.cpu().numpy())
        plt.imshow(heatmap)
        plt.axis('off')
    plt.tight_layout()
    plt.suptitle(f'Similarity to language query "{text_query}"')
    text_label = text_query.replace(' ', '-')
    plt_fname = f'demo_clip_features_{text_label}.png'
    plt.savefig(plt_fname)
    print(f'Saved plot to {plt_fname}')
    plt.show()
class OptionPlotoptionsStreamgraphStatesSelect(Options):
    """Option wrapper for Highcharts `plotOptions.streamgraph.states.select`.

    NOTE(review): as written, each setter definition shadowed its same-named
    getter (duplicate def), making every getter unreachable -- the
    `@property` / `.setter` decorators were evidently stripped; restored here.
    Confirm against the generating template.
    """

    @property
    def animation(self) -> 'OptionPlotoptionsStreamgraphStatesSelectAnimation':
        """Animation settings for the select state."""
        return self._config_sub_data('animation', OptionPlotoptionsStreamgraphStatesSelectAnimation)

    @property
    def enabled(self):
        """Whether the select state is enabled. Defaults to True."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def halo(self) -> 'OptionPlotoptionsStreamgraphStatesSelectHalo':
        """Halo settings for the select state."""
        return self._config_sub_data('halo', OptionPlotoptionsStreamgraphStatesSelectHalo)

    @property
    def lineWidth(self):
        """Line width in the select state. Defaults to None (inherit)."""
        return self._config_get(None)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineWidthPlus(self):
        """Pixels added to the base line width in this state. Defaults to 1."""
        return self._config_get(1)

    @lineWidthPlus.setter
    def lineWidthPlus(self, num: float):
        self._config(num, js_type=False)

    @property
    def marker(self) -> 'OptionPlotoptionsStreamgraphStatesSelectMarker':
        """Marker settings for the select state."""
        return self._config_sub_data('marker', OptionPlotoptionsStreamgraphStatesSelectMarker)
class OptionSeriesPackedbubbleSonificationDefaultinstrumentoptionsPointgrouping(Options):
    """Option wrapper for packedbubble sonification point-grouping settings.

    NOTE(review): each setter definition shadowed its same-named getter
    (duplicate def); the `@property` / `.setter` decorators were evidently
    stripped and are restored here. Confirm against the generating template.
    """

    @property
    def algorithm(self):
        """Grouping algorithm. Defaults to 'minmax'."""
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether point grouping is enabled. Defaults to True."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        """Timespan (ms) of each group. Defaults to 15."""
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Point property the grouping operates on. Defaults to 'y'."""
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
@pytest.mark.parametrize('val, expected', (('0', 0), ('-1', (- 1)), ('255', 255), ('0x0', ValueError), ('0x1', ValueError), ('1.1', ValueError), ('a', ValueError)))
def test_to_int_text(val, expected):
    """to_int(text=...) parses decimal strings and rejects hex/float/garbage.

    (Fixed: the decorator had been truncated to ``.parametrize``; restored as
    ``@pytest.mark.parametrize``.)
    """
    if isinstance(expected, type):
        # Expected value is an exception class: the call must raise it.
        with pytest.raises(expected):
            to_int(text=val)
    else:
        assert (to_int(text=val) == expected)
class ParanoidSyncHandler(THBEventHandler):
    """Consistency-check handler: after every action, verify that the local
    per-player hand-card counts agree with the server's synchronized values."""
    # Only fires on the 'action_after' event.
    interested = ['action_after']
    def handle(self, evt_type, arg):
        g = self.game
        if hasattr(g, 'players'):
            # Local view: number of cards each player currently holds.
            me = [len(ch.cards) for ch in g.players]
            # Round-trip the same values through the sync primitive.
            svr = sync_primitive(me, g.players)
            # Any divergence means client/server state desync; fail loudly
            # with both views attached for debugging.
            assert (me == svr), (me, svr)
        return arg
def data(curve, surface, request):
    """Parametrized fixture: return (x, y, xi, smooth, spline_class) for the case.

    'univariate' uses the curve data with a dense evaluation grid;
    'ndgrid' uses the surface data evaluated on its own grid.
    """
    if request.param == 'univariate':
        x, y = curve
        xi = np.linspace(x[0], x[-1], 150)
        return x, y, xi, 0.85, CubicSmoothingSpline
    if request.param == 'ndgrid':
        x, y = surface
        return x, y, x, [0.85, 0.85], NdGridCubicSmoothingSpline
def _get_unique_build_json(output_evm: Dict, contract_node: Any, stmt_nodes: Dict, branch_nodes: Dict, has_fallback: bool) -> Dict:
    """Assemble the per-contract build artifact dict (bytecode, coverage maps,
    dependency aliases, and source paths) from the compiler's EVM output."""
    source_paths = {
        str(node.contract_id): node.parent().absolutePath
        for node in [contract_node] + contract_node.dependencies
    }
    bytecode = _format_link_references(output_evm)
    deployed = output_evm['deployedBytecode']
    # Strip the metadata hash so coverage offsets are stable across builds.
    runtime_code = _remove_metadata(deployed['object'])
    instruction_count = len(runtime_code) // 2
    (pc_map, statement_map, branch_map) = _generate_coverage_data(deployed['sourceMap'], deployed['opcodes'], contract_node, stmt_nodes, branch_nodes, has_fallback, instruction_count)
    dependencies = [
        _get_alias(node.name, node.parent().absolutePath)
        for node in contract_node.dependencies
        if node.nodeType == 'ContractDefinition'
    ]
    return {
        'allSourcePaths': source_paths,
        'bytecode': bytecode,
        'bytecodeSha1': sha1(_remove_metadata(bytecode).encode()).hexdigest(),
        'coverageMap': {'statements': statement_map, 'branches': branch_map},
        'dependencies': dependencies,
        'offset': contract_node.offset,
        'pcMap': pc_map,
        'type': contract_node.contractKind,
    }
def _merge_splits(splits: Iterable[str], separator: str, chunk_size: int, chunk_overlap: int, length_function: Callable[[str], int]) -> List[str]:
    """Greedily merge *splits* (joined by *separator*) into chunks of at most
    *chunk_size* length, keeping up to *chunk_overlap* of trailing content as
    overlap between consecutive chunks.

    Args:
        splits: the pieces to merge, in order.
        separator: string inserted between pieces when joining.
        chunk_size: target maximum chunk length (per length_function).
        chunk_overlap: maximum length carried over between chunks.
        length_function: measures the length of a string (e.g. len or a
            tokenizer-based counter).

    Returns:
        List of joined chunk strings (None results from _join_docs are dropped).
    """
    separator_len = length_function(separator)
    docs = []
    current_doc: List[str] = []
    total = 0
    for d in splits:
        _len = length_function(d)
        # Would adding this split (plus a separator, if needed) overflow?
        if (total + _len + (separator_len if len(current_doc) > 0 else 0)) > chunk_size:
            if total > chunk_size:
                # Fixed: this previously referenced self._chunk_size, which
                # does not exist in a module-level function (NameError).
                logging.warning(f'Created a chunk of size {total}, which is longer than the specified {chunk_size}')
            if len(current_doc) > 0:
                doc = _join_docs(current_doc, separator)
                if doc is not None:
                    docs.append(doc)
                # Drop pieces from the front until we are within the overlap
                # budget, or until the new split fits in the remaining space.
                while (total > chunk_overlap) or ((total + _len + (separator_len if len(current_doc) > 0 else 0)) > chunk_size and total > 0):
                    total -= length_function(current_doc[0]) + (separator_len if len(current_doc) > 1 else 0)
                    current_doc = current_doc[1:]
        current_doc.append(d)
        total += _len + (separator_len if len(current_doc) > 1 else 0)
    # Flush whatever remains as the final chunk.
    doc = _join_docs(current_doc, separator)
    if doc is not None:
        docs.append(doc)
    return docs
class DispatchProxyServiceServicer(object):
    """Base gRPC servicer for the DispatchProxyService.

    Generated-style stub: every RPC reports UNIMPLEMENTED until a subclass
    overrides it. (Keep this class in sync with the service's .proto file.)
    """
    def GetDispatch(self, request, context):
        """RPC stub; override in a subclass to implement."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetAttr(self, request, context):
        """RPC stub; override in a subclass to implement."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def SetAttr(self, request, context):
        """RPC stub; override in a subclass to implement."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CallMethod(self, request, context):
        """RPC stub; override in a subclass to implement."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def ConnectEvent(self, request_iterator, context):
        """Streaming RPC stub; override in a subclass to implement."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
class OptionZaxisPlotbandsLabel(Options):
    """Option wrapper for Highcharts `zAxis.plotBands.label`.

    NOTE(review): each setter definition shadowed its same-named getter
    (duplicate def), making every getter unreachable -- the `@property` /
    `.setter` decorators were evidently stripped; restored here. Confirm
    against the generating template.
    """

    @property
    def align(self):
        """Horizontal alignment of the label. Defaults to 'center'."""
        return self._config_get('center')

    @align.setter
    def align(self, text: str):
        self._config(text, js_type=False)

    @property
    def rotation(self):
        """Label rotation in degrees. Defaults to 0."""
        return self._config_get(0)

    @rotation.setter
    def rotation(self, num: float):
        self._config(num, js_type=False)

    @property
    def style(self):
        """CSS styles for the label. Defaults to None."""
        return self._config_get(None)

    @style.setter
    def style(self, value: Any):
        self._config(value, js_type=False)

    @property
    def text(self):
        """Label text. Defaults to None."""
        return self._config_get(None)

    @text.setter
    def text(self, text: str):
        self._config(text, js_type=False)

    @property
    def textAlign(self):
        """Text alignment within the label. Defaults to None."""
        return self._config_get(None)

    @textAlign.setter
    def textAlign(self, text: str):
        self._config(text, js_type=False)

    @property
    def useHTML(self):
        """Whether to render the label as HTML. Defaults to False."""
        return self._config_get(False)

    @useHTML.setter
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def verticalAlign(self):
        """Vertical alignment of the label. Defaults to 'top'."""
        return self._config_get('top')

    @verticalAlign.setter
    def verticalAlign(self, text: str):
        self._config(text, js_type=False)

    @property
    def x(self):
        """Horizontal pixel offset of the label. Defaults to None."""
        return self._config_get(None)

    @x.setter
    def x(self, num: float):
        self._config(num, js_type=False)

    @property
    def y(self):
        """Vertical pixel offset of the label. Defaults to None."""
        return self._config_get(None)

    @y.setter
    def y(self, num: float):
        self._config(num, js_type=False)
def prepare_timestamp_micros(data, schema):
    """Encode a datetime as Avro timestamp-micros (microseconds since epoch).

    Non-datetime values are passed through unchanged (assumed already
    encoded by the caller).
    """
    if isinstance(data, datetime.datetime):
        if (data.tzinfo is not None):
            # Timezone-aware: subtract the aware epoch and convert the
            # resulting timedelta to whole microseconds.
            delta = (data - epoch)
            return (((((delta.days * 24) * 3600) + delta.seconds) * MCS_PER_SECOND) + delta.microseconds)
        if is_windows:
            # Naive datetime on Windows: use naive-epoch arithmetic instead
            # of time.mktime.  NOTE(review): presumably because mktime is
            # unreliable for out-of-range dates on Windows -- confirm; note
            # this branch treats the naive value as UTC, while the POSIX
            # branch below interprets it in local time.
            delta = (data - epoch_naive)
            return (((((delta.days * 24) * 3600) + delta.seconds) * MCS_PER_SECOND) + delta.microseconds)
        else:
            # Naive datetime on POSIX: interpret in local time via mktime,
            # then re-attach the sub-second microseconds.
            return ((int(time.mktime(data.timetuple())) * MCS_PER_SECOND) + data.microsecond)
    else:
        return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.