Dataset schema (column, dtype, observed value range):

| Column | Type | Values |
|---|---|---|
| `hexsha` | string | length 40 |
| `size` | int64 | 4 to 996k |
| `ext` | string | 8 classes |
| `lang` | string | 1 class |
| `max_stars_repo_path` | string | length 4 to 245 |
| `max_stars_repo_name` | string | length 6 to 130 |
| `max_stars_repo_head_hexsha` | string | length 40 |
| `max_stars_repo_licenses` | list | length 1 to 10 |
| `max_stars_count` | int64 | 1 to 191k, nullable |
| `max_stars_repo_stars_event_min_datetime` | string | length 24, nullable |
| `max_stars_repo_stars_event_max_datetime` | string | length 24, nullable |
| `max_issues_repo_path` | string | length 4 to 245 |
| `max_issues_repo_name` | string | length 6 to 130 |
| `max_issues_repo_head_hexsha` | string | length 40 |
| `max_issues_repo_licenses` | list | length 1 to 10 |
| `max_issues_count` | int64 | 1 to 67k, nullable |
| `max_issues_repo_issues_event_min_datetime` | string | length 24, nullable |
| `max_issues_repo_issues_event_max_datetime` | string | length 24, nullable |
| `max_forks_repo_path` | string | length 4 to 245 |
| `max_forks_repo_name` | string | length 6 to 130 |
| `max_forks_repo_head_hexsha` | string | length 40 |
| `max_forks_repo_licenses` | list | length 1 to 10 |
| `max_forks_count` | int64 | 1 to 105k, nullable |
| `max_forks_repo_forks_event_min_datetime` | string | length 24, nullable |
| `max_forks_repo_forks_event_max_datetime` | string | length 24, nullable |
| `content` | string | length 4 to 996k |
| `avg_line_length` | float64 | 1.33 to 58.2k |
| `max_line_length` | int64 | 2 to 323k |
| `alphanum_fraction` | float64 | 0 to 0.97 |
| `content_no_comment` | string | length 0 to 946k |
| `is_comment_constant_removed` | bool | 2 classes |
| `is_sharp_comment_removed` | bool | 1 class |
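For context, a sketch of how records with this schema could be inspected with the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, since this dump does not name the dataset:

```python
from datasets import load_dataset

# "org/python-repo-files" is a made-up identifier; substitute the real dataset id.
ds = load_dataset("org/python-repo-files", split="train", streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
```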
**Record 1: `atcoder/ABC 192/D.py`**

- hexsha: `79023f70a4d2202d23dae4ed2e0e420489b861ce` · size: 854 · ext: `py` · lang: Python
- repo (stars/issues/forks columns all agree): `ApocalypseMac/CP` @ `b2db9aa5392a362dc0d979411788267ed9a5ff1d`, licenses `["MIT"]`
- `max_stars_count`, `max_issues_count`, `max_forks_count` and all event datetimes: null

content:
```python
x = int(input())
m = int(input())
if x < 10:
    # A single digit has value x regardless of base, so the answer is 0 or 1.
    if x <= m:
        print(1)
    else:
        print(0)
else:
    # Split x into its decimal digits.
    xarr = []
    while x:
        xarr = [x % 10] + xarr
        x //= 10
    n = len(xarr)
    l = max(xarr) + 1  # smallest base in which all digits are valid

    def check(base, xarr):
        # Quick lower bound using only the leading digit.
        ans = xarr[0] * (base ** (n - 1))
        if ans > m:
            return False
        return True

    def check1(base, xarr):
        # Full evaluation of the digits in the given base, with early exit.
        ans = 0
        for i in range(n):
            ans += xarr[i] * base ** (n - 1 - i)
            if ans > m:
                return False
        return True

    # Grow r exponentially until it safely bounds the answer from above.
    r = 1
    while check(2 * r, xarr):
        r *= 2
    r *= 2
    # Binary search for the first base whose value exceeds m.
    ll, rr = l, r
    while ll < rr:
        mid = ll + (rr - ll) // 2
        if check1(mid, xarr):
            ll = mid + 1
        else:
            rr = mid
    if ll - 1 < l:
        print(0)
    else:
        print(ll - l)
```
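For context, a tiny sanity check (not part of the record) of the digit-reinterpretation idea the solution relies on: the decimal digits of `x` are re-read as digits in a candidate base, and the solution counts bases whose value does not exceed `m`.

```python
# Illustrative only: the digits "2 2" read in base 8 have value 2*8 + 2 = 18.
digits = [2, 2]
base = 8
value = sum(d * base ** (len(digits) - 1 - i) for i, d in enumerate(digits))
assert value == 18
```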
- avg_line_length: 18.977778 · max_line_length: 48 · alphanum_fraction: 0.375878
- content_no_comment: identical to the content above (the source file contains no comments); omitted here as a verbatim duplicate
- is_comment_constant_removed: true · is_sharp_comment_removed: true
**Record 2: `mmseg/models/utils/inverted_residual.py`**

- hexsha: `79024112a283dc48b7282c5fb6803acc421515ba` · size: 7,213 · ext: `py` · lang: Python
- repo (stars/issues/forks columns all agree): `vietawake/mmSegmentation` @ `1f643d6d81708ebf5726c48f66d02c70fe99fe00`, licenses `["Apache-2.0"]`
- `max_stars_count`, `max_issues_count`, `max_forks_count` and all event datetimes: null

content:
```python
from mmcv.cnn import ConvModule
from torch import nn
from torch.utils import checkpoint as cp

from .se_layer import SELayer


class InvertedResidual(nn.Module):
    """InvertedResidual block for MobileNetV2.

    Args:
        in_channels (int): The input channels of the InvertedResidual block.
        out_channels (int): The output channels of the InvertedResidual block.
        stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Adjusts number of channels of the hidden layer
            in InvertedResidual by this amount.
        dilation (int): Dilation rate of depthwise conv. Default: 1
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expand_ratio,
                 dilation=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 with_cp=False):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2], f'stride must in [1, 2]. ' \
            f'But received {stride}.'
        self.with_cp = with_cp
        self.use_res_connect = self.stride == 1 and in_channels == out_channels
        hidden_dim = int(round(in_channels * expand_ratio))

        layers = []
        if expand_ratio != 1:
            layers.append(
                ConvModule(
                    in_channels=in_channels,
                    out_channels=hidden_dim,
                    kernel_size=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        layers.extend([
            ConvModule(
                in_channels=hidden_dim,
                out_channels=hidden_dim,
                kernel_size=3,
                stride=stride,
                padding=dilation,
                dilation=dilation,
                groups=hidden_dim,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
            ConvModule(
                in_channels=hidden_dim,
                out_channels=out_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None)
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):

        def _inner_forward(x):
            if self.use_res_connect:
                return x + self.conv(x)
            else:
                return self.conv(x)

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out


class InvertedResidualV3(nn.Module):
    """Inverted Residual Block for MobileNetV3.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
            Default: 3.
        stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
            se layer.
        with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same with in_channels. Default: True.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.

    Returns:
        Tensor: The output tensor.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 kernel_size=3,
                 stride=1,
                 se_cfg=None,
                 with_expand_conv=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 with_cp=False):
        super(InvertedResidualV3, self).__init__()
        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        assert stride in [1, 2]
        self.with_cp = with_cp
        self.with_se = se_cfg is not None
        self.with_expand_conv = with_expand_conv

        if self.with_se:
            assert isinstance(se_cfg, dict)
        if not self.with_expand_conv:
            assert mid_channels == in_channels

        if self.with_expand_conv:
            self.expand_conv = ConvModule(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        self.depthwise_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=mid_channels,
            conv_cfg=dict(
                type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if self.with_se:
            self.se = SELayer(**se_cfg)
        self.linear_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    def forward(self, x):

        def _inner_forward(x):
            out = x

            if self.with_expand_conv:
                out = self.expand_conv(out)

            out = self.depthwise_conv(out)

            if self.with_se:
                out = self.se(out)

            out = self.linear_conv(out)

            if self.with_res_shortcut:
                return x + out
            else:
                return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out
```
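For context, a minimal usage sketch of the block above (assuming `torch` and `mmcv` are installed; the shapes are illustrative, not taken from the record):

```python
import torch
from mmseg.models.utils.inverted_residual import InvertedResidual

# Stride-1 block with matching channels, so the residual connection is used.
block = InvertedResidual(in_channels=32, out_channels=32, stride=1, expand_ratio=6)
x = torch.randn(2, 32, 56, 56)
out = block(x)
print(out.shape)  # torch.Size([2, 32, 56, 56])
```

`InvertedResidualV3` is driven the same way, with `mid_channels` given explicitly instead of an `expand_ratio`.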
- avg_line_length: 34.511962 · max_line_length: 80 · alphanum_fraction: 0.537224
- content_no_comment: the same module with all docstrings and comments stripped; omitted here as a near-verbatim duplicate of the content above
- is_comment_constant_removed: true · is_sharp_comment_removed: true
**Record 3: `reframe/core/meta.py`**

- hexsha: `7902418e48bfb94528b0786858720b6cafedce79` · size: 23,068 · ext: `py` · lang: Python
- max_stars repo: `ChristopherBignamini/reframe` @ `164bae6084dd9590f232f99f5cbeb0beed7ace26`, licenses `["BSD-3-Clause"]`, `max_stars_count`: null
- max_issues repo: `ChristopherBignamini/reframe` @ `164bae6084dd9590f232f99f5cbeb0beed7ace26`, licenses `["BSD-3-Clause"]`, `max_issues_count`: 1, issue events: 2021-05-18T17:38:12.000Z (min and max)
- max_forks repo: `giordano/reframe` @ `5b17b952f05dcc013888149c82b12d3f69306917`, licenses `["BSD-3-Clause"]`, `max_forks_count`: null

content:
```python
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
#

#
# Meta-class for creating regression tests.
#

import functools
import types

import reframe.core.namespaces as namespaces
import reframe.core.parameters as parameters
import reframe.core.variables as variables
import reframe.core.hooks as hooks
import reframe.utility as utils

from reframe.core.exceptions import ReframeSyntaxError
from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression


class RegressionTestMeta(type):

    class MetaNamespace(namespaces.LocalNamespace):
        '''Custom namespace to control the cls attribute assignment.

        Regular Python class attributes can be overridden by either
        parameters or variables respecting the order of execution.
        A variable or a parameter may not be declared more than once in the
        same class body. Overriding a variable with a parameter or the other
        way around has an undefined behavior. A variable's value may be
        updated multiple times within the same class body. A parameter's
        value cannot be updated more than once within the same class body.
        '''

        def __setitem__(self, key, value):
            if isinstance(value, variables.TestVar):
                # Insert the attribute in the variable namespace
                try:
                    self['_rfm_local_var_space'][key] = value
                    value.__set_name__(self, key)
                except KeyError:
                    raise ReframeSyntaxError(
                        f'variable {key!r} is already declared'
                    ) from None

                # Override the regular class attribute (if present) and return
                self._namespace.pop(key, None)
                return
            elif isinstance(value, parameters.TestParam):
                # Insert the attribute in the parameter namespace
                try:
                    self['_rfm_local_param_space'][key] = value
                except KeyError:
                    raise ReframeSyntaxError(
                        f'parameter {key!r} is already declared in this class'
                    ) from None

                # Override the regular class attribute (if present) and return
                self._namespace.pop(key, None)
                return
            elif key in self['_rfm_local_param_space']:
                raise ReframeSyntaxError(
                    f'cannot override parameter {key!r}'
                )
            else:
                # Insert the items manually to overide the namespace clash
                # check from the base namespace.
                self._namespace[key] = value

            # Register functions decorated with either @sanity_function or
            # @performance_variables or @performance_function decorators.
            if hasattr(value, '_rfm_sanity_fn'):
                try:
                    super().__setitem__('_rfm_sanity', value)
                except KeyError:
                    raise ReframeSyntaxError(
                        'the @sanity_function decorator can only be used '
                        'once in the class body'
                    ) from None
            elif hasattr(value, '_rfm_perf_key'):
                try:
                    self['_rfm_perf_fns'][key] = value
                except KeyError:
                    raise ReframeSyntaxError(
                        f'the performance function {key!r} has already been '
                        f'defined in this class'
                    ) from None

            # Register the final methods
            if hasattr(value, '_rfm_final'):
                self['_rfm_final_methods'].add(key)

            # Register the hooks - if a value does not meet the conditions
            # it will be simply ignored
            self['_rfm_hook_registry'].add(value)

        def __getitem__(self, key):
            '''Expose and control access to the local namespaces.

            Variables may only be retrieved if their value has been previously
            set. Accessing a parameter in the class body is disallowed (the
            actual test parameter is set during the class instantiation).
            '''

            try:
                return super().__getitem__(key)
            except KeyError as err:
                try:
                    # Handle variable access
                    return self['_rfm_local_var_space'][key]
                except KeyError:
                    # Handle parameter access
                    if key in self['_rfm_local_param_space']:
                        raise ReframeSyntaxError(
                            'accessing a test parameter from the class '
                            'body is disallowed'
                        ) from None
                    else:
                        # As the last resource, look if key is a variable in
                        # any of the base classes. If so, make its value
                        # available in the current class' namespace.
                        for b in self['_rfm_bases']:
                            if key in b._rfm_var_space:
                                # Store a deep-copy of the variable's
                                # value and return.
                                v = b._rfm_var_space[key].default_value
                                self._namespace[key] = v
                                return self._namespace[key]

                        # If 'key' is neither a variable nor a parameter,
                        # raise the exception from the base __getitem__.
                        raise err from None

        def reset(self, key):
            '''Reset an item to rerun it through the __setitem__ logic.'''
            self[key] = self[key]

    class WrappedFunction:
        '''Descriptor to wrap a free function as a bound-method.

        The free function object is wrapped by the constructor. Instances
        of this class should be inserted into the namespace of the target class
        with the desired name for the bound-method. Since this class is a
        descriptor, the `__get__` method will return the right bound-method
        when accessed from a class instance.

        :meta private:
        '''

        __slots__ = ('fn')

        def __init__(self, fn, name=None):
            @functools.wraps(fn)
            def _fn(*args, **kwargs):
                return fn(*args, **kwargs)

            self.fn = _fn
            if name:
                self.fn.__name__ = name

        def __get__(self, obj, objtype=None):
            if objtype is None:
                objtype = type(obj)

            self.fn.__qualname__ = '.'.join(
                [objtype.__qualname__, self.fn.__name__]
            )
            if obj is None:
                return self.fn

            return types.MethodType(self.fn, obj)

        def __call__(self, *args, **kwargs):
            return self.fn(*args, **kwargs)

        def __getattr__(self, name):
            if name in self.__slots__:
                return super().__getattr__(name)
            else:
                return getattr(self.fn, name)

        def __setattr__(self, name, value):
            if name in self.__slots__:
                super().__setattr__(name, value)
            else:
                setattr(self.fn, name, value)

    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        namespace = super().__prepare__(name, bases, **kwargs)

        # Keep reference to the bases inside the namespace
        namespace['_rfm_bases'] = [
            b for b in bases if hasattr(b, '_rfm_var_space')
        ]

        # Regression test parameter space defined at the class level
        local_param_space = namespaces.LocalNamespace()
        namespace['_rfm_local_param_space'] = local_param_space

        # Directive to insert a regression test parameter directly in the
        # class body as: `P0 = parameter([0,1,2,3])`.
        namespace['parameter'] = parameters.TestParam

        # Regression test var space defined at the class level
        local_var_space = namespaces.LocalNamespace()
        namespace['_rfm_local_var_space'] = local_var_space

        # Directives to add/modify a regression test variable
        namespace['variable'] = variables.TestVar
        namespace['required'] = variables.Undefined

        # Utility decorators
        namespace['_rfm_ext_bound'] = set()

        def bind(fn, name=None):
            '''Directive to bind a free function to a class.

            See online docs for more information.

            .. note::
               Functions bound using this directive must be re-inspected after
               the class body execution has completed. This directive attaches
               the external method into the class namespace and returns the
               associated instance of the :class:`WrappedFunction`. However,
               this instance may be further modified by other ReFrame builtins
               such as :func:`run_before`, :func:`run_after`, :func:`final` and
               so on after it was added to the namespace, which would bypass
               the logic implemented in the :func:`__setitem__` method from the
               :class:`MetaNamespace` class. Hence, we track the items set by
               this directive in the ``_rfm_ext_bound`` set, so they can be
               later re-inspected.
            '''

            inst = metacls.WrappedFunction(fn, name)
            namespace[inst.__name__] = inst

            # Track the imported external functions
            namespace['_rfm_ext_bound'].add(inst.__name__)
            return inst

        def final(fn):
            '''Indicate that a function is final and cannot be overridden.'''

            fn._rfm_final = True
            return fn

        namespace['bind'] = bind
        namespace['final'] = final
        namespace['_rfm_final_methods'] = set()

        # Hook-related functionality
        def run_before(stage):
            '''Decorator for attaching a test method to a given stage.

            See online docs for more information.
            '''
            return hooks.attach_to('pre_' + stage)

        def run_after(stage):
            '''Decorator for attaching a test method to a given stage.

            See online docs for more information.
            '''
            return hooks.attach_to('post_' + stage)

        namespace['run_before'] = run_before
        namespace['run_after'] = run_after
        namespace['require_deps'] = hooks.require_deps
        namespace['_rfm_hook_registry'] = hooks.HookRegistry()

        # Machinery to add a sanity function
        def sanity_function(fn):
            '''Mark a function as the test's sanity function.

            Decorated functions must be unary and they will be converted into
            deferred expressions.
            '''

            _def_fn = deferrable(fn)
            setattr(_def_fn, '_rfm_sanity_fn', True)
            return _def_fn

        namespace['sanity_function'] = sanity_function
        namespace['deferrable'] = deferrable

        # Machinery to add performance functions
        def performance_function(units, *, perf_key=None):
            '''Decorate a function to extract a performance variable.

            The ``units`` argument indicates the units of the performance
            variable to be extracted.
            The ``perf_key`` optional arg will be used as the name of the
            performance variable. If not provided, the function name will
            be used as the performance variable name.
            '''
            if not isinstance(units, str):
                raise TypeError('performance units must be a string')

            if perf_key and not isinstance(perf_key, str):
                raise TypeError("'perf_key' must be a string")

            def _deco_wrapper(func):
                if not utils.is_trivially_callable(func, non_def_args=1):
                    raise TypeError(
                        f'performance function {func.__name__!r} has more '
                        f'than one argument without a default value'
                    )

                @functools.wraps(func)
                def _perf_fn(*args, **kwargs):
                    return _DeferredPerformanceExpression(
                        func, units, *args, **kwargs
                    )

                _perf_key = perf_key if perf_key else func.__name__
                setattr(_perf_fn, '_rfm_perf_key', _perf_key)
                return _perf_fn

            return _deco_wrapper

        namespace['performance_function'] = performance_function
        namespace['_rfm_perf_fns'] = namespaces.LocalNamespace()
        return metacls.MetaNamespace(namespace)

    def __new__(metacls, name, bases, namespace, **kwargs):
        '''Remove directives from the class namespace.

        It does not make sense to have some directives available after the
        class was created or even at the instance level (e.g. doing
        ``self.parameter([1, 2, 3])`` does not make sense). So here, we
        intercept those directives out of the namespace before the class is
        constructed.
        '''

        directives = [
            'parameter', 'variable', 'bind', 'run_before', 'run_after',
            'require_deps', 'required', 'deferrable', 'sanity_function',
            'final', 'performance_function'
        ]
        for b in directives:
            namespace.pop(b, None)

        # Reset the external functions imported through the bind directive.
        for item in namespace.pop('_rfm_ext_bound'):
            namespace.reset(item)

        return super().__new__(metacls, name, bases, dict(namespace), **kwargs)

    def __init__(cls, name, bases, namespace, **kwargs):
        super().__init__(name, bases, namespace, **kwargs)

        # Create a set with the attribute names already in use.
        cls._rfm_dir = set()
        for base in (b for b in bases if hasattr(b, '_rfm_dir')):
            cls._rfm_dir.update(base._rfm_dir)

        used_attribute_names = set(cls._rfm_dir)

        # Build the var space and extend the target namespace
        variables.VarSpace(cls, used_attribute_names)
        used_attribute_names.update(cls._rfm_var_space.vars)

        # Build the parameter space
        parameters.ParamSpace(cls, used_attribute_names)

        # Update used names set with the local __dict__
        cls._rfm_dir.update(cls.__dict__)

        # Update the hook registry with the bases
        for base in cls._rfm_bases:
            cls._rfm_hook_registry.update(
                base._rfm_hook_registry, denied_hooks=namespace
            )

        # Search the bases if no local sanity functions exist.
        if '_rfm_sanity' not in namespace:
            for base in cls._rfm_bases:
                if hasattr(base, '_rfm_sanity'):
                    cls._rfm_sanity = getattr(base, '_rfm_sanity')
                    if cls._rfm_sanity.__name__ in namespace:
                        raise ReframeSyntaxError(
                            f'{cls.__qualname__!r} overrides the candidate '
                            f'sanity function '
                            f'{cls._rfm_sanity.__qualname__!r} without '
                            f'defining an alternative'
                        )

                    break

        # Update the performance function dict with the bases.
        for base in cls._rfm_bases:
            for k, v in base._rfm_perf_fns.items():
                if k not in namespace:
                    try:
                        cls._rfm_perf_fns[k] = v
                    except KeyError:
                        '''Performance function overridden by other class'''

        # Add the final functions from its parents
        cls._rfm_final_methods.update(
            *(b._rfm_final_methods for b in cls._rfm_bases)
        )

        if getattr(cls, '_rfm_override_final', None):
            return

        for b in cls._rfm_bases:
            for key in b._rfm_final_methods:
                if key in namespace and callable(namespace[key]):
                    msg = (f"'{cls.__qualname__}.{key}' attempts to "
                           f"override final method "
                           f"'{b.__qualname__}.{key}'; "
                           f"you should use the pipeline hooks instead")
                    raise ReframeSyntaxError(msg)

    def __call__(cls, *args, **kwargs):
        '''Inject parameter and variable spaces during object construction.

        When a class is instantiated, this method intercepts the arguments
        associated to the parameter and variable spaces. This prevents both
        :func:`__new__` and :func:`__init__` methods from ever seing these
        arguments.

        The parameter and variable spaces are injected into the object after
        construction and before initialization.
        '''

        # Intercept constructor arguments
        _rfm_use_params = kwargs.pop('_rfm_use_params', False)

        obj = cls.__new__(cls, *args, **kwargs)

        # Insert the var & param spaces
        cls._rfm_var_space.inject(obj, cls)
        cls._rfm_param_space.inject(obj, cls, _rfm_use_params)

        obj.__init__(*args, **kwargs)
        return obj

    def __getattribute__(cls, name):
        '''Attribute lookup method for custom class attributes.

        ReFrame test variables are descriptors injected at the class level.
        If a variable descriptor has already been injected into the class,
        do not return the descriptor object and return the default value
        associated with that variable instead.

        .. warning::
            .. versionchanged:: 3.7.0
               Prior versions exposed the variable descriptor object if this
               was already present in the class, instead of returning the
               variable's default value.
        '''

        try:
            var_space = super().__getattribute__('_rfm_var_space')
        except AttributeError:
            var_space = None

        # If the variable is already injected, delegate lookup to __getattr__.
        if var_space and name in var_space.injected_vars:
            raise AttributeError('delegate variable lookup to __getattr__')

        # Default back to the base method if no special treatment required.
        return super().__getattribute__(name)

    def __getattr__(cls, name):
        '''Backup attribute lookup method into custom namespaces.

        Some ReFrame built-in types are stored under their own sub-namespaces.
        This method will perform an attribute lookup on these sub-namespaces
        if a call to the default :func:`__getattribute__` method fails to
        retrieve the requested class attribute.
        '''

        try:
            var_space = super().__getattribute__('_rfm_var_space')
            return var_space.vars[name]
        except AttributeError:
            '''Catch early access attempt to the variable space.'''
        except KeyError:
            '''Requested name not in variable space.'''

        try:
            param_space = super().__getattribute__('_rfm_param_space')
            return param_space.params[name]
        except AttributeError:
            '''Catch early access attempt to the parameter space.'''
        except KeyError:
            '''Requested name not in parameter space.'''

        raise AttributeError(
            f'class {cls.__qualname__!r} has no attribute {name!r}'
        ) from None

    def setvar(cls, name, value):
        '''Set the value of a variable.

        :param name: The name of the variable.
        :param value: The value of the variable.

        :returns: :class:`True` if the variable was set.
            A variable will *not* be set, if it does not exist or when an
            attempt is made to set it with its underlying descriptor.
            This happens during the variable injection time and it should be
            delegated to the class' :func:`__setattr__` method.

        :raises ReframeSyntaxError: If an attempt is made to override a
            variable with a descriptor other than its underlying one.
        '''

        try:
            var_space = super().__getattribute__('_rfm_var_space')
            if name in var_space:
                if not hasattr(value, '__get__'):
                    var_space[name].define(value)
                    return True
                elif var_space[name].field is not value:
                    desc = '.'.join([cls.__qualname__, name])
                    raise ReframeSyntaxError(
                        f'cannot override variable descriptor {desc!r}'
                    )
                else:
                    # Variable is being injected
                    return False
        except AttributeError:
            '''Catch early access attempt to the variable space.'''

        return False

    def __setattr__(cls, name, value):
        '''Handle the special treatment required for variables and parameters.

        A variable's default value can be updated when accessed as a regular
        class attribute. This behavior does not apply when the assigned value
        is a descriptor object. In that case, the task of setting the value is
        delegated to the base :func:`__setattr__` (this is to comply with
        standard Python behavior). However, since the variables are already
        descriptors which are injected during class instantiation, we disallow
        any attempt to override this descriptor (since it would be silently
        re-overridden in any case).

        Altering the value of a parameter when accessed as a class attribute
        is not allowed. This would break the parameter space internals.
        '''

        # Try to treat `name` as variable
        if cls.setvar(name, value):
            return

        # Try to treat `name` as a parameter
        try:
            # Catch attempts to override a test parameter
            param_space = super().__getattribute__('_rfm_param_space')
            if name in param_space.params:
                raise ReframeSyntaxError(f'cannot override parameter {name!r}')
        except AttributeError:
            '''Catch early access attempt to the parameter space.'''

        # Treat `name` as normal class attribute
        super().__setattr__(name, value)

    @property
    def param_space(cls):
        ''' Make the parameter space available as read-only.'''
        return cls._rfm_param_space

    def is_abstract(cls):
        '''Check if the class is an abstract test.

        This is the case when some parameters are undefined, which results in
        the length of the parameter space being 0.

        :return: bool indicating whether the test has undefined parameters.

        :meta private:
        '''
        return len(cls.param_space) == 0
```
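For context, a minimal self-contained sketch (all names made up) of the `__prepare__` mechanism this metaclass builds on: returning a custom mapping from `__prepare__` lets the metaclass observe and rewrite every assignment made in the class body, which is how the directives above are intercepted.

```python
class Namespace(dict):
    def __setitem__(self, key, value):
        if key.startswith('param_'):
            # Divert "directive" assignments into a side table.
            self.setdefault('_params', {})[key] = value
        else:
            super().__setitem__(key, value)

class Meta(type):
    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        return Namespace()

    def __new__(metacls, name, bases, namespace, **kwargs):
        return super().__new__(metacls, name, bases, dict(namespace), **kwargs)

class Test(metaclass=Meta):
    param_x = [0, 1, 2]   # intercepted by Namespace.__setitem__
    regular = 'kept'      # stored normally

print(Test._params)   # {'param_x': [0, 1, 2]}
print(Test.regular)   # kept
```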
- avg_line_length: 39.432479 · max_line_length: 79 · alphanum_fraction: 0.588304
- content_no_comment: the same module with all comments and docstrings stripped; omitted here as a near-verbatim duplicate of the content above
- is_comment_constant_removed: true · is_sharp_comment_removed: true
**Record 4: `lib/pygments-1.2.2-patched/pygments/lexers/other.py`**

- hexsha: `7902435071d525ebc1985f7e0f037909fd6d32c0` · size: 106,955 · ext: `py` · lang: Python
- repo (stars/issues/forks columns all agree): `artdent/jgments` @ `2a0c01daf1c787a9c20a4e916e243b08fef4a43d`, licenses `["BSD-2-Clause"]`
- `max_stars_count`: 3, star events: 2015-08-12T01:11:03.000Z to 2018-09-21T11:51:03.000Z
- `max_issues_count`: null
- `max_forks_count`: 1, fork events: 2015-01-21T06:42:28.000Z (min and max)

content (truncated at the end of this dump):
# -*- coding: utf-8 -*-
"""
pygments.lexers.other
~~~~~~~~~~~~~~~~~~~~~
Lexers for other languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, do_insertions
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
from pygments.lexers.web import HtmlLexer
__all__ = ['SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'BrainfuckLexer',
'BashLexer', 'BatchLexer', 'BefungeLexer', 'RedcodeLexer',
'MOOCodeLexer', 'SmalltalkLexer', 'TcshLexer', 'LogtalkLexer',
'GnuplotLexer', 'PovrayLexer', 'AppleScriptLexer',
'BashSessionLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer',
'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer']
line_re = re.compile('.*?\n')
class SqlLexer(RegexLexer):
"""
Lexer for Structured Query Language. Currently, this lexer does
not recognize any special syntax except ANSI SQL.
"""
name = 'SQL'
aliases = ['sql']
filenames = ['*.sql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
r'SYSTEM_USER|TABLE|TABLE_NAME| TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
(r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
Name.Builtin),
(r'[+*/<>=~!@#%^&|`?^-]', Operator),
(r'[0-9]+', Number.Integer),
# TODO: Backslash escapes?
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class MySqlLexer(RegexLexer):
"""
Special lexer for MySQL.
"""
name = 'MySQL'
aliases = ['mysql']
mimetypes = ['text/x-mysql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#|--\s+).*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'[0-9]+', Number.Integer),
(r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float),
# TODO: add backslash escapes
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r"`(``|[^`])*`", String.Symbol),
(r'[+*/<>=~!@#%^&|`?^-]', Operator),
(r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
r'precision|real|numeric|dec|decimal|timestamp|year|char|'
r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
bygroups(Keyword.Type, Text, Punctuation)),
(r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
r'character|check|collate|column|condition|constraint|continue|'
r'convert|create|cross|current_date|current_time|'
r'current_timestamp|current_user|cursor|database|databases|'
r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
r'declare|default|delayed|delete|desc|describe|deterministic|'
r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
r'|for|force|foreign|from|fulltext|grant|group|having|'
r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
r'minute_microsecond|minute_second|mod|modifies|natural|'
r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
r'replace|require|restrict|return|revoke|right|rlike|schema|'
r'schemas|second_microsecond|select|sensitive|separator|set|'
r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
r'varying|when|where|while|with|write|x509|xor|year_month|'
r'zerofill)\b', Keyword),
# TODO: this list is not complete
(r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
(r'(true|false|null)', Name.Constant),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
*New in Pygments 0.11.*
"""
name = 'sqlite3con'
aliases = ['sqlite3']
filenames = ['*.sqlite3-console']
mimetypes = ['text/x-sqlite3-console']
def get_tokens_unprocessed(self, data):
sql = SqlLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(data):
line = match.group()
if line.startswith('sqlite> ') or line.startswith(' ...> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:8])]))
curcode += line[8:]
else:
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('SQL error: '):
yield (match.start(), Generic.Traceback, line)
else:
yield (match.start(), Generic.Output, line)
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
class BrainfuckLexer(RegexLexer):
"""
Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_
language.
"""
name = 'Brainfuck'
aliases = ['brainfuck', 'bf']
filenames = ['*.bf', '*.b']
mimetypes = ['application/x-brainfuck']
tokens = {
'common': [
# use different colors for different instruction types
(r'[.,]+', Name.Tag),
(r'[+-]+', Name.Builtin),
(r'[<>]+', Name.Variable),
(r'[^.,+\-<>\[\]]+', Comment),
],
'root': [
(r'\[', Keyword, 'loop'),
(r'\]', Error),
include('common'),
],
'loop': [
(r'\[', Keyword, '#push'),
(r'\]', Keyword, '#pop'),
include('common'),
]
}
class BefungeLexer(RegexLexer):
"""
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
*New in Pygments 0.7.*
"""
name = 'Befunge'
aliases = ['befunge']
filenames = ['*.befunge']
mimetypes = ['application/x-befunge']
tokens = {
'root': [
(r'[0-9a-f]', Number),
(r'[\+\*/%!`-]', Operator), # Traditional math
(r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives
(r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives
(r'[|_mw]', Keyword),
(r'[{}]', Name.Tag), # Befunge-98 stack ops
(r'".*?"', String.Double), # Strings don't appear to allow escapes
(r'\'.', String.Single), # Single character
(r'[#;]', Comment), # Trampoline... depends on direction hit
(r'[pg&~=@iotsy]', Keyword), # Misc
(r'[()A-Z]', Comment), # Fingerprints
(r'\s+', Text), # Whitespace doesn't matter
],
}
class BashLexer(RegexLexer):
"""
Lexer for (ba)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
aliases = ['bash', 'sh']
filenames = ['*.sh', '*.ebuild', '*.eclass']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)\s*\b',
Keyword),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Text),
(r'\s+', Text),
(r'[^=\s\n\[\]{}()$"\'`\\<]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
return shebang_matches(text, r'(ba|z|)sh')
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
*New in Pygments 1.1.*
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)?|\[\S+[@:]'
r'[^\n]+\].+)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
*New in Pygments 0.7.*
"""
name = 'Batchfile'
aliases = ['bat']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class RedcodeLexer(RegexLexer):
"""
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <blinks@acm.org>.
*New in Pygments 0.8.*
"""
name = 'Redcode'
aliases = ['redcode']
filenames = ['*.cw']
opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD',
'JMP','JMZ','JMN','DJN','CMP','SLT','SPL',
'ORG','EQU','END']
modifiers = ['A','B','AB','BA','F','X','I']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
(r';.*$', Comment.Single),
# Lexemes:
# Identifiers
(r'\b(%s)\b' % '|'.join(opcodes), Name.Function),
(r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator),
(r'[A-Za-z_][A-Za-z_0-9]+', Name),
# Operators
(r'[-+*/%]', Operator),
(r'[#$@<>]', Operator), # mode
(r'[.,]', Punctuation), # mode
# Numbers
(r'[-+]?\d+', Number.Integer),
],
}
class MOOCodeLexer(RegexLexer):
"""
For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
language).
*New in Pygments 0.9.*
"""
name = 'MOOCode'
filenames = ['*.moo']
aliases = ['moocode']
mimetypes = ['text/x-moocode']
tokens = {
'root' : [
# Numbers
(r'(0|[1-9][0-9_]*)', Number.Integer),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# exceptions
(r'(E_PERM|E_DIV)', Name.Exception),
# db-refs
(r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity),
# Keywords
(r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
r'|endwhile|break|continue|return|try'
r'|except|endtry|finally|in)\b', Keyword),
# builtins
(r'(random|length)', Name.Builtin),
# special variables
(r'(player|caller|this|args)', Name.Variable.Instance),
# skip whitespace
(r'\s+', Text),
(r'\n', Text),
# other operators
(r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator),
# function call
(r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)),
# variables
(r'([a-zA-Z_0-9]+)', Text),
]
}
class SmalltalkLexer(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
*New in Pygments 0.10.*
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak']
mimetypes = ['text/x-smalltalk']
tokens = {
'root' : [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition' : [
            # Not perfect: can't allow whitespace at the beginning and the
            # end without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables' : [
include('whitespaces'),
(r'(:)(\s*)([A-Za-z\w]+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
(r'', Text, '#pop'), # else pop
],
'literals' : [
(r'\'[^\']*\'', String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper' : [
include('whitespaces'),
(r'[-+*/\\~<>=|&#!?,@%\w+:]+', String.Symbol),
# literals
(r'\'[^\']*\'', String),
(r'\$.', String.Char),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth' : [
# This state is a bit tricky since
# we can't just pop this state
            (r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces' : [
# skip whitespace and comments
(r'\s+', Text),
(r'"[^"]*"', Comment),
],
'objects' : [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("[^"]*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject' : [
            (r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout' : [
# Squeak fileout format (optional)
(r'^"[^"]*"!', Keyword),
(r"^'[^']*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r'^(!)(\w+(?: class)?)( methodsFor: )(\'[^\']*\')(.*?!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
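# A minimal sketch of driving the lexer directly (illustrative only):
#
#   st = SmalltalkLexer()
#   for token, value in st.get_tokens("Transcript showCr: 'hello'."):
#       print token, repr(value)
#
# 'Transcript' is matched in the 'objects' state as Name.Class, and the
# keyword-message selector 'showCr:' as Name.Function in 'afterobject'.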
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
*New in Pygments 0.10.*
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|'
r'fg|filetest|getxvers|glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|set|shift|'
r'sched|setenv|setpath|settc|setty|setxvers|shift|source|stop|suspend|'
r'source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\n\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
*New in Pygments 0.10.*
"""
name = 'Logtalk'
aliases = ['logtalk']
filenames = ['*.lgt']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
            (r'^\s*:-\s', Punctuation, 'directive'),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number),
(r'0o[0-7]+', Number),
(r'0x[0-9a-fA-F]+', Number),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Event handlers
(r'(after|before)(?=[(])', Keyword),
# Execution-context methods
(r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
# Reflection
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
# DCGs and term expansion
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])',
Keyword),
# Entity
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])',
Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
# Entity relations
(r'complements_object(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
# Events
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
# Flags
(r'(current|set)_logtalk_flag(?=[(])', Keyword),
# Compiling, loading, and library paths
(r'logtalk_(compile|l(ibrary_path|oad))(?=[(])', Keyword),
# Database
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
# Control
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fail|true)\b', Keyword),
# All solutions
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
# Multi-threading meta-predicates
(r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])',
Keyword),
# Term unification
(r'unify_with_occurs_check(?=[(])', Keyword),
# Term creation and decomposition
(r'(functor|arg|copy_term)(?=[(])', Keyword),
# Evaluable functors
(r'(rem|mod|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
(r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|compound|n(onvar|umber))(?=[(])',
Keyword),
# Stream selection and control
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])',
Keyword),
# Character and byte input/output
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
# Term input/output
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
# Atomic term processing
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
# Implementation defined hooks functions
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
# Message sending operators
(r'(::|:|\^\^)', Operator),
# External call
(r'[{}]', Keyword),
# Logic and control
(r'\bonce(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
# Bitwise functors
(r'(>>|<<|/\\|\\\\|\\)', Operator),
            # Arithmetic evaluation
(r'\bis\b', Keyword),
            # Arithmetic comparison
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
# Term creation and decomposition
(r'=\.\.', Operator),
# Term unification
(r'(=|\\=)', Operator),
# Term comparison
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
(r'\b(mod|rem)\b', Operator),
            # Other arithmetic functors
(r'\b\*\*\b', Operator),
# DCG rules
(r'-->', Operator),
# Control constructs
(r'([!;]|->)', Operator),
# Logic and control
(r'\\+', Operator),
# Mode operators
(r'[?@]', Operator),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
            # Punctuation
(r'[()\[\],.|]', Text),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
],
'quoted_atom': [
(r"['][']", String),
(r"[']", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))[.]', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
            (r'(end_(category|object|protocol))[.]', Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(fo|itialization)(?=[(])', Keyword, 'root'),
(r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|'
r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(calls|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
(r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'),
],
'entityrelations': [
(r'(extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])',
Keyword),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number),
(r'0o[0-7]+', Number),
(r'0x[0-9a-fA-F]+', Number),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
            # Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return True
if ':- protocol(' in text:
return True
if ':- category(' in text:
return True
return False
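# For illustration, this heuristic feeds Pygments' lexer guessing (a sketch,
# assuming a standard installation; any source containing ':- object(' should
# score as Logtalk):
#
#   from pygments.lexers import guess_lexer
#   guess_lexer(':- object(hello).\n')   # expected to return a LogtalkLexer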
def _shortened(word):
dpos = word.find('$')
return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1)])
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
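# Worked example: _shortened('bi$nd') returns r'bind\b|bin\b|bi\b', i.e.
# every abbreviation of "bind" down to the prefix before the '$' marker,
# with the longest alternative first so the regex engine prefers it.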
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
*New in Pygments 0.11.*
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)',
             bygroups(Name.Variable, Text, Operator), 'genericargs'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)',
             bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
('', Comment, '#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'sqstring': [
(r"''", String), # escaped single quote
(r"'", String, '#pop'),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # normal backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
('[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der",
"box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta",
"data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign",
"fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid",
"hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle",
"la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale",
"mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin",
"rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot",
"mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics",
"nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics",
"mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput",
"pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot",
"poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze",
"st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs",
"ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le",
"v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta",
"yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel",
"yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs",
"x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs",
"zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs",
"x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs",
"noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs",
"xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs",
"noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs",
"cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange",
"y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange",
"vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis",
"zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
*New in Pygments 0.11.*
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(r'#(debug|default|else|end|error|fclose|fopen|if|ifdef|ifndef|'
r'include|range|read|render|statistics|switch|undef|version|'
r'warning|while|write|define|macro|local|declare)',
Comment.Preproc),
(r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|'
r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|'
r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|'
r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|'
r'attenuating|average|background|black_hole|blue|blur_samples|'
r'bounded_by|box_mapping|bozo|break|brick|brick_size|'
r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|'
r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|'
r'color|color_map|colour|colour_map|component|composite|concat|'
r'confidence|conic_sweep|constant|control0|control1|cos|cosh|'
r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|'
r'debug|declare|default|degrees|dents|diffuse|direction|'
r'distance|distance_maximum|div|dust|dust_type|eccentricity|'
r'else|emitting|end|error|error_bound|exp|exponent|'
r'fade_distance|fade_power|falloff|falloff_angle|false|'
r'file_exists|filter|finish|fisheye|flatness|flip|floor|'
r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|'
r'global_settings|glowing|gradient|granite|gray_threshold|'
r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|'
r'if|ifdef|iff|image_map|incidence|include|int|interpolate|'
r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|'
r'linear|linear_spline|linear_sweep|location|log|looks_like|'
r'look_at|low_error_factor|mandel|map_type|marble|material_map|'
r'matrix|max|max_intersections|max_iteration|max_trace_level|'
r'max_value|metallic|min|minimum_reuse|mod|mortar|'
r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|'
r'octaves|off|offset|omega|omnimax|on|once|onion|open|'
r'orthographic|panoramic|pattern1|pattern2|pattern3|'
r'perspective|pgm|phase|phong|phong_size|pi|pigment|'
r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|'
r'precision|pwr|quadratic_spline|quaternion|quick_color|'
r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|'
r'ramp_wave|rand|range|reciprocal|recursion_limit|red|'
r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|'
r'right|ripples|rotate|roughness|samples|scale|scallop_wave|'
r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|'
r'slice|slope_map|smooth|specular|spherical_mapping|spiral|'
r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|'
r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|'
r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|'
r'test_camera_4|texture|texture_map|tga|thickness|threshold|'
r'tightness|tile2|tiles|track|transform|translate|transmit|'
r'triangle_wave|true|ttf|turbulence|turb_depth|type|'
r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|'
r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|'
r'vnormalize|volume_object|volume_rendered|vol_with_light|'
r'vrotate|v_steps|warning|warp|water_level|waves|while|width|'
r'wood|wrinkles|yes)\b', Keyword),
(r'bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|'
r'disc|height_field|intersection|julia_fractal|lathe|'
r'light_source|merge|mesh|object|plane|poly|polygon|prism|'
r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|'
r'text|torus|triangle|union', Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\s+', Text),
]
}
class AppleScriptLexer(RegexLexer):
"""
For `AppleScript source code
<http://developer.apple.com/documentation/AppleScript/
Conceptual/AppleScriptLangGuide>`_,
including `AppleScript Studio
<http://developer.apple.com/documentation/AppleScript/
Reference/StudioReference>`_.
Contributed by Andreas Amann <aamann@mac.com>.
"""
name = 'AppleScript'
aliases = ['applescript']
filenames = ['*.applescript']
flags = re.MULTILINE | re.DOTALL
Identifiers = r'[a-zA-Z]\w*'
Literals = ['AppleScript', 'current application', 'false', 'linefeed',
'missing value', 'pi','quote', 'result', 'return', 'space',
'tab', 'text item delimiters', 'true', 'version']
Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ',
'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
'real ', 'record ', 'reference ', 'RGB color ', 'script ',
'text ', 'unit types', '(Unicode )?text', 'string']
BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month',
'paragraph', 'word', 'year']
HandlerParams = ['about', 'above', 'against', 'apart from', 'around',
'aside from', 'at', 'below', 'beneath', 'beside',
'between', 'for', 'given', 'instead of', 'on', 'onto',
'out of', 'over', 'since']
Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL',
'choose application', 'choose color', 'choose file( name)?',
'choose folder', 'choose from list',
'choose remote application', 'clipboard info',
'close( access)?', 'copy', 'count', 'current date', 'delay',
'delete', 'display (alert|dialog)', 'do shell script',
'duplicate', 'exists', 'get eof', 'get volume settings',
'info for', 'launch', 'list (disks|folder)', 'load script',
'log', 'make', 'mount volume', 'new', 'offset',
'open( (for access|location))?', 'path to', 'print', 'quit',
'random number', 'read', 'round', 'run( script)?',
'say', 'scripting components',
'set (eof|the clipboard to|volume)', 'store script',
'summarize', 'system attribute', 'system info',
'the clipboard', 'time to GMT', 'write', 'quoted form']
References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
'before', 'behind', 'every', 'front', 'index', 'last',
'middle', 'some', 'that', 'through', 'thru', 'where', 'whose']
Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not",
"isn't", "isn't equal( to)?", "is not equal( to)?",
"doesn't equal", "does not equal", "(is )?greater than",
"comes after", "is not less than or equal( to)?",
"isn't less than or equal( to)?", "(is )?less than",
"comes before", "is not greater than or equal( to)?",
"isn't greater than or equal( to)?",
"(is )?greater than or equal( to)?", "is not less than",
"isn't less than", "does not come before",
"doesn't come before", "(is )?less than or equal( to)?",
"is not greater than", "isn't greater than",
"does not come after", "doesn't come after", "starts? with",
"begins? with", "ends? with", "contains?", "does not contain",
"doesn't contain", "is in", "is contained by", "is not in",
"is not contained by", "isn't contained by", "div", "mod",
"not", "(a )?(ref( to)?|reference to)", "is", "does"]
Control = ['considering', 'else', 'error', 'exit', 'from', 'if',
'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
'try', 'until', 'using terms from', 'while', 'whith',
'with timeout( of)?', 'with transaction', 'by', 'continue',
               'end', 'its?', 'me', 'my', 'return', 'of', 'as']
Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get']
Reserved = ['but', 'put', 'returning', 'the']
StudioClasses = ['action cell', 'alert reply', 'application', 'box',
'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
'clip view', 'color well', 'color-panel',
'combo box( item)?', 'control',
'data( (cell|column|item|row|source))?', 'default entry',
'dialog reply', 'document', 'drag info', 'drawer',
'event', 'font(-panel)?', 'formatter',
'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
'movie( view)?', 'open-panel', 'outline view', 'panel',
'pasteboard', 'plugin', 'popup button',
'progress indicator', 'responder', 'save-panel',
'scroll view', 'secure text field( cell)?', 'slider',
'sound', 'split view', 'stepper', 'tab view( item)?',
'table( (column|header cell|header view|view))',
'text( (field( cell)?|view))?', 'toolbar( item)?',
'user-defaults', 'view', 'window']
StudioEvents = ['accept outline drop', 'accept table drop', 'action',
'activated', 'alert ended', 'awake from nib', 'became key',
'became main', 'begin editing', 'bounds changed',
'cell value', 'cell value changed', 'change cell value',
'change item value', 'changed', 'child of item',
'choose menu item', 'clicked', 'clicked toolbar item',
'closed', 'column clicked', 'column moved',
'column resized', 'conclude drop', 'data representation',
'deminiaturized', 'dialog ended', 'document nib name',
'double clicked', 'drag( (entered|exited|updated))?',
'drop', 'end editing', 'exposed', 'idle', 'item expandable',
'item value', 'item value changed', 'items changed',
'keyboard down', 'keyboard up', 'launched',
'load data representation', 'miniaturized', 'mouse down',
'mouse dragged', 'mouse entered', 'mouse exited',
'mouse moved', 'mouse up', 'moved',
'number of browser rows', 'number of items',
'number of rows', 'open untitled', 'opened', 'panel ended',
'parameters updated', 'plugin loaded', 'prepare drop',
'prepare outline drag', 'prepare outline drop',
'prepare table drag', 'prepare table drop',
'read from file', 'resigned active', 'resigned key',
'resigned main', 'resized( sub views)?',
'right mouse down', 'right mouse dragged',
'right mouse up', 'rows changed', 'scroll wheel',
'selected tab view item', 'selection changed',
'selection changing', 'should begin editing',
'should close', 'should collapse item',
'should end editing', 'should expand item',
'should open( untitled)?',
'should quit( after last window closed)?',
'should select column', 'should select item',
'should select row', 'should select tab view item',
'should selection change', 'should zoom', 'shown',
'update menu item', 'update parameters',
'update toolbar item', 'was hidden', 'was miniaturized',
'will become active', 'will close', 'will dismiss',
'will display browser cell', 'will display cell',
'will display item cell', 'will display outline cell',
'will finish launching', 'will hide', 'will miniaturize',
'will move', 'will open', 'will pop up', 'will quit',
'will resign active', 'will resize( sub views)?',
'will select tab view item', 'will show', 'will zoom',
'write to file', 'zoomed']
StudioCommands = ['animate', 'append', 'call method', 'center',
'close drawer', 'close panel', 'display',
'display alert', 'display dialog', 'display panel', 'go',
'hide', 'highlight', 'increment', 'item for',
'load image', 'load movie', 'load nib', 'load panel',
'load sound', 'localized string', 'lock focus', 'log',
'open drawer', 'path for', 'pause', 'perform action',
'play', 'register', 'resume', 'scroll', 'select( all)?',
'show', 'size to fit', 'start', 'step back',
'step forward', 'stop', 'synchronize', 'unlock focus',
'update']
StudioProperties = ['accepts arrow key', 'action method', 'active',
'alignment', 'allowed identifiers',
'allows branch selection', 'allows column reordering',
'allows column resizing', 'allows column selection',
'allows customization',
'allows editing text attributes',
'allows empty selection', 'allows mixed state',
'allows multiple selection', 'allows reordering',
'allows undo', 'alpha( value)?', 'alternate image',
'alternate increment value', 'alternate title',
'animation delay', 'associated file name',
'associated object', 'auto completes', 'auto display',
'auto enables items', 'auto repeat',
'auto resizes( outline column)?',
'auto save expanded items', 'auto save name',
'auto save table columns', 'auto saves configuration',
'auto scroll', 'auto sizes all columns to fit',
'auto sizes cells', 'background color', 'bezel state',
'bezel style', 'bezeled', 'border rect', 'border type',
'bordered', 'bounds( rotation)?', 'box type',
'button returned', 'button type',
'can choose directories', 'can choose files',
'can draw', 'can hide',
'cell( (background color|size|type))?', 'characters',
'class', 'click count', 'clicked( data)? column',
'clicked data item', 'clicked( data)? row',
'closeable', 'collating', 'color( (mode|panel))',
'command key down', 'configuration',
'content(s| (size|view( margins)?))?', 'context',
'continuous', 'control key down', 'control size',
'control tint', 'control view',
'controller visible', 'coordinate system',
'copies( on scroll)?', 'corner view', 'current cell',
'current column', 'current( field)? editor',
'current( menu)? item', 'current row',
'current tab view item', 'data source',
'default identifiers', 'delta (x|y|z)',
'destination window', 'directory', 'display mode',
'displayed cell', 'document( (edited|rect|view))?',
'double value', 'dragged column', 'dragged distance',
'dragged items', 'draws( cell)? background',
'draws grid', 'dynamically scrolls', 'echos bullets',
'edge', 'editable', 'edited( data)? column',
'edited data item', 'edited( data)? row', 'enabled',
'enclosing scroll view', 'ending page',
'error handling', 'event number', 'event type',
'excluded from windows menu', 'executable path',
'expanded', 'fax number', 'field editor', 'file kind',
'file name', 'file type', 'first responder',
'first visible column', 'flipped', 'floating',
'font( panel)?', 'formatter', 'frameworks path',
'frontmost', 'gave up', 'grid color', 'has data items',
'has horizontal ruler', 'has horizontal scroller',
'has parent data item', 'has resize indicator',
'has shadow', 'has sub menu', 'has vertical ruler',
'has vertical scroller', 'header cell', 'header view',
'hidden', 'hides when deactivated', 'highlights by',
'horizontal line scroll', 'horizontal page scroll',
'horizontal ruler view', 'horizontally resizable',
'icon image', 'id', 'identifier',
'ignores multiple clicks',
'image( (alignment|dims when disabled|frame style|'
'scaling))?',
'imports graphics', 'increment value',
'indentation per level', 'indeterminate', 'index',
'integer value', 'intercell spacing', 'item height',
'key( (code|equivalent( modifier)?|window))?',
'knob thickness', 'label', 'last( visible)? column',
'leading offset', 'leaf', 'level', 'line scroll',
'loaded', 'localized sort', 'location', 'loop mode',
                        'main( (bundle|menu|window))?', 'marker follows cell',
'matrix mode', 'maximum( content)? size',
'maximum visible columns',
'menu( form representation)?', 'miniaturizable',
'miniaturized', 'minimized image', 'minimized title',
'minimum column width', 'minimum( content)? size',
'modal', 'modified', 'mouse down state',
'movie( (controller|file|rect))?', 'muted', 'name',
'needs display', 'next state', 'next text',
'number of tick marks', 'only tick mark values',
'opaque', 'open panel', 'option key down',
'outline table column', 'page scroll', 'pages across',
'pages down', 'palette label', 'pane splitter',
'parent data item', 'parent window', 'pasteboard',
'path( (names|separator))?', 'playing',
'plays every frame', 'plays selection only', 'position',
'preferred edge', 'preferred type', 'pressure',
'previous text', 'prompt', 'properties',
'prototype cell', 'pulls down', 'rate',
'released when closed', 'repeated',
'requested print time', 'required file type',
'resizable', 'resized column', 'resource path',
'returns records', 'reuses columns', 'rich text',
'roll over', 'row height', 'rulers visible',
'save panel', 'scripts path', 'scrollable',
'selectable( identifiers)?', 'selected cell',
'selected( data)? columns?', 'selected data items?',
'selected( data)? rows?', 'selected item identifier',
'selection by rect', 'send action on arrow key',
'sends action when done editing', 'separates columns',
'separator item', 'sequence number', 'services menu',
'shared frameworks path', 'shared support path',
'sheet', 'shift key down', 'shows alpha',
'shows state by', 'size( mode)?',
'smart insert delete enabled', 'sort case sensitivity',
'sort column', 'sort order', 'sort type',
'sorted( data rows)?', 'sound', 'source( mask)?',
'spell checking enabled', 'starting page', 'state',
'string value', 'sub menu', 'super menu', 'super view',
'tab key traverses cells', 'tab state', 'tab type',
'tab view', 'table view', 'tag', 'target( printer)?',
'text color', 'text container insert',
'text container origin', 'text returned',
'tick mark position', 'time stamp',
'title(d| (cell|font|height|position|rect))?',
'tool tip', 'toolbar', 'trailing offset', 'transparent',
'treat packages as directories', 'truncated labels',
'types', 'unmodified characters', 'update views',
'use sort indicator', 'user defaults',
'uses data source', 'uses ruler',
'uses threaded animation',
'uses title from previous column', 'value wraps',
'version',
'vertical( (line scroll|page scroll|ruler view))?',
'vertically resizable', 'view',
'visible( document rect)?', 'volume', 'width', 'window',
'windows menu', 'wraps', 'zoomable', 'zoomed']
tokens = {
'root': [
(r'\s+', Text),
(ur'¬\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[\(\){}!,.:]', Punctuation),
(ur'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
(ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents),
bygroups(Keyword, Name.Function)),
(r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
(r'\b(as )(%s)\b' % '|'.join(Classes),
bygroups(Keyword, Name.Class)),
(r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
(r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(Control), Keyword),
(r'\b(%s)\b' % '|'.join(Declarations), Keyword),
(r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
(r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
(r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
(r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(References), Name.Builtin),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r'\b(%s)\b' % Identifiers, Name.Variable),
(r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
(r'[-+]?\d+', Number.Integer),
],
'comment': [
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[^*(]+', Comment.Multiline),
            (r'[*(]', Comment.Multiline),
],
}
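# A minimal sketch of lexing a snippet (illustrative only):
#
#   asl = AppleScriptLexer()
#   for token, value in asl.get_tokens('tell application "Finder" to beep'):
#       print token, repr(value)
#
# 'tell' and 'to' come from the Control list as Keyword, the quoted name is
# String.Double, and 'beep' is matched from the Commands list as Name.Builtin.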
class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
*New in Pygments 1.1.*
"""
name = 'Modelica'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
(r'"', String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\]{},.;]', Punctuation),
(r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
(r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
(r"('[\w\+\-\*\/\^]+'|\w+)", Name) ],
'root': [
include('whitespace'),
include('keywords'),
include('functions'),
include('operators'),
include('classes'),
(r'("<html>|<html>)', Name.Tag, 'html-content'),
include('statements')
],
'keywords': [
(r'(algorithm|annotation|break|connect|constant|constrainedby|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'end|equation|exit|expandable|extends|'
r'external|false|final|flow|for|if|import|in|inner|input|'
r'loop|nondiscrete|outer|output|parameter|partial|'
r'protected|public|redeclare|replaceable|stream|time|then|true|'
r'when|while|within)\b', Keyword)
],
'functions': [
(r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|'
r'cross|div|exp|floor|log|log10|mod|rem|sign|sin|sinh|size|'
r'sqrt|tan|tanh|zeros)\b', Name.Function)
],
'operators': [
(r'(and|assert|cardinality|change|delay|der|edge|initial|'
r'noEvent|not|or|pre|reinit|return|sample|smooth|'
r'terminal|terminate)\b', Name.Builtin)
],
'classes': [
(r'(block|class|connector|function|model|package|'
r'record|type)\b', Name.Class)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})',
String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String) # stray backslash
],
'html-content': [
(r'<\s*/\s*html\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)),
]
}
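# A minimal sketch of the HTML delegation above (illustrative snippet):
#
#   mo = ModelicaLexer()
#   src = 'annotation(Documentation(info="<html><b>x</b></html>"));'
#   list(mo.get_tokens(src))
#
# Everything between the <html> and </html> tags is handed off to HtmlLexer
# through the using() helper in the 'html-content' state.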
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
*New in Pygments 1.1.*
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3']
mimetypes = ['text/x-rebol']
    flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
elif re.match(".*\?$", word):
yield match.start(), Keyword, word
elif re.match(".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
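    # For illustration, the callback above classifies standalone words as:
    #
    #   'REBOL'      -> Generic.Heading
    #   'print'      -> Name.Builtin
    #   'to-integer' -> Keyword (a to-* conversion)
    #   'empty?'     -> Keyword (a *? predicate)
    #   "'foo"       -> Name.Variable.Instance (lit-word)
    #   '%file.r'    -> Name.Decorator (file)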
tokens = {
'root': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#{[0-9a-fA-F]*}', Number.Hex),
(r'2#{', Number.Hex, 'bin2'),
(r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex),
(r'"', String, 'string'),
(r'{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(\^{^")\s\[\]]+', Name.Decorator),
(r'<[a-zA-Z0-9:._-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+[xX]\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]\(\)]', Generic.Strong),
(r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url
(r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email
(r'comment\s', Comment, 'comment'),
(r'/[^(\^{^")\s/[\]]*', Name.Attribute),
(r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'([^(\^{^")\s]+)', Text),
],
'string': [
(r'[^(\^")]+', String),
(escape_re, String.Escape),
(r'[\(|\)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(\^{^})]+', String),
(escape_re, String.Escape),
(r'[\(|\)]+', String),
(r'\^.', String.Escape),
(r'{', String, '#push'),
(r'}', String, '#pop'),
],
'stringFile': [
(r'[^(\^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(\^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[\(|\)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([0-1]\s*){8}', Number.Hex),
(r'}', Number.Hex, '#pop'),
],
'comment': [
(r'"', Comment, 'commentString1'),
(r'{', Comment, 'commentString2'),
(r'\[', Comment, 'commentBlock'),
(r'[^(\s{\"\[]+', Comment, '#pop'),
],
'commentString1': [
(r'[^(\^")]+', Comment),
(escape_re, Comment),
(r'[\(|\)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(\^{^})]+', Comment),
(escape_re, Comment),
(r'[\(|\)]+', Comment),
(r'\^.', Comment),
(r'{', Comment, '#push'),
(r'}', Comment, '#pop'),
],
'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'[^(\[\])]*', Comment),
],
}
class ABAPLexer(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
*New in Pygments 1.1.*
"""
name = 'ABAP'
aliases = ['abap']
filenames = ['*.abap']
mimetypes = ['text/x-abap']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'common': [
(r'\s+', Text),
(r'^\*.*$', Comment.Single),
(r'\".*?\n', Comment.Single),
],
'variable-names': [
(r'<[\S_]+>', Name.Variable),
(r'[\w][\w_~]*(?:(\[\])|->\*)?', Name.Variable),
],
'root': [
include('common'),
            # function calls
(r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)',
bygroups(Keyword, Text, Name.Function)),
(r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
r'TRANSACTION|TRANSFORMATION))\b',
Keyword),
(r'(FORM|PERFORM)(\s+)([\w_]+)',
bygroups(Keyword, Text, Name.Function)),
(r'(PERFORM)(\s+)(\()([\w_]+)(\))',
bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )),
(r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
bygroups(Keyword, Text, Name.Function, Text, Keyword)),
# method implementation
(r'(METHOD)(\s+)([\w_~]+)',
bygroups(Keyword, Text, Name.Function)),
# method calls
(r'(\s+)([\w_\-]+)([=\-]>)([\w_\-~]+)',
bygroups(Text, Name.Variable, Operator, Name.Function)),
# call methodnames returning style
(r'(?<=(=|-)>)([\w_\-~]+)(?=\()', Name.Function),
# keywords with dashes in them.
# these need to be first, because for instance the -ID part
# of MESSAGE-ID wouldn't get highlighted if MESSAGE was
# first in the list of keywords.
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
r'INTERFACE-POOL|INVERTED-DATE|'
r'LOAD-OF-PROGRAM|LOG-POINT|'
r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
r'OUTPUT-LENGTH|PRINT-CONTROL|'
r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
r'TYPE-POOL|TYPE-POOLS'
r')\b', Keyword),
            # keyword combinations
(r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|'
r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
             r'TITLEBAR|UPDATE\s+TASK\s+LOCAL|USER-COMMAND)|'
r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
r'FREE\s(MEMORY|OBJECT)?|'
r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
r'SKIP|ULINE)|'
r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
r'TO LIST-PROCESSING|TO TRANSACTION)'
r'(ENDING|STARTING)\s+AT|'
r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
r'(BEGIN|END)\s+OF|'
r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
r'COMPARING(\s+ALL\s+FIELDS)?|'
r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|'
r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
r'END-OF-(DEFINITION|PAGE|SELECTION)|'
r'WITH\s+FRAME(\s+TITLE)|'
             # simple combinations
r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword),
# single word keywords.
(r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|'
r'ASSIGN(ING)?|AT(\s+FIRST)?|'
r'BACK|BLOCK|BREAK-POINT|'
r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|'
r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
r'DETAIL|DIRECTORY|DIVIDE|DO|'
r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|'
r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|'
r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|'
r'HIDE|'
r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
r'LENGTH|LINES|LOAD|LOCAL|'
r'JOIN|'
r'KEY|'
r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|'
r'NODES|'
r'OBLIGATORY|OF|OFF|ON|OVERLAY|'
r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|'
r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|'
r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|'
r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|'
r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|'
r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
r'ULINE|UNDER|UNPACK|UPDATE|USING|'
r'VALUE|VALUES|VIA|'
r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword),
# builtins
(r'(abs|acos|asin|atan|'
r'boolc|boolx|bit_set|'
r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
r'count|count_any_of|count_any_not_of|'
r'dbmaxlen|distance|'
r'escape|exp|'
r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
r'insert|'
r'lines|log|log10|'
r'match|matches|'
r'nmax|nmin|numofchar|'
r'repeat|replace|rescale|reverse|round|'
r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
r'substring|substring_after|substring_from|substring_before|substring_to|'
r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),
(r'&[0-9]', Name),
(r'[0-9]+', Number.Integer),
            # operators which look like variable names, matched before
            # variable names are parsed.
(r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator),
include('variable-names'),
            # standard operators after variable names,
            # because < and > are part of field symbols.
(r'[?*<>=\-+]', Operator),
(r"'(''|[^'])*'", String.Single),
(r'[/;:()\[\],\.]', Punctuation)
],
}
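# A minimal sketch (illustrative only):
#
#   abap = ABAPLexer()
#   list(abap.get_tokens("WRITE 'hello'.\n"))
#
# WRITE matches as a single-word keyword, 'hello' as a String.Single
# literal, and the closing period as Punctuation.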
class NewspeakLexer(RegexLexer):
"""
    For `Newspeak <http://newspeaklanguage.org/>`_ syntax.
"""
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root' : [
            (r'\b(Newsqueak2)\b', Keyword.Declaration),
            (r"'[^']*'", String),
            (r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)',
             bygroups(Keyword.Declaration, Text, Name.Class, Text)),
            (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
             Keyword),
            (r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            (r'([a-zA-Z0-9_]+)(\s*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
(r'<[a-zA-Z0-9_]+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
            (r':\w+', Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace' : [
(r'\s+', Text),
(r'"[^"]*"', Comment)
]
}
class GherkinLexer(RegexLexer):
"""
    For `Gherkin <http://cukes.info/>`_ syntax.
*New in Pygments 1.2.*
"""
name = 'Gherkin'
aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin']
filenames = ['*.feature']
mimetypes = ['text/x-gherkin']
feature_keywords_regexp = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функционалност|Функционал|Особина|Могућност|Özellik|Właściwość|Tính năng|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
scenario_keywords_regexp = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарио|Сценарий структураси|Сценарий|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Основа|Концепт|Контекст|Założenia|Tình huống|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
examples_regexp = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
step_keywords_regexp = ur'^(\s*)(하지만|조건|만일|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Унда |То |Онда |Но |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Агар |А |Și |És |anrhegedig a |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Yna |Ya know how |Ya gotta |Y |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Soit |Siis |Si |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Donat |Donada |Diyelim ki |Dengan |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |But y\'all |But |Biết |Bet |BUT |Atunci |And y\'all |And |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A )'
tokens = {
'comments': [
(r'#.*$', Comment),
],
'multiline_descriptions' : [
(step_keywords_regexp, Keyword, "#pop"),
include('comments'),
(r"(\s|.)", Name.Constant),
],
'multiline_descriptions_on_stack' : [
(step_keywords_regexp, Keyword, "#pop:2"),
include('comments'),
(r"(\s|.)", Name.Constant),
],
'scenario_table_description': [
(r"\s+\|", Text, 'scenario_table_header'),
include('comments'),
(r"(\s|.)", Name.Constant),
],
'scenario_table_header': [
(r"\s+\|\s*$", Text, "#pop:2"),
(r"(\s+\|\s*)(#.*)$", bygroups(Text, Comment), "#pop:2"),
include('comments'),
(r"\s+\|", Text),
(r"[^\|]", Name.Variable),
],
'scenario_sections_on_stack': [
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"multiline_descriptions_on_stack"),
],
'narrative': [
include('scenario_sections_on_stack'),
(r"(\s|.)", Name.Builtin),
],
'table_vars': [
(r'(<[^>]*>)', bygroups(Name.Variable)),
],
'string': [
include('table_vars'),
(r'(\s|.)', String),
],
'py_string': [
(r'"""', String, "#pop"),
include('string'),
],
'double_string': [
(r'"', String, "#pop"),
include('string'),
],
'single_string': [
(r"'", String, "#pop"),
include('string'),
],
'root': [
(r'\n', Text),
include('comments'),
(r'"""', String, "py_string"),
(r'"', String, "double_string"),
(r"'", String, "single_string"),
include('table_vars'),
(r'@[^@\s]+', Name.Namespace),
(step_keywords_regexp, bygroups(Text, Keyword)),
(feature_keywords_regexp,
bygroups(Name.Class, Name.Class, Name.Constant), 'narrative'),
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"multiline_descriptions"),
(examples_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"scenario_table_description"),
(r'(\s|.)', Text),
]
}
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
*New in Pygments 1.2.*
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
# Since an asy-type-name can be also an asy-function-name,
# in the following we test if the string " [a-zA-Z]" follows
# the Keyword.Type.
# Of course it is not perfect !
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type),
# Now the asy-type-name which are not asy-function-name
# except yours !
# Perhaps useless
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
('', Text, 'statement'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\n', String), # backslash-n escape sequence
(r'\\', String), # stray backslash
]
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
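# Usage sketch (not part of the lexer): the get_tokens_unprocessed()
# override above post-processes the token stream, promoting plain Name
# tokens to Name.Function / Name.Variable when they appear in the
# generated builtin lists. The input below is hypothetical:
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   asy = 'draw(circle((0,0), 1), red);'
#   print(highlight(asy, AsymptoteLexer(), TerminalFormatter()))
#
# `draw` and `circle` should come out as Name.Function, assuming they
# are listed in pygments.lexers._asybuiltins.ASYFUNCNAME.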
"""
pygments.lexers.other
~~~~~~~~~~~~~~~~~~~~~
Lexers for other languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, do_insertions
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
from pygments.lexers.web import HtmlLexer
__all__ = ['SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'BrainfuckLexer',
'BashLexer', 'BatchLexer', 'BefungeLexer', 'RedcodeLexer',
'MOOCodeLexer', 'SmalltalkLexer', 'TcshLexer', 'LogtalkLexer',
'GnuplotLexer', 'PovrayLexer', 'AppleScriptLexer',
'BashSessionLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer',
'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer']
line_re = re.compile('.*?\n')
class SqlLexer(RegexLexer):
"""
Lexer for Structured Query Language. Currently, this lexer does
not recognize any special syntax except ANSI SQL.
"""
name = 'SQL'
aliases = ['sql']
filenames = ['*.sql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
r'SYSTEM_USER|TABLE|TABLE_NAME|TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
(r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
Name.Builtin),
(r'[+*/<>=~!@#%^&|`?^-]', Operator),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
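# Usage sketch (assumed input, not part of the lexer): keywords match
# case-insensitively because of `flags = re.IGNORECASE` above.
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   sql = "select name from users where id = 42; -- lookup"
#   print(highlight(sql, SqlLexer(), HtmlFormatter()))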
class MySqlLexer(RegexLexer):
"""
Special lexer for MySQL.
"""
name = 'MySQL'
aliases = ['mysql']
mimetypes = ['text/x-mysql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#|--\s+).*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'[0-9]+', Number.Integer),
(r'[0-9]*\.[0-9]+(e[+-][0-9]+)?', Number.Float),
# TODO: add backslash escapes
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r"`(``|[^`])*`", String.Symbol),
(r'[+*/<>=~!@#%^&|`?^-]', Operator),
(r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
r'precision|real|numeric|dec|decimal|timestamp|year|char|'
r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
bygroups(Keyword.Type, Text, Punctuation)),
(r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
r'character|check|collate|column|condition|constraint|continue|'
r'convert|create|cross|current_date|current_time|'
r'current_timestamp|current_user|cursor|database|databases|'
r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
r'declare|default|delayed|delete|desc|describe|deterministic|'
r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
r'|for|force|foreign|from|fulltext|grant|group|having|'
r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
r'minute_microsecond|minute_second|mod|modifies|natural|'
r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
r'replace|require|restrict|return|revoke|right|rlike|schema|'
r'schemas|second_microsecond|select|sensitive|separator|set|'
r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
r'varying|when|where|while|with|write|x509|xor|year_month|'
r'zerofill)\b', Keyword),
(r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
(r'(true|false|null)', Name.Constant),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
*New in Pygments 0.11.*
"""
name = 'sqlite3con'
aliases = ['sqlite3']
filenames = ['*.sqlite3-console']
mimetypes = ['text/x-sqlite3-console']
def get_tokens_unprocessed(self, data):
sql = SqlLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(data):
line = match.group()
if line.startswith('sqlite> ') or line.startswith(' ...> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:8])]))
curcode += line[8:]
else:
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('SQL error: '):
yield (match.start(), Generic.Traceback, line)
else:
yield (match.start(), Generic.Output, line)
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
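# Sketch of the do_insertions() pattern used above: prompt tokens are
# spliced into the stream produced by the embedded SqlLexer, while
# non-prompt lines fall through as Generic.Output. Hypothetical session:
#
#   session = ("sqlite> SELECT 1\n"
#              "   ...> ;\n"
#              "1\n")
#   for idx, tok, val in SqliteConsoleLexer().get_tokens_unprocessed(session):
#       print(idx, tok, repr(val))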
class BrainfuckLexer(RegexLexer):
"""
Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_
language.
"""
name = 'Brainfuck'
aliases = ['brainfuck', 'bf']
filenames = ['*.bf', '*.b']
mimetypes = ['application/x-brainfuck']
tokens = {
'common': [
(r'[.,]+', Name.Tag),
(r'[+-]+', Name.Builtin),
(r'[<>]+', Name.Variable),
(r'[^.,+\-<>\[\]]+', Comment),
],
'root': [
(r'\[', Keyword, 'loop'),
(r'\]', Error),
include('common'),
],
'loop': [
(r'\[', Keyword, '#push'),
(r'\]', Keyword, '#pop'),
include('common'),
]
}
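# Sketch of the state stack at work in this lexer: '[' pushes another
# 'loop' state via '#push', ']' pops one level, and a ']' seen in
# 'root' (i.e. with no open loop) is tokenized as Error.
#
#   balanced = '++[>+[-]<-]'   # nested loops highlight cleanly
#   broken = '++]'             # the stray ']' is emitted as Error
#   tokens = list(BrainfuckLexer().get_tokens(broken))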
class BefungeLexer(RegexLexer):
"""
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
*New in Pygments 0.7.*
"""
name = 'Befunge'
aliases = ['befunge']
filenames = ['*.befunge']
mimetypes = ['application/x-befunge']
tokens = {
'root': [
(r'[0-9a-f]', Number),
(r'[\+\*/%!`-]', Operator),
(r'[<>^v?\[\]rxjk]', Name.Variable),
(r'[:\\$.,n]', Name.Builtin),
(r'[|_mw]', Keyword),
(r'[{}]', Name.Tag),
(r'".*?"', String.Double),
(r'\'.', String.Single),
(r'[#;]', Comment),
(r'[pg&~=@iotsy]', Keyword),
(r'[()A-Z]', Comment),
(r'\s+', Text),
],
}
class BashLexer(RegexLexer):
"""
Lexer for (ba)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
aliases = ['bash', 'sh']
filenames = ['*.sh', '*.ebuild', '*.eclass']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)\s*\b',
Keyword),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Text),
(r'\s+', Text),
(r'[^=\s\n\[\]{}()$"\'`\\<]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
return shebang_matches(text, r'(ba|z|)sh')
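# analyse_text() lets pygments.lexers.guess_lexer() pick this lexer for
# scripts whose shebang matches (ba|z|)sh. A minimal sketch:
#
#   from pygments.lexers import guess_lexer
#   script = "#!/bin/bash\necho hello\n"
#   print(guess_lexer(script).name)   # expected: 'Bash'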
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
*New in Pygments 1.1.*
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)?|\[\S+[@:]'
r'[^\n]+\].+)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
*New in Pygments 0.7.*
"""
name = 'Batchfile'
aliases = ['bat']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class RedcodeLexer(RegexLexer):
"""
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <blinks@acm.org>.
*New in Pygments 0.8.*
"""
name = 'Redcode'
aliases = ['redcode']
filenames = ['*.cw']
opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD',
'JMP','JMZ','JMN','DJN','CMP','SLT','SPL',
'ORG','EQU','END']
modifiers = ['A','B','AB','BA','F','X','I']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
(r';.*$', Comment.Single),
# Lexemes:
# Identifiers
(r'\b(%s)\b' % '|'.join(opcodes), Name.Function),
(r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator),
(r'[A-Za-z_][A-Za-z_0-9]+', Name),
# Operators
(r'[-+*/%]', Operator),
(r'[#$@<>]', Operator), # mode
(r'[.,]', Punctuation), # mode
# Numbers
(r'[-+]?\d+', Number.Integer),
],
}
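# The opcode and modifier rules above are assembled by joining the
# plain Python lists into one word-bounded alternation. The same idea
# in isolation (a sketch, not part of the lexer):
#
#   ops = ['DAT', 'MOV', 'ADD']
#   pattern = r'\b(%s)\b' % '|'.join(ops)   # -> '\\b(DAT|MOV|ADD)\\b'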
class MOOCodeLexer(RegexLexer):
"""
For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
language).
*New in Pygments 0.9.*
"""
name = 'MOOCode'
filenames = ['*.moo']
aliases = ['moocode']
mimetypes = ['text/x-moocode']
tokens = {
'root' : [
# Numbers
(r'(0|[1-9][0-9_]*)', Number.Integer),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# exceptions
(r'(E_PERM|E_DIV)', Name.Exception),
# db-refs
(r'((
# Keywords
(r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
r'|endwhile|break|continue|return|try'
r'|except|endtry|finally|in)\b', Keyword),
# builtins
(r'(random|length)', Name.Builtin),
# special variables
(r'(player|caller|this|args)', Name.Variable.Instance),
# skip whitespace
(r'\s+', Text),
(r'\n', Text),
# other operators
(r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator),
# function call
(r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)),
# variables
(r'([a-zA-Z_0-9]+)', Text),
]
}
class SmalltalkLexer(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
*New in Pygments 0.10.*
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak']
mimetypes = ['text/x-smalltalk']
tokens = {
'root' : [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition' : [
# Not perfect: can't allow whitespace at the beginning without
# breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables' : [
include('whitespaces'),
(r'(:)(\s*)([A-Za-z\w]+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
(r'', Text, '#pop'),
],
'literals' : [
(r'\'[^\']*\'', String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper' : [
include('whitespaces'),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
# literals
(r'\'[^\']*\'', String),
(r'\$.', String.Char),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth' : [
(r'\)', String.Symbol, ('root','afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces' : [
# skip whitespace and comments
(r'\s+', Text),
(r'"[^"]*"', Comment),
],
'objects' : [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("[^"]*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject' : [
(r'! !$', Keyword, '#pop'),
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout' : [
# Squeak fileout format (optional)
(r'^"[^"]*"!', Keyword),
(r"^'[^']*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r'^(!)(\w+(?: class)?)( methodsFor: )(\'[^\']*\')(.*?!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
*New in Pygments 0.10.*
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|'
r'fg|filetest|getxvers|glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|set|shift|'
r'sched|setenv|setpath|settc|setty|setxvers|shift|source|stop|suspend|'
r'source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\n\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
*New in Pygments 0.10.*
"""
name = 'Logtalk'
aliases = ['logtalk']
filenames = ['*.lgt']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
(r'^\s*:-\s',Punctuation,'directive'),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/',Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number),
(r'0o[0-7]+', Number),
(r'0x[0-9a-fA-F]+', Number),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
(r'(after|before)(?=[(])', Keyword),
(r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])',
Keyword),
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])',
Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
(r'complements_object(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
(r'(current|set)_logtalk_flag(?=[(])', Keyword),
(r'logtalk_(compile|l(ibrary_path|oad))(?=[(])', Keyword),
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fail|true)\b', Keyword),
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
(r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])',
Keyword),
(r'unify_with_occurs_check(?=[(])', Keyword),
(r'(functor|arg|copy_term)(?=[(])', Keyword),
(r'(rem|mod|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
(r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword),
(r'(var|atom(ic)?|integer|float|compound|n(onvar|umber))(?=[(])',
Keyword),
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])',
Keyword),
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
(r'(::|:|\^\^)', Operator),
(r'[{}]', Keyword),
(r'\bonce(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
(r'(>>|<<|/\\|\\\\|\\)', Operator),
(r'\bis\b', Keyword),
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
(r'=\.\.', Operator),
(r'(=|\\=)', Operator),
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
(r'(//|[-+*/])', Operator),
(r'\b(mod|rem)\b', Operator),
(r'\b\*\*\b', Operator),
(r'-->', Operator),
(r'([!;]|->)', Operator),
(r'\\+', Operator),
(r'[?@]', Operator),
(r'"(\\\\|\\"|[^"])*"', String),
(r'[()\[\],.|]', Text),
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
],
'quoted_atom': [
(r"['][']", String),
(r"[']", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))[.]', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
(r'(end_(category|object|protocol))[.]',Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(fo|itialization)(?=[(])', Keyword, 'root'),
(r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|'
r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(calls|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
(r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'),
],
'entityrelations': [
(r'(extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])',
Keyword),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number),
(r'0o[0-7]+', Number),
(r'0x[0-9a-fA-F]+', Number),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
# Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/',Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return True
if ':- protocol(' in text:
return True
if ':- category(' in text:
return True
return False
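# analyse_text() is a plain substring probe here: any of the three
# entity-opening directives identifies the text as Logtalk. Sketch of
# the effect on lexer guessing (assumed snippet):
#
#   from pygments.lexers import guess_lexer
#   src = ":- object(hello).\n:- end_object.\n"
#   print(guess_lexer(src).name)   # expected: 'Logtalk'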
def _shortened(word):
dpos = word.find('$')
return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1)])
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
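# _shortened() encodes gnuplot's abbreviation rule: every character
# after the '$' is optional, dropped one at a time from the right. For
# example (sketch):
#
#   _shortened('bi$nd')
#   # -> r'bind\b|bin\b|bi\b'
#   _shortened_many('ex$it', 'q$uit')
#   # -> r'exit\b|exi\b|ex\b|quit\b|qui\b|qu\b|q\b'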
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
*New in Pygments 0.11.*
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
('', Comment, '#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String),
(r'\n', String, '#pop'),
],
'sqstring': [
(r"''", String),
(r"'", String, '
(r"[^\\'\n]+", String),
(r'\\\n', String),
(r'\\', String),
(r'\n', String, '#pop'),
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
('[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der",
"box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta",
"data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign",
"fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid",
"hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle",
"la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale",
"mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin",
"rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot",
"mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics",
"nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics",
"mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput",
"pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot",
"poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze",
"st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs",
"ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le",
"v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta",
"yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel",
"yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs",
"x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs",
"zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs",
"x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs",
"noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs",
"xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs",
"noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs",
"cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange",
"y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange",
"vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis",
"zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
*New in Pygments 0.11.*
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(r'#(debug|default|else|end|fclose|fopen|if|ifdef|ifndef|'
r'include|range|read|render|statistics|switch|undef|version|'
r'warning|while|write|define|macro|local|declare)',
Comment.Preproc),
(r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|'
r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|'
r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|'
r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|'
r'attenuating|average|background|black_hole|blue|blur_samples|'
r'bounded_by|box_mapping|bozo|break|brick|brick_size|'
r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|'
r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|'
r'color|color_map|colour|colour_map|component|composite|concat|'
r'confidence|conic_sweep|constant|control0|control1|cos|cosh|'
r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|'
r'debug|declare|default|degrees|dents|diffuse|direction|'
r'distance|distance_maximum|div|dust|dust_type|eccentricity|'
r'else|emitting|end|error|error_bound|exp|exponent|'
r'fade_distance|fade_power|falloff|falloff_angle|false|'
r'file_exists|filter|finish|fisheye|flatness|flip|floor|'
r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|'
r'global_settings|glowing|gradient|granite|gray_threshold|'
r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|'
r'if|ifdef|iff|image_map|incidence|include|int|interpolate|'
r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|'
r'linear|linear_spline|linear_sweep|location|log|looks_like|'
r'look_at|low_error_factor|mandel|map_type|marble|material_map|'
r'matrix|max|max_intersections|max_iteration|max_trace_level|'
r'max_value|metallic|min|minimum_reuse|mod|mortar|'
r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|'
r'octaves|off|offset|omega|omnimax|on|once|onion|open|'
r'orthographic|panoramic|pattern1|pattern2|pattern3|'
r'perspective|pgm|phase|phong|phong_size|pi|pigment|'
r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|'
r'precision|pwr|quadratic_spline|quaternion|quick_color|'
r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|'
r'ramp_wave|rand|range|reciprocal|recursion_limit|red|'
r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|'
r'right|ripples|rotate|roughness|samples|scale|scallop_wave|'
r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|'
r'slice|slope_map|smooth|specular|spherical_mapping|spiral|'
r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|'
r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|'
r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|'
r'test_camera_4|texture|texture_map|tga|thickness|threshold|'
r'tightness|tile2|tiles|track|transform|translate|transmit|'
r'triangle_wave|true|ttf|turbulence|turb_depth|type|'
r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|'
r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|'
r'vnormalize|volume_object|volume_rendered|vol_with_light|'
r'vrotate|v_steps|warning|warp|water_level|waves|while|width|'
r'wood|wrinkles|yes)\b', Keyword),
(r'bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|'
r'disc|height_field|intersection|julia_fractal|lathe|'
r'light_source|merge|mesh|object|plane|poly|polygon|prism|'
r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|'
r'text|torus|triangle|union', Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\s+', Text),
]
}
class AppleScriptLexer(RegexLexer):
"""
For `AppleScript source code
<http://developer.apple.com/documentation/AppleScript/
Conceptual/AppleScriptLangGuide>`_,
including `AppleScript Studio
<http://developer.apple.com/documentation/AppleScript/
Reference/StudioReference>`_.
Contributed by Andreas Amann <aamann@mac.com>.
"""
name = 'AppleScript'
aliases = ['applescript']
filenames = ['*.applescript']
flags = re.MULTILINE | re.DOTALL
Identifiers = r'[a-zA-Z]\w*'
Literals = ['AppleScript', 'current application', 'false', 'linefeed',
'missing value', 'pi','quote', 'result', 'return', 'space',
'tab', 'text item delimiters', 'true', 'version']
Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ',
'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
'real ', 'record ', 'reference ', 'RGB color ', 'script ',
'text ', 'unit types', '(Unicode )?text', 'string']
BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month',
'paragraph', 'word', 'year']
HandlerParams = ['about', 'above', 'against', 'apart from', 'around',
'aside from', 'at', 'below', 'beneath', 'beside',
'between', 'for', 'given', 'instead of', 'on', 'onto',
'out of', 'over', 'since']
Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL',
'choose application', 'choose color', 'choose file( name)?',
'choose folder', 'choose from list',
'choose remote application', 'clipboard info',
'close( access)?', 'copy', 'count', 'current date', 'delay',
'delete', 'display (alert|dialog)', 'do shell script',
'duplicate', 'exists', 'get eof', 'get volume settings',
'info for', 'launch', 'list (disks|folder)', 'load script',
'log', 'make', 'mount volume', 'new', 'offset',
'open( (for access|location))?', 'path to', 'print', 'quit',
'random number', 'read', 'round', 'run( script)?',
'say', 'scripting components',
'set (eof|the clipboard to|volume)', 'store script',
'summarize', 'system attribute', 'system info',
'the clipboard', 'time to GMT', 'write', 'quoted form']
References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
'before', 'behind', 'every', 'front', 'index', 'last',
'middle', 'some', 'that', 'through', 'thru', 'where', 'whose']
Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not",
"isn't", "isn't equal( to)?", "is not equal( to)?",
"doesn't equal", "does not equal", "(is )?greater than",
"comes after", "is not less than or equal( to)?",
"isn't less than or equal( to)?", "(is )?less than",
"comes before", "is not greater than or equal( to)?",
"isn't greater than or equal( to)?",
"(is )?greater than or equal( to)?", "is not less than",
"isn't less than", "does not come before",
"doesn't come before", "(is )?less than or equal( to)?",
"is not greater than", "isn't greater than",
"does not come after", "doesn't come after", "starts? with",
"begins? with", "ends? with", "contains?", "does not contain",
"doesn't contain", "is in", "is contained by", "is not in",
"is not contained by", "isn't contained by", "div", "mod",
"not", "(a )?(ref( to)?|reference to)", "is", "does"]
Control = ['considering', 'else', 'error', 'exit', 'from', 'if',
'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
'try', 'until', 'using terms from', 'while', 'whith',
'with timeout( of)?', 'with transaction', 'by', 'continue',
'end', 'its?', 'me', 'my', 'return', 'of' , 'as']
Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get']
Reserved = ['but', 'put', 'returning', 'the']
StudioClasses = ['action cell', 'alert reply', 'application', 'box',
'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
'clip view', 'color well', 'color-panel',
'combo box( item)?', 'control',
'data( (cell|column|item|row|source))?', 'default entry',
'dialog reply', 'document', 'drag info', 'drawer',
'event', 'font(-panel)?', 'formatter',
'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
'movie( view)?', 'open-panel', 'outline view', 'panel',
'pasteboard', 'plugin', 'popup button',
'progress indicator', 'responder', 'save-panel',
'scroll view', 'secure text field( cell)?', 'slider',
'sound', 'split view', 'stepper', 'tab view( item)?',
'table( (column|header cell|header view|view))',
'text( (field( cell)?|view))?', 'toolbar( item)?',
'user-defaults', 'view', 'window']
StudioEvents = ['accept outline drop', 'accept table drop', 'action',
'activated', 'alert ended', 'awake from nib', 'became key',
'became main', 'begin editing', 'bounds changed',
'cell value', 'cell value changed', 'change cell value',
'change item value', 'changed', 'child of item',
'choose menu item', 'clicked', 'clicked toolbar item',
'closed', 'column clicked', 'column moved',
'column resized', 'conclude drop', 'data representation',
'deminiaturized', 'dialog ended', 'document nib name',
'double clicked', 'drag( (entered|exited|updated))?',
'drop', 'end editing', 'exposed', 'idle', 'item expandable',
'item value', 'item value changed', 'items changed',
'keyboard down', 'keyboard up', 'launched',
'load data representation', 'miniaturized', 'mouse down',
'mouse dragged', 'mouse entered', 'mouse exited',
'mouse moved', 'mouse up', 'moved',
'number of browser rows', 'number of items',
'number of rows', 'open untitled', 'opened', 'panel ended',
'parameters updated', 'plugin loaded', 'prepare drop',
'prepare outline drag', 'prepare outline drop',
'prepare table drag', 'prepare table drop',
'read from file', 'resigned active', 'resigned key',
'resigned main', 'resized( sub views)?',
'right mouse down', 'right mouse dragged',
'right mouse up', 'rows changed', 'scroll wheel',
'selected tab view item', 'selection changed',
'selection changing', 'should begin editing',
'should close', 'should collapse item',
'should end editing', 'should expand item',
'should open( untitled)?',
'should quit( after last window closed)?',
'should select column', 'should select item',
'should select row', 'should select tab view item',
'should selection change', 'should zoom', 'shown',
'update menu item', 'update parameters',
'update toolbar item', 'was hidden', 'was miniaturized',
'will become active', 'will close', 'will dismiss',
'will display browser cell', 'will display cell',
'will display item cell', 'will display outline cell',
'will finish launching', 'will hide', 'will miniaturize',
'will move', 'will open', 'will pop up', 'will quit',
'will resign active', 'will resize( sub views)?',
'will select tab view item', 'will show', 'will zoom',
'write to file', 'zoomed']
StudioCommands = ['animate', 'append', 'call method', 'center',
'close drawer', 'close panel', 'display',
'display alert', 'display dialog', 'display panel', 'go',
'hide', 'highlight', 'increment', 'item for',
'load image', 'load movie', 'load nib', 'load panel',
'load sound', 'localized string', 'lock focus', 'log',
'open drawer', 'path for', 'pause', 'perform action',
'play', 'register', 'resume', 'scroll', 'select( all)?',
'show', 'size to fit', 'start', 'step back',
'step forward', 'stop', 'synchronize', 'unlock focus',
'update']
StudioProperties = ['accepts arrow key', 'action method', 'active',
'alignment', 'allowed identifiers',
'allows branch selection', 'allows column reordering',
'allows column resizing', 'allows column selection',
'allows customization',
'allows editing text attributes',
'allows empty selection', 'allows mixed state',
'allows multiple selection', 'allows reordering',
'allows undo', 'alpha( value)?', 'alternate image',
'alternate increment value', 'alternate title',
'animation delay', 'associated file name',
'associated object', 'auto completes', 'auto display',
'auto enables items', 'auto repeat',
'auto resizes( outline column)?',
'auto save expanded items', 'auto save name',
'auto save table columns', 'auto saves configuration',
'auto scroll', 'auto sizes all columns to fit',
'auto sizes cells', 'background color', 'bezel state',
'bezel style', 'bezeled', 'border rect', 'border type',
'bordered', 'bounds( rotation)?', 'box type',
'button returned', 'button type',
'can choose directories', 'can choose files',
'can draw', 'can hide',
'cell( (background color|size|type))?', 'characters',
'class', 'click count', 'clicked( data)? column',
'clicked data item', 'clicked( data)? row',
'closeable', 'collating', 'color( (mode|panel))',
'command key down', 'configuration',
'content(s| (size|view( margins)?))?', 'context',
'continuous', 'control key down', 'control size',
'control tint', 'control view',
'controller visible', 'coordinate system',
'copies( on scroll)?', 'corner view', 'current cell',
'current column', 'current( field)? editor',
'current( menu)? item', 'current row',
'current tab view item', 'data source',
'default identifiers', 'delta (x|y|z)',
'destination window', 'directory', 'display mode',
'displayed cell', 'document( (edited|rect|view))?',
'double value', 'dragged column', 'dragged distance',
'dragged items', 'draws( cell)? background',
'draws grid', 'dynamically scrolls', 'echos bullets',
'edge', 'editable', 'edited( data)? column',
'edited data item', 'edited( data)? row', 'enabled',
'enclosing scroll view', 'ending page',
'error handling', 'event number', 'event type',
'excluded from windows menu', 'executable path',
'expanded', 'fax number', 'field editor', 'file kind',
'file name', 'file type', 'first responder',
'first visible column', 'flipped', 'floating',
'font( panel)?', 'formatter', 'frameworks path',
'frontmost', 'gave up', 'grid color', 'has data items',
'has horizontal ruler', 'has horizontal scroller',
'has parent data item', 'has resize indicator',
'has shadow', 'has sub menu', 'has vertical ruler',
'has vertical scroller', 'header cell', 'header view',
'hidden', 'hides when deactivated', 'highlights by',
'horizontal line scroll', 'horizontal page scroll',
'horizontal ruler view', 'horizontally resizable',
'icon image', 'id', 'identifier',
'ignores multiple clicks',
'image( (alignment|dims when disabled|frame style|'
'scaling))?',
'imports graphics', 'increment value',
'indentation per level', 'indeterminate', 'index',
'integer value', 'intercell spacing', 'item height',
'key( (code|equivalent( modifier)?|window))?',
'knob thickness', 'label', 'last( visible)? column',
'leading offset', 'leaf', 'level', 'line scroll',
'loaded', 'localized sort', 'location', 'loop mode',
'main( (bundle|menu|window))?', 'marker follows cell',
'matrix mode', 'maximum( content)? size',
'maximum visible columns',
'menu( form representation)?', 'miniaturizable',
'miniaturized', 'minimized image', 'minimized title',
'minimum column width', 'minimum( content)? size',
'modal', 'modified', 'mouse down state',
'movie( (controller|file|rect))?', 'muted', 'name',
'needs display', 'next state', 'next text',
'number of tick marks', 'only tick mark values',
'opaque', 'open panel', 'option key down',
'outline table column', 'page scroll', 'pages across',
'pages down', 'palette label', 'pane splitter',
'parent data item', 'parent window', 'pasteboard',
'path( (names|separator))?', 'playing',
'plays every frame', 'plays selection only', 'position',
'preferred edge', 'preferred type', 'pressure',
'previous text', 'prompt', 'properties',
'prototype cell', 'pulls down', 'rate',
'released when closed', 'repeated',
'requested print time', 'required file type',
'resizable', 'resized column', 'resource path',
'returns records', 'reuses columns', 'rich text',
'roll over', 'row height', 'rulers visible',
'save panel', 'scripts path', 'scrollable',
'selectable( identifiers)?', 'selected cell',
'selected( data)? columns?', 'selected data items?',
'selected( data)? rows?', 'selected item identifier',
'selection by rect', 'send action on arrow key',
'sends action when done editing', 'separates columns',
'separator item', 'sequence number', 'services menu',
'shared frameworks path', 'shared support path',
'sheet', 'shift key down', 'shows alpha',
'shows state by', 'size( mode)?',
'smart insert delete enabled', 'sort case sensitivity',
'sort column', 'sort order', 'sort type',
'sorted( data rows)?', 'sound', 'source( mask)?',
'spell checking enabled', 'starting page', 'state',
'string value', 'sub menu', 'super menu', 'super view',
'tab key traverses cells', 'tab state', 'tab type',
'tab view', 'table view', 'tag', 'target( printer)?',
'text color', 'text container insert',
'text container origin', 'text returned',
'tick mark position', 'time stamp',
'title(d| (cell|font|height|position|rect))?',
'tool tip', 'toolbar', 'trailing offset', 'transparent',
'treat packages as directories', 'truncated labels',
'types', 'unmodified characters', 'update views',
'use sort indicator', 'user defaults',
'uses data source', 'uses ruler',
'uses threaded animation',
'uses title from previous column', 'value wraps',
'version',
'vertical( (line scroll|page scroll|ruler view))?',
'vertically resizable', 'view',
'visible( document rect)?', 'volume', 'width', 'window',
'windows menu', 'wraps', 'zoomable', 'zoomed']
tokens = {
'root': [
(r'\s+', Text),
(ur'¬\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[\(\){}!,.:]', Punctuation),
(ur'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
(ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents),
bygroups(Keyword, Name.Function)),
(r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
(r'\b(as )(%s)\b' % '|'.join(Classes),
bygroups(Keyword, Name.Class)),
(r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
(r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(Control), Keyword),
(r'\b(%s)\b' % '|'.join(Declarations), Keyword),
(r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
(r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
(r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
(r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(References), Name.Builtin),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r'\b(%s)\b' % Identifiers, Name.Variable),
(r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
(r'[-+]?\d+', Number.Integer),
],
'comment': [
            ('\(\*', Comment.Multiline, '#push'),
            ('\*\)', Comment.Multiline, '#pop'),
('[^*(]+', Comment.Multiline),
('[*(]', Comment.Multiline),
],
}
class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
*New in Pygments 1.1.*
"""
name = 'Modelica'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
(r'"', String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\]{},.;]', Punctuation),
(r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
(r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
(r"('[\w\+\-\*\/\^]+'|\w+)", Name) ],
'root': [
include('whitespace'),
include('keywords'),
include('functions'),
include('operators'),
include('classes'),
(r'("<html>|<html>)', Name.Tag, 'html-content'),
include('statements')
],
'keywords': [
(r'(algorithm|annotation|break|connect|constant|constrainedby|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'end|equation|exit|expandable|extends|'
r'external|false|final|flow|for|if|import|in|inner|input|'
r'loop|nondiscrete|outer|output|parameter|partial|'
r'protected|public|redeclare|replaceable|stream|time|then|true|'
r'when|while|within)\b', Keyword)
],
'functions': [
(r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|'
r'cross|div|exp|floor|log|log10|mod|rem|sign|sin|sinh|size|'
r'sqrt|tan|tanh|zeros)\b', Name.Function)
],
'operators': [
(r'(and|assert|cardinality|change|delay|der|edge|initial|'
r'noEvent|not|or|pre|reinit|return|sample|smooth|'
r'terminal|terminate)\b', Name.Builtin)
],
'classes': [
(r'(block|class|connector|function|model|package|'
r'record|type)\b', Name.Class)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})',
String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String) # stray backslash
],
'html-content': [
(r'<\s*/\s*html\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)),
]
}
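# A hedged usage sketch for the lexer above: `highlight` and `HtmlFormatter`
# are the standard pygments entry points; the Modelica snippet is invented
# purely for illustration.
def _demo_modelica_lexer():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    src = "model M Real x; equation der(x) = -x; end M; // first-order decay"
    return highlight(src, ModelicaLexer(), HtmlFormatter())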
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
*New in Pygments 1.1.*
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3']
mimetypes = ['text/x-rebol']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
elif re.match(".*\?$", word):
yield match.start(), Keyword, word
elif re.match(".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
            (r'#{[0-9a-fA-F]*}', Number.Hex),
            (r'2#{', Number.Hex, 'bin2'),
            (r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex),
(r'"', String, 'string'),
(r'{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(\^{^")\s\[\]]+', Name.Decorator),
(r'<[a-zA-Z0-9:._-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+[xX]\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]\(\)]', Generic.Strong),
(r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator),
(r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator),
(r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator),
(r'comment\s', Comment, 'comment'),
(r'/[^(\^{^")\s/[\]]*', Name.Attribute),
(r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'([^(\^{^")\s]+)', Text),
],
'string': [
(r'[^(\^")]+', String),
(escape_re, String.Escape),
(r'[\(|\)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(\^{^})]+', String),
(escape_re, String.Escape),
(r'[\(|\)]+', String),
(r'\^.', String.Escape),
(r'{', String, '#push'),
(r'}', String, '#pop'),
],
'stringFile': [
(r'[^(\^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(\^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[\(|\)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([0-1]\s*){8}', Number.Hex),
(r'}', Number.Hex, '#pop'),
],
'comment': [
(r'"', Comment, 'commentString1'),
(r'{', Comment, 'commentString2'),
(r'\[', Comment, 'commentBlock'),
(r'[^(\s{\"\[]+', Comment, '#pop'),
],
'commentString1': [
(r'[^(\^")]+', Comment),
(escape_re, Comment),
(r'[\(|\)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(\^{^})]+', Comment),
(escape_re, Comment),
(r'[\(|\)]+', Comment),
(r'\^.', Comment),
(r'{', Comment, '#push'),
(r'}', Comment, '#pop'),
],
'commentBlock': [
(r'\[',Comment, '#push'),
(r'\]',Comment, '#pop'),
(r'[^(\[\])]*', Comment),
],
}
class ABAPLexer(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
*New in Pygments 1.1.*
"""
name = 'ABAP'
aliases = ['abap']
filenames = ['*.abap']
mimetypes = ['text/x-abap']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'common': [
(r'\s+', Text),
(r'^\*.*$', Comment.Single),
(r'\".*?\n', Comment.Single),
],
'variable-names': [
(r'<[\S_]+>', Name.Variable),
(r'[\w][\w_~]*(?:(\[\])|->\*)?', Name.Variable),
],
'root': [
include('common'),
#function calls
(r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)',
bygroups(Keyword, Text, Name.Function)),
(r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
r'TRANSACTION|TRANSFORMATION))\b',
Keyword),
(r'(FORM|PERFORM)(\s+)([\w_]+)',
bygroups(Keyword, Text, Name.Function)),
(r'(PERFORM)(\s+)(\()([\w_]+)(\))',
bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )),
(r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
bygroups(Keyword, Text, Name.Function, Text, Keyword)),
# method implementation
(r'(METHOD)(\s+)([\w_~]+)',
bygroups(Keyword, Text, Name.Function)),
# method calls
(r'(\s+)([\w_\-]+)([=\-]>)([\w_\-~]+)',
bygroups(Text, Name.Variable, Operator, Name.Function)),
# call methodnames returning style
(r'(?<=(=|-)>)([\w_\-~]+)(?=\()', Name.Function),
# keywords with dashes in them.
# these need to be first, because for instance the -ID part
# of MESSAGE-ID wouldn't get highlighted if MESSAGE was
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
r'INTERFACE-POOL|INVERTED-DATE|'
r'LOAD-OF-PROGRAM|LOG-POINT|'
r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
r'OUTPUT-LENGTH|PRINT-CONTROL|'
r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
r'TYPE-POOL|TYPE-POOLS'
r')\b', Keyword),
(r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|'
r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|'
r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
r'FREE\s(MEMORY|OBJECT)?|'
r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
r'SKIP|ULINE)|'
r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
r'TO LIST-PROCESSING|TO TRANSACTION)'
r'(ENDING|STARTING)\s+AT|'
r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
r'(BEGIN|END)\s+OF|'
r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
r'COMPARING(\s+ALL\s+FIELDS)?|'
r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|'
r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
r'END-OF-(DEFINITION|PAGE|SELECTION)|'
r'WITH\s+FRAME(\s+TITLE)|'
r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword),
(r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|'
r'ASSIGN(ING)?|AT(\s+FIRST)?|'
r'BACK|BLOCK|BREAK-POINT|'
r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|'
r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
r'DETAIL|DIRECTORY|DIVIDE|DO|'
r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|'
r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|'
r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|'
r'HIDE|'
r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
r'LENGTH|LINES|LOAD|LOCAL|'
r'JOIN|'
r'KEY|'
r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|'
r'NODES|'
r'OBLIGATORY|OF|OFF|ON|OVERLAY|'
r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|'
r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|'
r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|'
r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|'
r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|'
r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
r'ULINE|UNDER|UNPACK|UPDATE|USING|'
r'VALUE|VALUES|VIA|'
r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword),
(r'(abs|acos|asin|atan|'
r'boolc|boolx|bit_set|'
r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
r'count|count_any_of|count_any_not_of|'
r'dbmaxlen|distance|'
r'escape|exp|'
r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
r'insert|'
r'lines|log|log10|'
r'match|matches|'
r'nmax|nmin|numofchar|'
r'repeat|replace|rescale|reverse|round|'
r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
r'substring|substring_after|substring_from|substring_before|substring_to|'
r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),
(r'&[0-9]', Name),
(r'[0-9]+', Number.Integer),
(r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator),
include('variable-names'),
(r'[?*<>=\-+]', Operator),
(r"'(''|[^'])*'", String.Single),
(r'[/;:()\[\],\.]', Punctuation)
],
}
class NewspeakLexer(RegexLexer):
"""
For `Newspeak <http://newspeaklanguage.org/>` syntax.
"""
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root' : [
(r'\b(Newsqueak2)\b',Keyword.Declaration),
(r"'[^']*'",String),
(r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)',
bygroups(Keyword.Declaration,Text,Name.Class,Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function,Text,Name.Variable)),
(r'([a-zA-Z0-9_]+)(\s*)(=)',
bygroups(Name.Attribute,Text,Operator)),
(r'<[a-zA-Z0-9_]+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+',Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace' : [
(r'\s+', Text),
(r'"[^"]*"', Comment)
]
}
class GherkinLexer(RegexLexer):
"""
For `Gherkin <http://cukes.info/>` syntax.
*New in Pygments 1.2.*
"""
name = 'Gherkin'
aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin']
filenames = ['*.feature']
mimetypes = ['text/x-gherkin']
feature_keywords_regexp = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функционалност|Функционал|Особина|Могућност|Özellik|Właściwość|Tính năng|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
scenario_keywords_regexp = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарио|Сценарий структураси|Сценарий|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Основа|Концепт|Контекст|Założenia|Tình huống|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
examples_regexp = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
step_keywords_regexp = ur'^(\s*)(하지만|조건|만일|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Унда |То |Онда |Но |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Агар |А |Și |És |anrhegedig a |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Yna |Ya know how |Ya gotta |Y |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Soit |Siis |Si |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Donat |Donada |Diyelim ki |Dengan |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |But y\'all |But |Biết |Bet |BUT |Atunci |And y\'all |And |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A )'
tokens = {
'comments': [
(r'#.*$', Comment),
],
'multiline_descriptions' : [
(step_keywords_regexp, Keyword, "
include('comments'),
(r"(\s|.)", Name.Constant),
],
'multiline_descriptions_on_stack' : [
(step_keywords_regexp, Keyword, "
include('comments'),
(r"(\s|.)", Name.Constant),
],
'scenario_table_description': [
(r"\s+\|", Text, 'scenario_table_header'),
include('comments'),
(r"(\s|.)", Name.Constant),
],
'scenario_table_header': [
(r"\s+\|\s*$", Text, "
(r"(\s+\|\s*)( include('comments'),
(r"\s+\|", Text),
(r"[^\|]", Name.Variable),
],
'scenario_sections_on_stack': [
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"multiline_descriptions_on_stack"),
],
'narrative': [
include('scenario_sections_on_stack'),
(r"(\s|.)", Name.Builtin),
],
'table_vars': [
(r'(<[^>]*>)', bygroups(Name.Variable)),
],
'string': [
include('table_vars'),
(r'(\s|.)', String),
],
'py_string': [
(r'"""', String, "#pop"),
include('string'),
],
'double_string': [
(r'"', String, "#pop"),
include('string'),
],
'single_string': [
(r"'", String, "#pop"),
include('string'),
],
'root': [
(r'\n', Text),
include('comments'),
(r'"""', String, "py_string"),
(r'"', String, "double_string"),
(r"'", String, "single_string"),
include('table_vars'),
(r'@[^@\s]+', Name.Namespace),
(step_keywords_regexp, bygroups(Text, Keyword)),
(feature_keywords_regexp,
bygroups(Name.Class, Name.Class, Name.Constant), 'narrative'),
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"multiline_descriptions"),
(examples_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"scenario_table_description"),
(r'(\s|.)', Text),
]
}
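# Hedged smoke test for the Gherkin lexer: feed it a tiny invented feature
# and collect the (token type, text) pairs via the standard get_tokens API.
def _demo_gherkin_tokens():
    src = u"Feature: demo\n  Scenario: one\n    Given a thing\n"
    return list(GherkinLexer().get_tokens(src))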
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
*New in Pygments 1.2.*
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
# Since an asy-type-name can be also an asy-function-name,
# in the following we test if the string " [a-zA-Z]" follows
# the Keyword.Type.
# Of course it is not perfect !
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type),
# Now the asy-type-name which are not asy-function-name
# except yours !
# Perhaps useless
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
('', Text, 'statement'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String),
(r'\\\n', String),
(r'\\n', String),
(r'\\', String),
]
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
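# Hedged check of the remapping above: identifiers listed in ASYFUNCNAME or
# ASYVARNAME should come back as Name.Function or Name.Variable. The source
# line below is invented for the demo.
def _demo_asymptote_remap():
    lexer = AsymptoteLexer()
    return [(token, value) for _, token, value in
            lexer.get_tokens_unprocessed('real x = sqrt(2);')]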
| false
| true
|
790244b29d3d1a6a01cad804aec2c352548d496c
| 2,002
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_data_labels17.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_data_labels17.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_data_labels17.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_data_labels17.xlsx')
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'stock'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [45740032, 45747200]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column('A:D', 11)
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$D$1:$D$5',
'data_labels': {'value': 1, 'position': 'right'},
})
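        # For reference, 'value' and 'position' are only two of the data-label
        # keys add_series() accepts; options such as 'category', 'series_name',
        # 'num_format' and 'font' also exist (see the XlsxWriter chart docs).
        # Only the two above are exercised by this test.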
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 27.805556
| 79
| 0.535964
| true
| true
|
|
79024601d0939056bfc64b440ae6a7b12adbad9f
| 7,957
|
py
|
Python
|
hard-gists/9c4d012d6fff059ccea7/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/9c4d012d6fff059ccea7/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/9c4d012d6fff059ccea7/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
# -*- coding: utf-8 -*-
#
# Author: oldj
# Email: oldj.wu@gmail.com
# Blog: http://oldj.net
#
import os
import re
import StringIO
from PIL import Image
from PIL import ImageDraw
import pygame
g_script_folder = os.path.dirname(os.path.abspath(__file__))
g_fonts_folder = os.path.join(g_script_folder, "fonts")
g_re_first_word = re.compile((u""
+ u"(%(prefix)s+\S%(postfix)s+)" # 标点
+ u"|(%(prefix)s*\w+%(postfix)s*)" # 单词
+ u"|(%(prefix)s+\S)|(\S%(postfix)s+)" # 标点
+ u"|(\d+%%)" # 百分数
) % {
"prefix": u"['\"\(<\[\{‘“(《「『]",
"postfix": u"[:'\"\)>\]\}:’”)》」』,;\.\?!,、;。?!]",
})
pygame.init()
def getFontForPyGame(font_name="wqy-zenhei.ttc", font_size=14):
return pygame.font.Font(os.path.join(g_fonts_folder, font_name), font_size)
def makeConfig(cfg=None):
if not cfg or type(cfg) != dict:
cfg = {}
default_cfg = {
"width": 440, # px
"padding": (15, 18, 20, 18),
"line-height": 20, #px
"title-line-height": 32, #px
"font-size": 14, # px
"title-font-size": 24, # px
"font-family": "wqy-zenhei.ttc",
# "font-family": "msyh.ttf",
"font-color": (0, 0, 0),
"font-antialiasing": True, # 字体是否反锯齿
"background-color": (255, 255, 255),
"border-size": 1,
"border-color": (192, 192, 192),
"copyright": u"本图文由 txt2.im 自动生成,但不代表 txt2.im 赞同其内容或立场。",
"copyright-center": False, # 版权信息居中显示,如为 False 则居左显示
"first-line-as-title": True,
"break-word": False,
}
default_cfg.update(cfg)
return default_cfg
def makeLineToWordsList(line, break_word=False):
u"""将一行文本转为单词列表"""
if break_word:
return [c for c in line]
lst = []
while line:
ro = g_re_first_word.match(line)
end = 1 if not ro else ro.end()
lst.append(line[:end])
line = line[end:]
return lst
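# Illustrative behaviour (easy to verify by hand): with break_word=False a
# chunk keeps its trailing punctuation, e.g. makeLineToWordsList(u"said: done")
# yields [u'said:', u' ', u'done'] -- whitespace that matches no alternative
# falls through the regex one character at a time.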
def makeLongLineToLines(long_line, start_x, start_y, width, line_height, font, cn_char_width=0):
u"""将一个长行分成多个可显示的短行"""
txt = long_line
# txt = u"测试汉字abc123"
# txt = txt.decode("utf-8")
if not txt:
return [None]
words = makeLineToWordsList(txt)
lines = []
if not cn_char_width:
cn_char_width, h = font.size(u"汉")
avg_char_per_line = width / cn_char_width
if avg_char_per_line <= 1:
avg_char_per_line = 1
line_x = start_x
line_y = start_y
while words:
tmp_words = words[:avg_char_per_line]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
wc = len(tmp_words)
while w < width and wc < len(words):
wc += 1
tmp_words = words[:wc]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
while w > width and len(tmp_words) > 1:
tmp_words = tmp_words[:-1]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
if w > width and len(tmp_words) == 1:
            # handle a single over-long word or number
line_y = makeLongWordToLines(
tmp_words[0], line_x, line_y, width, line_height, font, lines
)
words = words[len(tmp_words):]
continue
line = {
"x": line_x,
"y": line_y,
"text": tmp_ln,
"font": font,
}
line_y += line_height
words = words[len(tmp_words):]
lines.append(line)
if len(lines) >= 1:
        # drop leading whitespace at the start of wrapped continuation lines
while len(words) > 0 and not words[0].strip():
words = words[1:]
return lines
def makeLongWordToLines(long_word, line_x, line_y, width, line_height, font, lines):
if not long_word:
return line_y
c = long_word[0]
char_width, char_height = font.size(c)
default_char_num_per_line = width / char_width
while long_word:
tmp_ln = long_word[:default_char_num_per_line]
w, h = font.size(tmp_ln)
l = len(tmp_ln)
while w < width and l < len(long_word):
l += 1
tmp_ln = long_word[:l]
w, h = font.size(tmp_ln)
while w > width and len(tmp_ln) > 1:
tmp_ln = tmp_ln[:-1]
w, h = font.size(tmp_ln)
l = len(tmp_ln)
long_word = long_word[l:]
line = {
"x": line_x,
"y": line_y,
"text": tmp_ln,
"font": font,
}
line_y += line_height
lines.append(line)
return line_y
def makeMatrix(txt, font, title_font, cfg):
width = cfg["width"]
data = {
"width": width,
"height": 0,
"lines": [],
}
a = txt.split("\n")
cur_x = cfg["padding"][3]
cur_y = cfg["padding"][0]
cn_char_width, h = font.size(u"汉")
for ln_idx, ln in enumerate(a):
ln = ln.rstrip()
if ln_idx == 0 and cfg["first-line-as-title"]:
f = title_font
line_height = cfg["title-line-height"]
else:
f = font
line_height = cfg["line-height"]
current_width = width - cur_x - cfg["padding"][1]
lines = makeLongLineToLines(ln, cur_x, cur_y, current_width, line_height, f, cn_char_width=cn_char_width)
cur_y += line_height * len(lines)
data["lines"].extend(lines)
data["height"] = cur_y + cfg["padding"][2]
return data
def makeImage(data, cfg):
u"""
"""
width, height = data["width"], data["height"]
if cfg["copyright"]:
height += 48
im = Image.new("RGB", (width, height), cfg["background-color"])
dr = ImageDraw.Draw(im)
for ln_idx, line in enumerate(data["lines"]):
__makeLine(im, line, cfg)
# dr.text((line["x"], line["y"]), line["text"], font=font, fill=cfg["font-color"])
    # optional downscaling (kept disabled):
    # im = im.resize((width / 2, height / 2), Image.ANTIALIAS)
drawBorder(im, dr, cfg)
drawCopyright(im, dr, cfg)
return im
def drawCopyright(im, dr, cfg):
u"""绘制版权信息"""
if not cfg["copyright"]:
return
font = getFontForPyGame(font_name=cfg["font-family"], font_size=12)
rtext = font.render(cfg["copyright"],
cfg["font-antialiasing"], (128, 128, 128), cfg["background-color"]
)
sio = StringIO.StringIO()
pygame.image.save(rtext, sio)
sio.seek(0)
copyright_im = Image.open(sio)
iw, ih = im.size
cw, ch = rtext.get_size()
padding = cfg["padding"]
offset_y = ih - 32 - padding[2]
if cfg["copyright-center"]:
cx = (iw - cw) / 2
else:
cx = cfg["padding"][3]
cy = offset_y + 12
dr.line([(padding[3], offset_y), (iw - padding[1], offset_y)], width=1, fill=(192, 192, 192))
im.paste(copyright_im, (cx, cy))
def drawBorder(im, dr, cfg):
u"""绘制边框"""
if not cfg["border-size"]:
return
w, h = im.size
x, y = w - 1, h - 1
dr.line(
[(0, 0), (x, 0), (x, y), (0, y), (0, 0)],
width=cfg["border-size"],
fill=cfg["border-color"],
)
def __makeLine(im, line, cfg):
if not line:
return
sio = StringIO.StringIO()
x, y = line["x"], line["y"]
text = line["text"]
font = line["font"]
rtext = font.render(text, cfg["font-antialiasing"], cfg["font-color"], cfg["background-color"])
pygame.image.save(rtext, sio)
sio.seek(0)
ln_im = Image.open(sio)
im.paste(ln_im, (x, y))
def txt2im(txt, outfn, cfg=None, show=False):
    cfg = makeConfig(cfg)
font = getFontForPyGame(cfg["font-family"], cfg["font-size"])
title_font = getFontForPyGame(cfg["font-family"], cfg["title-font-size"])
data = makeMatrix(txt, font, title_font, cfg)
im = makeImage(data, cfg)
im.save(outfn)
if os.name == "nt" and show:
im.show()
def test():
c = open("test.txt", "rb").read().decode("utf-8")
txt2im(c, "test.png", show=True)
if __name__ == "__main__":
test()
| 24.259146
| 113
| 0.544803
|
import os
import re
import StringIO
from PIL import Image
from PIL import ImageDraw
import pygame
g_script_folder = os.path.dirname(os.path.abspath(__file__))
g_fonts_folder = os.path.join(g_script_folder, "fonts")
g_re_first_word = re.compile((u""
+ u"(%(prefix)s+\S%(postfix)s+)"
+ u"|(%(prefix)s*\w+%(postfix)s*)"
+ u"|(%(prefix)s+\S)|(\S%(postfix)s+)"
+ u"|(\d+%%)"
) % {
"prefix": u"['\"\(<\[\{‘“(《「『]",
"postfix": u"[:'\"\)>\]\}:’”)》」』,;\.\?!,、;。?!]",
})
pygame.init()
def getFontForPyGame(font_name="wqy-zenhei.ttc", font_size=14):
return pygame.font.Font(os.path.join(g_fonts_folder, font_name), font_size)
def makeConfig(cfg=None):
if not cfg or type(cfg) != dict:
cfg = {}
default_cfg = {
"width": 440,
"padding": (15, 18, 20, 18),
"line-height": 20,
"title-line-height": 32,
"font-size": 14,
"title-font-size": 24,
"font-family": "wqy-zenhei.ttc",
"font-color": (0, 0, 0),
"font-antialiasing": True,
"background-color": (255, 255, 255),
"border-size": 1,
"border-color": (192, 192, 192),
"copyright": u"本图文由 txt2.im 自动生成,但不代表 txt2.im 赞同其内容或立场。",
"copyright-center": False,
"first-line-as-title": True,
"break-word": False,
}
default_cfg.update(cfg)
return default_cfg
def makeLineToWordsList(line, break_word=False):
if break_word:
return [c for c in line]
lst = []
while line:
ro = g_re_first_word.match(line)
end = 1 if not ro else ro.end()
lst.append(line[:end])
line = line[end:]
return lst
def makeLongLineToLines(long_line, start_x, start_y, width, line_height, font, cn_char_width=0):
txt = long_line
if not txt:
return [None]
words = makeLineToWordsList(txt)
lines = []
if not cn_char_width:
cn_char_width, h = font.size(u"汉")
avg_char_per_line = width / cn_char_width
if avg_char_per_line <= 1:
avg_char_per_line = 1
line_x = start_x
line_y = start_y
while words:
tmp_words = words[:avg_char_per_line]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
wc = len(tmp_words)
while w < width and wc < len(words):
wc += 1
tmp_words = words[:wc]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
while w > width and len(tmp_words) > 1:
tmp_words = tmp_words[:-1]
tmp_ln = "".join(tmp_words)
w, h = font.size(tmp_ln)
if w > width and len(tmp_words) == 1:
line_y = makeLongWordToLines(
tmp_words[0], line_x, line_y, width, line_height, font, lines
)
words = words[len(tmp_words):]
continue
line = {
"x": line_x,
"y": line_y,
"text": tmp_ln,
"font": font,
}
line_y += line_height
words = words[len(tmp_words):]
lines.append(line)
if len(lines) >= 1:
while len(words) > 0 and not words[0].strip():
words = words[1:]
return lines
def makeLongWordToLines(long_word, line_x, line_y, width, line_height, font, lines):
if not long_word:
return line_y
c = long_word[0]
char_width, char_height = font.size(c)
default_char_num_per_line = width / char_width
while long_word:
tmp_ln = long_word[:default_char_num_per_line]
w, h = font.size(tmp_ln)
l = len(tmp_ln)
while w < width and l < len(long_word):
l += 1
tmp_ln = long_word[:l]
w, h = font.size(tmp_ln)
while w > width and len(tmp_ln) > 1:
tmp_ln = tmp_ln[:-1]
w, h = font.size(tmp_ln)
l = len(tmp_ln)
long_word = long_word[l:]
line = {
"x": line_x,
"y": line_y,
"text": tmp_ln,
"font": font,
}
line_y += line_height
lines.append(line)
return line_y
def makeMatrix(txt, font, title_font, cfg):
width = cfg["width"]
data = {
"width": width,
"height": 0,
"lines": [],
}
a = txt.split("\n")
cur_x = cfg["padding"][3]
cur_y = cfg["padding"][0]
cn_char_width, h = font.size(u"汉")
for ln_idx, ln in enumerate(a):
ln = ln.rstrip()
if ln_idx == 0 and cfg["first-line-as-title"]:
f = title_font
line_height = cfg["title-line-height"]
else:
f = font
line_height = cfg["line-height"]
current_width = width - cur_x - cfg["padding"][1]
lines = makeLongLineToLines(ln, cur_x, cur_y, current_width, line_height, f, cn_char_width=cn_char_width)
cur_y += line_height * len(lines)
data["lines"].extend(lines)
data["height"] = cur_y + cfg["padding"][2]
return data
def makeImage(data, cfg):
width, height = data["width"], data["height"]
if cfg["copyright"]:
height += 48
im = Image.new("RGB", (width, height), cfg["background-color"])
dr = ImageDraw.Draw(im)
for ln_idx, line in enumerate(data["lines"]):
__makeLine(im, line, cfg)
drawBorder(im, dr, cfg)
drawCopyright(im, dr, cfg)
return im
def drawCopyright(im, dr, cfg):
if not cfg["copyright"]:
return
font = getFontForPyGame(font_name=cfg["font-family"], font_size=12)
rtext = font.render(cfg["copyright"],
cfg["font-antialiasing"], (128, 128, 128), cfg["background-color"]
)
sio = StringIO.StringIO()
pygame.image.save(rtext, sio)
sio.seek(0)
copyright_im = Image.open(sio)
iw, ih = im.size
cw, ch = rtext.get_size()
padding = cfg["padding"]
offset_y = ih - 32 - padding[2]
if cfg["copyright-center"]:
cx = (iw - cw) / 2
else:
cx = cfg["padding"][3]
cy = offset_y + 12
dr.line([(padding[3], offset_y), (iw - padding[1], offset_y)], width=1, fill=(192, 192, 192))
im.paste(copyright_im, (cx, cy))
def drawBorder(im, dr, cfg):
if not cfg["border-size"]:
return
w, h = im.size
x, y = w - 1, h - 1
dr.line(
[(0, 0), (x, 0), (x, y), (0, y), (0, 0)],
width=cfg["border-size"],
fill=cfg["border-color"],
)
def __makeLine(im, line, cfg):
if not line:
return
sio = StringIO.StringIO()
x, y = line["x"], line["y"]
text = line["text"]
font = line["font"]
rtext = font.render(text, cfg["font-antialiasing"], cfg["font-color"], cfg["background-color"])
pygame.image.save(rtext, sio)
sio.seek(0)
ln_im = Image.open(sio)
im.paste(ln_im, (x, y))
def txt2im(txt, outfn, cfg=None, show=False):
cfg = makeConfig(cfg)
font = getFontForPyGame(cfg["font-family"], cfg["font-size"])
title_font = getFontForPyGame(cfg["font-family"], cfg["title-font-size"])
data = makeMatrix(txt, font, title_font, cfg)
im = makeImage(data, cfg)
im.save(outfn)
if os.name == "nt" and show:
im.show()
def test():
c = open("test.txt", "rb").read().decode("utf-8")
txt2im(c, "test.png", show=True)
if __name__ == "__main__":
test()
| true
| true
|
7902465bb7ccfe78def73e81c0df02e3bdd47cd1
| 91
|
py
|
Python
|
tests/test_jogo_banco.py
|
rafaelgarrafiel/jogo_banco
|
26457430c0330fa508b6e5236e5b1aa84bdcb9bf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_jogo_banco.py
|
rafaelgarrafiel/jogo_banco
|
26457430c0330fa508b6e5236e5b1aa84bdcb9bf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_jogo_banco.py
|
rafaelgarrafiel/jogo_banco
|
26457430c0330fa508b6e5236e5b1aa84bdcb9bf
|
[
"Apache-2.0"
] | null | null | null |
from jogo_banco import __version__
def test_version():
assert __version__ == '0.1.0'
| 15.166667
| 34
| 0.725275
|
from jogo_banco import __version__
def test_version():
assert __version__ == '0.1.0'
| true
| true
|
79024695aabf9256233b0f39096a39782c9fbc2d
| 2,883
|
py
|
Python
|
examples/views/persistent.py
|
Enegg/disnake
|
1d48cbf4e0dfec82fdfb65d7f58396767ce7c009
|
[
"MIT"
] | 290
|
2021-11-03T12:33:16.000Z
|
2022-03-31T19:30:19.000Z
|
examples/views/persistent.py
|
Enegg/disnake
|
1d48cbf4e0dfec82fdfb65d7f58396767ce7c009
|
[
"MIT"
] | 200
|
2021-11-03T10:41:41.000Z
|
2022-03-31T08:13:11.000Z
|
examples/views/persistent.py
|
Enegg/disnake
|
1d48cbf4e0dfec82fdfb65d7f58396767ce7c009
|
[
"MIT"
] | 118
|
2021-11-03T18:27:09.000Z
|
2022-03-25T22:00:45.000Z
|
import disnake
from disnake.ext import commands
# Define a simple View that persists between bot restarts
# In order a view to persist between restarts it needs to meet the following conditions:
# 1) The timeout of the View has to be set to None
# 2) Every item in the View has to have a custom_id set
# It is recommended that the custom_id be sufficiently unique to
# prevent conflicts with other buttons the bot sends.
# For this example the custom_id is prefixed with the name of the bot.
# Note that custom_ids can only be up to 100 characters long.
class PersistentView(disnake.ui.View):
def __init__(self):
super().__init__(timeout=None)
@disnake.ui.button(
label="Green", style=disnake.ButtonStyle.green, custom_id="persistent_view:green"
)
async def green(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is green.", ephemeral=True)
@disnake.ui.button(label="Red", style=disnake.ButtonStyle.red, custom_id="persistent_view:red")
async def red(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is red.", ephemeral=True)
@disnake.ui.button(
label="Grey", style=disnake.ButtonStyle.grey, custom_id="persistent_view:grey"
)
async def grey(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is grey.", ephemeral=True)
class PersistentViewBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned)
self.persistent_views_added = False
async def on_ready(self):
if not self.persistent_views_added:
# Register the persistent view for listening here.
# Note that this does not send the view to any message.
# In order to do this you need to first send a message with the View, which is shown below.
# If you have the message_id you can also pass it as a keyword argument, but for this example
# we don't have one.
self.add_view(PersistentView())
self.persistent_views_added = True
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
"""Starts a persistent view."""
# In order for a persistent view to be listened to, it needs to be sent to an actual message.
# Call this method once just to store it somewhere.
# In a more complicated program you might fetch the message_id from a database for use later.
# However this is outside of the scope of this simple example.
await ctx.send("What's your favourite colour?", view=PersistentView())
bot.run("token")
| 42.397059
| 105
| 0.712105
|
import disnake
from disnake.ext import commands
class PersistentView(disnake.ui.View):
def __init__(self):
super().__init__(timeout=None)
@disnake.ui.button(
label="Green", style=disnake.ButtonStyle.green, custom_id="persistent_view:green"
)
async def green(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is green.", ephemeral=True)
@disnake.ui.button(label="Red", style=disnake.ButtonStyle.red, custom_id="persistent_view:red")
async def red(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is red.", ephemeral=True)
@disnake.ui.button(
label="Grey", style=disnake.ButtonStyle.grey, custom_id="persistent_view:grey"
)
async def grey(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is grey.", ephemeral=True)
class PersistentViewBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned)
self.persistent_views_added = False
async def on_ready(self):
if not self.persistent_views_added:
self.add_view(PersistentView())
self.persistent_views_added = True
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
# In order for a persistent view to be listened to, it needs to be sent to an actual message.
# Call this method once just to store it somewhere.
# In a more complicated program you might fetch the message_id from a database for use later.
# However this is outside of the scope of this simple example.
await ctx.send("What's your favourite colour?", view=PersistentView())
bot.run("token")
| true
| true
|
790246fa9d53ea6e7bb11ceff8b5aef04525933c
| 6,510
|
py
|
Python
|
ironic/drivers/modules/oneview/management.py
|
pyrrrat/moved-ironic
|
93331da82ef13490ccf08f8f9c370e81ca176a41
|
[
"Apache-2.0"
] | null | null | null |
ironic/drivers/modules/oneview/management.py
|
pyrrrat/moved-ironic
|
93331da82ef13490ccf08f8f9c370e81ca176a41
|
[
"Apache-2.0"
] | null | null | null |
ironic/drivers/modules/oneview/management.py
|
pyrrrat/moved-ironic
|
93331da82ef13490ccf08f8f9c370e81ca176a41
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules.oneview import common
LOG = logging.getLogger(__name__)
BOOT_DEVICE_MAPPING_TO_OV = {
boot_devices.DISK: 'HardDisk',
boot_devices.PXE: 'PXE',
boot_devices.CDROM: 'CD',
}
BOOT_DEVICE_OV_TO_GENERIC = {
v: k
for k, v in BOOT_DEVICE_MAPPING_TO_OV.items()
}
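# Illustrative round trip through the two maps above (values as defined there):
#   BOOT_DEVICE_MAPPING_TO_OV[boot_devices.PXE]  -> 'PXE'
#   BOOT_DEVICE_OV_TO_GENERIC['HardDisk']        -> boot_devices.DISK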
oneview_exceptions = importutils.try_import('oneview_client.exceptions')
class OneViewManagement(base.ManagementInterface):
def get_properties(self):
return common.COMMON_PROPERTIES
def validate(self, task):
"""Checks required info on 'driver_info' and validates node with OneView
Validates whether the 'driver_info' property of the supplied
task's node contains the required info such as server_hardware_uri,
server_hardware_type, server_profile_template_uri and
enclosure_group_uri. Also, checks if the server profile of the node is
applied, if NICs are valid for the server profile of the node, and if
the server hardware attributes (ram, memory, vcpus count) are
consistent with OneView.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if parameters set are inconsistent with
resources in OneView
"""
common.verify_node_info(task.node)
try:
common.validate_oneview_resources_compatibility(task)
except exception.OneViewError as oneview_exc:
raise exception.InvalidParameterValue(oneview_exc)
def get_supported_boot_devices(self, task):
"""Gets a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return sorted(BOOT_DEVICE_MAPPING_TO_OV.keys())
@task_manager.require_exclusive_lock
@common.node_has_server_profile
def set_boot_device(self, task, device, persistent=False):
"""Sets the boot device for a node.
Sets the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of the supported devices
listed in :mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is
specified.
:raises: OperationNotPermitted if the server has no server profile or
if the server is already powered on.
:raises: OneViewError if the communication with OneView fails
"""
oneview_info = common.get_oneview_info(task.node)
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(
_("Invalid boot device %s specified.") % device)
LOG.debug("Setting boot device to %(device)s for node %(node)s",
{"device": device, "node": task.node.uuid})
try:
oneview_client = common.get_oneview_client()
device_to_oneview = BOOT_DEVICE_MAPPING_TO_OV.get(device)
oneview_client.set_boot_device(oneview_info, device_to_oneview)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_(
"Error setting boot device on OneView. Error: %s")
% oneview_exc
)
LOG.error(msg)
raise exception.OneViewError(error=msg)
@common.node_has_server_profile
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Provides the current boot device of the node.
:param task: a task from TaskManager.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` [PXE, DISK, CDROM]
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
:raises: OperationNotPermitted if no Server Profile is associated with
the node
:raises: InvalidParameterValue if the boot device is unknown
:raises: OneViewError if the communication with OneView fails
"""
oneview_info = common.get_oneview_info(task.node)
try:
oneview_client = common.get_oneview_client()
boot_order = oneview_client.get_boot_order(oneview_info)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_(
"Error getting boot device from OneView. Error: %s")
% oneview_exc
)
LOG.error(msg)
raise exception.OneViewError(msg)
primary_device = boot_order[0]
if primary_device not in BOOT_DEVICE_OV_TO_GENERIC:
raise exception.InvalidParameterValue(
_("Unsupported boot Device %(device)s for Node: %(node)s")
% {"device": primary_device, "node": task.node.uuid}
)
boot_device = {
'boot_device': BOOT_DEVICE_OV_TO_GENERIC.get(primary_device),
'persistent': True,
}
return boot_device
def get_sensors_data(self, task):
"""Get sensors data.
Not implemented by this driver.
:param task: a TaskManager instance.
"""
raise NotImplementedError()
| 37.630058
| 80
| 0.662366
|
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules.oneview import common
LOG = logging.getLogger(__name__)
BOOT_DEVICE_MAPPING_TO_OV = {
boot_devices.DISK: 'HardDisk',
boot_devices.PXE: 'PXE',
boot_devices.CDROM: 'CD',
}
BOOT_DEVICE_OV_TO_GENERIC = {
v: k
for k, v in BOOT_DEVICE_MAPPING_TO_OV.items()
}
oneview_exceptions = importutils.try_import('oneview_client.exceptions')
class OneViewManagement(base.ManagementInterface):
def get_properties(self):
return common.COMMON_PROPERTIES
def validate(self, task):
common.verify_node_info(task.node)
try:
common.validate_oneview_resources_compatibility(task)
except exception.OneViewError as oneview_exc:
raise exception.InvalidParameterValue(oneview_exc)
def get_supported_boot_devices(self, task):
return sorted(BOOT_DEVICE_MAPPING_TO_OV.keys())
@task_manager.require_exclusive_lock
@common.node_has_server_profile
def set_boot_device(self, task, device, persistent=False):
oneview_info = common.get_oneview_info(task.node)
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(
_("Invalid boot device %s specified.") % device)
LOG.debug("Setting boot device to %(device)s for node %(node)s",
{"device": device, "node": task.node.uuid})
try:
oneview_client = common.get_oneview_client()
device_to_oneview = BOOT_DEVICE_MAPPING_TO_OV.get(device)
oneview_client.set_boot_device(oneview_info, device_to_oneview)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_(
"Error setting boot device on OneView. Error: %s")
% oneview_exc
)
LOG.error(msg)
raise exception.OneViewError(error=msg)
@common.node_has_server_profile
def get_boot_device(self, task):
oneview_info = common.get_oneview_info(task.node)
try:
oneview_client = common.get_oneview_client()
boot_order = oneview_client.get_boot_order(oneview_info)
except oneview_exceptions.OneViewException as oneview_exc:
msg = (_(
"Error getting boot device from OneView. Error: %s")
% oneview_exc
)
LOG.error(msg)
raise exception.OneViewError(msg)
primary_device = boot_order[0]
if primary_device not in BOOT_DEVICE_OV_TO_GENERIC:
raise exception.InvalidParameterValue(
_("Unsupported boot Device %(device)s for Node: %(node)s")
% {"device": primary_device, "node": task.node.uuid}
)
boot_device = {
'boot_device': BOOT_DEVICE_OV_TO_GENERIC.get(primary_device),
'persistent': True,
}
return boot_device
def get_sensors_data(self, task):
raise NotImplementedError()
| true
| true
|
79024718e8b688bb269fb61caf6f9986f5e56403
| 1,093
|
py
|
Python
|
youtubeDataApi/searchApi/cron.py
|
aryamaan98/Youtube-Data-API-Integration
|
7a941c055c637ce285ed47d40b9139446f0b6d89
|
[
"MIT"
] | null | null | null |
youtubeDataApi/searchApi/cron.py
|
aryamaan98/Youtube-Data-API-Integration
|
7a941c055c637ce285ed47d40b9139446f0b6d89
|
[
"MIT"
] | null | null | null |
youtubeDataApi/searchApi/cron.py
|
aryamaan98/Youtube-Data-API-Integration
|
7a941c055c637ce285ed47d40b9139446f0b6d89
|
[
"MIT"
] | null | null | null |
import requests
import json
from datetime import datetime, timezone
from .utils import _extract_videos_necessary_details, _save_video_detils_in_db
from .models import ApiKeys
from . import config
def _get_api_key():  # pick the least recently used API key on each cron run (load balancing)
new_key = ApiKeys.objects.all().order_by('last_used').first()
    _response = ApiKeys.objects.filter(
api_key=new_key.api_key).update(last_used=datetime.now(timezone.utc))
return new_key.api_key
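# A minimal race-safe variant (hypothetical sketch, not defined in the original
# module): the read-then-update above runs as two separate queries, so two
# overlapping cron runs could pick the same key. Locking the row inside a
# transaction serializes the rotation; this assumes a database backend that
# supports SELECT ... FOR UPDATE.
def _get_api_key_atomic():
    from django.db import transaction
    with transaction.atomic():
        new_key = ApiKeys.objects.select_for_update().order_by('last_used').first()
        ApiKeys.objects.filter(api_key=new_key.api_key).update(
            last_used=datetime.now(timezone.utc))
    return new_key.api_key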
def get_recent_youtube_videos_details():
params = {**config.params}
params.update({'key': _get_api_key()})
    print('Parameters: ', params)
youtube_api_response = requests.get(
config.YOUTUBE_SEARCH_URL, params=params)
print('Youtube API Response: ', youtube_api_response.text)
youtube_api_response = json.loads(youtube_api_response.text)
videos_details = _extract_videos_necessary_details(
youtube_api_response.get('items', []))
if videos_details:
_response = _save_video_detils_in_db(videos_details)
return videos_details
| 37.689655
| 101
| 0.755718
|
import requests
import json
from datetime import datetime, timezone
from .utils import _extract_videos_necessary_details, _save_video_detils_in_db
from .models import ApiKeys
from . import config
def _get_api_key():
new_key = ApiKeys.objects.all().order_by('last_used').first()
    _response = ApiKeys.objects.filter(
api_key=new_key.api_key).update(last_used=datetime.now(timezone.utc))
return new_key.api_key
def get_recent_youtube_videos_details():
params = {**config.params}
params.update({'key': _get_api_key()})
    print('Parameters: ', params)
youtube_api_response = requests.get(
config.YOUTUBE_SEARCH_URL, params=params)
print('Youtube API Response: ', youtube_api_response.text)
youtube_api_response = json.loads(youtube_api_response.text)
videos_details = _extract_videos_necessary_details(
youtube_api_response.get('items', []))
if videos_details:
_response = _save_video_detils_in_db(videos_details)
return videos_details
| true
| true
|
79024724e6e005843158e10b491538592925be0e
| 14,087
|
py
|
Python
|
tests/examples/minlplib/ex8_3_13.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/ex8_3_13.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/ex8_3_13.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# NLP written by GAMS Convert at 04/21/18 13:51:47
#
# Equation counts
#     Total        E        G        L        N        X        C        B
#        73       73        0        0        0        0        0        0
#
# Variable counts
#                  x        b        i      s1s      s2s       sc       si
#     Total     cont   binary  integer     sos1     sos2    scont     sint
#       116      116        0        0        0        0        0        0
# FX      0        0        0        0        0        0        0        0
#
# Nonzero counts
#     Total    const       NL      DLL
#       576      128      448        0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x3 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x4 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x5 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x6 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x7 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x8 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x9 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x10 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x11 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x12 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x13 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x14 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x15 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x16 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x17 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x18 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x19 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x20 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x21 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x22 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x23 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x24 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x25 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x26 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x27 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x28 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x29 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x30 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x31 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x32 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x33 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x34 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x35 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x36 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x37 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x38 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x39 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x40 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x41 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x42 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x43 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x44 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x45 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x46 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x47 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x48 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x49 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x50 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x51 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x52 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x53 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x54 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x55 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x56 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x57 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x58 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x59 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x60 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x61 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x62 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x63 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x64 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x65 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x66 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x67 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x68 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x69 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x70 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x71 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x72 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x73 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x74 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x75 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x76 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x77 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x78 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x79 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x80 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x81 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x82 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x83 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x84 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x85 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x86 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x87 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x88 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x89 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x90 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x91 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x92 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x93 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x94 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x95 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x96 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x97 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x98 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x99 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x100 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x101 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x102 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x103 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x104 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x105 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x106 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x107 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.obj = Objective(expr= - 100*m.x95 + m.x97 + m.x98 + m.x99 + m.x100 + m.x101, sense=minimize)
m.c2 = Constraint(expr= - m.x2 - m.x3 - m.x4 - m.x5 - m.x6 == -50)
m.c3 = Constraint(expr= - m.x7 - m.x8 - m.x9 - m.x10 - m.x11 == -50)
m.c4 = Constraint(expr= - m.x2 - m.x7 + m.x12 - m.x62 - m.x67 - m.x72 - m.x77 - m.x82 == 0)
m.c5 = Constraint(expr= - m.x3 - m.x8 + m.x13 - m.x63 - m.x68 - m.x73 - m.x78 - m.x83 == 0)
m.c6 = Constraint(expr= - m.x4 - m.x9 + m.x14 - m.x64 - m.x69 - m.x74 - m.x79 - m.x84 == 0)
m.c7 = Constraint(expr= - m.x5 - m.x10 + m.x15 - m.x65 - m.x70 - m.x75 - m.x80 - m.x85 == 0)
m.c8 = Constraint(expr= - m.x6 - m.x11 + m.x16 - m.x66 - m.x71 - m.x76 - m.x81 - m.x86 == 0)
m.c9 = Constraint(expr=m.x17*m.x12 - (m.x42*m.x62 + m.x46*m.x67 + m.x50*m.x72 + m.x54*m.x77 + m.x58*m.x82) - m.x2 == 0)
m.c10 = Constraint(expr=m.x18*m.x12 - (m.x43*m.x62 + m.x47*m.x67 + m.x51*m.x72 + m.x55*m.x77 + m.x59*m.x82) - m.x7 == 0)
m.c11 = Constraint(expr=m.x19*m.x12 - (m.x44*m.x62 + m.x48*m.x67 + m.x52*m.x72 + m.x56*m.x77 + m.x60*m.x82) == 0)
m.c12 = Constraint(expr=m.x20*m.x12 - (m.x45*m.x62 + m.x49*m.x67 + m.x53*m.x72 + m.x57*m.x77 + m.x61*m.x82) == 0)
m.c13 = Constraint(expr=m.x21*m.x13 - (m.x42*m.x63 + m.x46*m.x68 + m.x50*m.x73 + m.x54*m.x78 + m.x58*m.x83) - m.x3 == 0)
m.c14 = Constraint(expr=m.x22*m.x13 - (m.x43*m.x63 + m.x47*m.x68 + m.x51*m.x73 + m.x55*m.x78 + m.x59*m.x83) - m.x8 == 0)
m.c15 = Constraint(expr=m.x23*m.x13 - (m.x44*m.x63 + m.x48*m.x68 + m.x52*m.x73 + m.x56*m.x78 + m.x60*m.x83) == 0)
m.c16 = Constraint(expr=m.x24*m.x13 - (m.x45*m.x63 + m.x49*m.x68 + m.x53*m.x73 + m.x57*m.x78 + m.x61*m.x83) == 0)
m.c17 = Constraint(expr=m.x25*m.x14 - (m.x42*m.x64 + m.x46*m.x69 + m.x50*m.x74 + m.x54*m.x79 + m.x58*m.x84) - m.x4 == 0)
m.c18 = Constraint(expr=m.x26*m.x14 - (m.x43*m.x64 + m.x47*m.x69 + m.x51*m.x74 + m.x55*m.x79 + m.x59*m.x84) - m.x9 == 0)
m.c19 = Constraint(expr=m.x27*m.x14 - (m.x44*m.x64 + m.x48*m.x69 + m.x52*m.x74 + m.x56*m.x79 + m.x60*m.x84) == 0)
m.c20 = Constraint(expr=m.x28*m.x14 - (m.x45*m.x64 + m.x49*m.x69 + m.x53*m.x74 + m.x57*m.x79 + m.x61*m.x84) == 0)
m.c21 = Constraint(expr=m.x29*m.x15 - (m.x42*m.x65 + m.x46*m.x70 + m.x50*m.x75 + m.x54*m.x80 + m.x58*m.x85) - m.x5 == 0)
m.c22 = Constraint(expr=m.x30*m.x15 - (m.x43*m.x65 + m.x47*m.x70 + m.x51*m.x75 + m.x55*m.x80 + m.x59*m.x85) - m.x10
== 0)
m.c23 = Constraint(expr=m.x31*m.x15 - (m.x44*m.x65 + m.x48*m.x70 + m.x52*m.x75 + m.x56*m.x80 + m.x60*m.x85) == 0)
m.c24 = Constraint(expr=m.x32*m.x15 - (m.x45*m.x65 + m.x49*m.x70 + m.x53*m.x75 + m.x57*m.x80 + m.x61*m.x85) == 0)
m.c25 = Constraint(expr=m.x33*m.x16 - (m.x42*m.x66 + m.x46*m.x71 + m.x50*m.x76 + m.x54*m.x81 + m.x58*m.x86) - m.x6 == 0)
m.c26 = Constraint(expr=m.x34*m.x16 - (m.x43*m.x66 + m.x47*m.x71 + m.x51*m.x76 + m.x55*m.x81 + m.x59*m.x86) - m.x11
== 0)
m.c27 = Constraint(expr=m.x35*m.x16 - (m.x44*m.x66 + m.x48*m.x71 + m.x52*m.x76 + m.x56*m.x81 + m.x60*m.x86) == 0)
m.c28 = Constraint(expr=m.x36*m.x16 - (m.x45*m.x66 + m.x49*m.x71 + m.x53*m.x76 + m.x57*m.x81 + m.x61*m.x86) == 0)
m.c29 = Constraint(expr= - m.x12 + m.x37 == 0)
m.c30 = Constraint(expr= - m.x13 + m.x38 == 0)
m.c31 = Constraint(expr= - m.x14 + m.x39 == 0)
m.c32 = Constraint(expr= - m.x15 + m.x40 == 0)
m.c33 = Constraint(expr= - m.x16 + m.x41 == 0)
m.c34 = Constraint(expr=m.x42*m.x37 - (m.x17*m.x12 + m.x97*(-m.x107 - m.x108)) == 0)
m.c35 = Constraint(expr=m.x43*m.x37 - (m.x18*m.x12 + m.x97*(-m.x107 - m.x108)) == 0)
m.c36 = Constraint(expr=m.x44*m.x37 - (m.x19*m.x12 + m.x97*m.x107) == 0)
m.c37 = Constraint(expr=m.x45*m.x37 - (m.x20*m.x12 + m.x97*m.x108) == 0)
m.c38 = Constraint(expr=m.x46*m.x38 - (m.x21*m.x13 + m.x98*(-m.x109 - m.x110)) == 0)
m.c39 = Constraint(expr=m.x47*m.x38 - (m.x22*m.x13 + m.x98*(-m.x109 - m.x110)) == 0)
m.c40 = Constraint(expr=m.x48*m.x38 - (m.x23*m.x13 + m.x98*m.x109) == 0)
m.c41 = Constraint(expr=m.x49*m.x38 - (m.x24*m.x13 + m.x98*m.x110) == 0)
m.c42 = Constraint(expr=m.x50*m.x39 - (m.x25*m.x14 + m.x99*(-m.x111 - m.x112)) == 0)
m.c43 = Constraint(expr=m.x51*m.x39 - (m.x26*m.x14 + m.x99*(-m.x111 - m.x112)) == 0)
m.c44 = Constraint(expr=m.x52*m.x39 - (m.x27*m.x14 + m.x99*m.x111) == 0)
m.c45 = Constraint(expr=m.x53*m.x39 - (m.x28*m.x14 + m.x99*m.x112) == 0)
m.c46 = Constraint(expr=m.x54*m.x40 - (m.x29*m.x15 + m.x100*(-m.x113 - m.x114)) == 0)
m.c47 = Constraint(expr=m.x55*m.x40 - (m.x30*m.x15 + m.x100*(-m.x113 - m.x114)) == 0)
m.c48 = Constraint(expr=m.x56*m.x40 - (m.x31*m.x15 + m.x100*m.x113) == 0)
m.c49 = Constraint(expr=m.x57*m.x40 - (m.x32*m.x15 + m.x100*m.x114) == 0)
m.c50 = Constraint(expr=m.x58*m.x41 - (m.x33*m.x16 + m.x101*(-m.x115 - m.x116)) == 0)
m.c51 = Constraint(expr=m.x59*m.x41 - (m.x34*m.x16 + m.x101*(-m.x115 - m.x116)) == 0)
m.c52 = Constraint(expr=m.x60*m.x41 - (m.x35*m.x16 + m.x101*m.x115) == 0)
m.c53 = Constraint(expr=m.x61*m.x41 - (m.x36*m.x16 + m.x101*m.x116) == 0)
m.c54 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x102)*m.x42*m.x43**0.3 + m.x107 == 0)
m.c55 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x103)*m.x46*m.x47**0.3 + m.x109 == 0)
m.c56 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x104)*m.x50*m.x51**0.3 + m.x111 == 0)
m.c57 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x105)*m.x54*m.x55**0.3 + m.x113 == 0)
m.c58 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x106)*m.x58*m.x59**0.3 + m.x115 == 0)
m.c59 = Constraint(expr=-360000*exp(-4815.80271766482/m.x102)*m.x42**0.5*m.x43**1.8 + m.x108 == 0)
m.c60 = Constraint(expr=-360000*exp(-4815.80271766482/m.x103)*m.x46**0.5*m.x47**1.8 + m.x110 == 0)
m.c61 = Constraint(expr=-360000*exp(-4815.80271766482/m.x104)*m.x50**0.5*m.x51**1.8 + m.x112 == 0)
m.c62 = Constraint(expr=-360000*exp(-4815.80271766482/m.x105)*m.x54**0.5*m.x55**1.8 + m.x114 == 0)
m.c63 = Constraint(expr=-360000*exp(-4815.80271766482/m.x106)*m.x58**0.5*m.x59**1.8 + m.x116 == 0)
m.c64 = Constraint(expr= m.x37 - m.x62 - m.x63 - m.x64 - m.x65 - m.x66 - m.x87 == 0)
m.c65 = Constraint(expr= m.x38 - m.x67 - m.x68 - m.x69 - m.x70 - m.x71 - m.x88 == 0)
m.c66 = Constraint(expr= m.x39 - m.x72 - m.x73 - m.x74 - m.x75 - m.x76 - m.x89 == 0)
m.c67 = Constraint(expr= m.x40 - m.x77 - m.x78 - m.x79 - m.x80 - m.x81 - m.x90 == 0)
m.c68 = Constraint(expr= m.x41 - m.x82 - m.x83 - m.x84 - m.x85 - m.x86 - m.x91 == 0)
m.c69 = Constraint(expr= - m.x87 - m.x88 - m.x89 - m.x90 - m.x91 + m.x92 == 0)
m.c70 = Constraint(expr=m.x92*m.x93 - (m.x87*m.x42 + m.x88*m.x46 + m.x89*m.x50 + m.x90*m.x54 + m.x91*m.x58) == 0)
m.c71 = Constraint(expr=m.x92*m.x94 - (m.x87*m.x43 + m.x88*m.x47 + m.x89*m.x51 + m.x90*m.x55 + m.x91*m.x59) == 0)
m.c72 = Constraint(expr=m.x92*m.x95 - (m.x87*m.x44 + m.x88*m.x48 + m.x89*m.x52 + m.x90*m.x56 + m.x91*m.x60) == 0)
m.c73 = Constraint(expr=m.x92*m.x96 - (m.x87*m.x45 + m.x88*m.x49 + m.x89*m.x53 + m.x90*m.x57 + m.x91*m.x61) == 0)
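# A minimal solve sketch (hypothetical usage, not part of the GAMS-converted
# file): the script above only builds the model, so optimizing it needs an NLP
# solver; having an 'ipopt' executable on the PATH is an assumption here.
if __name__ == '__main__':
    opt = SolverFactory('ipopt')
    results = opt.solve(m, tee=True)
    print('objective =', value(m.obj))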
| 48.913194
| 120
| 0.632001
|
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x3 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x4 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x5 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x6 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x7 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x8 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x9 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x10 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x11 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x12 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x13 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x14 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x15 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x16 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x17 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x18 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x19 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x20 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x21 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x22 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x23 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x24 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x25 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x26 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x27 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x28 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x29 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x30 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x31 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x32 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x33 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x34 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x35 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x36 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x37 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x38 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x39 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x40 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x41 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x42 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x43 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x44 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x45 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x46 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x47 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x48 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x49 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x50 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x51 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x52 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x53 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x54 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x55 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x56 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x57 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x58 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x59 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x60 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x61 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x62 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x63 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x64 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x65 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x66 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x67 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x68 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x69 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x70 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x71 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x72 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x73 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x74 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x75 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x76 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x77 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x78 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x79 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x80 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x81 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x82 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x83 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x84 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x85 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x86 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x87 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x88 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x89 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x90 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x91 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x92 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x93 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x94 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x95 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x96 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x97 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x98 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x99 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x100 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x101 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x102 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x103 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x104 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x105 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x106 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x107 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.obj = Objective(expr= - 100*m.x95 + m.x97 + m.x98 + m.x99 + m.x100 + m.x101, sense=minimize)
m.c2 = Constraint(expr= - m.x2 - m.x3 - m.x4 - m.x5 - m.x6 == -50)
m.c3 = Constraint(expr= - m.x7 - m.x8 - m.x9 - m.x10 - m.x11 == -50)
m.c4 = Constraint(expr= - m.x2 - m.x7 + m.x12 - m.x62 - m.x67 - m.x72 - m.x77 - m.x82 == 0)
m.c5 = Constraint(expr= - m.x3 - m.x8 + m.x13 - m.x63 - m.x68 - m.x73 - m.x78 - m.x83 == 0)
m.c6 = Constraint(expr= - m.x4 - m.x9 + m.x14 - m.x64 - m.x69 - m.x74 - m.x79 - m.x84 == 0)
m.c7 = Constraint(expr= - m.x5 - m.x10 + m.x15 - m.x65 - m.x70 - m.x75 - m.x80 - m.x85 == 0)
m.c8 = Constraint(expr= - m.x6 - m.x11 + m.x16 - m.x66 - m.x71 - m.x76 - m.x81 - m.x86 == 0)
m.c9 = Constraint(expr=m.x17*m.x12 - (m.x42*m.x62 + m.x46*m.x67 + m.x50*m.x72 + m.x54*m.x77 + m.x58*m.x82) - m.x2 == 0)
m.c10 = Constraint(expr=m.x18*m.x12 - (m.x43*m.x62 + m.x47*m.x67 + m.x51*m.x72 + m.x55*m.x77 + m.x59*m.x82) - m.x7 == 0)
m.c11 = Constraint(expr=m.x19*m.x12 - (m.x44*m.x62 + m.x48*m.x67 + m.x52*m.x72 + m.x56*m.x77 + m.x60*m.x82) == 0)
m.c12 = Constraint(expr=m.x20*m.x12 - (m.x45*m.x62 + m.x49*m.x67 + m.x53*m.x72 + m.x57*m.x77 + m.x61*m.x82) == 0)
m.c13 = Constraint(expr=m.x21*m.x13 - (m.x42*m.x63 + m.x46*m.x68 + m.x50*m.x73 + m.x54*m.x78 + m.x58*m.x83) - m.x3 == 0)
m.c14 = Constraint(expr=m.x22*m.x13 - (m.x43*m.x63 + m.x47*m.x68 + m.x51*m.x73 + m.x55*m.x78 + m.x59*m.x83) - m.x8 == 0)
m.c15 = Constraint(expr=m.x23*m.x13 - (m.x44*m.x63 + m.x48*m.x68 + m.x52*m.x73 + m.x56*m.x78 + m.x60*m.x83) == 0)
m.c16 = Constraint(expr=m.x24*m.x13 - (m.x45*m.x63 + m.x49*m.x68 + m.x53*m.x73 + m.x57*m.x78 + m.x61*m.x83) == 0)
m.c17 = Constraint(expr=m.x25*m.x14 - (m.x42*m.x64 + m.x46*m.x69 + m.x50*m.x74 + m.x54*m.x79 + m.x58*m.x84) - m.x4 == 0)
m.c18 = Constraint(expr=m.x26*m.x14 - (m.x43*m.x64 + m.x47*m.x69 + m.x51*m.x74 + m.x55*m.x79 + m.x59*m.x84) - m.x9 == 0)
m.c19 = Constraint(expr=m.x27*m.x14 - (m.x44*m.x64 + m.x48*m.x69 + m.x52*m.x74 + m.x56*m.x79 + m.x60*m.x84) == 0)
m.c20 = Constraint(expr=m.x28*m.x14 - (m.x45*m.x64 + m.x49*m.x69 + m.x53*m.x74 + m.x57*m.x79 + m.x61*m.x84) == 0)
m.c21 = Constraint(expr=m.x29*m.x15 - (m.x42*m.x65 + m.x46*m.x70 + m.x50*m.x75 + m.x54*m.x80 + m.x58*m.x85) - m.x5 == 0)
m.c22 = Constraint(expr=m.x30*m.x15 - (m.x43*m.x65 + m.x47*m.x70 + m.x51*m.x75 + m.x55*m.x80 + m.x59*m.x85) - m.x10
== 0)
m.c23 = Constraint(expr=m.x31*m.x15 - (m.x44*m.x65 + m.x48*m.x70 + m.x52*m.x75 + m.x56*m.x80 + m.x60*m.x85) == 0)
m.c24 = Constraint(expr=m.x32*m.x15 - (m.x45*m.x65 + m.x49*m.x70 + m.x53*m.x75 + m.x57*m.x80 + m.x61*m.x85) == 0)
m.c25 = Constraint(expr=m.x33*m.x16 - (m.x42*m.x66 + m.x46*m.x71 + m.x50*m.x76 + m.x54*m.x81 + m.x58*m.x86) - m.x6 == 0)
m.c26 = Constraint(expr=m.x34*m.x16 - (m.x43*m.x66 + m.x47*m.x71 + m.x51*m.x76 + m.x55*m.x81 + m.x59*m.x86) - m.x11
== 0)
m.c27 = Constraint(expr=m.x35*m.x16 - (m.x44*m.x66 + m.x48*m.x71 + m.x52*m.x76 + m.x56*m.x81 + m.x60*m.x86) == 0)
m.c28 = Constraint(expr=m.x36*m.x16 - (m.x45*m.x66 + m.x49*m.x71 + m.x53*m.x76 + m.x57*m.x81 + m.x61*m.x86) == 0)
m.c29 = Constraint(expr= - m.x12 + m.x37 == 0)
m.c30 = Constraint(expr= - m.x13 + m.x38 == 0)
m.c31 = Constraint(expr= - m.x14 + m.x39 == 0)
m.c32 = Constraint(expr= - m.x15 + m.x40 == 0)
m.c33 = Constraint(expr= - m.x16 + m.x41 == 0)
m.c34 = Constraint(expr=m.x42*m.x37 - (m.x17*m.x12 + m.x97*(-m.x107 - m.x108)) == 0)
m.c35 = Constraint(expr=m.x43*m.x37 - (m.x18*m.x12 + m.x97*(-m.x107 - m.x108)) == 0)
m.c36 = Constraint(expr=m.x44*m.x37 - (m.x19*m.x12 + m.x97*m.x107) == 0)
m.c37 = Constraint(expr=m.x45*m.x37 - (m.x20*m.x12 + m.x97*m.x108) == 0)
m.c38 = Constraint(expr=m.x46*m.x38 - (m.x21*m.x13 + m.x98*(-m.x109 - m.x110)) == 0)
m.c39 = Constraint(expr=m.x47*m.x38 - (m.x22*m.x13 + m.x98*(-m.x109 - m.x110)) == 0)
m.c40 = Constraint(expr=m.x48*m.x38 - (m.x23*m.x13 + m.x98*m.x109) == 0)
m.c41 = Constraint(expr=m.x49*m.x38 - (m.x24*m.x13 + m.x98*m.x110) == 0)
m.c42 = Constraint(expr=m.x50*m.x39 - (m.x25*m.x14 + m.x99*(-m.x111 - m.x112)) == 0)
m.c43 = Constraint(expr=m.x51*m.x39 - (m.x26*m.x14 + m.x99*(-m.x111 - m.x112)) == 0)
m.c44 = Constraint(expr=m.x52*m.x39 - (m.x27*m.x14 + m.x99*m.x111) == 0)
m.c45 = Constraint(expr=m.x53*m.x39 - (m.x28*m.x14 + m.x99*m.x112) == 0)
m.c46 = Constraint(expr=m.x54*m.x40 - (m.x29*m.x15 + m.x100*(-m.x113 - m.x114)) == 0)
m.c47 = Constraint(expr=m.x55*m.x40 - (m.x30*m.x15 + m.x100*(-m.x113 - m.x114)) == 0)
m.c48 = Constraint(expr=m.x56*m.x40 - (m.x31*m.x15 + m.x100*m.x113) == 0)
m.c49 = Constraint(expr=m.x57*m.x40 - (m.x32*m.x15 + m.x100*m.x114) == 0)
m.c50 = Constraint(expr=m.x58*m.x41 - (m.x33*m.x16 + m.x101*(-m.x115 - m.x116)) == 0)
m.c51 = Constraint(expr=m.x59*m.x41 - (m.x34*m.x16 + m.x101*(-m.x115 - m.x116)) == 0)
m.c52 = Constraint(expr=m.x60*m.x41 - (m.x35*m.x16 + m.x101*m.x115) == 0)
m.c53 = Constraint(expr=m.x61*m.x41 - (m.x36*m.x16 + m.x101*m.x116) == 0)
m.c54 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x102)*m.x42*m.x43**0.3 + m.x107 == 0)
m.c55 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x103)*m.x46*m.x47**0.3 + m.x109 == 0)
m.c56 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x104)*m.x50*m.x51**0.3 + m.x111 == 0)
m.c57 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x105)*m.x54*m.x55**0.3 + m.x113 == 0)
m.c58 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x106)*m.x58*m.x59**0.3 + m.x115 == 0)
m.c59 = Constraint(expr=-360000*exp(-4815.80271766482/m.x102)*m.x42**0.5*m.x43**1.8 + m.x108 == 0)
m.c60 = Constraint(expr=-360000*exp(-4815.80271766482/m.x103)*m.x46**0.5*m.x47**1.8 + m.x110 == 0)
m.c61 = Constraint(expr=-360000*exp(-4815.80271766482/m.x104)*m.x50**0.5*m.x51**1.8 + m.x112 == 0)
m.c62 = Constraint(expr=-360000*exp(-4815.80271766482/m.x105)*m.x54**0.5*m.x55**1.8 + m.x114 == 0)
m.c63 = Constraint(expr=-360000*exp(-4815.80271766482/m.x106)*m.x58**0.5*m.x59**1.8 + m.x116 == 0)
m.c64 = Constraint(expr= m.x37 - m.x62 - m.x63 - m.x64 - m.x65 - m.x66 - m.x87 == 0)
m.c65 = Constraint(expr= m.x38 - m.x67 - m.x68 - m.x69 - m.x70 - m.x71 - m.x88 == 0)
m.c66 = Constraint(expr= m.x39 - m.x72 - m.x73 - m.x74 - m.x75 - m.x76 - m.x89 == 0)
m.c67 = Constraint(expr= m.x40 - m.x77 - m.x78 - m.x79 - m.x80 - m.x81 - m.x90 == 0)
m.c68 = Constraint(expr= m.x41 - m.x82 - m.x83 - m.x84 - m.x85 - m.x86 - m.x91 == 0)
m.c69 = Constraint(expr= - m.x87 - m.x88 - m.x89 - m.x90 - m.x91 + m.x92 == 0)
m.c70 = Constraint(expr=m.x92*m.x93 - (m.x87*m.x42 + m.x88*m.x46 + m.x89*m.x50 + m.x90*m.x54 + m.x91*m.x58) == 0)
m.c71 = Constraint(expr=m.x92*m.x94 - (m.x87*m.x43 + m.x88*m.x47 + m.x89*m.x51 + m.x90*m.x55 + m.x91*m.x59) == 0)
m.c72 = Constraint(expr=m.x92*m.x95 - (m.x87*m.x44 + m.x88*m.x48 + m.x89*m.x52 + m.x90*m.x56 + m.x91*m.x60) == 0)
m.c73 = Constraint(expr=m.x92*m.x96 - (m.x87*m.x45 + m.x88*m.x49 + m.x89*m.x53 + m.x90*m.x57 + m.x91*m.x61) == 0)
| true
| true
|
7902482a6fb74642b39229bc8a84f18d19d023ff
| 2,777
|
py
|
Python
|
tests/utils/test_pnc.py
|
hjmodi/atomic-reactor
|
547f3edd28628dc59a98c4928a0ecf280f5983cb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/utils/test_pnc.py
|
hjmodi/atomic-reactor
|
547f3edd28628dc59a98c4928a0ecf280f5983cb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/utils/test_pnc.py
|
hjmodi/atomic-reactor
|
547f3edd28628dc59a98c4928a0ecf280f5983cb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from io import BufferedReader, BytesIO
import pytest
import requests
import responses
from flexmock import flexmock
from atomic_reactor.util import get_retrying_requests_session
from atomic_reactor.utils.pnc import PNCUtil
PNC_BASE_API_URL = 'http://pnc.localhost/pnc-rest/v2'
PNC_GET_SCM_ARCHIVE_PATH = 'builds/{}/scm-archive'
def mock_pnc_map():
return {'base_api_url': PNC_BASE_API_URL,
'get_scm_archive_path': PNC_GET_SCM_ARCHIVE_PATH}
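# A small illustrative helper (hypothetical, mirroring how the tests below join
# the two entries of this map into the request URL they mock):
def _example_scm_archive_url(build_id):
    cfg = mock_pnc_map()
    return cfg['base_api_url'] + '/' + cfg['get_scm_archive_path'].format(build_id)
# e.g. _example_scm_archive_url('1234') ==
#      'http://pnc.localhost/pnc-rest/v2/builds/1234/scm-archive'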
@pytest.mark.usefixtures('user_params')
class TestGetSCMArchiveFromBuildID(object):
@responses.activate
def test_connection_filename_in_header(self):
build_id = '1234'
filename = 'source.tar.gz'
scm_url = f'https://code.example.com/{filename};sf=tgz'
content = b'abc'
reader = BufferedReader(BytesIO(content), buffer_size=1)
# to mock this URL we have to construct it manually first
get_scm_archive_request_url = PNC_BASE_API_URL + '/' + PNC_GET_SCM_ARCHIVE_PATH
responses.add(responses.GET, get_scm_archive_request_url.format(build_id), body=reader,
status=302, headers={'Location': scm_url})
responses.add(responses.HEAD, scm_url, body='', status=200,
headers={'Content-disposition': f'filename="{filename}"'})
pnc_util = PNCUtil(mock_pnc_map())
url, dest_filename = pnc_util.get_scm_archive_from_build_id(build_id)
assert url == scm_url
assert dest_filename == filename
@responses.activate
def test_connection_filename_in_url(self):
build_id = '1234'
filename = 'source.tar.gz'
scm_url = f'https://code.example.com/{filename}'
# to mock this URL we have to construct it manually first
get_scm_archive_request_url = PNC_BASE_API_URL + '/' + PNC_GET_SCM_ARCHIVE_PATH
responses.add(responses.GET, get_scm_archive_request_url.format(build_id), body='',
status=302, headers={'Location': scm_url})
pnc_util = PNCUtil(mock_pnc_map())
url, dest_filename = pnc_util.get_scm_archive_from_build_id(build_id)
assert url == scm_url
assert dest_filename == filename
def test_connection_failure(self):
build_id = '1234'
session = get_retrying_requests_session()
(flexmock(session)
.should_receive('get')
.and_raise(requests.exceptions.RetryError))
pnc_util = PNCUtil(mock_pnc_map(), session)
with pytest.raises(requests.exceptions.RetryError):
pnc_util.get_scm_archive_from_build_id(build_id)
| 33.457831
| 95
| 0.695355
|
from io import BufferedReader, BytesIO
import pytest
import requests
import responses
from flexmock import flexmock
from atomic_reactor.util import get_retrying_requests_session
from atomic_reactor.utils.pnc import PNCUtil
PNC_BASE_API_URL = 'http://pnc.localhost/pnc-rest/v2'
PNC_GET_SCM_ARCHIVE_PATH = 'builds/{}/scm-archive'
def mock_pnc_map():
return {'base_api_url': PNC_BASE_API_URL,
'get_scm_archive_path': PNC_GET_SCM_ARCHIVE_PATH}
@pytest.mark.usefixtures('user_params')
class TestGetSCMArchiveFromBuildID(object):
@responses.activate
def test_connection_filename_in_header(self):
build_id = '1234'
filename = 'source.tar.gz'
scm_url = f'https://code.example.com/{filename};sf=tgz'
content = b'abc'
reader = BufferedReader(BytesIO(content), buffer_size=1)
get_scm_archive_request_url = PNC_BASE_API_URL + '/' + PNC_GET_SCM_ARCHIVE_PATH
responses.add(responses.GET, get_scm_archive_request_url.format(build_id), body=reader,
status=302, headers={'Location': scm_url})
responses.add(responses.HEAD, scm_url, body='', status=200,
headers={'Content-disposition': f'filename="{filename}"'})
pnc_util = PNCUtil(mock_pnc_map())
url, dest_filename = pnc_util.get_scm_archive_from_build_id(build_id)
assert url == scm_url
assert dest_filename == filename
@responses.activate
def test_connection_filename_in_url(self):
build_id = '1234'
filename = 'source.tar.gz'
scm_url = f'https://code.example.com/{filename}'
get_scm_archive_request_url = PNC_BASE_API_URL + '/' + PNC_GET_SCM_ARCHIVE_PATH
responses.add(responses.GET, get_scm_archive_request_url.format(build_id), body='',
status=302, headers={'Location': scm_url})
pnc_util = PNCUtil(mock_pnc_map())
url, dest_filename = pnc_util.get_scm_archive_from_build_id(build_id)
assert url == scm_url
assert dest_filename == filename
def test_connection_failure(self):
build_id = '1234'
session = get_retrying_requests_session()
(flexmock(session)
.should_receive('get')
.and_raise(requests.exceptions.RetryError))
pnc_util = PNCUtil(mock_pnc_map(), session)
with pytest.raises(requests.exceptions.RetryError):
pnc_util.get_scm_archive_from_build_id(build_id)
| true
| true
|
7902487f555dccbcb2b79fb1287dfb6065bbedc3
| 116,721
|
py
|
Python
|
securitycenter/google/cloud/securitycenter_v1beta1/proto/securitycenter_service_pb2.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2019-04-16T08:13:06.000Z
|
2019-04-16T08:13:06.000Z
|
securitycenter/google/cloud/securitycenter_v1beta1/proto/securitycenter_service_pb2.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | null | null | null |
securitycenter/google/cloud/securitycenter_v1beta1/proto/securitycenter_service_pb2.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2020-11-30T02:23:29.000Z
|
2020-11-30T02:23:29.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.securitycenter_v1beta1.proto import (
asset_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
finding_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
organization_settings_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
security_marks_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
source_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto",
package="google.cloud.securitycenter.v1beta1",
syntax="proto3",
serialized_pb=_b(
        '\nFgoogle/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto\x12#google.cloud.securitycenter.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x35google/cloud/securitycenter_v1beta1/proto/asset.proto\x1a\x37google/cloud/securitycenter_v1beta1/proto/finding.proto\x1a\x45google/cloud/securitycenter_v1beta1/proto/organization_settings.proto\x1a>google/cloud/securitycenter_v1beta1/proto/security_marks.proto\x1a\x36google/cloud/securitycenter_v1beta1/proto/source.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"y\n\x14\x43reateFindingRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\nfinding_id\x18\x02 \x01(\t\x12=\n\x07\x66inding\x18\x03 \x01(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding"b\n\x13\x43reateSourceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12;\n\x06source\x18\x02 \x01(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source".\n\x1eGetOrganizationSettingsRequest\x12\x0c\n\x04name\x18\x01 \x01(\t" \n\x10GetSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xd1\x01\n\x12GroupAssetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08group_by\x18\x03 \x01(\t\x12\x33\n\x10\x63ompare_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12-\n\tread_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npage_token\x18\x07 \x01(\t\x12\x11\n\tpage_size\x18\x08 \x01(\x05"\xa9\x01\n\x13GroupAssetsResponse\x12J\n\x10group_by_results\x18\x01 \x03(\x0b\x32\x30.google.cloud.securitycenter.v1beta1.GroupResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x9e\x01\n\x14GroupFindingsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08group_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npage_token\x18\x05 \x01(\t\x12\x11\n\tpage_size\x18\x06 \x01(\x05"\xab\x01\n\x15GroupFindingsResponse\x12J\n\x10group_by_results\x18\x01 \x03(\x0b\x32\x30.google.cloud.securitycenter.v1beta1.GroupResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\xbd\x01\n\x0bGroupResult\x12T\n\nproperties\x18\x01 \x03(\x0b\x32@.google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry\x12\r\n\x05\x63ount\x18\x02 \x01(\x03\x1aI\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01"K\n\x12ListSourcesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x07 \x01(\x05"l\n\x13ListSourcesResponse\x12<\n\x07sources\x18\x01 \x03(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x80\x02\n\x11ListAssetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x33\n\x10\x63ompare_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\nfield_mask\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x12\n\npage_token\x18\x08 \x01(\t\x12\x11\n\tpage_size\x18\t \x01(\x05"\xd6\x03\n\x12ListAssetsResponse\x12\x65\n\x13list_assets_results\x18\x01 \x03(\x0b\x32H.google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\x12\x12\n\ntotal_size\x18\x04 \x01(\x05\x1a\xfc\x01\n\x10ListAssetsResult\x12\x39\n\x05\x61sset\x18\x01 \x01(\x0b\x32*.google.cloud.securitycenter.v1beta1.Asset\x12]\n\x05state\x18\x02 \x01(\x0e\x32N.google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.State"N\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06UNUSED\x10\x01\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x02\x12\x0b\n\x07REMOVED\x10\x03\x12\n\n\x06\x41\x43TIVE\x10\x04"\xcd\x01\n\x13ListFindingsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nfield_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x12\n\npage_token\x18\x06 \x01(\t\x12\x11\n\tpage_size\x18\x07 \x01(\x05"\xb2\x01\n\x14ListFindingsResponse\x12>\n\x08\x66indings\x18\x01 \x03(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\x12\x12\n\ntotal_size\x18\x04 \x01(\x05"\x99\x01\n\x16SetFindingStateRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x05state\x18\x02 \x01(\x0e\x32\x32.google.cloud.securitycenter.v1beta1.Finding.State\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"*\n\x18RunAssetDiscoveryRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t"\x86\x01\n\x14UpdateFindingRequest\x12=\n\x07\x66inding\x18\x01 \x01(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xae\x01\n!UpdateOrganizationSettingsRequest\x12X\n\x15organization_settings\x18\x01 \x01(\x0b\x32\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x83\x01\n\x13UpdateSourceRequest\x12;\n\x06source\x18\x01 \x01(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xc9\x01\n\x1aUpdateSecurityMarksRequest\x12J\n\x0esecurity_marks\x18\x01 \x01(\x0b\x32\x32.google.cloud.securitycenter.v1beta1.SecurityMarks\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\x80\x1c\n\x0eSecurityCenter\x12\xb0\x01\n\x0c\x43reateSource\x12\x38.google.cloud.securitycenter.v1beta1.CreateSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"9\x82\xd3\xe4\x93\x02\x33")/v1beta1/{parent=organizations/*}/sources:\x06source\x12\xbf\x01\n\rCreateFinding\x12\x39.google.cloud.securitycenter.v1beta1.CreateFindingRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"E\x82\xd3\xe4\x93\x02?"4/v1beta1/{parent=organizations/*/sources/*}/findings:\x07\x66inding\x12\x90\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"E\x82\xd3\xe4\x93\x02?":/v1beta1/{resource=organizations/*/sources/*}:getIamPolicy:\x01*\x12\xd7\x01\n\x17GetOrganizationSettings\x12\x43.google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings"<\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta1/{name=organizations/*/organizationSettings}\x12\xa2\x01\n\tGetSource\x12\x35.google.cloud.securitycenter.v1beta1.GetSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{name=organizations/*/sources/*}\x12\xbb\x01\n\x0bGroupAssets\x12\x37.google.cloud.securitycenter.v1beta1.GroupAssetsRequest\x1a\x38.google.cloud.securitycenter.v1beta1.GroupAssetsResponse"9\x82\xd3\xe4\x93\x02\x33"./v1beta1/{parent=organizations/*}/assets:group:\x01*\x12\xcd\x01\n\rGroupFindings\x12\x39.google.cloud.securitycenter.v1beta1.GroupFindingsRequest\x1a:.google.cloud.securitycenter.v1beta1.GroupFindingsResponse"E\x82\xd3\xe4\x93\x02?":/v1beta1/{parent=organizations/*/sources/*}/findings:group:\x01*\x12\xaf\x01\n\nListAssets\x12\x36.google.cloud.securitycenter.v1beta1.ListAssetsRequest\x1a\x37.google.cloud.securitycenter.v1beta1.ListAssetsResponse"0\x82\xd3\xe4\x93\x02*\x12(/v1beta1/{parent=organizations/*}/assets\x12\xc1\x01\n\x0cListFindings\x12\x38.google.cloud.securitycenter.v1beta1.ListFindingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.ListFindingsResponse"<\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta1/{parent=organizations/*/sources/*}/findings\x12\xb3\x01\n\x0bListSources\x12\x37.google.cloud.securitycenter.v1beta1.ListSourcesRequest\x1a\x38.google.cloud.securitycenter.v1beta1.ListSourcesResponse"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{parent=organizations/*}/sources\x12\xb3\x01\n\x11RunAssetDiscovery\x12=.google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest\x1a\x1d.google.longrunning.Operation"@\x82\xd3\xe4\x93\x02:"5/v1beta1/{parent=organizations/*}/assets:runDiscovery:\x01*\x12\xc6\x01\n\x0fSetFindingState\x12;.google.cloud.securitycenter.v1beta1.SetFindingStateRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"H\x82\xd3\xe4\x93\x02\x42"=/v1beta1/{name=organizations/*/sources/*/findings/*}:setState:\x01*\x12\x90\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"E\x82\xd3\xe4\x93\x02?":/v1beta1/{resource=organizations/*/sources/*}:setIamPolicy:\x01*\x12\xb6\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"K\x82\xd3\xe4\x93\x02\x45"@/v1beta1/{resource=organizations/*/sources/*}:testIamPermissions:\x01*\x12\xc7\x01\n\rUpdateFinding\x12\x39.google.cloud.securitycenter.v1beta1.UpdateFindingRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"M\x82\xd3\xe4\x93\x02G2</v1beta1/{finding.name=organizations/*/sources/*/findings/*}:\x07\x66inding\x12\x8a\x02\n\x1aUpdateOrganizationSettings\x12\x46.google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings"i\x82\xd3\xe4\x93\x02\x63\x32J/v1beta1/{organization_settings.name=organizations/*/organizationSettings}:\x15organization_settings\x12\xb7\x01\n\x0cUpdateSource\x12\x38.google.cloud.securitycenter.v1beta1.UpdateSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"@\x82\xd3\xe4\x93\x02:20/v1beta1/{source.name=organizations/*/sources/*}:\x06source\x12\xd0\x02\n\x13UpdateSecurityMarks\x12?.google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest\x1a\x32.google.cloud.securitycenter.v1beta1.SecurityMarks"\xc3\x01\x82\xd3\xe4\x93\x02\xbc\x01\x32\x45/v1beta1/{security_marks.name=organizations/*/assets/*/securityMarks}:\x0esecurity_marksZc2Q/v1beta1/{security_marks.name=organizations/*/sources/*/findings/*/securityMarks}:\x0esecurity_marksB~\n\'com.google.cloud.securitycenter.v1beta1P\x01ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenterb\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2.DESCRIPTOR,
google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR,
google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR,
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.State",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STATE_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNUSED", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADDED", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REMOVED", index=3, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ACTIVE", index=4, number=4, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2754,
serialized_end=2832,
)
_sym_db.RegisterEnumDescriptor(_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE)
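# Illustrative lookups (hypothetical; the numbers come from the enum values
# registered above): once the message classes are built later in this module,
# the enum members are exposed as class attributes, e.g.
#   ListAssetsResponse.ListAssetsResult.ACTIVE == 4
#   ListAssetsResponse.ListAssetsResult.ADDED == 2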
_CREATEFINDINGREQUEST = _descriptor.Descriptor(
name="CreateFindingRequest",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="finding_id",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.finding_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="finding",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.finding",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=699,
serialized_end=820,
)
_CREATESOURCEREQUEST = _descriptor.Descriptor(
name="CreateSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="source",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest.source",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=822,
serialized_end=920,
)
_GETORGANIZATIONSETTINGSREQUEST = _descriptor.Descriptor(
name="GetOrganizationSettingsRequest",
full_name="google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=922,
serialized_end=968,
)
_GETSOURCEREQUEST = _descriptor.Descriptor(
name="GetSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.GetSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.GetSourceRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=970,
serialized_end=1002,
)
_GROUPASSETSREQUEST = _descriptor.Descriptor(
name="GroupAssetsRequest",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.group_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compare_duration",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.compare_duration",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.read_time",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.page_token",
index=5,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.page_size",
index=6,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1005,
serialized_end=1214,
)
_GROUPASSETSRESPONSE = _descriptor.Descriptor(
name="GroupAssetsResponse",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group_by_results",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.group_by_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1217,
serialized_end=1386,
)
_GROUPFINDINGSREQUEST = _descriptor.Descriptor(
name="GroupFindingsRequest",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.group_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.page_token",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.page_size",
index=5,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1389,
serialized_end=1547,
)
_GROUPFINDINGSRESPONSE = _descriptor.Descriptor(
name="GroupFindingsResponse",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group_by_results",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.group_by_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1550,
serialized_end=1721,
)
_GROUPRESULT_PROPERTIESENTRY = _descriptor.Descriptor(
name="PropertiesEntry",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1840,
serialized_end=1913,
)
_GROUPRESULT = _descriptor.Descriptor(
name="GroupResult",
full_name="google.cloud.securitycenter.v1beta1.GroupResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="properties",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.properties",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="count",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.count",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_GROUPRESULT_PROPERTIESENTRY],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1724,
serialized_end=1913,
)
_LISTSOURCESREQUEST = _descriptor.Descriptor(
name="ListSourcesRequest",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.page_size",
index=2,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1915,
serialized_end=1990,
)
_LISTSOURCESRESPONSE = _descriptor.Descriptor(
name="ListSourcesResponse",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="sources",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse.sources",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1992,
serialized_end=2100,
)
_LISTASSETSREQUEST = _descriptor.Descriptor(
name="ListAssetsRequest",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order_by",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.order_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compare_duration",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.compare_duration",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.field_mask",
index=5,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.page_token",
index=6,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.page_size",
index=7,
number=9,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2103,
serialized_end=2359,
)
_LISTASSETSRESPONSE_LISTASSETSRESULT = _descriptor.Descriptor(
name="ListAssetsResult",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="asset",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.asset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.state",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2580,
serialized_end=2832,
)
_LISTASSETSRESPONSE = _descriptor.Descriptor(
name="ListAssetsResponse",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="list_assets_results",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.list_assets_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_size",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.total_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_LISTASSETSRESPONSE_LISTASSETSRESULT],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2362,
serialized_end=2832,
)
_LISTFINDINGSREQUEST = _descriptor.Descriptor(
name="ListFindingsRequest",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order_by",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.order_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.field_mask",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.page_token",
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.page_size",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2835,
serialized_end=3040,
)
_LISTFINDINGSRESPONSE = _descriptor.Descriptor(
name="ListFindingsResponse",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="findings",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.findings",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_size",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.total_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3043,
serialized_end=3221,
)
_SETFINDINGSTATEREQUEST = _descriptor.Descriptor(
name="SetFindingStateRequest",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.state",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3224,
serialized_end=3377,
)
_RUNASSETDISCOVERYREQUEST = _descriptor.Descriptor(
name="RunAssetDiscoveryRequest",
full_name="google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3379,
serialized_end=3421,
)
_UPDATEFINDINGREQUEST = _descriptor.Descriptor(
name="UpdateFindingRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="finding",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest.finding",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3424,
serialized_end=3558,
)
_UPDATEORGANIZATIONSETTINGSREQUEST = _descriptor.Descriptor(
name="UpdateOrganizationSettingsRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="organization_settings",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest.organization_settings",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3561,
serialized_end=3735,
)
_UPDATESOURCEREQUEST = _descriptor.Descriptor(
name="UpdateSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest.source",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3738,
serialized_end=3869,
)
_UPDATESECURITYMARKSREQUEST = _descriptor.Descriptor(
name="UpdateSecurityMarksRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="security_marks",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.security_marks",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3872,
serialized_end=4073,
)
_CREATEFINDINGREQUEST.fields_by_name[
"finding"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_CREATESOURCEREQUEST.fields_by_name[
"source"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_GROUPASSETSREQUEST.fields_by_name[
"compare_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GROUPASSETSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPASSETSRESPONSE.fields_by_name["group_by_results"].message_type = _GROUPRESULT
_GROUPASSETSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPFINDINGSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPFINDINGSRESPONSE.fields_by_name["group_by_results"].message_type = _GROUPRESULT
_GROUPFINDINGSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPRESULT_PROPERTIESENTRY.fields_by_name[
"value"
].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_GROUPRESULT_PROPERTIESENTRY.containing_type = _GROUPRESULT
_GROUPRESULT.fields_by_name["properties"].message_type = _GROUPRESULT_PROPERTIESENTRY
_LISTSOURCESRESPONSE.fields_by_name[
"sources"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_LISTASSETSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTASSETSREQUEST.fields_by_name[
"compare_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_LISTASSETSREQUEST.fields_by_name[
"field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTASSETSRESPONSE_LISTASSETSRESULT.fields_by_name[
"asset"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2._ASSET
)
_LISTASSETSRESPONSE_LISTASSETSRESULT.fields_by_name[
"state"
].enum_type = _LISTASSETSRESPONSE_LISTASSETSRESULT_STATE
_LISTASSETSRESPONSE_LISTASSETSRESULT.containing_type = _LISTASSETSRESPONSE
_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE.containing_type = (
_LISTASSETSRESPONSE_LISTASSETSRESULT
)
_LISTASSETSRESPONSE.fields_by_name[
"list_assets_results"
].message_type = _LISTASSETSRESPONSE_LISTASSETSRESULT
_LISTASSETSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTFINDINGSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTFINDINGSREQUEST.fields_by_name[
"field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTFINDINGSRESPONSE.fields_by_name[
"findings"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_LISTFINDINGSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SETFINDINGSTATEREQUEST.fields_by_name[
"state"
].enum_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING_STATE
)
_SETFINDINGSTATEREQUEST.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATEFINDINGREQUEST.fields_by_name[
"finding"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_UPDATEFINDINGREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATEORGANIZATIONSETTINGSREQUEST.fields_by_name[
"organization_settings"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS
)
_UPDATEORGANIZATIONSETTINGSREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESOURCEREQUEST.fields_by_name[
"source"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_UPDATESOURCEREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"security_marks"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS
)
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["CreateFindingRequest"] = _CREATEFINDINGREQUEST
DESCRIPTOR.message_types_by_name["CreateSourceRequest"] = _CREATESOURCEREQUEST
DESCRIPTOR.message_types_by_name[
"GetOrganizationSettingsRequest"
] = _GETORGANIZATIONSETTINGSREQUEST
DESCRIPTOR.message_types_by_name["GetSourceRequest"] = _GETSOURCEREQUEST
DESCRIPTOR.message_types_by_name["GroupAssetsRequest"] = _GROUPASSETSREQUEST
DESCRIPTOR.message_types_by_name["GroupAssetsResponse"] = _GROUPASSETSRESPONSE
DESCRIPTOR.message_types_by_name["GroupFindingsRequest"] = _GROUPFINDINGSREQUEST
DESCRIPTOR.message_types_by_name["GroupFindingsResponse"] = _GROUPFINDINGSRESPONSE
DESCRIPTOR.message_types_by_name["GroupResult"] = _GROUPRESULT
DESCRIPTOR.message_types_by_name["ListSourcesRequest"] = _LISTSOURCESREQUEST
DESCRIPTOR.message_types_by_name["ListSourcesResponse"] = _LISTSOURCESRESPONSE
DESCRIPTOR.message_types_by_name["ListAssetsRequest"] = _LISTASSETSREQUEST
DESCRIPTOR.message_types_by_name["ListAssetsResponse"] = _LISTASSETSRESPONSE
DESCRIPTOR.message_types_by_name["ListFindingsRequest"] = _LISTFINDINGSREQUEST
DESCRIPTOR.message_types_by_name["ListFindingsResponse"] = _LISTFINDINGSRESPONSE
DESCRIPTOR.message_types_by_name["SetFindingStateRequest"] = _SETFINDINGSTATEREQUEST
DESCRIPTOR.message_types_by_name["RunAssetDiscoveryRequest"] = _RUNASSETDISCOVERYREQUEST
DESCRIPTOR.message_types_by_name["UpdateFindingRequest"] = _UPDATEFINDINGREQUEST
DESCRIPTOR.message_types_by_name[
"UpdateOrganizationSettingsRequest"
] = _UPDATEORGANIZATIONSETTINGSREQUEST
DESCRIPTOR.message_types_by_name["UpdateSourceRequest"] = _UPDATESOURCEREQUEST
DESCRIPTOR.message_types_by_name[
"UpdateSecurityMarksRequest"
] = _UPDATESECURITYMARKSREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateFindingRequest = _reflection.GeneratedProtocolMessageType(
"CreateFindingRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATEFINDINGREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for creating a finding.
Attributes:
parent:
Resource name of the new finding's parent. Its format should
be "organizations/[organization\_id]/sources/[source\_id]".
finding_id:
Unique identifier provided by the client within the parent
scope. It must be alphanumeric and less than or equal to 32
characters and greater than 0 characters in length.
finding:
The Finding being created. The name and security\_marks will
be ignored as they are both output only fields on this
resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.CreateFindingRequest)
),
)
_sym_db.RegisterMessage(CreateFindingRequest)
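# Illustrative usage sketch (hand-written, not protoc output): building a
# CreateFindingRequest per the docstring above. The organization/source ids
# and the finding category are placeholder values.
def _example_create_finding_request():
    request = CreateFindingRequest(
        parent="organizations/123/sources/456",  # placeholder resource name
        finding_id="xss001",  # alphanumeric, 1-32 characters
    )
    # name and security_marks on the finding are output only and ignored here.
    request.finding.category = "XSS"  # placeholder category value
    return request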
CreateSourceRequest = _reflection.GeneratedProtocolMessageType(
"CreateSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATESOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for creating a source.
Attributes:
parent:
Resource name of the new source's parent. Its format should be
"organizations/[organization\_id]".
source:
The Source being created, only the display\_name and
description will be used. All other fields will be ignored.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.CreateSourceRequest)
),
)
_sym_db.RegisterMessage(CreateSourceRequest)
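# Illustrative usage sketch (hand-written, not protoc output): only the
# source's display_name and description are honored on creation, per the
# docstring above. Values are placeholders.
def _example_create_source_request():
    request = CreateSourceRequest(parent="organizations/123")
    request.source.display_name = "My Scanner"
    request.source.description = "Custom vulnerability scanner."
    return request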
GetOrganizationSettingsRequest = _reflection.GeneratedProtocolMessageType(
"GetOrganizationSettingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETORGANIZATIONSETTINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for getting organization settings.
Attributes:
name:
Name of the organization to get organization settings for. Its
format is
"organizations/[organization\_id]/organizationSettings".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest)
),
)
_sym_db.RegisterMessage(GetOrganizationSettingsRequest)
GetSourceRequest = _reflection.GeneratedProtocolMessageType(
"GetSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETSOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for getting a source.
Attributes:
      name:
          Relative resource name of the source. Its format is
          "organizations/[organization\_id]/sources/[source\_id]".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GetSourceRequest)
),
)
_sym_db.RegisterMessage(GetSourceRequest)
GroupAssetsRequest = _reflection.GeneratedProtocolMessageType(
"GroupAssetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPASSETSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for grouping by assets.
Attributes:
parent:
Name of the organization to groupBy. Its format is
"organizations/[organization\_id]".
      filter:
          Expression that defines the filter to apply across assets.
          The expression is a list of zero or more restrictions
          combined via the logical operators ``AND`` and ``OR``.
          Parentheses are not supported, and ``OR`` has higher
          precedence than ``AND``. Restrictions have the form ``<field>
          <operator> <value>`` and may have a ``-`` character in front
          of them to indicate negation. The fields map to those defined
          in the Asset resource. Examples include:
          - name
          - security\_center\_properties.resource\_name
          - resource\_properties.a\_property
          - security\_marks.marks.marka
          The supported operators are:
          - ``=`` for all value types.
          - ``>``, ``<``, ``>=``, ``<=`` for integer values.
          - ``:``, meaning substring matching, for strings.
          The supported value types are:
          - string literals in quotes.
          - integer literals without quotes.
          - boolean literals ``true`` and ``false`` without quotes.
          For example, ``resource_properties.size = 100`` is a valid
          filter string.
      group_by:
          Expression that defines what asset fields to use for
          grouping. The string value should follow SQL syntax: a
          comma-separated list of fields. For example:
          "security\_center\_properties.resource\_project,security\_center\_properties.project".
          The following fields are supported when compare\_duration is
          not set:
          - security\_center\_properties.resource\_project
          - security\_center\_properties.resource\_type
          - security\_center\_properties.resource\_parent
          The following fields are supported when compare\_duration is
          set:
          - security\_center\_properties.resource\_type
      compare_duration:
          When compare\_duration is set, the Asset's "state" property
          is updated to indicate whether the asset was added, removed,
          or remained present during the compare\_duration period of
          time that precedes the read\_time. This is the time between
          (read\_time - compare\_duration) and read\_time. The state
          value is derived based on the presence of the asset at the
          two points in time. Intermediate state changes between the
          two times don't affect the result. For example, the results
          aren't affected if the asset is removed and re-created again.
          Possible "state" values when compare\_duration is specified:
          - "ADDED": indicates that the asset was not present before
            compare\_duration, but present at read\_time.
          - "REMOVED": indicates that the asset was present at the
            start of compare\_duration, but not present at read\_time.
          - "ACTIVE": indicates that the asset was present at both the
            start and the end of the time period defined by
            compare\_duration and read\_time.
          This field is ignored if ``state`` is not a field in
          ``group_by``.
read_time:
Time used as a reference point when filtering assets. The
filter is limited to assets existing at the supplied time and
their values are those at that specific time. Absence of this
field will default to the API's version of NOW.
page_token:
The value returned by the last ``GroupAssetsResponse``;
indicates that this is a continuation of a prior
``GroupAssets`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupAssetsRequest)
),
)
_sym_db.RegisterMessage(GroupAssetsRequest)
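# Illustrative usage sketch (hand-written, not protoc output): a grouping
# request using the filter syntax and compare_duration semantics described
# above. The filter string and the 24h window are placeholder choices.
def _example_group_assets_request():
    request = GroupAssetsRequest(
        parent="organizations/123",
        filter='security_center_properties.resource_name : "instance"',
        group_by="security_center_properties.resource_type",
        page_size=100,
    )
    request.compare_duration.seconds = 24 * 60 * 60  # window before read_time
    request.read_time.GetCurrentTime()  # an unset read_time defaults to NOW
    return request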
GroupAssetsResponse = _reflection.GeneratedProtocolMessageType(
"GroupAssetsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPASSETSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for grouping by assets.
Attributes:
group_by_results:
Group results. There exists an element for each existing
unique combination of property/values. The element contains a
count for the number of times those specific property/values
appear.
read_time:
Time used for executing the groupBy request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupAssetsResponse)
),
)
_sym_db.RegisterMessage(GroupAssetsResponse)
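# Illustrative usage sketch (hand-written, not protoc output): paging through
# grouped results by feeding next_page_token back into a follow-up request,
# as the page_token docstring above describes. `call` stands in for whatever
# transport actually issues the RPC; it is not defined in this module.
def _example_page_group_assets(call, request):
    while True:
        response = call(request)  # assumed to return a GroupAssetsResponse
        for result in response.group_by_results:
            yield result
        if not response.next_page_token:  # empty token means no more pages
            break
        request.page_token = response.next_page_token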
GroupFindingsRequest = _reflection.GeneratedProtocolMessageType(
"GroupFindingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPFINDINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for grouping by findings.
Attributes:
parent:
Name of the source to groupBy. Its format is
"organizations/[organization\_id]/sources/[source\_id]". To
groupBy across all sources provide a source\_id of ``-``. For
example: organizations/123/sources/-
      filter:
          Expression that defines the filter to apply across findings.
          The expression is a list of one or more restrictions combined
          via the logical operators ``AND`` and ``OR``. Parentheses are
          not supported, and ``OR`` has higher precedence than ``AND``.
          Restrictions have the form ``<field> <operator> <value>`` and
          may have a ``-`` character in front of them to indicate
          negation. Examples include:
          - name
          - source\_properties.a\_property
          - security\_marks.marks.marka
          The supported operators are:
          - ``=`` for all value types.
          - ``>``, ``<``, ``>=``, ``<=`` for integer values.
          - ``:``, meaning substring matching, for strings.
          The supported value types are:
          - string literals in quotes.
          - integer literals without quotes.
          - boolean literals ``true`` and ``false`` without quotes.
          For example, ``source_properties.size = 100`` is a valid
          filter string.
      group_by:
          Expression that defines what finding fields to use for
          grouping (including ``state``). The string value should
          follow SQL syntax: a comma-separated list of fields. For
          example: "parent,resource\_name". The following fields are
          supported:
          - resource\_name
          - category
          - state
          - parent
read_time:
Time used as a reference point when filtering findings. The
filter is limited to findings existing at the supplied time
and their values are those at that specific time. Absence of
this field will default to the API's version of NOW.
page_token:
The value returned by the last ``GroupFindingsResponse``;
indicates that this is a continuation of a prior
``GroupFindings`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupFindingsRequest)
),
)
_sym_db.RegisterMessage(GroupFindingsRequest)
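# Illustrative usage sketch (hand-written, not protoc output): grouping
# findings across every source by passing the wildcard source_id "-", as the
# parent docstring above allows. The organization id is a placeholder.
def _example_group_findings_request():
    return GroupFindingsRequest(
        parent="organizations/123/sources/-",  # "-" groups across all sources
        group_by="category,state",
        page_size=50,
    )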
GroupFindingsResponse = _reflection.GeneratedProtocolMessageType(
"GroupFindingsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPFINDINGSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for group by findings.
Attributes:
group_by_results:
Group results. There exists an element for each existing
unique combination of property/values. The element contains a
count for the number of times those specific property/values
appear.
read_time:
Time used for executing the groupBy request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupFindingsResponse)
),
)
_sym_db.RegisterMessage(GroupFindingsResponse)
GroupResult = _reflection.GeneratedProtocolMessageType(
"GroupResult",
(_message.Message,),
dict(
PropertiesEntry=_reflection.GeneratedProtocolMessageType(
"PropertiesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPRESULT_PROPERTIESENTRY,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry)
),
),
DESCRIPTOR=_GROUPRESULT,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Result containing the properties and count of a groupBy request.
Attributes:
properties:
Properties matching the groupBy fields in the request.
count:
Total count of resources for the given properties.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupResult)
),
)
_sym_db.RegisterMessage(GroupResult)
_sym_db.RegisterMessage(GroupResult.PropertiesEntry)
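# Illustrative usage sketch (hand-written, not protoc output): the properties
# map carries google.protobuf.Value entries keyed by the group_by fields, so
# each grouped value has to be unpacked by kind before use.
def _example_read_group_result(result):
    row = {}
    for key, value in result.properties.items():
        # Only the string case is unpacked here; Value can also carry
        # numbers, bools, lists, and structs.
        row[key] = value.string_value if value.HasField("string_value") else value
    return row, result.count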
ListSourcesRequest = _reflection.GeneratedProtocolMessageType(
"ListSourcesRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSOURCESREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing sources.
Attributes:
parent:
Resource name of the parent of sources to list. Its format
should be "organizations/[organization\_id]".
page_token:
The value returned by the last ``ListSourcesResponse``;
indicates that this is a continuation of a prior
``ListSources`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListSourcesRequest)
),
)
_sym_db.RegisterMessage(ListSourcesRequest)
ListSourcesResponse = _reflection.GeneratedProtocolMessageType(
"ListSourcesResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSOURCESRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing sources.
Attributes:
sources:
Sources belonging to the requested parent.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListSourcesResponse)
),
)
_sym_db.RegisterMessage(ListSourcesResponse)
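# Illustrative usage sketch (hand-written, not protoc output): a wire
# round-trip, serializing a request and re-parsing a response payload with
# the standard protobuf APIs. `response_bytes` is a placeholder payload.
def _example_sources_round_trip(response_bytes):
    request = ListSourcesRequest(parent="organizations/123", page_size=10)
    payload = request.SerializeToString()  # bytes ready for the transport
    response = ListSourcesResponse.FromString(response_bytes)
    return payload, [source.name for source in response.sources]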
ListAssetsRequest = _reflection.GeneratedProtocolMessageType(
"ListAssetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTASSETSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing assets.
Attributes:
parent:
Name of the organization assets should belong to. Its format
is "organizations/[organization\_id]".
      filter:
          Expression that defines the filter to apply across assets.
          The expression is a list of zero or more restrictions
          combined via the logical operators ``AND`` and ``OR``.
          Parentheses are not supported, and ``OR`` has higher
          precedence than ``AND``. Restrictions have the form ``<field>
          <operator> <value>`` and may have a ``-`` character in front
          of them to indicate negation. The fields map to those defined
          in the Asset resource. Examples include:
          - name
          - security\_center\_properties.resource\_name
          - resource\_properties.a\_property
          - security\_marks.marks.marka
          The supported operators are:
          - ``=`` for all value types.
          - ``>``, ``<``, ``>=``, ``<=`` for integer values.
          - ``:``, meaning substring matching, for strings.
          The supported value types are:
          - string literals in quotes.
          - integer literals without quotes.
          - boolean literals ``true`` and ``false`` without quotes.
          For example, ``resource_properties.size = 100`` is a valid
          filter string.
order_by:
Expression that defines what fields and order to use for
          sorting. The string value should follow SQL syntax: a
          comma-separated list of fields. For example:
"name,resource\_properties.a\_property". The default sorting
order is ascending. To specify descending order for a field, a
suffix " desc" should be appended to the field name. For
example: "name desc,resource\_properties.a\_property".
Redundant space characters in the syntax are insignificant.
"name desc,resource\_properties.a\_property" and " name desc ,
resource\_properties.a\_property " are equivalent.
read_time:
Time used as a reference point when filtering assets. The
filter is limited to assets existing at the supplied time and
their values are those at that specific time. Absence of this
field will default to the API's version of NOW.
      compare_duration:
          When compare\_duration is set, the ListAssetsResult's "state"
          attribute is updated to indicate whether the asset was added,
          removed, or remained present during the compare\_duration
          period of time that precedes the read\_time. This is the time
          between (read\_time - compare\_duration) and read\_time. The
          state value is derived based on the presence of the asset at
          the two points in time. Intermediate state changes between
          the two times don't affect the result. For example, the
          results aren't affected if the asset is removed and
          re-created again. Possible "state" values when
          compare\_duration is specified:
          - "ADDED": indicates that the asset was not present before
            compare\_duration, but present at read\_time.
          - "REMOVED": indicates that the asset was present at the
            start of compare\_duration, but not present at read\_time.
          - "ACTIVE": indicates that the asset was present at both the
            start and the end of the time period defined by
            compare\_duration and read\_time.
          If compare\_duration is not specified, then the only possible
          state is "UNUSED", which indicates that the asset is present
          at read\_time.
field_mask:
Optional. A field mask to specify the ListAssetsResult fields
to be listed in the response. An empty field mask will list
all fields.
page_token:
The value returned by the last ``ListAssetsResponse``;
indicates that this is a continuation of a prior
``ListAssets`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsRequest)
),
)
_sym_db.RegisterMessage(ListAssetsRequest)
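# Illustrative usage sketch (hand-written, not protoc output): combining the
# filter, order_by, and field_mask options described above. The mask path is
# a placeholder; an empty field mask would return all fields.
def _example_list_assets_request():
    request = ListAssetsRequest(
        parent="organizations/123",
        filter="resource_properties.size = 100",  # example from the docstring
        order_by="name desc",  # a " desc" suffix selects descending order
        page_size=100,
    )
    request.field_mask.paths.append("asset.name")  # placeholder mask path
    return request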
ListAssetsResponse = _reflection.GeneratedProtocolMessageType(
"ListAssetsResponse",
(_message.Message,),
dict(
ListAssetsResult=_reflection.GeneratedProtocolMessageType(
"ListAssetsResult",
(_message.Message,),
dict(
DESCRIPTOR=_LISTASSETSRESPONSE_LISTASSETSRESULT,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Result containing the Asset and its State.
Attributes:
asset:
Asset matching the search request.
state:
State of the asset.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult)
),
),
DESCRIPTOR=_LISTASSETSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing assets.
Attributes:
list_assets_results:
Assets matching the list request.
read_time:
Time used for executing the list request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
total_size:
The total number of assets matching the query.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsResponse)
),
)
_sym_db.RegisterMessage(ListAssetsResponse)
_sym_db.RegisterMessage(ListAssetsResponse.ListAssetsResult)
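# Illustrative usage sketch (hand-written, not protoc output): picking out
# the assets that appeared during a compare_duration window by checking each
# result's state against the nested State enum registered above.
def _example_added_assets(response):
    added = ListAssetsResponse.ListAssetsResult.ADDED
    return [result.asset for result in response.list_assets_results
            if result.state == added]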
ListFindingsRequest = _reflection.GeneratedProtocolMessageType(
"ListFindingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTFINDINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing findings.
Attributes:
parent:
Name of the source the findings belong to. Its format is
"organizations/[organization\_id]/sources/[source\_id]". To
list across all sources provide a source\_id of ``-``. For
example: organizations/123/sources/-
      filter:
          Expression that defines the filter to apply across findings.
          The expression is a list of one or more restrictions combined
          via the logical operators ``AND`` and ``OR``. Parentheses are
          not supported, and ``OR`` has higher precedence than ``AND``.
          Restrictions have the form ``<field> <operator> <value>`` and
          may have a ``-`` character in front of them to indicate
          negation. Examples include:
          - name
          - source\_properties.a\_property
          - security\_marks.marks.marka
          The supported operators are:
          - ``=`` for all value types.
          - ``>``, ``<``, ``>=``, ``<=`` for integer values.
          - ``:``, meaning substring matching, for strings.
          The supported value types are:
          - string literals in quotes.
          - integer literals without quotes.
          - boolean literals ``true`` and ``false`` without quotes.
          For example, ``source_properties.size = 100`` is a valid
          filter string.
order_by:
Expression that defines what fields and order to use for
          sorting. The string value should follow SQL syntax: a
          comma-separated list of fields. For example:
"name,resource\_properties.a\_property". The default sorting
order is ascending. To specify descending order for a field, a
suffix " desc" should be appended to the field name. For
example: "name desc,source\_properties.a\_property". Redundant
space characters in the syntax are insignificant. "name
desc,source\_properties.a\_property" and " name desc ,
source\_properties.a\_property " are equivalent.
read_time:
Time used as a reference point when filtering findings. The
filter is limited to findings existing at the supplied time
and their values are those at that specific time. Absence of
this field will default to the API's version of NOW.
field_mask:
Optional. A field mask to specify the Finding fields to be
listed in the response. An empty field mask will list all
fields.
page_token:
The value returned by the last ``ListFindingsResponse``;
indicates that this is a continuation of a prior
``ListFindings`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListFindingsRequest)
),
)
_sym_db.RegisterMessage(ListFindingsRequest)
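# Illustrative usage sketch (hand-written, not protoc output): a findings
# query pinned to a point in time, since an unset read_time defaults to the
# API's version of NOW per the docstring above. The filter string is a
# placeholder.
def _example_list_findings_request(snapshot_seconds):
    request = ListFindingsRequest(
        parent="organizations/123/sources/-",  # "-" lists across all sources
        filter='category = "XSS"',  # placeholder filter string
        order_by="name desc,source_properties.a_property",
    )
    request.read_time.seconds = snapshot_seconds  # snapshot in epoch seconds
    return request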
ListFindingsResponse = _reflection.GeneratedProtocolMessageType(
"ListFindingsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTFINDINGSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing findings.
Attributes:
findings:
Findings matching the list request.
read_time:
Time used for executing the list request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
total_size:
The total number of findings matching the query.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListFindingsResponse)
),
)
_sym_db.RegisterMessage(ListFindingsResponse)
SetFindingStateRequest = _reflection.GeneratedProtocolMessageType(
"SetFindingStateRequest",
(_message.Message,),
dict(
DESCRIPTOR=_SETFINDINGSTATEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a finding's state.
Attributes:
name:
The relative resource name of the finding. See: https://cloud.
google.com/apis/design/resource\_names#relative\_resource\_nam
e Example: "organizations/123/sources/456/findings/789".
state:
The desired State of the finding.
start_time:
The time at which the updated state takes effect.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.SetFindingStateRequest)
),
)
_sym_db.RegisterMessage(SetFindingStateRequest)
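# --- Illustrative usage sketch (editorial addition, not protoc output) ---
# Building a SetFindingStateRequest. The finding name is hypothetical, the
# state value assumes the Finding message imported above exposes an
# INACTIVE enum value, and start_time is populated with the current time
# via Timestamp.GetCurrentTime().
def _example_set_finding_state_request():
    request = SetFindingStateRequest(
        name="organizations/123/sources/456/findings/789",
        state=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2.Finding.INACTIVE,
    )
    request.start_time.GetCurrentTime()
    return request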
RunAssetDiscoveryRequest = _reflection.GeneratedProtocolMessageType(
"RunAssetDiscoveryRequest",
(_message.Message,),
dict(
DESCRIPTOR=_RUNASSETDISCOVERYREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for running asset discovery for an organization.
Attributes:
parent:
Name of the organization to run asset discovery for. Its
format is "organizations/[organization\_id]".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest)
),
)
_sym_db.RegisterMessage(RunAssetDiscoveryRequest)
UpdateFindingRequest = _reflection.GeneratedProtocolMessageType(
"UpdateFindingRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEFINDINGREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating or creating a finding.
Attributes:
finding:
The finding resource to update or create if it does not
already exist. parent, security\_marks, and update\_time will
be ignored. In the case of creation, the finding id portion
of the name must be alphanumeric and between 1 and 32
characters in length.
update_mask:
The FieldMask to use when updating the finding resource. This
field is ignored if the finding does not already exist and the
finding is created.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateFindingRequest)
),
)
_sym_db.RegisterMessage(UpdateFindingRequest)
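# --- Illustrative usage sketch (editorial addition, not protoc output) ---
# An upsert via UpdateFindingRequest: update_mask restricts which Finding
# fields the server touches. The mask paths below are hypothetical
# examples, not a required set; FieldMask comes from the field_mask_pb2
# module imported at the top of this file.
def _example_update_finding_request(finding):
    return UpdateFindingRequest(
        finding=finding,
        update_mask=google_dot_protobuf_dot_field__mask__pb2.FieldMask(
            paths=["state", "event_time"]
        ),
    )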
UpdateOrganizationSettingsRequest = _reflection.GeneratedProtocolMessageType(
"UpdateOrganizationSettingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEORGANIZATIONSETTINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating an organization's settings.
Attributes:
organization_settings:
The organization settings resource to update.
update_mask:
The FieldMask to use when updating the settings resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest)
),
)
_sym_db.RegisterMessage(UpdateOrganizationSettingsRequest)
UpdateSourceRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a source.
Attributes:
source:
The source resource to update.
update_mask:
The FieldMask to use when updating the source resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateSourceRequest)
),
)
_sym_db.RegisterMessage(UpdateSourceRequest)
UpdateSecurityMarksRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSecurityMarksRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESECURITYMARKSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a SecurityMarks resource.
Attributes:
security_marks:
The security marks resource to update.
update_mask:
The FieldMask to use when updating the security marks
resource.
start_time:
The time at which the updated SecurityMarks take effect.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest)
),
)
_sym_db.RegisterMessage(UpdateSecurityMarksRequest)
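# --- Illustrative usage sketch (editorial addition, not protoc output) ---
# Building an UpdateSecurityMarksRequest, assuming the SecurityMarks
# message exposes a string-to-string `marks` map as in the v1beta1 proto.
# The resource name and mark key are hypothetical; the mask limits the
# update to the single key being written.
def _example_update_security_marks_request(resource_name):
    marks_pb2 = google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2
    marks = marks_pb2.SecurityMarks(name=resource_name + "/securityMarks")
    marks.marks["key_a"] = "value_a"
    return UpdateSecurityMarksRequest(
        security_marks=marks,
        update_mask=google_dot_protobuf_dot_field__mask__pb2.FieldMask(
            paths=["marks.key_a"]
        ),
    )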
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b(
"\n'com.google.cloud.securitycenter.v1beta1P\001ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenter"
),
)
_GROUPRESULT_PROPERTIESENTRY.has_options = True
_GROUPRESULT_PROPERTIESENTRY._options = _descriptor._ParseOptions(
descriptor_pb2.MessageOptions(), _b("8\001")
)
_SECURITYCENTER = _descriptor.ServiceDescriptor(
name="SecurityCenter",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter",
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=4076,
serialized_end=7660,
methods=[
_descriptor.MethodDescriptor(
name="CreateSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.CreateSource",
index=0,
containing_service=None,
input_type=_CREATESOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\0023")/v1beta1/{parent=organizations/*}/sources:\006source'
),
),
),
_descriptor.MethodDescriptor(
name="CreateFinding",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.CreateFinding",
index=1,
containing_service=None,
input_type=_CREATEFINDINGREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?"4/v1beta1/{parent=organizations/*/sources/*}/findings:\007finding'
),
),
),
_descriptor.MethodDescriptor(
name="GetIamPolicy",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetIamPolicy",
index=2,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST,
output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{resource=organizations/*/sources/*}:getIamPolicy:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="GetOrganizationSettings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetOrganizationSettings",
index=3,
containing_service=None,
input_type=_GETORGANIZATIONSETTINGSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\0026\0224/v1beta1/{name=organizations/*/organizationSettings}"
),
),
),
_descriptor.MethodDescriptor(
name="GetSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetSource",
index=4,
containing_service=None,
input_type=_GETSOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002+\022)/v1beta1/{name=organizations/*/sources/*}"
),
),
),
_descriptor.MethodDescriptor(
name="GroupAssets",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GroupAssets",
index=5,
containing_service=None,
input_type=_GROUPASSETSREQUEST,
output_type=_GROUPASSETSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\0023"./v1beta1/{parent=organizations/*}/assets:group:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="GroupFindings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GroupFindings",
index=6,
containing_service=None,
input_type=_GROUPFINDINGSREQUEST,
output_type=_GROUPFINDINGSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{parent=organizations/*/sources/*}/findings:group:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="ListAssets",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListAssets",
index=7,
containing_service=None,
input_type=_LISTASSETSREQUEST,
output_type=_LISTASSETSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002*\022(/v1beta1/{parent=organizations/*}/assets"
),
),
),
_descriptor.MethodDescriptor(
name="ListFindings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListFindings",
index=8,
containing_service=None,
input_type=_LISTFINDINGSREQUEST,
output_type=_LISTFINDINGSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\0026\0224/v1beta1/{parent=organizations/*/sources/*}/findings"
),
),
),
_descriptor.MethodDescriptor(
name="ListSources",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListSources",
index=9,
containing_service=None,
input_type=_LISTSOURCESREQUEST,
output_type=_LISTSOURCESRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002+\022)/v1beta1/{parent=organizations/*}/sources"
),
),
),
_descriptor.MethodDescriptor(
name="RunAssetDiscovery",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.RunAssetDiscovery",
index=10,
containing_service=None,
input_type=_RUNASSETDISCOVERYREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002:"5/v1beta1/{parent=organizations/*}/assets:runDiscovery:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="SetFindingState",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.SetFindingState",
index=11,
containing_service=None,
input_type=_SETFINDINGSTATEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002B"=/v1beta1/{name=organizations/*/sources/*/findings/*}:setState:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="SetIamPolicy",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.SetIamPolicy",
index=12,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST,
output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{resource=organizations/*/sources/*}:setIamPolicy:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="TestIamPermissions",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.TestIamPermissions",
index=13,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST,
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002E"@/v1beta1/{resource=organizations/*/sources/*}:testIamPermissions:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="UpdateFinding",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateFinding",
index=14,
containing_service=None,
input_type=_UPDATEFINDINGREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002G2</v1beta1/{finding.name=organizations/*/sources/*/findings/*}:\007finding"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateOrganizationSettings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateOrganizationSettings",
index=15,
containing_service=None,
input_type=_UPDATEORGANIZATIONSETTINGSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002c2J/v1beta1/{organization_settings.name=organizations/*/organizationSettings}:\025organization_settings"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateSource",
index=16,
containing_service=None,
input_type=_UPDATESOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002:20/v1beta1/{source.name=organizations/*/sources/*}:\006source"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateSecurityMarks",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateSecurityMarks",
index=17,
containing_service=None,
input_type=_UPDATESECURITYMARKSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002\274\0012E/v1beta1/{security_marks.name=organizations/*/assets/*/securityMarks}:\016security_marksZc2Q/v1beta1/{security_marks.name=organizations/*/sources/*/findings/*/securityMarks}:\016security_marks"
),
),
),
],
)
_sym_db.RegisterServiceDescriptor(_SECURITYCENTER)
DESCRIPTOR.services_by_name["SecurityCenter"] = _SECURITYCENTER
# @@protoc_insertion_point(module_scope)
# dataset row metadata: avg_line_length=38.48368, max_line_length=10592, alphanum_fraction=0.64356
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.securitycenter_v1beta1.proto import (
asset_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
finding_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
organization_settings_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
security_marks_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2,
)
from google.cloud.securitycenter_v1beta1.proto import (
source_pb2 as google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto",
package="google.cloud.securitycenter.v1beta1",
syntax="proto3",
serialized_pb=_b(
'\nFgoogle/cloud/securitycenter_v1beta1/proto/securitycenter_service.proto\x12#google.cloud.securitycenter.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x35google/cloud/securitycenter_v1beta1/proto/asset.proto\x1a\x37google/cloud/securitycenter_v1beta1/proto/finding.proto\x1a\x45google/cloud/securitycenter_v1beta1/proto/organization_settings.proto\x1a>google/cloud/securitycenter_v1beta1/proto/security_marks.proto\x1a\x36google/cloud/securitycenter_v1beta1/proto/source.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"y\n\x14\x43reateFindingRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\nfinding_id\x18\x02 \x01(\t\x12=\n\x07\x66inding\x18\x03 \x01(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding"b\n\x13\x43reateSourceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12;\n\x06source\x18\x02 \x01(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source".\n\x1eGetOrganizationSettingsRequest\x12\x0c\n\x04name\x18\x01 \x01(\t" \n\x10GetSourceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xd1\x01\n\x12GroupAssetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08group_by\x18\x03 \x01(\t\x12\x33\n\x10\x63ompare_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12-\n\tread_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npage_token\x18\x07 \x01(\t\x12\x11\n\tpage_size\x18\x08 \x01(\x05"\xa9\x01\n\x13GroupAssetsResponse\x12J\n\x10group_by_results\x18\x01 \x03(\x0b\x32\x30.google.cloud.securitycenter.v1beta1.GroupResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x9e\x01\n\x14GroupFindingsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08group_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npage_token\x18\x05 \x01(\t\x12\x11\n\tpage_size\x18\x06 \x01(\x05"\xab\x01\n\x15GroupFindingsResponse\x12J\n\x10group_by_results\x18\x01 \x03(\x0b\x32\x30.google.cloud.securitycenter.v1beta1.GroupResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\xbd\x01\n\x0bGroupResult\x12T\n\nproperties\x18\x01 \x03(\x0b\x32@.google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry\x12\r\n\x05\x63ount\x18\x02 \x01(\x03\x1aI\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01"K\n\x12ListSourcesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x07 \x01(\x05"l\n\x13ListSourcesResponse\x12<\n\x07sources\x18\x01 \x03(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x80\x02\n\x11ListAssetsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x33\n\x10\x63ompare_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\nfield_mask\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x12\n\npage_token\x18\x08 \x01(\t\x12\x11\n\tpage_size\x18\t \x01(\x05"\xd6\x03\n\x12ListAssetsResponse\x12\x65\n\x13list_assets_results\x18\x01 '
'\x03(\x0b\x32H.google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\x12\x12\n\ntotal_size\x18\x04 \x01(\x05\x1a\xfc\x01\n\x10ListAssetsResult\x12\x39\n\x05\x61sset\x18\x01 \x01(\x0b\x32*.google.cloud.securitycenter.v1beta1.Asset\x12]\n\x05state\x18\x02 \x01(\x0e\x32N.google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.State"N\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\n\n\x06UNUSED\x10\x01\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x02\x12\x0b\n\x07REMOVED\x10\x03\x12\n\n\x06\x41\x43TIVE\x10\x04"\xcd\x01\n\x13ListFindingsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nfield_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x12\n\npage_token\x18\x06 \x01(\t\x12\x11\n\tpage_size\x18\x07 \x01(\x05"\xb2\x01\n\x14ListFindingsResponse\x12>\n\x08\x66indings\x18\x01 \x03(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding\x12-\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\x12\x12\n\ntotal_size\x18\x04 \x01(\x05"\x99\x01\n\x16SetFindingStateRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x05state\x18\x02 \x01(\x0e\x32\x32.google.cloud.securitycenter.v1beta1.Finding.State\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"*\n\x18RunAssetDiscoveryRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t"\x86\x01\n\x14UpdateFindingRequest\x12=\n\x07\x66inding\x18\x01 \x01(\x0b\x32,.google.cloud.securitycenter.v1beta1.Finding\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xae\x01\n!UpdateOrganizationSettingsRequest\x12X\n\x15organization_settings\x18\x01 \x01(\x0b\x32\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x83\x01\n\x13UpdateSourceRequest\x12;\n\x06source\x18\x01 \x01(\x0b\x32+.google.cloud.securitycenter.v1beta1.Source\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xc9\x01\n\x1aUpdateSecurityMarksRequest\x12J\n\x0esecurity_marks\x18\x01 \x01(\x0b\x32\x32.google.cloud.securitycenter.v1beta1.SecurityMarks\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12.\n\nstart_time\x18\x03 '
'\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\x80\x1c\n\x0eSecurityCenter\x12\xb0\x01\n\x0c\x43reateSource\x12\x38.google.cloud.securitycenter.v1beta1.CreateSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"9\x82\xd3\xe4\x93\x02\x33")/v1beta1/{parent=organizations/*}/sources:\x06source\x12\xbf\x01\n\rCreateFinding\x12\x39.google.cloud.securitycenter.v1beta1.CreateFindingRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"E\x82\xd3\xe4\x93\x02?"4/v1beta1/{parent=organizations/*/sources/*}/findings:\x07\x66inding\x12\x90\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"E\x82\xd3\xe4\x93\x02?":/v1beta1/{resource=organizations/*/sources/*}:getIamPolicy:\x01*\x12\xd7\x01\n\x17GetOrganizationSettings\x12\x43.google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings"<\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta1/{name=organizations/*/organizationSettings}\x12\xa2\x01\n\tGetSource\x12\x35.google.cloud.securitycenter.v1beta1.GetSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{name=organizations/*/sources/*}\x12\xbb\x01\n\x0bGroupAssets\x12\x37.google.cloud.securitycenter.v1beta1.GroupAssetsRequest\x1a\x38.google.cloud.securitycenter.v1beta1.GroupAssetsResponse"9\x82\xd3\xe4\x93\x02\x33"./v1beta1/{parent=organizations/*}/assets:group:\x01*\x12\xcd\x01\n\rGroupFindings\x12\x39.google.cloud.securitycenter.v1beta1.GroupFindingsRequest\x1a:.google.cloud.securitycenter.v1beta1.GroupFindingsResponse"E\x82\xd3\xe4\x93\x02?":/v1beta1/{parent=organizations/*/sources/*}/findings:group:\x01*\x12\xaf\x01\n\nListAssets\x12\x36.google.cloud.securitycenter.v1beta1.ListAssetsRequest\x1a\x37.google.cloud.securitycenter.v1beta1.ListAssetsResponse"0\x82\xd3\xe4\x93\x02*\x12(/v1beta1/{parent=organizations/*}/assets\x12\xc1\x01\n\x0cListFindings\x12\x38.google.cloud.securitycenter.v1beta1.ListFindingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.ListFindingsResponse"<\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta1/{parent=organizations/*/sources/*}/findings\x12\xb3\x01\n\x0bListSources\x12\x37.google.cloud.securitycenter.v1beta1.ListSourcesRequest\x1a\x38.google.cloud.securitycenter.v1beta1.ListSourcesResponse"1\x82\xd3\xe4\x93\x02+\x12)/v1beta1/{parent=organizations/*}/sources\x12\xb3\x01\n\x11RunAssetDiscovery\x12=.google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest\x1a\x1d.google.longrunning.Operation"@\x82\xd3\xe4\x93\x02:"5/v1beta1/{parent=organizations/*}/assets:runDiscovery:\x01*\x12\xc6\x01\n\x0fSetFindingState\x12;.google.cloud.securitycenter.v1beta1.SetFindingStateRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"H\x82\xd3\xe4\x93\x02\x42"=/v1beta1/{name=organizations/*/sources/*/findings/*}:setState:\x01*\x12\x90\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"E\x82\xd3\xe4\x93\x02?":/v1beta1/{resource=organizations/*/sources/*}:setIamPolicy:\x01*\x12\xb6\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"K\x82\xd3\xe4\x93\x02\x45"@/v1beta1/{resource=organizations/*/sources/*}:testIamPermissions:\x01*\x12\xc7\x01\n\rUpdateFinding\x12\x39.google.cloud.securitycenter.v1beta1.UpdateFindingRequest\x1a,.google.cloud.securitycenter.v1beta1.Finding"M\x82\xd3\xe4\x93\x02G2</v1beta1/{finding.name=organizations/*/sources/*/findings/*}:\x07\x66inding\x12\x8a\x02\n\x1aUpdateOrganizationSettings\x12\x46.google.cloud.securitycen'
'ter.v1beta1.UpdateOrganizationSettingsRequest\x1a\x39.google.cloud.securitycenter.v1beta1.OrganizationSettings"i\x82\xd3\xe4\x93\x02\x63\x32J/v1beta1/{organization_settings.name=organizations/*/organizationSettings}:\x15organization_settings\x12\xb7\x01\n\x0cUpdateSource\x12\x38.google.cloud.securitycenter.v1beta1.UpdateSourceRequest\x1a+.google.cloud.securitycenter.v1beta1.Source"@\x82\xd3\xe4\x93\x02:20/v1beta1/{source.name=organizations/*/sources/*}:\x06source\x12\xd0\x02\n\x13UpdateSecurityMarks\x12?.google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest\x1a\x32.google.cloud.securitycenter.v1beta1.SecurityMarks"\xc3\x01\x82\xd3\xe4\x93\x02\xbc\x01\x32\x45/v1beta1/{security_marks.name=organizations/*/assets/*/securityMarks}:\x0esecurity_marksZc2Q/v1beta1/{security_marks.name=organizations/*/sources/*/findings/*/securityMarks}:\x0esecurity_marksB~\n\'com.google.cloud.securitycenter.v1beta1P\x01ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenterb\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2.DESCRIPTOR,
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2.DESCRIPTOR,
google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR,
google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR,
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.State",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STATE_UNSPECIFIED", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="UNUSED", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADDED", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REMOVED", index=3, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ACTIVE", index=4, number=4, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2754,
serialized_end=2832,
)
_sym_db.RegisterEnumDescriptor(_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE)
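# --- Illustrative usage sketch (editorial addition, not protoc output) ---
# Registered enum descriptors support lookups in both directions, which is
# how generated code resolves symbolic names and wire numbers for this
# State enum.
def _example_state_lookups():
    state_enum = _LISTASSETSRESPONSE_LISTASSETSRESULT_STATE
    added_number = state_enum.values_by_name["ADDED"].number  # -> 2
    removed_name = state_enum.values_by_number[3].name  # -> "REMOVED"
    return added_number, removed_name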
_CREATEFINDINGREQUEST = _descriptor.Descriptor(
name="CreateFindingRequest",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="finding_id",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.finding_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="finding",
full_name="google.cloud.securitycenter.v1beta1.CreateFindingRequest.finding",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=699,
serialized_end=820,
)
_CREATESOURCEREQUEST = _descriptor.Descriptor(
name="CreateSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="source",
full_name="google.cloud.securitycenter.v1beta1.CreateSourceRequest.source",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=822,
serialized_end=920,
)
_GETORGANIZATIONSETTINGSREQUEST = _descriptor.Descriptor(
name="GetOrganizationSettingsRequest",
full_name="google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=922,
serialized_end=968,
)
_GETSOURCEREQUEST = _descriptor.Descriptor(
name="GetSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.GetSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.GetSourceRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=970,
serialized_end=1002,
)
_GROUPASSETSREQUEST = _descriptor.Descriptor(
name="GroupAssetsRequest",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.group_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compare_duration",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.compare_duration",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.read_time",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.page_token",
index=5,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsRequest.page_size",
index=6,
number=8,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1005,
serialized_end=1214,
)
_GROUPASSETSRESPONSE = _descriptor.Descriptor(
name="GroupAssetsResponse",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group_by_results",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.group_by_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupAssetsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1217,
serialized_end=1386,
)
_GROUPFINDINGSREQUEST = _descriptor.Descriptor(
name="GroupFindingsRequest",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="group_by",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.group_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.page_token",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsRequest.page_size",
index=5,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1389,
serialized_end=1547,
)
_GROUPFINDINGSRESPONSE = _descriptor.Descriptor(
name="GroupFindingsResponse",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="group_by_results",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.group_by_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.GroupFindingsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1550,
serialized_end=1721,
)
_GROUPRESULT_PROPERTIESENTRY = _descriptor.Descriptor(
name="PropertiesEntry",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1840,
serialized_end=1913,
)
_GROUPRESULT = _descriptor.Descriptor(
name="GroupResult",
full_name="google.cloud.securitycenter.v1beta1.GroupResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="properties",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.properties",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="count",
full_name="google.cloud.securitycenter.v1beta1.GroupResult.count",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_GROUPRESULT_PROPERTIESENTRY],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1724,
serialized_end=1913,
)
_LISTSOURCESREQUEST = _descriptor.Descriptor(
name="ListSourcesRequest",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesRequest.page_size",
index=2,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1915,
serialized_end=1990,
)
_LISTSOURCESRESPONSE = _descriptor.Descriptor(
name="ListSourcesResponse",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="sources",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse.sources",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListSourcesResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1992,
serialized_end=2100,
)
_LISTASSETSREQUEST = _descriptor.Descriptor(
name="ListAssetsRequest",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order_by",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.order_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compare_duration",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.compare_duration",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.field_mask",
index=5,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.page_token",
index=6,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsRequest.page_size",
index=7,
number=9,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2103,
serialized_end=2359,
)
_LISTASSETSRESPONSE_LISTASSETSRESULT = _descriptor.Descriptor(
name="ListAssetsResult",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="asset",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.asset",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult.state",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2580,
serialized_end=2832,
)
_LISTASSETSRESPONSE = _descriptor.Descriptor(
name="ListAssetsResponse",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="list_assets_results",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.list_assets_results",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_size",
full_name="google.cloud.securitycenter.v1beta1.ListAssetsResponse.total_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_LISTASSETSRESPONSE_LISTASSETSRESULT],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2362,
serialized_end=2832,
)
_LISTFINDINGSREQUEST = _descriptor.Descriptor(
name="ListFindingsRequest",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.filter",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="order_by",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.order_by",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.read_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="field_mask",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.field_mask",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.page_token",
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_size",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsRequest.page_size",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2835,
serialized_end=3040,
)
_LISTFINDINGSRESPONSE = _descriptor.Descriptor(
name="ListFindingsResponse",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="findings",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.findings",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="read_time",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.read_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.next_page_token",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="total_size",
full_name="google.cloud.securitycenter.v1beta1.ListFindingsResponse.total_size",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3043,
serialized_end=3221,
)
_SETFINDINGSTATEREQUEST = _descriptor.Descriptor(
name="SetFindingStateRequest",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.state",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.securitycenter.v1beta1.SetFindingStateRequest.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3224,
serialized_end=3377,
)
_RUNASSETDISCOVERYREQUEST = _descriptor.Descriptor(
name="RunAssetDiscoveryRequest",
full_name="google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="parent",
full_name="google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest.parent",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3379,
serialized_end=3421,
)
_UPDATEFINDINGREQUEST = _descriptor.Descriptor(
name="UpdateFindingRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="finding",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest.finding",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateFindingRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3424,
serialized_end=3558,
)
_UPDATEORGANIZATIONSETTINGSREQUEST = _descriptor.Descriptor(
name="UpdateOrganizationSettingsRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="organization_settings",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest.organization_settings",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3561,
serialized_end=3735,
)
_UPDATESOURCEREQUEST = _descriptor.Descriptor(
name="UpdateSourceRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest.source",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateSourceRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3738,
serialized_end=3869,
)
_UPDATESECURITYMARKSREQUEST = _descriptor.Descriptor(
name="UpdateSecurityMarksRequest",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="security_marks",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.security_marks",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="update_mask",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.update_mask",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest.start_time",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3872,
serialized_end=4073,
)
_CREATEFINDINGREQUEST.fields_by_name[
"finding"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_CREATESOURCEREQUEST.fields_by_name[
"source"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_GROUPASSETSREQUEST.fields_by_name[
"compare_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GROUPASSETSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPASSETSRESPONSE.fields_by_name["group_by_results"].message_type = _GROUPRESULT
_GROUPASSETSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPFINDINGSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPFINDINGSRESPONSE.fields_by_name["group_by_results"].message_type = _GROUPRESULT
_GROUPFINDINGSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GROUPRESULT_PROPERTIESENTRY.fields_by_name[
"value"
].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_GROUPRESULT_PROPERTIESENTRY.containing_type = _GROUPRESULT
_GROUPRESULT.fields_by_name["properties"].message_type = _GROUPRESULT_PROPERTIESENTRY
_LISTSOURCESRESPONSE.fields_by_name[
"sources"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_LISTASSETSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTASSETSREQUEST.fields_by_name[
"compare_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_LISTASSETSREQUEST.fields_by_name[
"field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTASSETSRESPONSE_LISTASSETSRESULT.fields_by_name[
"asset"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_asset__pb2._ASSET
)
_LISTASSETSRESPONSE_LISTASSETSRESULT.fields_by_name[
"state"
].enum_type = _LISTASSETSRESPONSE_LISTASSETSRESULT_STATE
_LISTASSETSRESPONSE_LISTASSETSRESULT.containing_type = _LISTASSETSRESPONSE
_LISTASSETSRESPONSE_LISTASSETSRESULT_STATE.containing_type = (
_LISTASSETSRESPONSE_LISTASSETSRESULT
)
_LISTASSETSRESPONSE.fields_by_name[
"list_assets_results"
].message_type = _LISTASSETSRESPONSE_LISTASSETSRESULT
_LISTASSETSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTFINDINGSREQUEST.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTFINDINGSREQUEST.fields_by_name[
"field_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_LISTFINDINGSRESPONSE.fields_by_name[
"findings"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_LISTFINDINGSRESPONSE.fields_by_name[
"read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SETFINDINGSTATEREQUEST.fields_by_name[
"state"
].enum_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING_STATE
)
_SETFINDINGSTATEREQUEST.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATEFINDINGREQUEST.fields_by_name[
"finding"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING
)
_UPDATEFINDINGREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATEORGANIZATIONSETTINGSREQUEST.fields_by_name[
"organization_settings"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS
)
_UPDATEORGANIZATIONSETTINGSREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESOURCEREQUEST.fields_by_name[
"source"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE
)
_UPDATESOURCEREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"security_marks"
].message_type = (
google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS
)
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"update_mask"
].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATESECURITYMARKSREQUEST.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name["CreateFindingRequest"] = _CREATEFINDINGREQUEST
DESCRIPTOR.message_types_by_name["CreateSourceRequest"] = _CREATESOURCEREQUEST
DESCRIPTOR.message_types_by_name[
"GetOrganizationSettingsRequest"
] = _GETORGANIZATIONSETTINGSREQUEST
DESCRIPTOR.message_types_by_name["GetSourceRequest"] = _GETSOURCEREQUEST
DESCRIPTOR.message_types_by_name["GroupAssetsRequest"] = _GROUPASSETSREQUEST
DESCRIPTOR.message_types_by_name["GroupAssetsResponse"] = _GROUPASSETSRESPONSE
DESCRIPTOR.message_types_by_name["GroupFindingsRequest"] = _GROUPFINDINGSREQUEST
DESCRIPTOR.message_types_by_name["GroupFindingsResponse"] = _GROUPFINDINGSRESPONSE
DESCRIPTOR.message_types_by_name["GroupResult"] = _GROUPRESULT
DESCRIPTOR.message_types_by_name["ListSourcesRequest"] = _LISTSOURCESREQUEST
DESCRIPTOR.message_types_by_name["ListSourcesResponse"] = _LISTSOURCESRESPONSE
DESCRIPTOR.message_types_by_name["ListAssetsRequest"] = _LISTASSETSREQUEST
DESCRIPTOR.message_types_by_name["ListAssetsResponse"] = _LISTASSETSRESPONSE
DESCRIPTOR.message_types_by_name["ListFindingsRequest"] = _LISTFINDINGSREQUEST
DESCRIPTOR.message_types_by_name["ListFindingsResponse"] = _LISTFINDINGSRESPONSE
DESCRIPTOR.message_types_by_name["SetFindingStateRequest"] = _SETFINDINGSTATEREQUEST
DESCRIPTOR.message_types_by_name["RunAssetDiscoveryRequest"] = _RUNASSETDISCOVERYREQUEST
DESCRIPTOR.message_types_by_name["UpdateFindingRequest"] = _UPDATEFINDINGREQUEST
DESCRIPTOR.message_types_by_name[
"UpdateOrganizationSettingsRequest"
] = _UPDATEORGANIZATIONSETTINGSREQUEST
DESCRIPTOR.message_types_by_name["UpdateSourceRequest"] = _UPDATESOURCEREQUEST
DESCRIPTOR.message_types_by_name[
"UpdateSecurityMarksRequest"
] = _UPDATESECURITYMARKSREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateFindingRequest = _reflection.GeneratedProtocolMessageType(
"CreateFindingRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATEFINDINGREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for creating a finding.
Attributes:
parent:
Resource name of the new finding's parent. Its format should
be "organizations/[organization\_id]/sources/[source\_id]".
finding_id:
Unique identifier provided by the client within the parent
scope. It must be alphanumeric and between 1 and 32
characters in length.
finding:
The Finding being created. The name and security\_marks will
be ignored as they are both output only fields on this
resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.CreateFindingRequest)
),
)
_sym_db.RegisterMessage(CreateFindingRequest)
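# Editor's sketch (not part of the generated file): a minimal way a client
# might populate this request. The organization/source ids, finding_id, and
# category below are hypothetical; Finding is the message class from the
# sibling finding_pb2 module, imported above under its long generated alias.
#
#   finding = google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2.Finding(
#       category="MEDIUM_RISK_ONE")
#   request = CreateFindingRequest(
#       parent="organizations/123/sources/456",
#       finding_id="testfinding01",
#       finding=finding,
#   )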
CreateSourceRequest = _reflection.GeneratedProtocolMessageType(
"CreateSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_CREATESOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for creating a source.
Attributes:
parent:
Resource name of the new source's parent. Its format should be
"organizations/[organization\_id]".
source:
The Source being created, only the display\_name and
description will be used. All other fields will be ignored.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.CreateSourceRequest)
),
)
_sym_db.RegisterMessage(CreateSourceRequest)
GetOrganizationSettingsRequest = _reflection.GeneratedProtocolMessageType(
"GetOrganizationSettingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETORGANIZATIONSETTINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for getting organization settings.
Attributes:
name:
Name of the organization to get organization settings for. Its
format is
"organizations/[organization\_id]/organizationSettings".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GetOrganizationSettingsRequest)
),
)
_sym_db.RegisterMessage(GetOrganizationSettingsRequest)
GetSourceRequest = _reflection.GeneratedProtocolMessageType(
"GetSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETSOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for getting a source.
Attributes:
name:
Relative resource name of the source. Its format is
"organizations/[organization\_id]/source/[source\_id]".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GetSourceRequest)
),
)
_sym_db.RegisterMessage(GetSourceRequest)
GroupAssetsRequest = _reflection.GeneratedProtocolMessageType(
"GroupAssetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPASSETSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for grouping by assets.
Attributes:
parent:
Name of the organization to groupBy. Its format is
"organizations/[organization\_id]".
filter:
Expression that defines the filter to apply across assets. The
expression is a list of zero or more restrictions combined via
logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. The fields map to those defined in the Asset
resource. Examples include: - name -
security\_center\_properties.resource\_name -
resource\_properties.a\_property -
security\_marks.marks.marka The supported operators are: -
``=`` for all value types. - ``>``, ``<``, ``>=``, ``<=`` for
integer values. - ``:``, meaning substring matching, for
strings. The supported value types are: - string literals
in quotes. - integer literals without quotes. - boolean
literals ``true`` and ``false`` without quotes. For example,
``resource_properties.size = 100`` is a valid filter string.
group_by:
Expression that defines what asset fields to use for
grouping. The string value should follow SQL syntax: comma
separated list of fields. For example:
"security\_center\_properties.resource\_project,security\_center\_properties.project".
The following fields are supported when compare\_duration
is not set: - security\_center\_properties.resource\_project
- security\_center\_properties.resource\_type -
security\_center\_properties.resource\_parent The following
fields are supported when compare\_duration is set: -
security\_center\_properties.resource\_type
compare_duration:
When compare\_duration is set, the Asset's "state" property is
updated to indicate whether the asset was added, removed, or
remained present during the compare\_duration period of time
that precedes the read\_time. This is the time between
(read\_time - compare\_duration) and read\_time. The state
value is derived based on the presence of the asset at the two
points in time. Intermediate state changes between the two
times don't affect the result. For example, the results aren't
affected if the asset is removed and re-created again.
Possible "state" values when compare\_duration is specified:
- "ADDED": indicates that the asset was not present before
compare\_duration, but present at reference\_time. -
"REMOVED": indicates that the asset was present at the start
of compare\_duration, but not present at reference\_time. -
"ACTIVE": indicates that the asset was present at both the
start and the end of the time period defined by
compare\_duration and reference\_time. This field is
ignored if ``state`` is not a field in ``group_by``.
read_time:
Time used as a reference point when filtering assets. The
filter is limited to assets existing at the supplied time and
their values are those at that specific time. Absence of this
field will default to the API's version of NOW.
page_token:
The value returned by the last ``GroupAssetsResponse``;
indicates that this is a continuation of a prior
``GroupAssets`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupAssetsRequest)
),
)
_sym_db.RegisterMessage(GroupAssetsRequest)
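# Editor's sketch (not part of the generated file): the filter/group_by
# grammar documented above in use. The organization id, filter value, and
# group_by field are hypothetical examples.
#
#   request = GroupAssetsRequest(
#       parent="organizations/123",
#       filter='security_center_properties.resource_type : "Instance"',
#       group_by="security_center_properties.resource_type",
#       page_size=100,
#   )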
GroupAssetsResponse = _reflection.GeneratedProtocolMessageType(
"GroupAssetsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPASSETSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for grouping by assets.
Attributes:
group_by_results:
Group results. There exists an element for each existing
unique combination of property/values. The element contains a
count for the number of times those specific property/values
appear.
read_time:
Time used for executing the groupBy request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupAssetsResponse)
),
)
_sym_db.RegisterMessage(GroupAssetsResponse)
GroupFindingsRequest = _reflection.GeneratedProtocolMessageType(
"GroupFindingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPFINDINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for grouping by findings.
Attributes:
parent:
Name of the source to groupBy. Its format is
"organizations/[organization\_id]/sources/[source\_id]". To
groupBy across all sources provide a source\_id of ``-``. For
example: organizations/123/sources/-
filter:
Expression that defines the filter to apply across findings.
The expression is a list of one or more restrictions combined
via logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. Examples include: - name -
source\_properties.a\_property - security\_marks.marks.marka
The supported operators are: - ``=`` for all value types. -
``>``, ``<``, ``>=``, ``<=`` for integer values. - ``:``,
meaning substring matching, for strings. The supported value
types are: - string literals in quotes. - integer literals
without quotes. - boolean literals ``true`` and ``false``
without quotes. For example, ``source_properties.size = 100``
is a valid filter string.
group_by:
Expression that defines what finding fields to use for grouping
(including ``state``). The string value should follow SQL
syntax: comma separated list of fields. For example:
"parent,resource\_name". The following fields are supported:
- resource\_name - category - state - parent
read_time:
Time used as a reference point when filtering findings. The
filter is limited to findings existing at the supplied time
and their values are those at that specific time. Absence of
this field will default to the API's version of NOW.
page_token:
The value returned by the last ``GroupFindingsResponse``;
indicates that this is a continuation of a prior
``GroupFindings`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupFindingsRequest)
),
)
_sym_db.RegisterMessage(GroupFindingsRequest)
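# Editor's sketch (not part of the generated file): grouping findings across
# every source by passing the documented wildcard source_id of "-". The
# organization id and category value are hypothetical.
#
#   request = GroupFindingsRequest(
#       parent="organizations/123/sources/-",
#       filter='category = "MEDIUM_RISK_ONE"',
#       group_by="category,state",
#   )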
GroupFindingsResponse = _reflection.GeneratedProtocolMessageType(
"GroupFindingsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPFINDINGSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for group by findings.
Attributes:
group_by_results:
Group results. There exists an element for each existing
unique combination of property/values. The element contains a
count for the number of times those specific property/values
appear.
read_time:
Time used for executing the groupBy request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupFindingsResponse)
),
)
_sym_db.RegisterMessage(GroupFindingsResponse)
GroupResult = _reflection.GeneratedProtocolMessageType(
"GroupResult",
(_message.Message,),
dict(
PropertiesEntry=_reflection.GeneratedProtocolMessageType(
"PropertiesEntry",
(_message.Message,),
dict(
DESCRIPTOR=_GROUPRESULT_PROPERTIESENTRY,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupResult.PropertiesEntry)
),
),
DESCRIPTOR=_GROUPRESULT,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Result containing the properties and count of a groupBy request.
Attributes:
properties:
Properties matching the groupBy fields in the request.
count:
Total count of resources for the given properties.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.GroupResult)
),
)
_sym_db.RegisterMessage(GroupResult)
_sym_db.RegisterMessage(GroupResult.PropertiesEntry)
ListSourcesRequest = _reflection.GeneratedProtocolMessageType(
"ListSourcesRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSOURCESREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing sources.
Attributes:
parent:
Resource name of the parent of sources to list. Its format
should be "organizations/[organization\_id]".
page_token:
The value returned by the last ``ListSourcesResponse``;
indicates that this is a continuation of a prior
``ListSources`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListSourcesRequest)
),
)
_sym_db.RegisterMessage(ListSourcesRequest)
ListSourcesResponse = _reflection.GeneratedProtocolMessageType(
"ListSourcesResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTSOURCESRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing sources.
Attributes:
sources:
Sources belonging to the requested parent.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListSourcesResponse)
),
)
_sym_db.RegisterMessage(ListSourcesResponse)
ListAssetsRequest = _reflection.GeneratedProtocolMessageType(
"ListAssetsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTASSETSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing assets.
Attributes:
parent:
Name of the organization assets should belong to. Its format
is "organizations/[organization\_id]".
filter:
Expression that defines the filter to apply across assets. The
expression is a list of zero or more restrictions combined via
logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. The fields map to those defined in the Asset
resource. Examples include: - name -
security\_center\_properties.resource\_name -
resource\_properties.a\_property -
security\_marks.marks.marka The supported operators are: -
``=`` for all value types. - ``>``, ``<``, ``>=``, ``<=`` for
integer values. - ``:``, meaning substring matching, for
strings. The supported value types are: - string literals
in quotes. - integer literals without quotes. - boolean
literals ``true`` and ``false`` without quotes. For example,
``resource_properties.size = 100`` is a valid filter string.
order_by:
Expression that defines what fields and order to use for
sorting. The string value should follow SQL syntax: comma
separated list of fields. For example:
"name,resource\_properties.a\_property". The default sorting
order is ascending. To specify descending order for a field, a
suffix " desc" should be appended to the field name. For
example: "name desc,resource\_properties.a\_property".
Redundant space characters in the syntax are insignificant.
"name desc,resource\_properties.a\_property" and " name desc ,
resource\_properties.a\_property " are equivalent.
read_time:
Time used as a reference point when filtering assets. The
filter is limited to assets existing at the supplied time and
their values are those at that specific time. Absence of this
field will default to the API's version of NOW.
compare_duration:
When compare\_duration is set, the ListAssetResult's "state"
attribute is updated to indicate whether the asset was added,
removed, or remained present during the compare\_duration
period of time that precedes the read\_time. This is the time
between (read\_time - compare\_duration) and read\_time. The
state value is derived based on the presence of the asset at
the two points in time. Intermediate state changes between the
two times don't affect the result. For example, the results
aren't affected if the asset is removed and re-created again.
Possible "state" values when compare\_duration is specified:
- "ADDED": indicates that the asset was not present before
compare\_duration, but present at read\_time. - "REMOVED":
indicates that the asset was present at the start of
compare\_duration, but not present at read\_time. - "ACTIVE":
indicates that the asset was present at both the start and
the end of the time period defined by compare\_duration and
read\_time. If compare\_duration is not specified, then the
only possible state is "UNUSED", which indicates that the
asset is present at read\_time.
field_mask:
Optional. A field mask to specify the ListAssetsResult fields
to be listed in the response. An empty field mask will list
all fields.
page_token:
The value returned by the last ``ListAssetsResponse``;
indicates that this is a continuation of a prior
``ListAssets`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsRequest)
),
)
_sym_db.RegisterMessage(ListAssetsRequest)
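# Editor's sketch (not part of the generated file): requesting assets sorted
# descending while asking for ADDED/REMOVED/ACTIVE state over a 24-hour
# compare_duration window, as documented above. The organization id is
# hypothetical; Duration is the well-known protobuf type.
#
#   from google.protobuf import duration_pb2
#   request = ListAssetsRequest(
#       parent="organizations/123",
#       order_by="security_center_properties.resource_name desc",
#       compare_duration=duration_pb2.Duration(seconds=24 * 3600),
#   )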
ListAssetsResponse = _reflection.GeneratedProtocolMessageType(
"ListAssetsResponse",
(_message.Message,),
dict(
ListAssetsResult=_reflection.GeneratedProtocolMessageType(
"ListAssetsResult",
(_message.Message,),
dict(
DESCRIPTOR=_LISTASSETSRESPONSE_LISTASSETSRESULT,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Result containing the Asset and its State.
Attributes:
asset:
Asset matching the search request.
state:
State of the asset.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsResponse.ListAssetsResult)
),
),
DESCRIPTOR=_LISTASSETSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing assets.
Attributes:
list_assets_results:
Assets matching the list request.
read_time:
Time used for executing the list request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
total_size:
The total number of assets matching the query.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListAssetsResponse)
),
)
_sym_db.RegisterMessage(ListAssetsResponse)
_sym_db.RegisterMessage(ListAssetsResponse.ListAssetsResult)
ListFindingsRequest = _reflection.GeneratedProtocolMessageType(
"ListFindingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTFINDINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for listing findings.
Attributes:
parent:
Name of the source the findings belong to. Its format is
"organizations/[organization\_id]/sources/[source\_id]". To
list across all sources provide a source\_id of ``-``. For
example: organizations/123/sources/-
filter:
Expression that defines the filter to apply across findings.
The expression is a list of one or more restrictions combined
via logical operators ``AND`` and ``OR``. Parentheses are not
supported, and ``OR`` has higher precedence than ``AND``.
Restrictions have the form ``<field> <operator> <value>`` and
may have a ``-`` character in front of them to indicate
negation. Examples include: - name -
source\_properties.a\_property - security\_marks.marks.marka
The supported operators are: - ``=`` for all value types. -
``>``, ``<``, ``>=``, ``<=`` for integer values. - ``:``,
meaning substring matching, for strings. The supported value
types are: - string literals in quotes. - integer literals
without quotes. - boolean literals ``true`` and ``false``
without quotes. For example, ``source_properties.size = 100``
is a valid filter string.
order_by:
Expression that defines what fields and order to use for
sorting. The string value should follow SQL syntax: comma
separated list of fields. For example:
"name,resource\_properties.a\_property". The default sorting
order is ascending. To specify descending order for a field, a
suffix " desc" should be appended to the field name. For
example: "name desc,source\_properties.a\_property". Redundant
space characters in the syntax are insignificant. "name
desc,source\_properties.a\_property" and " name desc ,
source\_properties.a\_property " are equivalent.
read_time:
Time used as a reference point when filtering findings. The
filter is limited to findings existing at the supplied time
and their values are those at that specific time. Absence of
this field will default to the API's version of NOW.
field_mask:
Optional. A field mask to specify the Finding fields to be
listed in the response. An empty field mask will list all
fields.
page_token:
The value returned by the last ``ListFindingsResponse``;
indicates that this is a continuation of a prior
``ListFindings`` call, and that the system should return the
next page of data.
page_size:
The maximum number of results to return in a single response.
Default is 10, minimum is 1, maximum is 1000.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListFindingsRequest)
),
)
_sym_db.RegisterMessage(ListFindingsRequest)
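# Editor's sketch (not part of the generated file): threading the page token
# documented above through successive requests. `client` stands in for
# whatever transport actually issues the ListFindings call; the parent id is
# hypothetical.
#
#   request = ListFindingsRequest(
#       parent="organizations/123/sources/456", page_size=1000)
#   response = client.ListFindings(request)
#   while response.next_page_token:
#       request.page_token = response.next_page_token
#       response = client.ListFindings(request)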
ListFindingsResponse = _reflection.GeneratedProtocolMessageType(
"ListFindingsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTFINDINGSRESPONSE,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Response message for listing findings.
Attributes:
findings:
Findings matching the list request.
read_time:
Time used for executing the list request.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results.
total_size:
The total number of findings matching the query.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.ListFindingsResponse)
),
)
_sym_db.RegisterMessage(ListFindingsResponse)
SetFindingStateRequest = _reflection.GeneratedProtocolMessageType(
"SetFindingStateRequest",
(_message.Message,),
dict(
DESCRIPTOR=_SETFINDINGSTATEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a finding's state.
Attributes:
name:
The relative resource name of the finding. See:
https://cloud.google.com/apis/design/resource\_names#relative\_resource\_name
Example: "organizations/123/sources/456/finding/789".
state:
The desired State of the finding.
start_time:
The time at which the updated state takes effect.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.SetFindingStateRequest)
),
)
_sym_db.RegisterMessage(SetFindingStateRequest)
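# Editor's sketch (not part of the generated file): marking a finding
# INACTIVE as of a given time. Finding.INACTIVE is the State enum value from
# the sibling finding_pb2 module (aliased below for brevity); the resource
# name and timestamp are hypothetical.
#
#   from google.protobuf import timestamp_pb2
#   finding_pb2 = google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2
#   request = SetFindingStateRequest(
#       name="organizations/123/sources/456/findings/789",
#       state=finding_pb2.Finding.INACTIVE,
#       start_time=timestamp_pb2.Timestamp(seconds=1546300800),
#   )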
RunAssetDiscoveryRequest = _reflection.GeneratedProtocolMessageType(
"RunAssetDiscoveryRequest",
(_message.Message,),
dict(
DESCRIPTOR=_RUNASSETDISCOVERYREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for running asset discovery for an organization.
Attributes:
parent:
Name of the organization to run asset discovery for. Its
format is "organizations/[organization\_id]".
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.RunAssetDiscoveryRequest)
),
)
_sym_db.RegisterMessage(RunAssetDiscoveryRequest)
UpdateFindingRequest = _reflection.GeneratedProtocolMessageType(
"UpdateFindingRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEFINDINGREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating or creating a finding.
Attributes:
finding:
The finding resource to update or create if it does not
already exist. parent, security\_marks, and update\_time will
be ignored. In the case of creation, the finding id portion
of the name must be alphanumeric and between 1 and 32
characters in length.
update_mask:
The FieldMask to use when updating the finding resource. This
field is ignored if the finding does not already exist and the
finding is created.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateFindingRequest)
),
)
_sym_db.RegisterMessage(UpdateFindingRequest)
UpdateOrganizationSettingsRequest = _reflection.GeneratedProtocolMessageType(
"UpdateOrganizationSettingsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATEORGANIZATIONSETTINGSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating an organization's settings.
Attributes:
organization_settings:
The organization settings resource to update.
update_mask:
The FieldMask to use when updating the settings resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateOrganizationSettingsRequest)
),
)
_sym_db.RegisterMessage(UpdateOrganizationSettingsRequest)
UpdateSourceRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSourceRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESOURCEREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a source.
Attributes:
source:
The source resource to update.
update_mask:
The FieldMask to use when updating the source resource.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateSourceRequest)
),
)
_sym_db.RegisterMessage(UpdateSourceRequest)
UpdateSecurityMarksRequest = _reflection.GeneratedProtocolMessageType(
"UpdateSecurityMarksRequest",
(_message.Message,),
dict(
DESCRIPTOR=_UPDATESECURITYMARKSREQUEST,
__module__="google.cloud.securitycenter_v1beta1.proto.securitycenter_service_pb2",
__doc__="""Request message for updating a SecurityMarks resource.
Attributes:
security_marks:
The security marks resource to update.
update_mask:
The FieldMask to use when updating the security marks
resource.
start_time:
The time at which the updated SecurityMarks take effect.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1beta1.UpdateSecurityMarksRequest)
),
)
_sym_db.RegisterMessage(UpdateSecurityMarksRequest)
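# Editor's sketch (not part of the generated file): updating one mark via a
# FieldMask so other marks are left untouched. The resource name and mark
# key are hypothetical; SecurityMarks comes from the sibling
# security_marks_pb2 module, imported above under its generated alias.
#
#   from google.protobuf import field_mask_pb2
#   marks_pb2 = google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2
#   request = UpdateSecurityMarksRequest(
#       security_marks=marks_pb2.SecurityMarks(
#           name="organizations/123/assets/456/securityMarks",
#           marks={"owner": "team-a"},
#       ),
#       update_mask=field_mask_pb2.FieldMask(paths=["marks.owner"]),
#   )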
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(),
_b(
"\n'com.google.cloud.securitycenter.v1beta1P\001ZQgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1;securitycenter"
),
)
_GROUPRESULT_PROPERTIESENTRY.has_options = True
_GROUPRESULT_PROPERTIESENTRY._options = _descriptor._ParseOptions(
descriptor_pb2.MessageOptions(), _b("8\001")
)
_SECURITYCENTER = _descriptor.ServiceDescriptor(
name="SecurityCenter",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter",
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=4076,
serialized_end=7660,
methods=[
_descriptor.MethodDescriptor(
name="CreateSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.CreateSource",
index=0,
containing_service=None,
input_type=_CREATESOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\0023")/v1beta1/{parent=organizations/*}/sources:\006source'
),
),
),
_descriptor.MethodDescriptor(
name="CreateFinding",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.CreateFinding",
index=1,
containing_service=None,
input_type=_CREATEFINDINGREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?"4/v1beta1/{parent=organizations/*/sources/*}/findings:\007finding'
),
),
),
_descriptor.MethodDescriptor(
name="GetIamPolicy",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetIamPolicy",
index=2,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST,
output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{resource=organizations/*/sources/*}:getIamPolicy:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="GetOrganizationSettings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetOrganizationSettings",
index=3,
containing_service=None,
input_type=_GETORGANIZATIONSETTINGSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\0026\0224/v1beta1/{name=organizations/*/organizationSettings}"
),
),
),
_descriptor.MethodDescriptor(
name="GetSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GetSource",
index=4,
containing_service=None,
input_type=_GETSOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002+\022)/v1beta1/{name=organizations/*/sources/*}"
),
),
),
_descriptor.MethodDescriptor(
name="GroupAssets",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GroupAssets",
index=5,
containing_service=None,
input_type=_GROUPASSETSREQUEST,
output_type=_GROUPASSETSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\0023"./v1beta1/{parent=organizations/*}/assets:group:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="GroupFindings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.GroupFindings",
index=6,
containing_service=None,
input_type=_GROUPFINDINGSREQUEST,
output_type=_GROUPFINDINGSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{parent=organizations/*/sources/*}/findings:group:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="ListAssets",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListAssets",
index=7,
containing_service=None,
input_type=_LISTASSETSREQUEST,
output_type=_LISTASSETSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002*\022(/v1beta1/{parent=organizations/*}/assets"
),
),
),
_descriptor.MethodDescriptor(
name="ListFindings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListFindings",
index=8,
containing_service=None,
input_type=_LISTFINDINGSREQUEST,
output_type=_LISTFINDINGSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\0026\0224/v1beta1/{parent=organizations/*/sources/*}/findings"
),
),
),
_descriptor.MethodDescriptor(
name="ListSources",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.ListSources",
index=9,
containing_service=None,
input_type=_LISTSOURCESREQUEST,
output_type=_LISTSOURCESRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002+\022)/v1beta1/{parent=organizations/*}/sources"
),
),
),
_descriptor.MethodDescriptor(
name="RunAssetDiscovery",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.RunAssetDiscovery",
index=10,
containing_service=None,
input_type=_RUNASSETDISCOVERYREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002:"5/v1beta1/{parent=organizations/*}/assets:runDiscovery:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="SetFindingState",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.SetFindingState",
index=11,
containing_service=None,
input_type=_SETFINDINGSTATEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002B"=/v1beta1/{name=organizations/*/sources/*/findings/*}:setState:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="SetIamPolicy",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.SetIamPolicy",
index=12,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST,
output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002?":/v1beta1/{resource=organizations/*/sources/*}:setIamPolicy:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="TestIamPermissions",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.TestIamPermissions",
index=13,
containing_service=None,
input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST,
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
'\202\323\344\223\002E"@/v1beta1/{resource=organizations/*/sources/*}:testIamPermissions:\001*'
),
),
),
_descriptor.MethodDescriptor(
name="UpdateFinding",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateFinding",
index=14,
containing_service=None,
input_type=_UPDATEFINDINGREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_finding__pb2._FINDING,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002G2</v1beta1/{finding.name=organizations/*/sources/*/findings/*}:\007finding"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateOrganizationSettings",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateOrganizationSettings",
index=15,
containing_service=None,
input_type=_UPDATEORGANIZATIONSETTINGSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_organization__settings__pb2._ORGANIZATIONSETTINGS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002c2J/v1beta1/{organization_settings.name=organizations/*/organizationSettings}:\025organization_settings"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateSource",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateSource",
index=16,
containing_service=None,
input_type=_UPDATESOURCEREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_source__pb2._SOURCE,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002:20/v1beta1/{source.name=organizations/*/sources/*}:\006source"
),
),
),
_descriptor.MethodDescriptor(
name="UpdateSecurityMarks",
full_name="google.cloud.securitycenter.v1beta1.SecurityCenter.UpdateSecurityMarks",
index=17,
containing_service=None,
input_type=_UPDATESECURITYMARKSREQUEST,
output_type=google_dot_cloud_dot_securitycenter__v1beta1_dot_proto_dot_security__marks__pb2._SECURITYMARKS,
options=_descriptor._ParseOptions(
descriptor_pb2.MethodOptions(),
_b(
"\202\323\344\223\002\274\0012E/v1beta1/{security_marks.name=organizations/*/assets/*/securityMarks}:\016security_marksZc2Q/v1beta1/{security_marks.name=organizations/*/sources/*/findings/*/securityMarks}:\016security_marks"
),
),
),
],
)
_sym_db.RegisterServiceDescriptor(_SECURITYCENTER)
DESCRIPTOR.services_by_name["SecurityCenter"] = _SECURITYCENTER
| true
| true
|
79024894f3ecdb13739c0edc58d07ee1bc8be291
| 759
|
py
|
Python
|
demo.py
|
paulhoule/usb_receipt_printer
|
7526ffad7ccdb3aac95acf76d4058e6f3b532c1d
|
[
"MIT"
] | null | null | null |
demo.py
|
paulhoule/usb_receipt_printer
|
7526ffad7ccdb3aac95acf76d4058e6f3b532c1d
|
[
"MIT"
] | null | null | null |
demo.py
|
paulhoule/usb_receipt_printer
|
7526ffad7ccdb3aac95acf76d4058e6f3b532c1d
|
[
"MIT"
] | null | null | null |
from escpos.printer import Usb
from pathlib import Path
image = Path("/tamamo-no-mae/me-cloudy.png")
# 0x0416/0x5011 are the USB vendor/product ids of this ZJ-5870 thermal
# receipt printer; the escpos profile selects its capabilities.
printer = Usb(0x0416, 0x5011, 0, profile="ZJ-5870")
printer.image(image)
printer.cut()
# with printer() as that:
# that.write('Hello, world!\n\n')
# # 000000000111111111122222222223
# # 123456789012345678901234567890
# that.write('Soluta sed voluptatem ut\n')
# that.write('facere aut. Modi placeat et\n')
# that.write('eius voluptate sint ut.\n')
# that.write('Facilis minima ex quia quia\n')
# that.write('consectetur ex ipsa. Neque et\n')
# that.write('voluptatem ipsa enim error\n')
# that.write('reprehenderit ex dolore.\n')
# that.write('Cupiditate ad voluptatem nisi.\n\n\n\n')
# ZJ-5870
| 37.95
| 58
| 0.670619
|
from escpos.printer import Usb
from pathlib import Path
image = Path("/tamamo-no-mae/me-cloudy.png")
printer = Usb(0x0416, 0x5011, 0, profile="ZJ-5870")
printer.image(image)
printer.cut()
| true
| true
|
790248e57ea50863c364ce7cb42db5c22f1e44f4
| 1,851
|
py
|
Python
|
package/make-deb.py
|
jayvdb/pypi-server
|
596a2fa2dd5d90cff445822bfadff5a9b7f6be9c
|
[
"MIT"
] | 119
|
2015-12-07T22:41:08.000Z
|
2022-03-16T05:55:06.000Z
|
package/make-deb.py
|
jayvdb/pypi-server
|
596a2fa2dd5d90cff445822bfadff5a9b7f6be9c
|
[
"MIT"
] | 37
|
2016-01-19T16:28:03.000Z
|
2022-03-06T08:03:04.000Z
|
package/make-deb.py
|
jayvdb/pypi-server
|
596a2fa2dd5d90cff445822bfadff5a9b7f6be9c
|
[
"MIT"
] | 55
|
2015-12-09T12:21:10.000Z
|
2021-11-29T13:22:24.000Z
|
import os
from subprocess import check_output
import plumbum
from plumbum.cmd import grep, fpm, ln, sort, find, virtualenv
import logging
log = logging.getLogger()
logging.basicConfig(level=logging.INFO)
ENV_PATH = os.getenv("ENV_PATH", "/usr/share/python3/pypi-server")
SRC_PATH = os.getenv("SRC_PATH", "/mnt")
pip = plumbum.local[os.path.join(ENV_PATH, 'bin', 'pip3')]
log.info("Creating virtualenv %r", ENV_PATH)
virtualenv['-p', 'python3', ENV_PATH] & plumbum.FG
log.info("Installing package %r", SRC_PATH)
pip['install', '--no-binary=:all:', '-U', "{}[postgres]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[proxy]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[mysql]".format(SRC_PATH)] & plumbum.FG
ln['-snf', os.path.join(ENV_PATH, "bin", "pypi-server"), "/usr/bin/pypi-server"] & plumbum.BG
version = (pip['show', 'pypi-server'] | grep['^Version']) & plumbum.BG
version.wait()
version = version.stdout.strip().replace("Version:", '').strip()
args = (
'-s', 'dir',
'-f', '-t', 'deb',
'--iteration', os.getenv('ITERATION', '0'),
'-n', 'pypi-server',
'--config-files', '/etc/pypi-server.conf',
'--deb-systemd', '/mnt/contrib/pypi-server.service',
'-v', version,
'-p', "/mnt/dist",
'-d', 'python3',
'-d', 'python3-distutils',
)
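# Discover the package's native dependencies: list every compiled extension
# (*.so) in the virtualenv, resolve the shared libraries they link against
# with ldd, map each library back to the Debian package that owns it via
# `dpkg -S`, and de-duplicate the resulting package names.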
depends = check_output((
    r'find %s -iname "*.so" -exec ldd {} \; | '
'''awk '{print $1}' | '''
'sort -u | '
'xargs dpkg -S | '
'''awk '{print $1}' | '''
'sort -u | '
'''cut -d ':' -f1 | sort -u'''
) % ENV_PATH, shell=True).decode('utf-8').splitlines()
for depend in depends:
args += ('-d', depend)
args += (
'{0}/={0}/'.format(ENV_PATH),
'/usr/bin/pypi-server=/usr/bin/pypi-server',
'/mnt/contrib/pypi-server.conf.example=/etc/pypi-server.conf',
)
fpm[args] & plumbum.FG
| 29.380952
| 93
| 0.605619
|
import os
from subprocess import check_output
import plumbum
from plumbum.cmd import grep, fpm, ln, sort, find, virtualenv
import logging
log = logging.getLogger()
logging.basicConfig(level=logging.INFO)
ENV_PATH = os.getenv("ENV_PATH", "/usr/share/python3/pypi-server")
SRC_PATH = os.getenv("SRC_PATH", "/mnt")
pip = plumbum.local[os.path.join(ENV_PATH, 'bin', 'pip3')]
log.info("Creating virtualenv %r", ENV_PATH)
virtualenv['-p', 'python3', ENV_PATH] & plumbum.FG
log.info("Installing package %r", SRC_PATH)
pip['install', '--no-binary=:all:', '-U', "{}[postgres]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[proxy]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[mysql]".format(SRC_PATH)] & plumbum.FG
ln['-snf', os.path.join(ENV_PATH, "bin", "pypi-server"), "/usr/bin/pypi-server"] & plumbum.BG
version = (pip['show', 'pypi-server'] | grep['^Version']) & plumbum.BG
version.wait()
version = version.stdout.strip().replace("Version:", '').strip()
args = (
'-s', 'dir',
'-f', '-t', 'deb',
'--iteration', os.getenv('ITERATION', '0'),
'-n', 'pypi-server',
'--config-files', '/etc/pypi-server.conf',
'--deb-systemd', '/mnt/contrib/pypi-server.service',
'-v', version,
'-p', "/mnt/dist",
'-d', 'python3',
'-d', 'python3-distutils',
)
depends = check_output((
    r'find %s -iname "*.so" -exec ldd {} \; | '
'''awk '{print $1}' | '''
'sort -u | '
'xargs dpkg -S | '
'''awk '{print $1}' | '''
'sort -u | '
'''cut -d ':' -f1 | sort -u'''
) % ENV_PATH, shell=True).decode('utf-8').splitlines()
for depend in depends:
args += ('-d', depend)
args += (
'{0}/={0}/'.format(ENV_PATH),
'/usr/bin/pypi-server=/usr/bin/pypi-server',
'/mnt/contrib/pypi-server.conf.example=/etc/pypi-server.conf',
)
fpm[args] & plumbum.FG
| true
| true
|
790248fc6aadfb2df0963cfb95398dc94f7864da
| 6,983
|
py
|
Python
|
tests/test_simulate.py
|
bshapiroalbert/PsrSigSim
|
74bb40814295fb6ef84aa932a0de2f684162b8c4
|
[
"MIT"
] | 1
|
2021-09-06T09:03:38.000Z
|
2021-09-06T09:03:38.000Z
|
tests/test_simulate.py
|
bshapiroalbert/PsrSigSim
|
74bb40814295fb6ef84aa932a0de2f684162b8c4
|
[
"MIT"
] | 1
|
2020-12-21T18:02:57.000Z
|
2020-12-21T22:07:17.000Z
|
tests/test_simulate.py
|
bshapiroalbert/PsrSigSim
|
74bb40814295fb6ef84aa932a0de2f684162b8c4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import glob
from psrsigsim.signal.fb_signal import FilterBankSignal
from psrsigsim.pulsar.pulsar import Pulsar
from psrsigsim.pulsar.portraits import DataPortrait
from psrsigsim.pulsar.profiles import DataProfile
from psrsigsim.ism.ism import ISM
from psrsigsim.telescope.telescope import Telescope
from psrsigsim.telescope.receiver import Receiver
from psrsigsim.telescope.backend import Backend
from psrsigsim.io.psrfits import PSRFITS
from psrsigsim.utils.utils import make_quant
from psrsigsim.io.txtfile import TxtFile
from psrsigsim.simulate.simulate import Simulation
@pytest.fixture
def j1713_profile():
"""
Numpy array of J1713+0747 profile.
"""
path = 'psrsigsim/data/J1713+0747_profile.npy'
return np.load(path)
@pytest.fixture
def PSRfits():
"""
Fixture psrfits class
"""
fitspath = "data/test.fits"
tempfits = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm"
return PSRFITS(path=fitspath, template=tempfits, fits_mode='copy')
@pytest.fixture
def param_dict():
"""
Fixture parameter dictionary.
"""
pdict = {'fcent' : 430,
'bandwidth' : 100,
'sample_rate' : 1.5625,
'dtype' : np.float32,
'Npols' : 1,
'Nchan' : 64,
'sublen' : 2.0,
'fold' : True,
'period' : 1.0,
'Smean' : 1.0,
'profiles' : [0.5, 0.5, 1.0], # Gaussian
'tobs' : 4.0,
'name' : 'J0000+0000',
'dm' : 10.0,
'tau_d' : 50e-9,
'tau_d_ref_f' : 1500.0,
'aperture' : 100.0,
'area' : 5500.0,
'Tsys' : 35.0,
'tscope_name' : "TestScope",
'system_name' : "TestSys",
'rcvr_fcent' : 430,
'rcvr_bw' : 100,
'rcvr_name' : "TestRCVR",
'backend_samprate' : 1.5625,
'backend_name' : "TestBack",
'tempfile' : None,
'parfile' : None,
}
return pdict
@pytest.fixture
def simulation():
"""
Fixture Simulation class. Cannot be the only simulation tested.
"""
sim = Simulation(fcent = 430,
bandwidth = 100,
sample_rate = 1.0*2048*10**-6,
dtype = np.float32,
Npols = 1,
Nchan = 64,
sublen = 2.0,
fold = True,
period = 1.0,
Smean = 1.0,
profiles = None,
tobs = 4.0,
name = 'J0000+0000',
dm = 10.0,
tau_d = 50e-9,
tau_d_ref_f = 1500.0,
aperture = 100.0,
area = 5500.0,
Tsys = 35.0,
tscope_name = "TestScope",
system_name = "TestSys",
rcvr_fcent = 430,
rcvr_bw = 100,
rcvr_name ="TestRCVR",
backend_samprate = 1.5625,
backend_name = "TestBack",
tempfile = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm",
parfile = None,
psrdict = None)
return sim
def test_initsim(param_dict):
"""
Test initializing the simulation from dictionary, parfile
"""
sim = Simulation(psrdict = param_dict)
with pytest.raises(NotImplementedError):
sim2 = Simulation(parfile = "testpar.par")
def test_initsig(simulation):
"""
Test init_signal function.
"""
# Test from input params
simulation.init_signal()
# Test from template file
simulation.init_signal(from_template = True)
def test_initprof(simulation, j1713_profile):
"""
Test init_profile function.
"""
# Test no input
simulation.init_profile()
# Test function input
with pytest.raises(NotImplementedError):
def gprof(x, p0):
return p0[0]* np.exp(-0.5*((x-p0[1])/(p0[2]))**2)
simulation._profiles = gprof
simulation.init_profile()
# Test Gaussian as input
simulation._profiles = [0.5, 0.5, 1.0]
simulation.init_profile()
# Test data array as input
simulation._profiles = j1713_profile
simulation.init_profile()
# Test array that's not long enough
with pytest.raises(RuntimeError):
simulation._profiles = [0.5, 0.5]
simulation.init_profile()
# Test profile class as input
pr = DataProfile(j1713_profile,phases=None)
print(type(pr), pr)
simulation._profiles = pr
simulation.init_profile()
def test_initpsr(simulation):
"""
Test init_pulsar function.
"""
simulation.init_pulsar()
def test_initism(simulation):
"""
Test init_ism function.
"""
simulation.init_ism()
def test_inittscope(simulation):
"""
Test init_telescope function.
"""
# Test init GBT
simulation._tscope_name = "GBT"
simulation.init_telescope()
# Test init Arecibo
simulation._tscope_name = "Arecibo"
simulation.init_telescope()
# Test input telescope
simulation._tscope_name = "TestScope"
simulation.init_telescope()
# Test list of systems for telescope
simulation._system_name = ["Sys1", "Sys2"]
simulation._rcvr_fcent = [430, 800]
simulation._rcvr_bw = [100, 200]
simulation._rcvr_name = ["R1", "R2"]
simulation._backend_samprate = [1.5625, 12.5]
simulation._backend_name = ["B1", "B2"]
simulation.init_telescope()
# And the catch with multiple systems
with pytest.raises(RuntimeError):
simulation._backend_name = ["B1", "B2", "B3"]
simulation.init_telescope()
def test_simulate(simulation):
"""
Test simulate function.
"""
simulation.simulate()
@pytest.mark.filterwarnings('ignore::fitsio.FITSRuntimeWarning')
def test_savesim(simulation, PSRfits):
"""
Test save simulation function.
"""
simulation._Nchan = 1
simulation._tobs = 2.0
#S = PSRfits.make_signal_from_psrfits()
#simulation._tobs = PSRfits.tsubint.value*PSRfits.nsubint
simulation.simulate(from_template = True)
# Try pdv format
simulation.save_simulation(out_format = "pdv")
# Try psrfits format
simulation.save_simulation(out_format = "psrfits", phaseconnect = False)
os.remove("sim_fits.fits")
# Try psrfits format with phaseconnect = True
#parfile = "data/test_parfile.par"
#simulation._parfile = parfile
#simulation.save_simulation(out_format = "psrfits", phaseconnect = True)
#os.remove("sim_fits.fits")
dfs = glob.glob("simfits*")
for df in dfs:
os.remove(df)
# Try psrfits with runtime error
# Try wrong output file type
with pytest.raises(RuntimeError):
simulation.save_simulation(out_format = "wrong_fmt")
simulation._tempfile = None
simulation.save_simulation(out_format = "psrfits")
| 30.229437
| 76
| 0.597594
|
import pytest
import os
import numpy as np
import glob
from psrsigsim.signal.fb_signal import FilterBankSignal
from psrsigsim.pulsar.pulsar import Pulsar
from psrsigsim.pulsar.portraits import DataPortrait
from psrsigsim.pulsar.profiles import DataProfile
from psrsigsim.ism.ism import ISM
from psrsigsim.telescope.telescope import Telescope
from psrsigsim.telescope.receiver import Receiver
from psrsigsim.telescope.backend import Backend
from psrsigsim.io.psrfits import PSRFITS
from psrsigsim.utils.utils import make_quant
from psrsigsim.io.txtfile import TxtFile
from psrsigsim.simulate.simulate import Simulation
@pytest.fixture
def j1713_profile():
path = 'psrsigsim/data/J1713+0747_profile.npy'
return np.load(path)
@pytest.fixture
def PSRfits():
fitspath = "data/test.fits"
tempfits = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm"
return PSRFITS(path=fitspath, template=tempfits, fits_mode='copy')
@pytest.fixture
def param_dict():
pdict = {'fcent' : 430,
'bandwidth' : 100,
'sample_rate' : 1.5625,
'dtype' : np.float32,
'Npols' : 1,
'Nchan' : 64,
'sublen' : 2.0,
'fold' : True,
'period' : 1.0,
'Smean' : 1.0,
'profiles' : [0.5, 0.5, 1.0],
'tobs' : 4.0,
'name' : 'J0000+0000',
'dm' : 10.0,
'tau_d' : 50e-9,
'tau_d_ref_f' : 1500.0,
'aperture' : 100.0,
'area' : 5500.0,
'Tsys' : 35.0,
'tscope_name' : "TestScope",
'system_name' : "TestSys",
'rcvr_fcent' : 430,
'rcvr_bw' : 100,
'rcvr_name' : "TestRCVR",
'backend_samprate' : 1.5625,
'backend_name' : "TestBack",
'tempfile' : None,
'parfile' : None,
}
return pdict
@pytest.fixture
def simulation():
sim = Simulation(fcent = 430,
bandwidth = 100,
sample_rate = 1.0*2048*10**-6,
dtype = np.float32,
Npols = 1,
Nchan = 64,
sublen = 2.0,
fold = True,
period = 1.0,
Smean = 1.0,
profiles = None,
tobs = 4.0,
name = 'J0000+0000',
dm = 10.0,
tau_d = 50e-9,
tau_d_ref_f = 1500.0,
aperture = 100.0,
area = 5500.0,
Tsys = 35.0,
tscope_name = "TestScope",
system_name = "TestSys",
rcvr_fcent = 430,
rcvr_bw = 100,
rcvr_name ="TestRCVR",
backend_samprate = 1.5625,
backend_name = "TestBack",
tempfile = "data/B1855+09.L-wide.PUPPI.11y.x.sum.sm",
parfile = None,
psrdict = None)
return sim
def test_initsim(param_dict):
sim = Simulation(psrdict = param_dict)
with pytest.raises(NotImplementedError):
sim2 = Simulation(parfile = "testpar.par")
def test_initsig(simulation):
simulation.init_signal()
simulation.init_signal(from_template = True)
def test_initprof(simulation, j1713_profile):
simulation.init_profile()
with pytest.raises(NotImplementedError):
def gprof(x, p0):
return p0[0]* np.exp(-0.5*((x-p0[1])/(p0[2]))**2)
simulation._profiles = gprof
simulation.init_profile()
simulation._profiles = [0.5, 0.5, 1.0]
simulation.init_profile()
simulation._profiles = j1713_profile
simulation.init_profile()
with pytest.raises(RuntimeError):
simulation._profiles = [0.5, 0.5]
simulation.init_profile()
# Test profile class as input
pr = DataProfile(j1713_profile,phases=None)
print(type(pr), pr)
simulation._profiles = pr
simulation.init_profile()
def test_initpsr(simulation):
simulation.init_pulsar()
def test_initism(simulation):
simulation.init_ism()
def test_inittscope(simulation):
# Test init GBT
simulation._tscope_name = "GBT"
simulation.init_telescope()
# Test init Arecibo
simulation._tscope_name = "Arecibo"
simulation.init_telescope()
# Test input telescope
simulation._tscope_name = "TestScope"
simulation.init_telescope()
# Test list of systems for telescope
simulation._system_name = ["Sys1", "Sys2"]
simulation._rcvr_fcent = [430, 800]
simulation._rcvr_bw = [100, 200]
simulation._rcvr_name = ["R1", "R2"]
simulation._backend_samprate = [1.5625, 12.5]
simulation._backend_name = ["B1", "B2"]
simulation.init_telescope()
# And the catch with multiple systems
with pytest.raises(RuntimeError):
simulation._backend_name = ["B1", "B2", "B3"]
simulation.init_telescope()
def test_simulate(simulation):
simulation.simulate()
@pytest.mark.filterwarnings('ignore::fitsio.FITSRuntimeWarning')
def test_savesim(simulation, PSRfits):
simulation._Nchan = 1
simulation._tobs = 2.0
#S = PSRfits.make_signal_from_psrfits()
#simulation._tobs = PSRfits.tsubint.value*PSRfits.nsubint
simulation.simulate(from_template = True)
# Try pdv format
simulation.save_simulation(out_format = "pdv")
# Try psrfits format
simulation.save_simulation(out_format = "psrfits", phaseconnect = False)
os.remove("sim_fits.fits")
# Try psrfits format with phaseconnect = True
#parfile = "data/test_parfile.par"
#simulation._parfile = parfile
#simulation.save_simulation(out_format = "psrfits", phaseconnect = True)
#os.remove("sim_fits.fits")
dfs = glob.glob("simfits*")
for df in dfs:
os.remove(df)
# Try psrfits with runtime error
# Try wrong output file type
with pytest.raises(RuntimeError):
simulation.save_simulation(out_format = "wrong_fmt")
simulation._tempfile = None
simulation.save_simulation(out_format = "psrfits")
| true
| true
|
7902498bb5bac09d154ce48ad7e0224f31b8f5d5
| 612
|
py
|
Python
|
notebooks/develop/2021-02-18-gc-bawe-data-grouping.py
|
grchristensen/avpd
|
f7617844ae454a93825aa231e04c125cb4e58a20
|
[
"Apache-2.0"
] | null | null | null |
notebooks/develop/2021-02-18-gc-bawe-data-grouping.py
|
grchristensen/avpd
|
f7617844ae454a93825aa231e04c125cb4e58a20
|
[
"Apache-2.0"
] | 9
|
2021-03-04T20:29:54.000Z
|
2021-03-31T22:03:51.000Z
|
notebooks/develop/2021-02-18-gc-bawe-data-grouping.py
|
grchristensen/avpd
|
f7617844ae454a93825aa231e04c125cb4e58a20
|
[
"Apache-2.0"
] | 3
|
2021-01-30T02:19:07.000Z
|
2021-04-11T19:48:37.000Z
|
import pickle
import os
from tqdm import tqdm
with open('../data/bawe_splits.p', 'rb') as f:
splits = pickle.load(f)
if not os.path.isdir('../data/preprocess/bawe-group'):
os.mkdir('../data/preprocess/bawe-group')
for filename in tqdm(splits['train']):
id = filename[:4]
with open(f'../data/bawe/CORPUS_TXT/{filename}', 'r') as f:
if not os.path.isdir(f'../data/preprocess/bawe-group/{id}'):
os.mkdir(f'../data/preprocess/bawe-group/{id}')
text = f.read()
with open(f'../data/preprocess/bawe-group/{id}/{filename}', 'w') as wf:
wf.write(text)
| 29.142857
| 79
| 0.609477
|
import pickle
import os
from tqdm import tqdm
with open('../data/bawe_splits.p', 'rb') as f:
splits = pickle.load(f)
if not os.path.isdir('../data/preprocess/bawe-group'):
os.mkdir('../data/preprocess/bawe-group')
for filename in tqdm(splits['train']):
id = filename[:4]
with open(f'../data/bawe/CORPUS_TXT/{filename}', 'r') as f:
if not os.path.isdir(f'../data/preprocess/bawe-group/{id}'):
os.mkdir(f'../data/preprocess/bawe-group/{id}')
text = f.read()
with open(f'../data/preprocess/bawe-group/{id}/{filename}', 'w') as wf:
wf.write(text)
| true
| true
|
7902498c94d5d4db643f4846d9e565ea70b88a18
| 3,769
|
py
|
Python
|
pypureclient/flasharray/FA_2_11/models/port_common.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_11/models/port_common.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_11/models/port_common.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class PortCommon(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'iqn': 'str',
'nqn': 'str',
'portal': 'str',
'wwn': 'str'
}
attribute_map = {
'iqn': 'iqn',
'nqn': 'nqn',
'portal': 'portal',
'wwn': 'wwn'
}
required_args = {
}
def __init__(
self,
iqn=None, # type: str
nqn=None, # type: str
portal=None, # type: str
wwn=None, # type: str
):
"""
Keyword args:
iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).
nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).
portal (str): IP and port number (or `null` if target is not iSCSI).
wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).
"""
if iqn is not None:
self.iqn = iqn
if nqn is not None:
self.nqn = nqn
if portal is not None:
self.portal = portal
if wwn is not None:
self.wwn = wwn
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PortCommon`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortCommon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
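# A brief usage sketch (illustrative; the attribute values below are made up):
# the model only accepts keys declared in `attribute_map`, and `to_dict()`
# serializes whichever attributes were actually set.
if __name__ == '__main__':
    port = PortCommon(iqn='iqn.2010-06.com.purestorage:flasharray.example',
                      portal='192.0.2.10:3260')
    print(port.to_dict())  # -> {'iqn': 'iqn.2010-06...', 'portal': '192.0.2.10:3260'}
    try:
        port.speed = 16  # not in attribute_map, so __setattr__ rejects it
    except KeyError as err:
        print(err)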
| 28.992308
| 105
| 0.529053
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class PortCommon(object):
swagger_types = {
'iqn': 'str',
'nqn': 'str',
'portal': 'str',
'wwn': 'str'
}
attribute_map = {
'iqn': 'iqn',
'nqn': 'nqn',
'portal': 'portal',
'wwn': 'wwn'
}
required_args = {
}
def __init__(
self,
iqn=None,
nqn=None,
portal=None,
wwn=None,
):
if iqn is not None:
self.iqn = iqn
if nqn is not None:
self.nqn = nqn
if portal is not None:
self.portal = portal
if wwn is not None:
self.wwn = wwn
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PortCommon`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PortCommon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
79024ab06d60833685580286314d96518fe7e45a
| 6,004
|
py
|
Python
|
scripts/train_dqn.py
|
johny-c/noge
|
88e68ba8c51ff0d63577991e233e9110cb76e228
|
[
"MIT"
] | null | null | null |
scripts/train_dqn.py
|
johny-c/noge
|
88e68ba8c51ff0d63577991e233e9110cb76e228
|
[
"MIT"
] | null | null | null |
scripts/train_dqn.py
|
johny-c/noge
|
88e68ba8c51ff0d63577991e233e9110cb76e228
|
[
"MIT"
] | null | null | null |
import copy
import torch
import logging
import numpy as np
from sacred import Experiment
from noge.data_loaders import get_datasets, get_test_loader, get_train_generator
from noge.factory import make_env, make_memory
from noge.network import make_network
from noge.agent import Actor, main_loop, loop_ing
from noge.trainers import DQNTrainer, Replay
from noge.policies import LinearSchedule, GraphDQNPolicy
from noge.preprocessors import Preprocessor
from noge.evaluation import Evaluator, eval_ing
from noge.constants import CONFIGS_DIR, EVAL_DIR
from xlog.utils import get_logger
from xlog.mlflow_observer import MlflowObserver
ex = Experiment(name='NOGE_DQN', ingredients=[eval_ing, loop_ing])
ex.add_config(str(CONFIGS_DIR / 'dqn.yaml'))
ex.logger = get_logger(__name__, level=logging.INFO)
ex.observers = [MlflowObserver(tracking_uri=str(EVAL_DIR.absolute()))]
@ex.automain
def train(dataset, test_size, max_episode_steps, reward_type, input_meas_type, meas_transform,
target_transform, node_history, gamma, target_update_freq,
cat_features, feature_range, replay_capacity, min_horizon, epsilon_start, epsilon_end,
exploration_frac, n_train_steps, train_freq, loss, batch_size, lr, n_test_episodes, init_eval,
n_eval_artifacts, test_freq, log_freq, device, seed, data_seed, save_model, _log, _run, _config):
np.set_printoptions(precision=2, suppress=True)
if device.startswith('cuda'):
assert torch.cuda.is_available()
logger = _log
device = torch.device(device)
# data source
train_set, test_set = get_datasets(dataset, seed=data_seed, test_size=test_size)
max_nodes = max(train_set.max_nodes, test_set.max_nodes)
max_edges = 2 * max(train_set.max_edges, test_set.max_edges) # for undirected graphs, consider both directions
test_loader = get_test_loader(test_set, seed=seed, num_samples=n_test_episodes)
train_gen = get_train_generator(train_set, seed=seed)
preprocessor = Preprocessor(input_meas_type=input_meas_type,
output_meas_type=input_meas_type,
feature_range=feature_range,
meas_transform=meas_transform,
target_transform=target_transform,
temporal_offsets=[1.],
max_nodes=max_nodes,
device=device)
# environment
train_env_config = dict(
max_episode_steps=max_episode_steps,
reward_type=reward_type,
max_nodes=max_nodes,
max_edges=max_edges,
nn_feat='N' in cat_features,
)
train_env = make_env(**train_env_config, data_generator=train_gen, seed=seed)
test_env_config = copy.deepcopy(train_env_config)
test_env_config.update(sample_goals=False, data_generator=None)
test_env = make_env(**test_env_config, seed=seed)
# graph memory + graph preprocessing
neg_label, pos_label = feature_range
mem_features = dict(cat=cat_features)
graph_mem_config = dict(
max_episode_steps=max_episode_steps,
max_nodes=max_nodes,
max_edges=max_edges,
history=node_history,
memory_type='cat',
features=mem_features,
neg_label=neg_label,
pos_label=pos_label
)
eval_memory = make_memory(online=True, **graph_mem_config)
acting_memory = make_memory(online=True, **graph_mem_config)
# model
model_config = dict(
dim_node=eval_memory.dim_node,
dim_meas=preprocessor.dim_input_meas,
dim_goal=1,
max_edges=max_edges,
**_config['model']
)
network = make_network(**model_config).to(device)
# evaluation
eval_policy = GraphDQNPolicy(network, eval_memory, preprocessor=preprocessor, device=device)
evaluator = Evaluator(test_loader, test_env, eval_policy)
# experience collecting policy
exploration_steps = int(exploration_frac * n_train_steps)
exploration_schedule = LinearSchedule(epsilon_start, epsilon_end, exploration_steps)
acting_policy = GraphDQNPolicy(network,
graph_memory=acting_memory,
preprocessor=preprocessor,
exploration_schedule=exploration_schedule,
device=device)
# replay buffer
replay_buffer = Replay(capacity=replay_capacity,
ob_space=train_env.observation_space,
graph_mem_config=graph_mem_config,
min_horizon=min_horizon)
# actor: runs the simulation forward and stores to the replay buffer
actor = Actor(train_env, acting_policy, replay_buffer)
# trainer
optimizer = torch.optim.Adam(network.parameters(), lr=lr)
if loss == 'mse':
criterion = torch.nn.MSELoss()
else:
raise ValueError(f"Unsupported loss: {loss}")
trainer = DQNTrainer(gamma=gamma,
target_update_freq=target_update_freq,
replay_buffer=replay_buffer,
batch_size=batch_size,
network=network,
preprocessor=preprocessor,
criterion=criterion,
optimizer=optimizer,
device=device)
# fill up the replay buffer
network.eval()
logger.info(f"Filling up the replay buffer...")
actor.step(n=replay_capacity, use_tqdm=True)
logger.info(f"Replay buffer filled: [{len(replay_buffer)} / {replay_capacity}]")
# fit the preprocessor with buffer data
preprocessor.fit(replay_buffer._measurements)
best_perf = main_loop(actor, trainer, evaluator, network, exploration_schedule,
init_eval, n_eval_artifacts, n_train_steps, train_freq, log_freq, test_freq, save_model)
train_env.close()
evaluator.close()
return best_perf
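# For reference, the exploration schedule used above is conventionally a
# clamped linear interpolation from epsilon_start down to epsilon_end over the
# exploration steps. A minimal sketch under that assumption (the actual
# noge.policies.LinearSchedule may differ in interface or details):
class _LinearScheduleSketch:
    def __init__(self, start, end, n_steps):
        self.start, self.end, self.n_steps = start, end, n_steps

    def value(self, step):
        # Fraction of the exploration phase completed, clamped to [0, 1].
        frac = min(max(step / float(self.n_steps), 0.0), 1.0)
        return self.start + frac * (self.end - self.start)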
| 38.987013
| 115
| 0.673051
|
import copy
import torch
import logging
import numpy as np
from sacred import Experiment
from noge.data_loaders import get_datasets, get_test_loader, get_train_generator
from noge.factory import make_env, make_memory
from noge.network import make_network
from noge.agent import Actor, main_loop, loop_ing
from noge.trainers import DQNTrainer, Replay
from noge.policies import LinearSchedule, GraphDQNPolicy
from noge.preprocessors import Preprocessor
from noge.evaluation import Evaluator, eval_ing
from noge.constants import CONFIGS_DIR, EVAL_DIR
from xlog.utils import get_logger
from xlog.mlflow_observer import MlflowObserver
ex = Experiment(name='NOGE_DQN', ingredients=[eval_ing, loop_ing])
ex.add_config(str(CONFIGS_DIR / 'dqn.yaml'))
ex.logger = get_logger(__name__, level=logging.INFO)
ex.observers = [MlflowObserver(tracking_uri=str(EVAL_DIR.absolute()))]
@ex.automain
def train(dataset, test_size, max_episode_steps, reward_type, input_meas_type, meas_transform,
target_transform, node_history, gamma, target_update_freq,
cat_features, feature_range, replay_capacity, min_horizon, epsilon_start, epsilon_end,
exploration_frac, n_train_steps, train_freq, loss, batch_size, lr, n_test_episodes, init_eval,
n_eval_artifacts, test_freq, log_freq, device, seed, data_seed, save_model, _log, _run, _config):
np.set_printoptions(precision=2, suppress=True)
if device.startswith('cuda'):
assert torch.cuda.is_available()
logger = _log
device = torch.device(device)
train_set, test_set = get_datasets(dataset, seed=data_seed, test_size=test_size)
max_nodes = max(train_set.max_nodes, test_set.max_nodes)
max_edges = 2 * max(train_set.max_edges, test_set.max_edges)
test_loader = get_test_loader(test_set, seed=seed, num_samples=n_test_episodes)
train_gen = get_train_generator(train_set, seed=seed)
preprocessor = Preprocessor(input_meas_type=input_meas_type,
output_meas_type=input_meas_type,
feature_range=feature_range,
meas_transform=meas_transform,
target_transform=target_transform,
temporal_offsets=[1.],
max_nodes=max_nodes,
device=device)
train_env_config = dict(
max_episode_steps=max_episode_steps,
reward_type=reward_type,
max_nodes=max_nodes,
max_edges=max_edges,
nn_feat='N' in cat_features,
)
train_env = make_env(**train_env_config, data_generator=train_gen, seed=seed)
test_env_config = copy.deepcopy(train_env_config)
test_env_config.update(sample_goals=False, data_generator=None)
test_env = make_env(**test_env_config, seed=seed)
neg_label, pos_label = feature_range
mem_features = dict(cat=cat_features)
graph_mem_config = dict(
max_episode_steps=max_episode_steps,
max_nodes=max_nodes,
max_edges=max_edges,
history=node_history,
memory_type='cat',
features=mem_features,
neg_label=neg_label,
pos_label=pos_label
)
eval_memory = make_memory(online=True, **graph_mem_config)
acting_memory = make_memory(online=True, **graph_mem_config)
model_config = dict(
dim_node=eval_memory.dim_node,
dim_meas=preprocessor.dim_input_meas,
dim_goal=1,
max_edges=max_edges,
**_config['model']
)
network = make_network(**model_config).to(device)
eval_policy = GraphDQNPolicy(network, eval_memory, preprocessor=preprocessor, device=device)
evaluator = Evaluator(test_loader, test_env, eval_policy)
exploration_steps = int(exploration_frac * n_train_steps)
exploration_schedule = LinearSchedule(epsilon_start, epsilon_end, exploration_steps)
acting_policy = GraphDQNPolicy(network,
graph_memory=acting_memory,
preprocessor=preprocessor,
exploration_schedule=exploration_schedule,
device=device)
replay_buffer = Replay(capacity=replay_capacity,
ob_space=train_env.observation_space,
graph_mem_config=graph_mem_config,
min_horizon=min_horizon)
actor = Actor(train_env, acting_policy, replay_buffer)
optimizer = torch.optim.Adam(network.parameters(), lr=lr)
if loss == 'mse':
criterion = torch.nn.MSELoss()
else:
raise ValueError(f"Unsupported loss: {loss}")
trainer = DQNTrainer(gamma=gamma,
target_update_freq=target_update_freq,
replay_buffer=replay_buffer,
batch_size=batch_size,
network=network,
preprocessor=preprocessor,
criterion=criterion,
optimizer=optimizer,
device=device)
network.eval()
logger.info(f"Filling up the replay buffer...")
actor.step(n=replay_capacity, use_tqdm=True)
logger.info(f"Replay buffer filled: [{len(replay_buffer)} / {replay_capacity}]")
preprocessor.fit(replay_buffer._measurements)
best_perf = main_loop(actor, trainer, evaluator, network, exploration_schedule,
init_eval, n_eval_artifacts, n_train_steps, train_freq, log_freq, test_freq, save_model)
train_env.close()
evaluator.close()
return best_perf
| true
| true
|
79024b605c5c11733a72cc63b056b4003d16736e
| 12,159
|
py
|
Python
|
netbox/dcim/filters.py
|
team-telnyx/netbox
|
4980774bec1bc691cf499d7768b2a93be3ba6bb2
|
[
"Apache-2.0"
] | 3
|
2017-05-06T11:21:19.000Z
|
2018-01-10T22:38:30.000Z
|
netbox/dcim/filters.py
|
team-telnyx/netbox
|
4980774bec1bc691cf499d7768b2a93be3ba6bb2
|
[
"Apache-2.0"
] | 1
|
2019-06-29T17:08:47.000Z
|
2019-06-29T17:08:47.000Z
|
netbox/dcim/filters.py
|
team-telnyx/netbox
|
4980774bec1bc691cf499d7768b2a93be3ba6bb2
|
[
"Apache-2.0"
] | 4
|
2017-02-01T22:53:14.000Z
|
2017-06-22T18:05:03.000Z
|
import django_filters
from netaddr.core import AddrFormatError
from django.db.models import Q
from extras.filters import CustomFieldFilterSet
from tenancy.models import Tenant
from utilities.filters import NullableModelMultipleChoiceFilter
from .models import (
ConsolePort, ConsoleServerPort, Device, DeviceRole, DeviceType, Interface, InterfaceConnection, Manufacturer,
Platform, PowerOutlet, PowerPort, Rack, RackGroup, RackRole, Site,
)
class SiteFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
class Meta:
model = Site
fields = ['q', 'name', 'facility', 'asn']
def search(self, queryset, value):
qs_filter = Q(name__icontains=value) | Q(facility__icontains=value) | Q(physical_address__icontains=value) | \
Q(shipping_address__icontains=value) | Q(comments__icontains=value)
try:
qs_filter |= Q(asn=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
class RackGroupFilter(django_filters.FilterSet):
site_id = django_filters.ModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
class Meta:
model = RackGroup
class RackFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
site_id = django_filters.ModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
group_id = NullableModelMultipleChoiceFilter(
name='group',
queryset=RackGroup.objects.all(),
label='Group (ID)',
)
group = NullableModelMultipleChoiceFilter(
name='group',
queryset=RackGroup.objects.all(),
to_field_name='slug',
label='Group',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
role_id = NullableModelMultipleChoiceFilter(
name='role',
queryset=RackRole.objects.all(),
label='Role (ID)',
)
role = NullableModelMultipleChoiceFilter(
name='role',
queryset=RackRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
class Meta:
model = Rack
fields = ['u_height']
def search(self, queryset, value):
return queryset.filter(
Q(name__icontains=value) |
Q(facility_id__icontains=value) |
Q(comments__icontains=value)
)
class DeviceTypeFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
manufacturer_id = django_filters.ModelMultipleChoiceFilter(
name='manufacturer',
queryset=Manufacturer.objects.all(),
label='Manufacturer (ID)',
)
manufacturer = django_filters.ModelMultipleChoiceFilter(
name='manufacturer__slug',
queryset=Manufacturer.objects.all(),
to_field_name='slug',
label='Manufacturer (slug)',
)
class Meta:
model = DeviceType
fields = ['model', 'part_number', 'u_height', 'is_console_server', 'is_pdu', 'is_network_device',
'subdevice_role']
def search(self, queryset, value):
return queryset.filter(
Q(manufacturer__name__icontains=value) |
Q(model__icontains=value) |
Q(part_number__icontains=value) |
Q(comments__icontains=value)
)
class DeviceFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
mac_address = django_filters.MethodFilter(
action='_mac_address',
label='MAC address',
)
site_id = django_filters.ModelMultipleChoiceFilter(
name='rack__site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='rack__site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site name (slug)',
)
rack_group_id = django_filters.ModelMultipleChoiceFilter(
name='rack__group',
queryset=RackGroup.objects.all(),
label='Rack group (ID)',
)
rack_id = django_filters.ModelMultipleChoiceFilter(
name='rack',
queryset=Rack.objects.all(),
label='Rack (ID)',
)
role_id = django_filters.ModelMultipleChoiceFilter(
name='device_role',
queryset=DeviceRole.objects.all(),
label='Role (ID)',
)
role = django_filters.ModelMultipleChoiceFilter(
name='device_role__slug',
queryset=DeviceRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
device_type_id = django_filters.ModelMultipleChoiceFilter(
name='device_type',
queryset=DeviceType.objects.all(),
label='Device type (ID)',
)
manufacturer_id = django_filters.ModelMultipleChoiceFilter(
name='device_type__manufacturer',
queryset=Manufacturer.objects.all(),
label='Manufacturer (ID)',
)
manufacturer = django_filters.ModelMultipleChoiceFilter(
name='device_type__manufacturer__slug',
queryset=Manufacturer.objects.all(),
to_field_name='slug',
label='Manufacturer (slug)',
)
model = django_filters.ModelMultipleChoiceFilter(
name='device_type__slug',
queryset=DeviceType.objects.all(),
to_field_name='slug',
label='Device model (slug)',
)
platform_id = NullableModelMultipleChoiceFilter(
name='platform',
queryset=Platform.objects.all(),
label='Platform (ID)',
)
platform = NullableModelMultipleChoiceFilter(
name='platform',
queryset=Platform.objects.all(),
to_field_name='slug',
label='Platform (slug)',
)
status = django_filters.BooleanFilter(
name='status',
label='Status',
)
is_console_server = django_filters.BooleanFilter(
name='device_type__is_console_server',
label='Is a console server',
)
is_pdu = django_filters.BooleanFilter(
name='device_type__is_pdu',
label='Is a PDU',
)
is_network_device = django_filters.BooleanFilter(
name='device_type__is_network_device',
label='Is a network device',
)
class Meta:
model = Device
fields = ['name', 'serial', 'asset_tag']
def search(self, queryset, value):
return queryset.filter(
Q(name__icontains=value) |
Q(serial__icontains=value.strip()) |
Q(modules__serial__icontains=value.strip()) |
Q(asset_tag=value.strip()) |
Q(comments__icontains=value)
).distinct()
def _mac_address(self, queryset, value):
value = value.strip()
if not value:
return queryset
try:
return queryset.filter(interfaces__mac_address=value).distinct()
except AddrFormatError:
return queryset.none()
class ConsolePortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = ConsolePort
fields = ['name']
class ConsoleServerPortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = ConsoleServerPort
fields = ['name']
class PowerPortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = PowerPort
fields = ['name']
class PowerOutletFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = PowerOutlet
fields = ['name']
class InterfaceFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = Interface
fields = ['name']
class ConsoleConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = ConsoleServerPort
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(cs_port__device__rack__site__slug=value)
class PowerConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = PowerOutlet
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(power_outlet__device__rack__site__slug=value)
class InterfaceConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = InterfaceConnection
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(interface_a__device__rack__site__slug=value) |
Q(interface_b__device__rack__site__slug=value)
)
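# A brief usage sketch (illustrative; assumes a populated database and the
# pre-1.0 django-filter API that MethodFilter belongs to): a FilterSet is
# instantiated with a dict of query parameters plus a base queryset, and the
# filtered queryset is read back from `.qs`, e.g.
#
#   devices = DeviceFilter(
#       {'q': 'leaf', 'site': ['nyc1']},
#       queryset=Device.objects.all(),
#   ).qs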
| 28.95
| 118
| 0.628999
|
import django_filters
from netaddr.core import AddrFormatError
from django.db.models import Q
from extras.filters import CustomFieldFilterSet
from tenancy.models import Tenant
from utilities.filters import NullableModelMultipleChoiceFilter
from .models import (
ConsolePort, ConsoleServerPort, Device, DeviceRole, DeviceType, Interface, InterfaceConnection, Manufacturer,
Platform, PowerOutlet, PowerPort, Rack, RackGroup, RackRole, Site,
)
class SiteFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
class Meta:
model = Site
fields = ['q', 'name', 'facility', 'asn']
def search(self, queryset, value):
qs_filter = Q(name__icontains=value) | Q(facility__icontains=value) | Q(physical_address__icontains=value) | \
Q(shipping_address__icontains=value) | Q(comments__icontains=value)
try:
qs_filter |= Q(asn=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
class RackGroupFilter(django_filters.FilterSet):
site_id = django_filters.ModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
class Meta:
model = RackGroup
class RackFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
site_id = django_filters.ModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
group_id = NullableModelMultipleChoiceFilter(
name='group',
queryset=RackGroup.objects.all(),
label='Group (ID)',
)
group = NullableModelMultipleChoiceFilter(
name='group',
queryset=RackGroup.objects.all(),
to_field_name='slug',
label='Group',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
role_id = NullableModelMultipleChoiceFilter(
name='role',
queryset=RackRole.objects.all(),
label='Role (ID)',
)
role = NullableModelMultipleChoiceFilter(
name='role',
queryset=RackRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
class Meta:
model = Rack
fields = ['u_height']
def search(self, queryset, value):
return queryset.filter(
Q(name__icontains=value) |
Q(facility_id__icontains=value) |
Q(comments__icontains=value)
)
class DeviceTypeFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
manufacturer_id = django_filters.ModelMultipleChoiceFilter(
name='manufacturer',
queryset=Manufacturer.objects.all(),
label='Manufacturer (ID)',
)
manufacturer = django_filters.ModelMultipleChoiceFilter(
name='manufacturer__slug',
queryset=Manufacturer.objects.all(),
to_field_name='slug',
label='Manufacturer (slug)',
)
class Meta:
model = DeviceType
fields = ['model', 'part_number', 'u_height', 'is_console_server', 'is_pdu', 'is_network_device',
'subdevice_role']
def search(self, queryset, value):
return queryset.filter(
Q(manufacturer__name__icontains=value) |
Q(model__icontains=value) |
Q(part_number__icontains=value) |
Q(comments__icontains=value)
)
class DeviceFilter(CustomFieldFilterSet, django_filters.FilterSet):
q = django_filters.MethodFilter(
action='search',
label='Search',
)
mac_address = django_filters.MethodFilter(
action='_mac_address',
label='MAC address',
)
site_id = django_filters.ModelMultipleChoiceFilter(
name='rack__site',
queryset=Site.objects.all(),
label='Site (ID)',
)
site = django_filters.ModelMultipleChoiceFilter(
name='rack__site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site name (slug)',
)
rack_group_id = django_filters.ModelMultipleChoiceFilter(
name='rack__group',
queryset=RackGroup.objects.all(),
label='Rack group (ID)',
)
rack_id = django_filters.ModelMultipleChoiceFilter(
name='rack',
queryset=Rack.objects.all(),
label='Rack (ID)',
)
role_id = django_filters.ModelMultipleChoiceFilter(
name='device_role',
queryset=DeviceRole.objects.all(),
label='Role (ID)',
)
role = django_filters.ModelMultipleChoiceFilter(
name='device_role__slug',
queryset=DeviceRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
tenant_id = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
device_type_id = django_filters.ModelMultipleChoiceFilter(
name='device_type',
queryset=DeviceType.objects.all(),
label='Device type (ID)',
)
manufacturer_id = django_filters.ModelMultipleChoiceFilter(
name='device_type__manufacturer',
queryset=Manufacturer.objects.all(),
label='Manufacturer (ID)',
)
manufacturer = django_filters.ModelMultipleChoiceFilter(
name='device_type__manufacturer__slug',
queryset=Manufacturer.objects.all(),
to_field_name='slug',
label='Manufacturer (slug)',
)
model = django_filters.ModelMultipleChoiceFilter(
name='device_type__slug',
queryset=DeviceType.objects.all(),
to_field_name='slug',
label='Device model (slug)',
)
platform_id = NullableModelMultipleChoiceFilter(
name='platform',
queryset=Platform.objects.all(),
label='Platform (ID)',
)
platform = NullableModelMultipleChoiceFilter(
name='platform',
queryset=Platform.objects.all(),
to_field_name='slug',
label='Platform (slug)',
)
status = django_filters.BooleanFilter(
name='status',
label='Status',
)
is_console_server = django_filters.BooleanFilter(
name='device_type__is_console_server',
label='Is a console server',
)
is_pdu = django_filters.BooleanFilter(
name='device_type__is_pdu',
label='Is a PDU',
)
is_network_device = django_filters.BooleanFilter(
name='device_type__is_network_device',
label='Is a network device',
)
class Meta:
model = Device
fields = ['name', 'serial', 'asset_tag']
def search(self, queryset, value):
return queryset.filter(
Q(name__icontains=value) |
Q(serial__icontains=value.strip()) |
Q(modules__serial__icontains=value.strip()) |
Q(asset_tag=value.strip()) |
Q(comments__icontains=value)
).distinct()
def _mac_address(self, queryset, value):
value = value.strip()
if not value:
return queryset
try:
return queryset.filter(interfaces__mac_address=value).distinct()
except AddrFormatError:
return queryset.none()
class ConsolePortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = ConsolePort
fields = ['name']
class ConsoleServerPortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = ConsoleServerPort
fields = ['name']
class PowerPortFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = PowerPort
fields = ['name']
class PowerOutletFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = PowerOutlet
fields = ['name']
class InterfaceFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = Interface
fields = ['name']
class ConsoleConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = ConsoleServerPort
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(cs_port__device__rack__site__slug=value)
class PowerConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = PowerOutlet
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(power_outlet__device__rack__site__slug=value)
class InterfaceConnectionFilter(django_filters.FilterSet):
site = django_filters.MethodFilter(
action='filter_site',
label='Site (slug)',
)
class Meta:
model = InterfaceConnection
def filter_site(self, queryset, value):
value = value.strip()
if not value:
return queryset
return queryset.filter(
Q(interface_a__device__rack__site__slug=value) |
Q(interface_b__device__rack__site__slug=value)
)
| true
| true
|
79024b64d46a47c2f213ec37d276a25c094aad81
| 4,020
|
py
|
Python
|
compiler-rt/test/memprof/lit.cfg.py
|
acidburn0zzz/llvm-project
|
7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
compiler-rt/test/memprof/lit.cfg.py
|
acidburn0zzz/llvm-project
|
7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
compiler-rt/test/memprof/lit.cfg.py
|
acidburn0zzz/llvm-project
|
7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
# -*- Python -*-
import os
import platform
import re
import lit.formats
# Get shlex.quote if available (added in 3.3), and fall back to pipes.quote if
# it's not available.
try:
import shlex
sh_quote = shlex.quote
except:
import pipes
sh_quote = pipes.quote
def get_required_attr(config, attr_name):
attr_value = getattr(config, attr_name, None)
if attr_value == None:
lit_config.fatal(
"No attribute %r in test configuration! You may need to run "
"tests from your build directory or add this attribute "
"to lit.site.cfg.py " % attr_name)
return attr_value
# Setup config name.
config.name = 'MemProfiler' + config.name_suffix
# Platform-specific default MEMPROF_OPTIONS for lit tests.
default_memprof_opts = list(config.default_sanitizer_opts)
default_memprof_opts_str = ':'.join(default_memprof_opts)
if default_memprof_opts_str:
config.environment['MEMPROF_OPTIONS'] = default_memprof_opts_str
config.substitutions.append(('%env_memprof_opts=',
'env MEMPROF_OPTIONS=' + default_memprof_opts_str))
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
libdl_flag = '-ldl'
# Setup default compiler flags used with -fmemory-profile option.
# FIXME: Review the set of required flags and check if it can be reduced.
target_cflags = [get_required_attr(config, 'target_cflags')]
target_cxxflags = config.cxx_mode_flags + target_cflags
clang_memprof_static_cflags = (['-fmemory-profile',
'-mno-omit-leaf-frame-pointer',
'-fno-omit-frame-pointer',
'-fno-optimize-sibling-calls'] +
config.debug_info_flags + target_cflags)
clang_memprof_static_cxxflags = config.cxx_mode_flags + clang_memprof_static_cflags
memprof_dynamic_flags = []
if config.memprof_dynamic:
memprof_dynamic_flags = ['-shared-libsan']
config.available_features.add('memprof-dynamic-runtime')
else:
config.available_features.add('memprof-static-runtime')
clang_memprof_cflags = clang_memprof_static_cflags + memprof_dynamic_flags
clang_memprof_cxxflags = clang_memprof_static_cxxflags + memprof_dynamic_flags
def build_invocation(compile_flags):
return ' ' + ' '.join([config.clang] + compile_flags) + ' '
config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
config.substitutions.append( ("%clang_memprof ", build_invocation(clang_memprof_cflags)) )
config.substitutions.append( ("%clangxx_memprof ", build_invocation(clang_memprof_cxxflags)) )
if config.memprof_dynamic:
shared_libmemprof_path = os.path.join(config.compiler_rt_libdir, 'libclang_rt.memprof{}.so'.format(config.target_suffix))
config.substitutions.append( ("%shared_libmemprof", shared_libmemprof_path) )
config.substitutions.append( ("%clang_memprof_static ", build_invocation(clang_memprof_static_cflags)) )
config.substitutions.append( ("%clangxx_memprof_static ", build_invocation(clang_memprof_static_cxxflags)) )
# Some tests use C++11 features such as lambdas and need to pass -std=c++11.
config.substitutions.append(("%stdcxx11 ", '-std=c++11 '))
config.substitutions.append( ("%libdl", libdl_flag) )
config.available_features.add('memprof-' + config.bits + '-bits')
config.available_features.add('fast-unwinder-works')
# Set LD_LIBRARY_PATH to pick dynamic runtime up properly.
new_ld_library_path = os.path.pathsep.join(
(config.compiler_rt_libdir, config.environment.get('LD_LIBRARY_PATH', '')))
config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
# Default test suffixes.
config.suffixes = ['.c', '.cpp']
config.substitutions.append(('%fPIC', '-fPIC'))
config.substitutions.append(('%fPIE', '-fPIE'))
config.substitutions.append(('%pie', '-pie'))
# Only run the tests on supported OSs.
if config.host_os not in ['Linux']:
config.unsupported = True
if not config.parallelism_group:
config.parallelism_group = 'shadow-memory'
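# For context, the substitutions configured above are consumed by RUN lines in
# the individual test files; a hypothetical memprof test would start with
# something like (illustrative only):
#
#   // RUN: %clangxx_memprof -O0 %s -o %t && %run %t
#
# which lit expands into the full clang invocation assembled by
# build_invocation() before running the resulting binary.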
| 38.653846
| 123
| 0.746766
|
import os
import platform
import re
import lit.formats
try:
import shlex
sh_quote = shlex.quote
except:
import pipes
sh_quote = pipes.quote
def get_required_attr(config, attr_name):
attr_value = getattr(config, attr_name, None)
if attr_value == None:
lit_config.fatal(
"No attribute %r in test configuration! You may need to run "
"tests from your build directory or add this attribute "
"to lit.site.cfg.py " % attr_name)
return attr_value
# Setup config name.
config.name = 'MemProfiler' + config.name_suffix
# Platform-specific default MEMPROF_OPTIONS for lit tests.
default_memprof_opts = list(config.default_sanitizer_opts)
default_memprof_opts_str = ':'.join(default_memprof_opts)
if default_memprof_opts_str:
config.environment['MEMPROF_OPTIONS'] = default_memprof_opts_str
config.substitutions.append(('%env_memprof_opts=',
'env MEMPROF_OPTIONS=' + default_memprof_opts_str))
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
libdl_flag = '-ldl'
# Setup default compiler flags used with -fmemory-profile option.
# FIXME: Review the set of required flags and check if it can be reduced.
target_cflags = [get_required_attr(config, 'target_cflags')]
target_cxxflags = config.cxx_mode_flags + target_cflags
clang_memprof_static_cflags = (['-fmemory-profile',
'-mno-omit-leaf-frame-pointer',
'-fno-omit-frame-pointer',
'-fno-optimize-sibling-calls'] +
config.debug_info_flags + target_cflags)
clang_memprof_static_cxxflags = config.cxx_mode_flags + clang_memprof_static_cflags
memprof_dynamic_flags = []
if config.memprof_dynamic:
memprof_dynamic_flags = ['-shared-libsan']
config.available_features.add('memprof-dynamic-runtime')
else:
config.available_features.add('memprof-static-runtime')
clang_memprof_cflags = clang_memprof_static_cflags + memprof_dynamic_flags
clang_memprof_cxxflags = clang_memprof_static_cxxflags + memprof_dynamic_flags
def build_invocation(compile_flags):
return ' ' + ' '.join([config.clang] + compile_flags) + ' '
config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
config.substitutions.append( ("%clang_memprof ", build_invocation(clang_memprof_cflags)) )
config.substitutions.append( ("%clangxx_memprof ", build_invocation(clang_memprof_cxxflags)) )
if config.memprof_dynamic:
shared_libmemprof_path = os.path.join(config.compiler_rt_libdir, 'libclang_rt.memprof{}.so'.format(config.target_suffix))
config.substitutions.append( ("%shared_libmemprof", shared_libmemprof_path) )
config.substitutions.append( ("%clang_memprof_static ", build_invocation(clang_memprof_static_cflags)) )
config.substitutions.append( ("%clangxx_memprof_static ", build_invocation(clang_memprof_static_cxxflags)) )
# Some tests use C++11 features such as lambdas and need to pass -std=c++11.
config.substitutions.append(("%stdcxx11 ", '-std=c++11 '))
config.substitutions.append( ("%libdl", libdl_flag) )
config.available_features.add('memprof-' + config.bits + '-bits')
config.available_features.add('fast-unwinder-works')
# Set LD_LIBRARY_PATH to pick dynamic runtime up properly.
new_ld_library_path = os.path.pathsep.join(
(config.compiler_rt_libdir, config.environment.get('LD_LIBRARY_PATH', '')))
config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
# Default test suffixes.
config.suffixes = ['.c', '.cpp']
config.substitutions.append(('%fPIC', '-fPIC'))
config.substitutions.append(('%fPIE', '-fPIE'))
config.substitutions.append(('%pie', '-pie'))
# Only run the tests on supported OSs.
if config.host_os not in ['Linux']:
config.unsupported = True
if not config.parallelism_group:
config.parallelism_group = 'shadow-memory'
| true
| true
|
79024ddeebc5ee8d5c763be743de9b8fc8039427
| 3,752
|
py
|
Python
|
PyMC2/database/base.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | 1
|
2019-03-01T02:47:20.000Z
|
2019-03-01T02:47:20.000Z
|
PyMC2/database/base.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | 1
|
2019-08-17T06:58:38.000Z
|
2019-08-17T06:58:38.000Z
|
PyMC2/database/base.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | null | null | null |
"""
Base backend
Trace and Database classes from the other modules should Subclass the base
classes.
"""
import PyMC2
class Trace(object):
"""Dummy Trace class.
"""
def __init__(self,value=None, obj=None):
"""Assign an initial value and an internal PyMC object."""
self._trace = value
if obj is not None:
if isinstance(obj, PyMC2.PyMCBase):
self._obj = obj
else:
                raise AttributeError, 'Not PyMC object: %r' % obj
def _initialize(self, length):
"""Dummy method. Subclass if necessary."""
pass
def tally(self, index):
"""Dummy method. Subclass if necessary."""
pass
def truncate(self, index):
"""Dummy method. Subclass if necessary."""
pass
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Dummy method. Subclass if necessary.
Input:
- burn (int): The number of transient steps to skip.
          - thin (int): Keep one sample out of every `thin`.
          - chain (int): The index of the chain to fetch. If None, return all chains.
          - slicing: A slice, overriding the burn and thin assignment.
"""
raise AttributeError, self._obj.__name__ + " has no trace"
__call__ = gettrace
## def obj():
## def fset(self, obj):
## if isinstance(obj, PyMC2.PyMCBase):
## self.__obj = obj
## else:
## raise AttributeError, 'Not PyMC object'
## def fget(self):
## return self.__obj
## return locals()
## obj = property(**obj())
def _finalize(self):
pass
class Database(object):
"""Dummy Database backend"""
def __init__(self):
"""Get the Trace from the local scope."""
self.Trace = Trace
def _initialize(self, length):
"""Tell the traces to initialize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._initialize(length)
def tally(self, index):
"""Dummy method. Subclass if necessary."""
for o in self.model._pymc_objects_to_tally:
o.trace.tally(index)
def connect(self, sampler):
"""Link the Database to the Sampler instance.
If database is loaded from a file, restore the objects trace
to their stored value, if a new database is created, instantiate
a Trace for the PyMC objects to tally.
"""
if isinstance(sampler, PyMC2.Sampler):
self.model = sampler
else:
raise AttributeError, 'Not a Sampler instance.'
if hasattr(self, '_state_'):
# Restore the state of the Sampler.
for o in sampler._pymc_objects_to_tally:
o.trace = getattr(self, o.__name__)
o.trace._obj = o
else:
# Set a fresh new state
for o in sampler._pymc_objects_to_tally:
o.trace = self.Trace(obj=o)
for o in sampler._pymc_objects_to_tally:
o.trace.db = self
def _finalize(self):
"""Tell the traces to finalize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._finalize()
def close(self):
"""Close the database."""
pass
def savestate(self, state):
"""Store a dictionnary containing the state of the Sampler and its
SamplingMethods."""
self._state_ = state
def getstate(self):
"""Return a dictionary containing the state of the Sampler and its
SamplingMethods."""
return self._state_
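# A minimal sketch of the subclassing pattern the module docstring asks for
# (illustrative only; `RamTrace` is a hypothetical name, written in the same
# Python 2 style as the module above): a concrete backend overrides the dummy
# hooks to actually record samples.
#
#   class RamTrace(Trace):
#       def _initialize(self, length):
#           self._trace = []
#       def tally(self, index):
#           self._trace.append(self._obj.value)
#       def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
#           if slicing is None:
#               slicing = slice(burn, None, thin)
#           return self._trace[slicing]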
| 31.008264
| 86
| 0.559701
|
"""
Base backend
Trace and Database classes from the other modules should Subclass the base
classes.
"""
import PyMC2
class Trace(object):
"""Dummy Trace class.
"""
def __init__(self,value=None, obj=None):
"""Assign an initial value and an internal PyMC object."""
self._trace = value
if obj is not None:
if isinstance(obj, PyMC2.PyMCBase):
self._obj = obj
else:
                raise AttributeError, 'Not PyMC object: %r' % obj
def _initialize(self, length):
"""Dummy method. Subclass if necessary."""
pass
def tally(self, index):
"""Dummy method. Subclass if necessary."""
pass
def truncate(self, index):
"""Dummy method. Subclass if necessary."""
pass
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Dummy method. Subclass if necessary.
Input:
- burn (int): The number of transient steps to skip.
          - thin (int): Keep one sample out of every `thin`.
          - chain (int): The index of the chain to fetch. If None, return all chains.
          - slicing: A slice, overriding the burn and thin assignment.
"""
raise AttributeError, self._obj.__name__ + " has no trace"
__call__ = gettrace
mselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._initialize(length)
def tally(self, index):
"""Dummy method. Subclass if necessary."""
for o in self.model._pymc_objects_to_tally:
o.trace.tally(index)
def connect(self, sampler):
"""Link the Database to the Sampler instance.
If database is loaded from a file, restore the objects trace
to their stored value, if a new database is created, instantiate
a Trace for the PyMC objects to tally.
"""
if isinstance(sampler, PyMC2.Sampler):
self.model = sampler
else:
raise AttributeError, 'Not a Sampler instance.'
if hasattr(self, '_state_'):
for o in sampler._pymc_objects_to_tally:
o.trace = getattr(self, o.__name__)
o.trace._obj = o
else:
for o in sampler._pymc_objects_to_tally:
o.trace = self.Trace(obj=o)
for o in sampler._pymc_objects_to_tally:
o.trace.db = self
def _finalize(self):
"""Tell the traces to finalize themselves."""
for o in self.model._pymc_objects_to_tally:
o.trace._finalize()
def close(self):
"""Close the database."""
pass
def savestate(self, state):
"""Store a dictionnary containing the state of the Sampler and its
SamplingMethods."""
self._state_ = state
def getstate(self):
"""Return a dictionary containing the state of the Sampler and its
SamplingMethods."""
return self._state_
| false
| true
|
79024e36049ca2d6b893826abf487e64c7243325
| 8,871
|
py
|
Python
|
tools/bin_to_rwd.py
|
bccw-ai/rwd-xray
|
a0f6cd2dbce644dde858db16c248ddcd34420fd2
|
[
"MIT"
] | null | null | null |
tools/bin_to_rwd.py
|
bccw-ai/rwd-xray
|
a0f6cd2dbce644dde858db16c248ddcd34420fd2
|
[
"MIT"
] | null | null | null |
tools/bin_to_rwd.py
|
bccw-ai/rwd-xray
|
a0f6cd2dbce644dde858db16c248ddcd34420fd2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Convert full firmware binary to rwd patch.
# Supported models:
# CR-V 5g (part num: 39990-TLA), tested
# Civic 2016 sedan (part num: 39990-TBA), tested
# Civic 2016 hatchback Australia (part num: 39990-TEA), tested
# Civic 2016 hatchback (part num: 39990-TGG), tested
#
import os
import sys
import argparse
import subprocess
import struct
# Decryption lookup table built from Civic 2016 sedan bin/rwd, also apply to CR-V 5g.
default_decrypt_lookup_table = {144: 72, 218: 55, 255: 255, 164: 1, 195: 26, 99: 2, 28: 178, 205: 158, 125: 138, 45: 118, 222: 98, 142: 78, 62: 58, 243: 38, 163: 18, 83: 254, 3: 234, 172: 214, 92: 194, 12: 174, 189: 154, 109: 134, 29: 114, 206: 94, 126: 74, 46: 54, 227: 34, 147: 14, 113: 0, 67: 250, 236: 230, 156: 210, 76: 190, 252: 170, 173: 150, 93: 130, 13: 110, 148: 253, 120: 159, 199: 148, 198: 137, 77: 126, 23: 104, 73: 83, 203: 73, 78: 62, 123: 53, 254: 42, 43: 33, 90: 23, 161: 12, 10: 3, 132: 249, 191: 239, 226: 220, 197: 201, 248: 191, 117: 181, 34: 172, 37: 161, 88: 151, 141: 142, 8: 131, 134: 121, 185: 111, 54: 101, 190: 90, 57: 79, 128: 68, 139: 57, 14: 46, 138: 35, 131: 10, 100: 241, 1: 228, 146: 200, 133: 185, 168: 171, 104: 155, 40: 139, 251: 85, 94: 66, 91: 45, 103: 124, 55: 112, 231: 156, 80: 56, 224: 92, 102: 113, 96: 60, 98: 188, 97: 252, 140: 206, 122: 31, 232: 187, 16: 40, 202: 51, 26: 7, 239: 251, 5: 153, 219: 77, 119: 128, 21: 157, 238: 102, 180: 5, 217: 119, 30: 50, 7: 100, 32: 44, 183: 144, 50: 176, 110: 70, 157: 146, 2: 164, 44: 182, 145: 8, 58: 15, 27: 29, 64: 52, 9: 67, 31: 199, 179: 22, 42: 11, 193: 20, 211: 30, 129: 4, 241: 32, 74: 19, 178: 208, 247: 160, 112: 64, 242: 224, 114: 192, 165: 193, 0: 36, 59: 37, 196: 9, 154: 39, 75: 41, 72: 147, 249: 127, 162: 204, 130: 196, 229: 209, 182: 133, 48: 48, 86: 109, 240: 96, 137: 99, 151: 136, 209: 24, 108: 198, 181: 197, 212: 13, 244: 21, 11: 25, 118: 117, 228: 17, 214: 141, 52: 229, 160: 76, 115: 6, 106: 27, 56: 143, 25: 71, 36: 225, 194: 212, 208: 88, 187: 69, 171: 65, 153: 103, 38: 97, 207: 243, 82: 184, 184: 175, 188: 218, 213: 205, 121: 95, 15: 195, 81: 248, 24: 135, 70: 105, 150: 125, 174: 86, 158: 82, 220: 226, 201: 115, 71: 116, 51: 246, 177: 16, 176: 80, 22: 93, 39: 108, 159: 231, 223: 247, 186: 47, 169: 107, 245: 213, 235: 81, 192: 84, 124: 202, 175: 235, 84: 237, 79: 211, 234: 59, 143: 227, 237: 166, 33: 236, 253: 106, 65: 244, 111: 219, 200: 179, 101: 177, 17: 232, 20: 221, 166: 129, 60: 186, 61: 122, 167: 140, 204: 222, 87: 120, 41: 75, 135: 132, 136: 163, 49: 240, 250: 63, 107: 49, 170: 43, 18: 168, 221: 162, 35: 242, 225: 28, 149: 189, 85: 173, 152: 167, 95: 215, 53: 165, 89: 87, 66: 180, 6: 89, 47: 203, 210: 216, 215: 152, 233: 123, 116: 245, 127: 223, 19: 238, 69: 169, 105: 91, 4: 217, 216: 183, 68: 233, 63: 207, 155: 61, 246: 149, 230: 145}
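# Sanity check (illustrative, not from the original tool): for the encrypt
# table built in main() to be an exact inverse, this table must be a
# bijection on 0..255, i.e. both of the following must hold:
#   sorted(default_decrypt_lookup_table) == list(range(256))
#   sorted(default_decrypt_lookup_table.values()) == list(range(256))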
# sum of x, x is unsigned shorts
def checksum_by_sum(fw, start, end):
s = 0
    for i in range(start, end, 2):
s += struct.unpack('!H', fw[i:i + 2])[0]
return s
# sum of -x, x is unsigned shorts
def checksum_by_negative_sum(fw, start, end):
s = 0
    for i in range(start, end, 2):
s += -struct.unpack('!H', fw[i:i + 2])[0]
return s
checksum_funcs = [checksum_by_sum, checksum_by_negative_sum]
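# Worked example (illustrative): over the two big-endian unsigned shorts
# 0x1234 and 0x0001,
#   checksum_by_sum(b'\x12\x34\x00\x01', 0, 4)          ==  0x1235
#   checksum_by_negative_sum(b'\x12\x34\x00\x01', 0, 4) == -0x1235
# main() masks the result with 0xFFFF, so the negative variant yields the
# two's-complement checksum 0xEDCB.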
car_models = {
'39990-TLA-A030': { #CR-V thanks to joe1
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TLA-A030', '39990-TLA-A040', '39990-TLA,A030', '39990-TLA,A040'],
'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x6c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x6bf80), (1, 0x6bffe)] #original bin checksums are 0x419b at offset 0x6FF80 and 0x24ef at 0x6FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
},
'39990-TBA-A030': { #civic sedan thanks to mystery leaker
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TBA-A000', '39990-TBA-A010', '39990-TBA-A020', '39990-TBA-A030'],
'security-key': ['0x011100121020', '0x011100121020', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)] #original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
},
'39990-TEA-T330': { #civic hatch au thanks to ming
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TEA-T330'],
'security-key': ['0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TEA-H010': { # bccw test
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TEA-H010', '39990-TEA-H020', '39990-TEA,H020'],
'security-key': ['0x0111011211', '0x0111011211', '0x0111011211'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TGG-A120': { #civic hatch thanks to R3DLOBST3R
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TGG-A120'],
'security-key': ['0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TRW-A020': { #clarity thanks to wirelessnet2
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TRW-A010', '39990-TRW-A020', '39990-TRW,A010', '39990-TRW,A020'],
'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
#(checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TBX-3050': { #civic sedan thanks to mystery leaker
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TBX-H110', '39990-TBX-H120', '39990-TBX-3050'],
'security-key': ['0x0211021212', '0x0211021212', '0x0211021212'],
'encryption-key': '0xbf109e',
'start-address': 0x13000,
'data-size': 0xed000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)] #original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
},
}
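# Offset arithmetic (illustrative): the rwd patch begins at 'start-address',
# so a checksum stored at 0x6FF80 in the full image lands at
# 0x6FF80 - 0x4000 = 0x6BF80 inside the patch, which is where the
# 'checksum-offsets' entries above point.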
def main():
# example: python3 bin_to_rwd.py --input_bin crv_5g_user_patched.bin --model 39990-TLA-A030
parser = argparse.ArgumentParser()
parser.add_argument("--input_bin", required=True, help="Full firmware binary file")
parser.add_argument("--model", default='39990-TLA-A030', help="EPS part number")
args = parser.parse_args()
    if args.model not in car_models:
print('Car model %s not found' % args.model)
sys.exit(-1)
print('Creating rwd for model %s' % args.model)
m = car_models[args.model]
if not os.path.exists(args.input_bin):
print('%s not found' % args.input_bin)
sys.exit(-1)
encrypt_lookup_table = {}
for k, v in default_decrypt_lookup_table.items():
encrypt_lookup_table[v] = k
with open(args.input_bin, 'rb') as f:
full_fw = f.read()
patch_fw = full_fw[m['start-address']:(m['start-address'] + m['data-size'])]
for func_idx, off in m['checksum-offsets']:
old_checksum = struct.unpack('!H', patch_fw[off:off+2])[0] & 0xFFFF
new_checksum = checksum_funcs[func_idx](patch_fw, 0, off) & 0xFFFF
print('Update checksum at offset %s from %s to %s' % (hex(off), hex(old_checksum), hex(new_checksum)))
patch_fw = patch_fw[:off] + struct.pack('!H', new_checksum & 0xFFFF) + patch_fw[off+2:]
encrypted = bytearray()
for b in patch_fw:
encrypted.append(encrypt_lookup_table[b])
out_enc_path = args.input_bin + '.enc'
with open(out_enc_path, 'wb') as out_f:
out_f.write(encrypted)
print('Encryption done, saved to %s.' % out_enc_path)
cur_dir = os.path.dirname(os.path.abspath(__file__))
cmds = [
'python2',
'rwd-builder.py',
'--can-address', m['can-address'],
'--supported-versions', *m['supported-versions'],
'--security-key', *m['security-key'],
'--encryption-key', m['encryption-key'],
'--encrypted-file', out_enc_path,
'--start-address', hex(m['start-address']),
'--data-size', hex(m['data-size'])
]
subprocess.check_call(cmds, cwd=cur_dir)
print('RWD file %s created.' % (out_enc_path[:-4] + '.rwd'))
if __name__== "__main__":
main()
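Because the .enc file is produced by a byte-for-byte substitution, a round-trip check is cheap. A minimal sketch (the decrypt helper below is not part of the tool; it assumes the default table defined above):

def decrypt(buf, table=default_decrypt_lookup_table):
    # Apply the inverse substitution; decrypt(encrypted) must reproduce
    # the checksummed patch image byte for byte.
    return bytes(table[b] for b in buf)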
| 51.277457
| 2,371
| 0.631496
|
import os
import sys
import argparse
import subprocess
import struct
default_decrypt_lookup_table = {144: 72, 218: 55, 255: 255, 164: 1, 195: 26, 99: 2, 28: 178, 205: 158, 125: 138, 45: 118, 222: 98, 142: 78, 62: 58, 243: 38, 163: 18, 83: 254, 3: 234, 172: 214, 92: 194, 12: 174, 189: 154, 109: 134, 29: 114, 206: 94, 126: 74, 46: 54, 227: 34, 147: 14, 113: 0, 67: 250, 236: 230, 156: 210, 76: 190, 252: 170, 173: 150, 93: 130, 13: 110, 148: 253, 120: 159, 199: 148, 198: 137, 77: 126, 23: 104, 73: 83, 203: 73, 78: 62, 123: 53, 254: 42, 43: 33, 90: 23, 161: 12, 10: 3, 132: 249, 191: 239, 226: 220, 197: 201, 248: 191, 117: 181, 34: 172, 37: 161, 88: 151, 141: 142, 8: 131, 134: 121, 185: 111, 54: 101, 190: 90, 57: 79, 128: 68, 139: 57, 14: 46, 138: 35, 131: 10, 100: 241, 1: 228, 146: 200, 133: 185, 168: 171, 104: 155, 40: 139, 251: 85, 94: 66, 91: 45, 103: 124, 55: 112, 231: 156, 80: 56, 224: 92, 102: 113, 96: 60, 98: 188, 97: 252, 140: 206, 122: 31, 232: 187, 16: 40, 202: 51, 26: 7, 239: 251, 5: 153, 219: 77, 119: 128, 21: 157, 238: 102, 180: 5, 217: 119, 30: 50, 7: 100, 32: 44, 183: 144, 50: 176, 110: 70, 157: 146, 2: 164, 44: 182, 145: 8, 58: 15, 27: 29, 64: 52, 9: 67, 31: 199, 179: 22, 42: 11, 193: 20, 211: 30, 129: 4, 241: 32, 74: 19, 178: 208, 247: 160, 112: 64, 242: 224, 114: 192, 165: 193, 0: 36, 59: 37, 196: 9, 154: 39, 75: 41, 72: 147, 249: 127, 162: 204, 130: 196, 229: 209, 182: 133, 48: 48, 86: 109, 240: 96, 137: 99, 151: 136, 209: 24, 108: 198, 181: 197, 212: 13, 244: 21, 11: 25, 118: 117, 228: 17, 214: 141, 52: 229, 160: 76, 115: 6, 106: 27, 56: 143, 25: 71, 36: 225, 194: 212, 208: 88, 187: 69, 171: 65, 153: 103, 38: 97, 207: 243, 82: 184, 184: 175, 188: 218, 213: 205, 121: 95, 15: 195, 81: 248, 24: 135, 70: 105, 150: 125, 174: 86, 158: 82, 220: 226, 201: 115, 71: 116, 51: 246, 177: 16, 176: 80, 22: 93, 39: 108, 159: 231, 223: 247, 186: 47, 169: 107, 245: 213, 235: 81, 192: 84, 124: 202, 175: 235, 84: 237, 79: 211, 234: 59, 143: 227, 237: 166, 33: 236, 253: 106, 65: 244, 111: 219, 200: 179, 101: 177, 17: 232, 20: 221, 166: 129, 60: 186, 61: 122, 167: 140, 204: 222, 87: 120, 41: 75, 135: 132, 136: 163, 49: 240, 250: 63, 107: 49, 170: 43, 18: 168, 221: 162, 35: 242, 225: 28, 149: 189, 85: 173, 152: 167, 95: 215, 53: 165, 89: 87, 66: 180, 6: 89, 47: 203, 210: 216, 215: 152, 233: 123, 116: 245, 127: 223, 19: 238, 69: 169, 105: 91, 4: 217, 216: 183, 68: 233, 63: 207, 155: 61, 246: 149, 230: 145}
def checksum_by_sum(fw, start, end):
s = 0
    for i in range(start, end, 2):
s += struct.unpack('!H', fw[i:i + 2])[0]
return s
def checksum_by_negative_sum(fw, start, end):
s = 0
    for i in range(start, end, 2):
s += -struct.unpack('!H', fw[i:i + 2])[0]
return s
checksum_funcs = [checksum_by_sum, checksum_by_negative_sum]
car_models = {
'39990-TLA-A030': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TLA-A030', '39990-TLA-A040', '39990-TLA,A030', '39990-TLA,A040'],
'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x6c000,
'checksum-offsets': [(0, 0x6bf80), (1, 0x6bffe)]
},
'39990-TBA-A030': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TBA-A000', '39990-TBA-A010', '39990-TBA-A020', '39990-TBA-A030'],
'security-key': ['0x011100121020', '0x011100121020', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TEA-T330': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TEA-T330'],
'security-key': ['0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TEA-H010': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TEA-H010', '39990-TEA-H020', '39990-TEA,H020'],
'security-key': ['0x0111011211', '0x0111011211', '0x0111011211'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TGG-A120': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TGG-A120'],
'security-key': ['0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TRW-A020': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TRW-A010', '39990-TRW-A020', '39990-TRW,A010', '39990-TRW,A020'],
'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TBX-3050': {
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TBX-H110', '39990-TBX-H120', '39990-TBX-3050'],
'security-key': ['0x0211021212', '0x0211021212', '0x0211021212'],
'encryption-key': '0xbf109e',
'start-address': 0x13000,
'data-size': 0xed000,
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input_bin", required=True, help="Full firmware binary file")
parser.add_argument("--model", default='39990-TLA-A030', help="EPS part number")
args = parser.parse_args()
    if args.model not in car_models:
print('Car model %s not found' % args.model)
sys.exit(-1)
print('Creating rwd for model %s' % args.model)
m = car_models[args.model]
if not os.path.exists(args.input_bin):
print('%s not found' % args.input_bin)
sys.exit(-1)
encrypt_lookup_table = {}
for k, v in default_decrypt_lookup_table.items():
encrypt_lookup_table[v] = k
with open(args.input_bin, 'rb') as f:
full_fw = f.read()
patch_fw = full_fw[m['start-address']:(m['start-address'] + m['data-size'])]
for func_idx, off in m['checksum-offsets']:
old_checksum = struct.unpack('!H', patch_fw[off:off+2])[0] & 0xFFFF
new_checksum = checksum_funcs[func_idx](patch_fw, 0, off) & 0xFFFF
print('Update checksum at offset %s from %s to %s' % (hex(off), hex(old_checksum), hex(new_checksum)))
patch_fw = patch_fw[:off] + struct.pack('!H', new_checksum & 0xFFFF) + patch_fw[off+2:]
encrypted = bytearray()
for b in patch_fw:
encrypted.append(encrypt_lookup_table[b])
out_enc_path = args.input_bin + '.enc'
with open(out_enc_path, 'wb') as out_f:
out_f.write(encrypted)
print('Encryption done, saved to %s.' % out_enc_path)
cur_dir = os.path.dirname(os.path.abspath(__file__))
cmds = [
'python2',
'rwd-builder.py',
'--can-address', m['can-address'],
'--supported-versions', *m['supported-versions'],
'--security-key', *m['security-key'],
'--encryption-key', m['encryption-key'],
'--encrypted-file', out_enc_path,
'--start-address', hex(m['start-address']),
'--data-size', hex(m['data-size'])
]
subprocess.check_call(cmds, cwd=cur_dir)
print('RWD file %s created.' % (out_enc_path[:-4] + '.rwd'))
if __name__== "__main__":
main()
| true
| true
|
79024ef9d1ad387a6eaf7e488d9743a6ca5c726e
| 1,844
|
py
|
Python
|
scrapers/get_collections.py
|
fedorov/actcianable
|
c5e215df5dd3958766e41f52aa5e0747e58420f5
|
[
"MIT"
] | null | null | null |
scrapers/get_collections.py
|
fedorov/actcianable
|
c5e215df5dd3958766e41f52aa5e0747e58420f5
|
[
"MIT"
] | 11
|
2020-12-30T16:22:51.000Z
|
2022-03-31T15:01:17.000Z
|
scrapers/get_collections.py
|
fedorov/actcianable
|
c5e215df5dd3958766e41f52aa5e0747e58420f5
|
[
"MIT"
] | null | null | null |
#from collections import Counter
import requests
from bs4 import BeautifulSoup
from tabulate import tabulate
import backoff
import json
@backoff.on_exception(backoff.expo,
requests.exceptions.RequestException,
max_time=60)
def get_url(url):#, headers):
return requests.get(url) #, headers=headers)
URL = 'http://www.cancerimagingarchive.net/collections/'
page = get_url(URL)
soup = BeautifulSoup(page.content, "html.parser")
table = soup.find(id="tablepress-9")
#print(table.prettify())
rows = table.find_all("tr")
analysis_details = []
with open("output/image_analyses_details.json") as analysis_details_file:
analysis_details = json.load(analysis_details_file)
print("analysis details:")
print(analysis_details)
table = []
header = "Collection,DOI,CancerType,Location,Species,Subjects,ImageTypes,SupportingData,Access,Status,Updated".split(",")
for row in rows:
trow = {}
cols = row.find_all("td")
for cid, col in enumerate(cols):
if cid == 0:
trow[header[0]] = col.find("a").text
trow[header[1]] = col.find("a")["href"]
if not trow[header[1]].startswith("http"):
trow[header[1]] = "http:"+col.find("a")["href"]
else:
trow[header[cid+1]] = col.text
if len(trow):
table = table + [trow]
if trow["SupportingData"].find("Image Analyses")>=0:
if trow["Collection"] not in [ i["Collection"] for i in analysis_details]:
analysis_details.append({"Collection": trow["Collection"], "DOI":trow["DOI"], "Format":"", "CollectionType": "original", "DICOMstatus": "", "DICOMtarget": "", "Comment": ""})
print(len(rows))
with open("output/collections.json", "w") as f:
f.write(json.dumps(table, indent=2))
with open("output/image_analyses_details.json", "w") as f:
f.write(json.dumps(analysis_details, indent=2))
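A minimal, self-contained illustration of the row-parsing logic above, using synthetic HTML rather than real TCIA markup (note the protocol-relative href receiving an 'http:' prefix, exactly as in the loop):

from bs4 import BeautifulSoup

html = '<tr><td><a href="//doi.org/x">Demo</a></td><td>Lung</td></tr>'
cells = BeautifulSoup(html, "html.parser").find("tr").find_all("td")
link = cells[0].find("a")
href = link["href"] if link["href"].startswith("http") else "http:" + link["href"]
print(link.text, href)  # Demo http://doi.org/x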
| 27.117647
| 182
| 0.676247
|
import requests
from bs4 import BeautifulSoup
from tabulate import tabulate
import backoff
import json
@backoff.on_exception(backoff.expo,
requests.exceptions.RequestException,
max_time=60)
def get_url(url):
return requests.get(url)
URL = 'http://www.cancerimagingarchive.net/collections/'
page = get_url(URL)
soup = BeautifulSoup(page.content, "html.parser")
table = soup.find(id="tablepress-9")
rows = table.find_all("tr")
analysis_details = []
with open("output/image_analyses_details.json") as analysis_details_file:
analysis_details = json.load(analysis_details_file)
print("analysis details:")
print(analysis_details)
table = []
header = "Collection,DOI,CancerType,Location,Species,Subjects,ImageTypes,SupportingData,Access,Status,Updated".split(",")
for row in rows:
trow = {}
cols = row.find_all("td")
for cid, col in enumerate(cols):
if cid == 0:
trow[header[0]] = col.find("a").text
trow[header[1]] = col.find("a")["href"]
if not trow[header[1]].startswith("http"):
trow[header[1]] = "http:"+col.find("a")["href"]
else:
trow[header[cid+1]] = col.text
if len(trow):
table = table + [trow]
if trow["SupportingData"].find("Image Analyses")>=0:
if trow["Collection"] not in [ i["Collection"] for i in analysis_details]:
analysis_details.append({"Collection": trow["Collection"], "DOI":trow["DOI"], "Format":"", "CollectionType": "original", "DICOMstatus": "", "DICOMtarget": "", "Comment": ""})
print(len(rows))
with open("output/collections.json", "w") as f:
f.write(json.dumps(table, indent=2))
with open("output/image_analyses_details.json", "w") as f:
f.write(json.dumps(analysis_details, indent=2))
| true
| true
|
7902501568578fbd913dc07154688961a902b6c8
| 973
|
py
|
Python
|
cgate/cgate.py
|
buddseye/cerberus-gate
|
0bb47ea0e18e9015a5e307598737a6f7d994ac0b
|
[
"MIT"
] | null | null | null |
cgate/cgate.py
|
buddseye/cerberus-gate
|
0bb47ea0e18e9015a5e307598737a6f7d994ac0b
|
[
"MIT"
] | null | null | null |
cgate/cgate.py
|
buddseye/cerberus-gate
|
0bb47ea0e18e9015a5e307598737a6f7d994ac0b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import argparse
from cgate.reader import readfile, readschema, get_dtype
from cgate.validation import validate
def main():
parser = argparse.ArgumentParser()
parser.add_argument('target', help='Table name or File path')
parser.add_argument('--schema', '-s', help='Cerberus schema file')
parser.add_argument('--null', '-n', help='Null character', default='NULL,\\N')
args = parser.parse_args()
schema = readschema(args.schema)
    try:
        header = schema['header']
    except KeyError:
        header = None
na_values = args.null.split(',')
dtype, date_cols = get_dtype(schema['schema'])
dfs = readfile(args.target, header=header, dtype=dtype, parse_dates=date_cols, na_values=na_values)
fail_count = validate(dfs, schema['schema'])
if fail_count != 0:
print('Failed {0} error...'.format(fail_count), file=sys.stderr)
return 1
print('Success!', file=sys.stderr)
return 0
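A hypothetical invocation, assuming a console-script entry point named cgate is wired to main() (the schema file format itself is defined by cgate.reader.readschema and is not shown here):

    cgate data.csv --schema schema.yml --null 'NULL,\N'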
| 32.433333
| 103
| 0.664954
|
import sys
import argparse
from cgate.reader import readfile, readschema, get_dtype
from cgate.validation import validate
def main():
parser = argparse.ArgumentParser()
parser.add_argument('target', help='Table name or File path')
parser.add_argument('--schema', '-s', help='Cerberus schema file')
parser.add_argument('--null', '-n', help='Null character', default='NULL,\\N')
args = parser.parse_args()
schema = readschema(args.schema)
    try:
        header = schema['header']
    except KeyError:
        header = None
na_values = args.null.split(',')
dtype, date_cols = get_dtype(schema['schema'])
dfs = readfile(args.target, header=header, dtype=dtype, parse_dates=date_cols, na_values=na_values)
fail_count = validate(dfs, schema['schema'])
if fail_count != 0:
print('Failed {0} error...'.format(fail_count), file=sys.stderr)
return 1
print('Success!', file=sys.stderr)
return 0
| true
| true
|
7902506ea25b083fd1ae68eb7255136c1a1102d4
| 7,734
|
py
|
Python
|
api/cloud_provider/models.py
|
liqiang-fit2cloud/KubeOperator
|
cb9346b95d29919570cefa6bea1ce4e5c3f0ee6d
|
[
"Apache-2.0"
] | 3
|
2019-11-29T03:49:08.000Z
|
2020-07-29T02:52:51.000Z
|
api/cloud_provider/models.py
|
liqiang-fit2cloud/KubeOperator
|
cb9346b95d29919570cefa6bea1ce4e5c3f0ee6d
|
[
"Apache-2.0"
] | null | null | null |
api/cloud_provider/models.py
|
liqiang-fit2cloud/KubeOperator
|
cb9346b95d29919570cefa6bea1ce4e5c3f0ee6d
|
[
"Apache-2.0"
] | 1
|
2020-03-04T00:29:29.000Z
|
2020-03-04T00:29:29.000Z
|
import os
import threading
import uuid
from ipaddress import ip_address, ip_interface, ip_network
import yaml
from django.db import models
from ansible_api.models.mixins import AbstractExecutionModel
from cloud_provider import get_cloud_client
from common import models as common_models
from fit2ansible import settings
from django.utils.translation import ugettext_lazy as _
from kubeops_api.models.host import Host
class CloudProviderTemplate(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = common_models.JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template_dir = os.path.join(settings.BASE_DIR, 'resource', 'clouds')
@property
def path(self):
return os.path.join(self.template_dir, self.name)
@classmethod
def lookup(cls):
for d in os.listdir(cls.template_dir):
full_path = os.path.join(cls.template_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
                metadata = yaml.safe_load(f)
defaults = {'name': d, 'meta': metadata}
cls.objects.update_or_create(defaults=defaults, name=d)
class Region(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template = models.ForeignKey('CloudProviderTemplate', on_delete=models.SET_NULL, null=True)
cloud_region = models.CharField(max_length=128, null=True, default=None)
vars = common_models.JsonDictTextField(default={})
comment = models.CharField(max_length=128, blank=True, null=True, verbose_name=_("Comment"))
@property
def zone_size(self):
zones = Zone.objects.filter(region=self)
return len(zones)
@property
def cluster_size(self):
clusters = []
plans = Plan.objects.filter(region=self)
for plan in plans:
from kubeops_api.models.cluster import Cluster
cs = Cluster.objects.filter(plan=plan)
for c in cs:
clusters.append(c)
return len(clusters)
@property
def image_ovf_path(self):
return self.vars['image_ovf_path']
@property
def image_vmdk_path(self):
return self.vars['image_vmdk_path']
@property
def image_name(self):
return self.vars['image_name']
def set_vars(self):
meta = self.template.meta.get('region', None)
if meta:
_vars = meta.get('vars', {})
self.vars.update(_vars)
self.save()
def on_region_create(self):
self.set_vars()
def to_dict(self):
dic = {
"region": self.cloud_region
}
dic.update(self.vars)
return dic
class Zone(models.Model):
ZONE_STATUS_READY = "READY"
ZONE_STATUS_INITIALIZING = "INITIALIZING"
ZONE_STATUS_ERROR = "ERROR"
ZONE_STATUS_CHOICES = (
(ZONE_STATUS_READY, 'READY'),
(ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
(ZONE_STATUS_ERROR, 'ERROR'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
vars = common_models.JsonDictTextField(default={})
region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
cloud_zone = models.CharField(max_length=128, null=True, default=None)
ip_used = common_models.JsonListTextField(null=True, default=[])
status = models.CharField(max_length=64, choices=ZONE_STATUS_CHOICES, null=True)
@property
def host_size(self):
hosts = Host.objects.filter(zone=self)
return len(hosts)
def change_status(self, status):
self.status = status
self.save()
def create_image(self):
try:
self.change_status(Zone.ZONE_STATUS_INITIALIZING)
client = get_cloud_client(self.region.vars)
client.create_image(zone=self)
self.change_status(Zone.ZONE_STATUS_READY)
except Exception as e:
self.change_status(Zone.ZONE_STATUS_ERROR)
def on_zone_create(self):
thread = threading.Thread(target=self.create_image)
thread.start()
def allocate_ip(self):
ip = self.ip_pools().pop()
self.ip_used.append(ip)
self.save()
return ip
def recover_ip(self, ip):
self.ip_used.remove(ip)
self.save()
def to_dict(self):
dic = {
"key": "z" + str(self.id).split("-")[3],
"name": self.cloud_zone,
"zone_name": self.name,
"ip_pool": self.ip_pools()
}
dic.update(self.vars)
return dic
def ip_pools(self):
ip_pool = []
ip_start = ip_address(self.vars['ip_start'])
ip_end = ip_address(self.vars['ip_end'])
if self.region.template.name == 'openstack':
while ip_start <= ip_end:
ip_pool.append(str(ip_start))
ip_start += 1
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
net_mask = self.vars['net_mask']
interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
network = interface.network
for host in network.hosts():
if ip_start <= host <= ip_end:
ip_pool.append(str(host))
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
def ip_available_size(self):
return len(self.ip_pools())
@property
def provider(self):
return self.region.template.name
class Plan(models.Model):
DEPLOY_TEMPLATE_SINGLE = "SINGLE"
DEPLOY_TEMPLATE_MULTIPLE = "MULTIPLE"
DEPLOY_TEMPLATE_CHOICES = (
(DEPLOY_TEMPLATE_SINGLE, 'single'),
(DEPLOY_TEMPLATE_MULTIPLE, 'multiple'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
zone = models.ForeignKey('Zone', null=True, on_delete=models.CASCADE)
region = models.ForeignKey('Region', null=True, on_delete=models.CASCADE)
zones = models.ManyToManyField('Zone', related_name='zones')
deploy_template = models.CharField(choices=DEPLOY_TEMPLATE_CHOICES, default=DEPLOY_TEMPLATE_SINGLE, max_length=128)
vars = common_models.JsonDictTextField(default={})
@property
def mixed_vars(self):
_vars = self.vars.copy()
_vars.update(self.region.to_dict())
zones = self.get_zones()
zone_dicts = []
for zone in zones:
zone_dicts.append(zone.to_dict())
_vars['zones'] = zone_dicts
return _vars
def get_zones(self):
zones = []
if self.zone:
zones.append(self.zone)
if self.zones:
zones.extend(self.zones.all())
return zones
@property
def compute_models(self):
return {
"master": self.vars.get('master_model', None),
"worker": self.vars.get('worker_model', None)
}
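The ip_pools method above derives the usable address range with the ipaddress module; a standalone sketch of the same computation (made-up addresses, not project data):

from ipaddress import ip_address, ip_interface

start, end = ip_address('10.0.0.10'), ip_address('10.0.0.12')
# Build the enclosing network from the start address plus net mask,
# then keep only hosts inside the configured [start, end] window.
network = ip_interface('10.0.0.10/255.255.255.0').network
pool = [str(h) for h in network.hosts() if start <= h <= end]
print(pool)  # ['10.0.0.10', '10.0.0.11', '10.0.0.12']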
| 33.626087
| 119
| 0.641195
|
import os
import threading
import uuid
from ipaddress import ip_address, ip_interface, ip_network
import yaml
from django.db import models
from ansible_api.models.mixins import AbstractExecutionModel
from cloud_provider import get_cloud_client
from common import models as common_models
from fit2ansible import settings
from django.utils.translation import ugettext_lazy as _
from kubeops_api.models.host import Host
class CloudProviderTemplate(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = common_models.JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template_dir = os.path.join(settings.BASE_DIR, 'resource', 'clouds')
@property
def path(self):
return os.path.join(self.template_dir, self.name)
@classmethod
def lookup(cls):
for d in os.listdir(cls.template_dir):
full_path = os.path.join(cls.template_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
                metadata = yaml.safe_load(f)
defaults = {'name': d, 'meta': metadata}
cls.objects.update_or_create(defaults=defaults, name=d)
class Region(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template = models.ForeignKey('CloudProviderTemplate', on_delete=models.SET_NULL, null=True)
cloud_region = models.CharField(max_length=128, null=True, default=None)
vars = common_models.JsonDictTextField(default={})
comment = models.CharField(max_length=128, blank=True, null=True, verbose_name=_("Comment"))
@property
def zone_size(self):
zones = Zone.objects.filter(region=self)
return len(zones)
@property
def cluster_size(self):
clusters = []
plans = Plan.objects.filter(region=self)
for plan in plans:
from kubeops_api.models.cluster import Cluster
cs = Cluster.objects.filter(plan=plan)
for c in cs:
clusters.append(c)
return len(clusters)
@property
def image_ovf_path(self):
return self.vars['image_ovf_path']
@property
def image_vmdk_path(self):
return self.vars['image_vmdk_path']
@property
def image_name(self):
return self.vars['image_name']
def set_vars(self):
meta = self.template.meta.get('region', None)
if meta:
_vars = meta.get('vars', {})
self.vars.update(_vars)
self.save()
def on_region_create(self):
self.set_vars()
def to_dict(self):
dic = {
"region": self.cloud_region
}
dic.update(self.vars)
return dic
class Zone(models.Model):
ZONE_STATUS_READY = "READY"
ZONE_STATUS_INITIALIZING = "INITIALIZING"
ZONE_STATUS_ERROR = "ERROR"
ZONE_STATUS_CHOICES = (
(ZONE_STATUS_READY, 'READY'),
(ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
(ZONE_STATUS_ERROR, 'ERROR'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
vars = common_models.JsonDictTextField(default={})
region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
cloud_zone = models.CharField(max_length=128, null=True, default=None)
ip_used = common_models.JsonListTextField(null=True, default=[])
status = models.CharField(max_length=64, choices=ZONE_STATUS_CHOICES, null=True)
@property
def host_size(self):
hosts = Host.objects.filter(zone=self)
return len(hosts)
def change_status(self, status):
self.status = status
self.save()
def create_image(self):
try:
self.change_status(Zone.ZONE_STATUS_INITIALIZING)
client = get_cloud_client(self.region.vars)
client.create_image(zone=self)
self.change_status(Zone.ZONE_STATUS_READY)
except Exception as e:
self.change_status(Zone.ZONE_STATUS_ERROR)
def on_zone_create(self):
thread = threading.Thread(target=self.create_image)
thread.start()
def allocate_ip(self):
ip = self.ip_pools().pop()
self.ip_used.append(ip)
self.save()
return ip
def recover_ip(self, ip):
self.ip_used.remove(ip)
self.save()
def to_dict(self):
dic = {
"key": "z" + str(self.id).split("-")[3],
"name": self.cloud_zone,
"zone_name": self.name,
"ip_pool": self.ip_pools()
}
dic.update(self.vars)
return dic
def ip_pools(self):
ip_pool = []
ip_start = ip_address(self.vars['ip_start'])
ip_end = ip_address(self.vars['ip_end'])
if self.region.template.name == 'openstack':
while ip_start <= ip_end:
ip_pool.append(str(ip_start))
ip_start += 1
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
net_mask = self.vars['net_mask']
interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
network = interface.network
for host in network.hosts():
if ip_start <= host <= ip_end:
ip_pool.append(str(host))
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
def ip_available_size(self):
return len(self.ip_pools())
@property
def provider(self):
return self.region.template.name
class Plan(models.Model):
DEPLOY_TEMPLATE_SINGLE = "SINGLE"
DEPLOY_TEMPLATE_MULTIPLE = "MULTIPLE"
DEPLOY_TEMPLATE_CHOICES = (
(DEPLOY_TEMPLATE_SINGLE, 'single'),
(DEPLOY_TEMPLATE_MULTIPLE, 'multiple'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
zone = models.ForeignKey('Zone', null=True, on_delete=models.CASCADE)
region = models.ForeignKey('Region', null=True, on_delete=models.CASCADE)
zones = models.ManyToManyField('Zone', related_name='zones')
deploy_template = models.CharField(choices=DEPLOY_TEMPLATE_CHOICES, default=DEPLOY_TEMPLATE_SINGLE, max_length=128)
vars = common_models.JsonDictTextField(default={})
@property
def mixed_vars(self):
_vars = self.vars.copy()
_vars.update(self.region.to_dict())
zones = self.get_zones()
zone_dicts = []
for zone in zones:
zone_dicts.append(zone.to_dict())
_vars['zones'] = zone_dicts
return _vars
def get_zones(self):
zones = []
if self.zone:
zones.append(self.zone)
if self.zones:
zones.extend(self.zones.all())
return zones
@property
def compute_models(self):
return {
"master": self.vars.get('master_model', None),
"worker": self.vars.get('worker_model', None)
}
| true
| true
|
7902516c796ecfecb984b33926cda61b355d91f4
| 15,847
|
py
|
Python
|
tests/users/test_views.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
tests/users/test_views.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
tests/users/test_views.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
from importlib import import_module, reload
import pytest
import sys
from unittest.mock import patch
from rest_framework import status
from django.contrib.auth.models import Permission, Group
from django.conf import settings
from django.urls import clear_url_caches
from django.urls import reverse
from .factories import UserFactory
from groups.models import GroupInfo
from users.models import UserProfile
BLANK_CHOICE = ('', '---------')
USER_DETAILS = {
'username': 'test',
'email': 'test@test.com',
'first_name': 'Foo',
'last_name': 'Bar',
}
USER_DETAILS_CREATE = USER_DETAILS.copy()
USER_DETAILS_CREATE.update(password1='pass', password2='pass')
USER_DETAILS_CHANGING = {
'username': 'johnsmith',
'email': 'john@smiths.com',
'first_name': 'John',
'last_name': 'Smith',
}
@pytest.mark.django_db
def test_create_user_view_get(admin_client):
url = reverse('wagtailusers_users:add')
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_create_user_view(admin_client):
url = reverse('wagtailusers_users:add')
response = admin_client.post(url, data=USER_DETAILS_CREATE)
assert response.context['message'] == 'User test created.'
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
@pytest.mark.django_db
def test_create_user_view_invalid_form(admin_client):
url = reverse('wagtailusers_users:add')
post_data = USER_DETAILS.copy()
post_data.update(email='This is not an email address')
response = admin_client.post(url, post_data)
message = response.context['message']
assert message == 'The user could not be created due to errors.'
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_get_edit_user_view(admin_client):
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.context['can_delete'] is True
@pytest.mark.django_db
def test_edit_user_view(team_leaders_group, admin_client):
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
# We'll add the user to a group, as well as changing their details
post_data = USER_DETAILS_CHANGING.copy()
post_data['groups'] = [team_leaders_group.pk]
response = admin_client.post(url, data=post_data)
assert response.context['message'] == 'User johnsmith updated.'
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The user's details should have changed to reflect the posted values
user.refresh_from_db()
for field_name, changed_value in USER_DETAILS_CHANGING.items():
assert getattr(user, field_name) == changed_value
# And they should have been added to a group
group_ids = set(user.groups.values_list('id', flat=True))
assert group_ids == {team_leaders_group.pk}
@pytest.mark.django_db
def test_edit_user_view_invalid_form(admin_client, approved_user):
url = reverse('wagtailusers_users:edit', kwargs={'pk': approved_user.pk})
post_data = USER_DETAILS.copy()
post_data.update(email='This is not an email address')
response = admin_client.post(url, post_data)
message = response.context['message']
assert message == 'The user could not be saved due to errors.'
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_edit_user_view_cannot_change_personal_details_when_sso_enforced(
admin_client
):
    # Set this flag to True and repeat the previous test
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = UserFactory(**USER_DETAILS)
# Post changes to the view
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
admin_client.post(url, data=USER_DETAILS_CHANGING)
# The users details should remain unchanged, because the
# personal detail fields should all be disabled
user.refresh_from_db()
for field_name, original_value in USER_DETAILS.items():
assert getattr(user, field_name) == original_value
# Change this back to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_preserves_ability_to_update_is_active(admin_client):
    # Set this flag to True and repeat the actions of the previous test
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
# Create an 'inactive' user to test with
user = UserFactory(**USER_DETAILS)
user.is_active = False
user.save()
# Post using the same details + 'is_active=on'
post_data = USER_DETAILS.copy()
post_data.update(is_active='on')
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
admin_client.post(url, data=post_data)
# The change to 'is_active' should have been applied, because that field
# is not disabled along with the personal detail ones
user.refresh_from_db()
assert user.is_active is True
# Reset flag to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_warns_administrator_if_user_is_awaiting_approval(
admin_client, user_awaiting_approval
):
# This flag must be set for the warning to show
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = user_awaiting_approval
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
response = admin_client.get(url)
message = response.context['message']
assert "This user is awaiting approval" in message
assert "requested to be added to the 'Moderators' group" in message
# Reset flag to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_marks_user_as_approved_if_added_to_group(
admin_client, admin_user, user_awaiting_approval
):
# This flag must be set for the warning to show
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = user_awaiting_approval
profile = user_awaiting_approval.userprofile
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
group = Group.objects.get(pk=profile.self_assigned_group_id)
group.permissions.add(Permission.objects.get(codename='access_admin'))
with patch('users.views.notify_user_of_access_request_approval', autospec=True) as mocked_method:
response = admin_client.post(url, {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'is_active': True,
'groups': [group.pk],
})
# Ensure the post was successful
assert response.context['message'] == 'User %s updated.' % user.username
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The UserProfile should have been updated
profile.refresh_from_db()
assert profile.assignment_status == UserProfile.STATUS_APPROVED
assert profile.approved_by_id == admin_user.id
assert profile.approved_at is not None
# A notification should have been triggered for the user
expected_call_args = dict(
request=response.wsgi_request,
user_email=user.email,
user_name=user.first_name,
reviewer_name=admin_user.get_full_name(),
)
mocked_method.assert_called_with(**expected_call_args)
# Reset flag to avoid cross-test pollution
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_does_not_mark_user_as_approved_if_not_added_to_a_group(admin_client, groups_with_info):
user = UserFactory(username='some-user')
profile = user.userprofile
profile.assignment_status = UserProfile.STATUS_AWAITING_APPROVAL
profile.self_assigned_group_id = groups_with_info[0].id
profile.save()
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
with patch(
'users.views.notify_user_of_access_request_approval'
) as mocked_method:
response = admin_client.post(url, {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'is_active': True,
})
# Ensure the post was successful
assert response.context['message'] == 'User %s updated.' % user.username
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The UserProfile should NOT have been updated
profile.refresh_from_db()
assert profile.assignment_status == UserProfile.STATUS_AWAITING_APPROVAL
assert profile.approved_by_id is None
assert profile.approved_at is None
# no notification should have been triggered
mocked_method.assert_not_called()
def reload_urlconf(urlconf=None):
clear_url_caches()
if urlconf is None:
urlconf = settings.ROOT_URLCONF
if urlconf in sys.modules:
reload(sys.modules[urlconf])
else:
import_module(urlconf)
@pytest.mark.django_db
def test_force_staff_sso(client):
"""Test that URLs and redirects are in place."""
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
settings.AUTHBROKER_CLIENT_ID = 'debug'
settings.AUTHBROKER_CLIENT_SECRET = 'debug'
settings.AUTHBROKER_URL = 'https://test.com'
reload_urlconf()
assert reverse('authbroker_client:login') == '/auth/login/'
assert reverse('authbroker_client:callback') == '/auth/callback/'
response = client.get('/admin/login/')
assert response.status_code == 302
assert response.url == '/auth/login/'
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.parametrize('assignment_status, expected_status_code', (
(UserProfile.STATUS_CREATED, 200),
(UserProfile.STATUS_AWAITING_APPROVAL, 302),
(UserProfile.STATUS_APPROVED, 302)
))
@pytest.mark.django_db
def test_ssorequestaccessview_responds_based_on_assignment_status(
admin_client, admin_user, assignment_status, expected_status_code
):
url = reverse('sso:request_access')
profile = admin_user.userprofile
profile.assignment_status = assignment_status
profile.save()
response = admin_client.get(url)
assert response.status_code == expected_status_code
@pytest.mark.django_db
def test_ssorequestaccessview_shows_unlimited_visibilty_groups_only(
admin_client, groups_with_info
):
url = reverse('sso:request_access')
    # Visibility is set to 'unrestricted' for all groups in `groups_with_info`,
# so choices should reflect that by default
expected_choices = tuple(
(g.id, g.info.name_singular) for g in groups_with_info
)
# Confirm the choices in the form are as expected
response = admin_client.get(url)
group_field = response.context['form']['self_assigned_group'].field
actual_choices = tuple(group_field.choices)
assert actual_choices == expected_choices
# Change the visibility of groups and try again
GroupInfo.objects.all().update(
visibility=GroupInfo.VISIBILITY_MANAGERS_ONLY)
# Choices should be empty now
response = admin_client.get(url)
group_field = response.context['form']['self_assigned_group'].field
assert tuple(group_field.choices) == ()
@pytest.mark.django_db
def test_ssorequestaccessview_with_no_team_leaders_group(admin_client):
# If no 'team leaders group' has been designated, the 'team_leaders'
# field should only have a 'blank' option
url = reverse('sso:request_access')
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
assert tuple(team_leader_field.choices) == (BLANK_CHOICE,)
@pytest.mark.django_db
def test_ssorequestaccessview_with_team_leaders_group_but_no_members(
admin_client, team_leaders_group
):
# If the designated 'team leaders group' has no members, the 'team_leaders'
# field should only have a 'blank' option
url = reverse('sso:request_access')
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
assert team_leaders_group.user_set.all().exists() is False
assert tuple(team_leader_field.choices) == (BLANK_CHOICE,)
@pytest.mark.django_db
def test_ssorequestaccessview_with_team_leaders(
admin_client, team_leaders_group, team_leaders
):
url = reverse('sso:request_access')
# When team leaders are defined, they will appear as choices
# for the 'team_leaders' field
expected_choices = [BLANK_CHOICE]
expected_choices.extend(list(
(tl.id, "{} <{}>".format(tl.get_full_name(), tl.email))
for tl in team_leaders
))
# Confirm the choices in the form are as expected
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
actual_choices = list(team_leader_field.choices)
assert actual_choices == expected_choices
@pytest.mark.django_db
def test_ssorequestaccessview_fails_validation_if_form_incomplete(
admin_client, groups_with_info, team_leaders
):
url = reverse('sso:request_access')
response = admin_client.post(url, data={})
# Should still be on the same view
assert response.status_code == 200
# Both form fields should have errors
assert 'self_assigned_group' in response.context['form'].errors
assert 'team_leader' in response.context['form'].errors
@pytest.mark.django_db
def test_ssorequestaccessview_post_with_complete_data(
admin_client, admin_user, groups_with_info, team_leaders
):
group = groups_with_info[0]
team_leader = team_leaders[0]
with patch(
'users.views.notify_team_leader_of_pending_access_request',
autospec=True
) as mocked_method:
response = admin_client.post(
reverse('sso:request_access'),
data={
'self_assigned_group': group.id,
'team_leader': team_leader.id,
}
)
# Should be redirected to the success url
success_url = reverse('sso:request_access_success')
assert response.url == success_url
# The UserProfile for `admin_user` should have been updated
profile = admin_user.userprofile
assert profile.self_assigned_group_id == group.id
assert profile.team_leader_id == team_leader.id
assert profile.assignment_status == UserProfile.STATUS_AWAITING_APPROVAL # noqa
# A notification should have been triggered for the user
expected_call_args = dict(
request=response.wsgi_request,
team_leader_email=team_leader.email,
team_leader_name=team_leader.first_name,
user_id=admin_user.id,
user_name=admin_user.get_full_name(),
user_email=admin_user.email,
user_role=group.info.name_singular,
)
mocked_method.assert_called_with(**expected_call_args)
@pytest.mark.django_db
@pytest.mark.parametrize('url', (
reverse('sso:request_access'),
reverse('sso:request_access_success'),
))
def test_ssorequestaccess_views_only_available_to_authenticated_users(
client, admin_client, url
):
# When not authenticated, the user is redirected to the login page
response = client.get(url)
assert response.status_code == 302
assert response.url.startswith(settings.LOGIN_URL)
# When authenticated, things work fine
response = admin_client.get(url)
assert response.status_code == 200
| 34.45
| 111
| 0.730927
|
from importlib import import_module, reload
import pytest
import sys
from unittest.mock import patch
from rest_framework import status
from django.contrib.auth.models import Permission, Group
from django.conf import settings
from django.urls import clear_url_caches
from django.urls import reverse
from .factories import UserFactory
from groups.models import GroupInfo
from users.models import UserProfile
BLANK_CHOICE = ('', '---------')
USER_DETAILS = {
'username': 'test',
'email': 'test@test.com',
'first_name': 'Foo',
'last_name': 'Bar',
}
USER_DETAILS_CREATE = USER_DETAILS.copy()
USER_DETAILS_CREATE.update(password1='pass', password2='pass')
USER_DETAILS_CHANGING = {
'username': 'johnsmith',
'email': 'john@smiths.com',
'first_name': 'John',
'last_name': 'Smith',
}
@pytest.mark.django_db
def test_create_user_view_get(admin_client):
url = reverse('wagtailusers_users:add')
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_create_user_view(admin_client):
url = reverse('wagtailusers_users:add')
response = admin_client.post(url, data=USER_DETAILS_CREATE)
assert response.context['message'] == 'User test created.'
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
@pytest.mark.django_db
def test_create_user_view_invalid_form(admin_client):
url = reverse('wagtailusers_users:add')
post_data = USER_DETAILS.copy()
post_data.update(email='This is not an email address')
response = admin_client.post(url, post_data)
message = response.context['message']
assert message == 'The user could not be created due to errors.'
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_get_edit_user_view(admin_client):
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
response = admin_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.context['can_delete'] is True
@pytest.mark.django_db
def test_edit_user_view(team_leaders_group, admin_client):
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
post_data = USER_DETAILS_CHANGING.copy()
post_data['groups'] = [team_leaders_group.pk]
response = admin_client.post(url, data=post_data)
assert response.context['message'] == 'User johnsmith updated.'
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
# The user's details should have changed to reflect the posted values
user.refresh_from_db()
for field_name, changed_value in USER_DETAILS_CHANGING.items():
assert getattr(user, field_name) == changed_value
group_ids = set(user.groups.values_list('id', flat=True))
assert group_ids == {team_leaders_group.pk}
@pytest.mark.django_db
def test_edit_user_view_invalid_form(admin_client, approved_user):
url = reverse('wagtailusers_users:edit', kwargs={'pk': approved_user.pk})
post_data = USER_DETAILS.copy()
post_data.update(email='This is not an email address')
response = admin_client.post(url, post_data)
message = response.context['message']
assert message == 'The user could not be saved due to errors.'
assert response.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_edit_user_view_cannot_change_personal_details_when_sso_enforced(
admin_client
):
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = UserFactory(**USER_DETAILS)
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
admin_client.post(url, data=USER_DETAILS_CHANGING)
user.refresh_from_db()
for field_name, original_value in USER_DETAILS.items():
assert getattr(user, field_name) == original_value
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_preserves_ability_to_update_is_active(admin_client):
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = UserFactory(**USER_DETAILS)
user.is_active = False
user.save()
post_data = USER_DETAILS.copy()
post_data.update(is_active='on')
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
admin_client.post(url, data=post_data)
user.refresh_from_db()
assert user.is_active is True
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_warns_administrator_if_user_is_awaiting_approval(
admin_client, user_awaiting_approval
):
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = user_awaiting_approval
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
response = admin_client.get(url)
message = response.context['message']
assert "This user is awaiting approval" in message
assert "requested to be added to the 'Moderators' group" in message
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_marks_user_as_approved_if_added_to_group(
admin_client, admin_user, user_awaiting_approval
):
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
user = user_awaiting_approval
profile = user_awaiting_approval.userprofile
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
group = Group.objects.get(pk=profile.self_assigned_group_id)
group.permissions.add(Permission.objects.get(codename='access_admin'))
with patch('users.views.notify_user_of_access_request_approval', autospec=True) as mocked_method:
response = admin_client.post(url, {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'is_active': True,
'groups': [group.pk],
})
assert response.context['message'] == 'User %s updated.' % user.username
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
profile.refresh_from_db()
assert profile.assignment_status == UserProfile.STATUS_APPROVED
assert profile.approved_by_id == admin_user.id
assert profile.approved_at is not None
expected_call_args = dict(
request=response.wsgi_request,
user_email=user.email,
user_name=user.first_name,
reviewer_name=admin_user.get_full_name(),
)
mocked_method.assert_called_with(**expected_call_args)
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.django_db
def test_edit_user_view_does_not_mark_user_as_approved_if_not_added_to_a_group(admin_client, groups_with_info):
user = UserFactory(username='some-user')
profile = user.userprofile
profile.assignment_status = UserProfile.STATUS_AWAITING_APPROVAL
profile.self_assigned_group_id = groups_with_info[0].id
profile.save()
url = reverse('wagtailusers_users:edit', kwargs={'pk': user.pk})
with patch(
'users.views.notify_user_of_access_request_approval'
) as mocked_method:
response = admin_client.post(url, {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'is_active': True,
})
assert response.context['message'] == 'User %s updated.' % user.username
assert response.status_code == status.HTTP_302_FOUND
assert response.url == reverse('wagtailusers_users:index')
profile.refresh_from_db()
assert profile.assignment_status == UserProfile.STATUS_AWAITING_APPROVAL
assert profile.approved_by_id is None
assert profile.approved_at is None
mocked_method.assert_not_called()
def reload_urlconf(urlconf=None):
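    # Clear Django's cached URL resolvers and re-import the root URLconf so
    # that URL patterns gated behind FEATURE_FLAGS are rebuilt mid-test.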
clear_url_caches()
if urlconf is None:
urlconf = settings.ROOT_URLCONF
if urlconf in sys.modules:
reload(sys.modules[urlconf])
else:
import_module(urlconf)
@pytest.mark.django_db
def test_force_staff_sso(client):
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = True
settings.AUTHBROKER_CLIENT_ID = 'debug'
settings.AUTHBROKER_CLIENT_SECRET = 'debug'
settings.AUTHBROKER_URL = 'https://test.com'
reload_urlconf()
assert reverse('authbroker_client:login') == '/auth/login/'
assert reverse('authbroker_client:callback') == '/auth/callback/'
response = client.get('/admin/login/')
assert response.status_code == 302
assert response.url == '/auth/login/'
settings.FEATURE_FLAGS['ENFORCE_STAFF_SSO_ON'] = False
reload_urlconf()
@pytest.mark.parametrize('assignment_status, expected_status_code', (
(UserProfile.STATUS_CREATED, 200),
(UserProfile.STATUS_AWAITING_APPROVAL, 302),
(UserProfile.STATUS_APPROVED, 302)
))
@pytest.mark.django_db
def test_ssorequestaccessview_responds_based_on_assignment_status(
admin_client, admin_user, assignment_status, expected_status_code
):
url = reverse('sso:request_access')
profile = admin_user.userprofile
profile.assignment_status = assignment_status
profile.save()
response = admin_client.get(url)
assert response.status_code == expected_status_code
@pytest.mark.django_db
def test_ssorequestaccessview_shows_unlimited_visibilty_groups_only(
admin_client, groups_with_info
):
url = reverse('sso:request_access')
expected_choices = tuple(
(g.id, g.info.name_singular) for g in groups_with_info
)
response = admin_client.get(url)
group_field = response.context['form']['self_assigned_group'].field
actual_choices = tuple(group_field.choices)
assert actual_choices == expected_choices
GroupInfo.objects.all().update(
visibility=GroupInfo.VISIBILITY_MANAGERS_ONLY)
response = admin_client.get(url)
group_field = response.context['form']['self_assigned_group'].field
assert tuple(group_field.choices) == ()
@pytest.mark.django_db
def test_ssorequestaccessview_with_no_team_leaders_group(admin_client):
url = reverse('sso:request_access')
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
assert tuple(team_leader_field.choices) == (BLANK_CHOICE,)
@pytest.mark.django_db
def test_ssorequestaccessview_with_team_leaders_group_but_no_members(
admin_client, team_leaders_group
):
url = reverse('sso:request_access')
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
assert team_leaders_group.user_set.all().exists() is False
assert tuple(team_leader_field.choices) == (BLANK_CHOICE,)
@pytest.mark.django_db
def test_ssorequestaccessview_with_team_leaders(
admin_client, team_leaders_group, team_leaders
):
url = reverse('sso:request_access')
expected_choices = [BLANK_CHOICE]
expected_choices.extend(list(
(tl.id, "{} <{}>".format(tl.get_full_name(), tl.email))
for tl in team_leaders
))
response = admin_client.get(url)
team_leader_field = response.context['form']['team_leader'].field
actual_choices = list(team_leader_field.choices)
assert actual_choices == expected_choices
@pytest.mark.django_db
def test_ssorequestaccessview_fails_validation_if_form_incomplete(
admin_client, groups_with_info, team_leaders
):
url = reverse('sso:request_access')
response = admin_client.post(url, data={})
assert response.status_code == 200
assert 'self_assigned_group' in response.context['form'].errors
assert 'team_leader' in response.context['form'].errors
@pytest.mark.django_db
def test_ssorequestaccessview_post_with_complete_data(
admin_client, admin_user, groups_with_info, team_leaders
):
group = groups_with_info[0]
team_leader = team_leaders[0]
with patch(
'users.views.notify_team_leader_of_pending_access_request',
autospec=True
) as mocked_method:
response = admin_client.post(
reverse('sso:request_access'),
data={
'self_assigned_group': group.id,
'team_leader': team_leader.id,
}
)
success_url = reverse('sso:request_access_success')
assert response.url == success_url
profile = admin_user.userprofile
assert profile.self_assigned_group_id == group.id
assert profile.team_leader_id == team_leader.id
assert profile.assignment_status == UserProfile.STATUS_AWAITING_APPROVAL
expected_call_args = dict(
request=response.wsgi_request,
team_leader_email=team_leader.email,
team_leader_name=team_leader.first_name,
user_id=admin_user.id,
user_name=admin_user.get_full_name(),
user_email=admin_user.email,
user_role=group.info.name_singular,
)
mocked_method.assert_called_with(**expected_call_args)
@pytest.mark.django_db
@pytest.mark.parametrize('url', (
reverse('sso:request_access'),
reverse('sso:request_access_success'),
))
def test_ssorequestaccess_views_only_available_to_authenticated_users(
client, admin_client, url
):
response = client.get(url)
assert response.status_code == 302
assert response.url.startswith(settings.LOGIN_URL)
response = admin_client.get(url)
assert response.status_code == 200
| true
| true
|
790254dc748b02c37a6308b355bd3094569a55d8
| 82
|
py
|
Python
|
test.py
|
demengliu/network
|
028c2a5006640cf10b9aa8c47a225272e0a30954
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
demengliu/network
|
028c2a5006640cf10b9aa8c47a225272e0a30954
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
demengliu/network
|
028c2a5006640cf10b9aa8c47a225272e0a30954
|
[
"Apache-2.0"
] | null | null | null |
git clone
git add file/folder
git commit -m 'commit message'
git push origin('default remote name') master(branch name)
| 16.4
| 35
| 0.719512
|
git clone
git add file/folder
git commit -m 'commit message'
git push origin('default remote name') master(branch name)
| false
| true
|
79025512b5ca24bb4b2e9159df5cf2b311245ffb
| 14,774
|
py
|
Python
|
genMassive.py
|
ABKGroup/GenMassive
|
c9cb2de29e1054787a3310840073bab3fab08c75
|
[
"BSD-3-Clause"
] | null | null | null |
genMassive.py
|
ABKGroup/GenMassive
|
c9cb2de29e1054787a3310840073bab3fab08c75
|
[
"BSD-3-Clause"
] | null | null | null |
genMassive.py
|
ABKGroup/GenMassive
|
c9cb2de29e1054787a3310840073bab3fab08c75
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# This script generates massive design-of-experiments (DOE) runscripts
# and saves them into "runMassive.sh" and "doe.log".
#-------------------------------------------------------------------------------
import os, sys
import os.path
import re
import itertools
import glob
PUBLIC = ['nangate45', 'sky130hd', 'sky130hs', 'asap7']
# The number of config files generated per designs/{platform}/{design}/chunks/chunk{number} directory.
NumFilesPerChunk = 50000
## Original SDC file name
OriginalSDC = 'constraint_doe.sdc'
##################################
# define input parameters
##################################
# for generated .sh file name
ShellName = 'runMassive'
##################
# Design
##################
## Define platform-design pairs. The user should remove the trailing ',' from the last item in the list. (string)
PLATFORM_DESIGN = [ \
#'sky130hd-gcd' \
'sky130hd-ibex', \
#'sky130hd-aes', \
#'sky130hd-jpeg', \
#'sky130hs-gcd', \
#'sky130hs-ibex', \
#'sky130hs-aes', \
#'sky130hs-jpeg', \
#'nangate45-gcd', \
#'nangate45-ibex', \
#'nangate45-aes', \
#'nangate45-jpeg', \
#'asap7-gcd', \
#'asap7-ibex', \
#'asap7-aes', \
#'asap7-jpeg', \
]
## Target Clock Period (float)
CLK_PERIOD = []
## SDC uncertainty and IO delay.
## TODO: Currently, this is only supported when 'set uncertainty' and 'set io_delay'
## are defined in the constraint.sdc file.
UNCERTAINTY = []
IO_DELAY = []
##################
# Synthesis
##################
## Clock period for Yosys (for synthesis)
## The unit should follow each design (ns, ps) (float)
ABC_CLOCK_PERIOD = []
## Hierarchical synthesis. 0 = hierarchical, 1 = flatten, empty = flatten (default) (int)
FLATTEN = []
##################
# Floorplan
##################
## Utilization. e.g, 45 -> 45% of core util. (int)
#CORE_UTIL = [20, 40, 55]
CORE_UTIL = [20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50]
## Aspect ratio. It REQUIRES 'CORE_UTIL' values (float)
ASPECT_RATIO = [0.5, 0.75, 1.0, 1.25, 1.5]
## Core-to-die gap distance (um). It REQUIRES 'CORE_UTIL' values (int)
CORE_DIE_MARGIN = [10]
## Pin Distance
#PINS_DISTANCE = [2]
PINS_DISTANCE = []
##################
# Placement
##################
## Global Placement Padding for std cells (int)
GP_PAD = [4]
## Detailed Placement Padding for std cells (int)
DP_PAD = [2]
## Global Placement target bin density (select only one option) (.2 float)
## option 1) PLACE_DENSITY uses the values in the list as it is.
## option 2) PLACE_DENSITY_LB_ADDON adds the values in the list to the lower boundary of the PLACE_DENSITY
## For example, PLACE_DENSITY_LB_ADDON = [0, 0.02, 0.04] means PLACE_DENSITY = [LB, LB+0.02, LB+0.04]
## LB of the place density == (total instance area + padding) / total die area
PLACE_DENSITY = []
PLACE_DENSITY_LB_ADDON = [0, 0.04, 0.08]
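## e.g., if the computed LB were 0.60 (an illustrative value), the list above
## would sweep PLACE_DENSITY over 0.60, 0.64 and 0.68.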
##################
# CTS
##################
## CTS clustering size and diameter (um) (int)
CTS_CLUSTER_SIZE = []
CTS_CLUSTER_DIAMETER = []
##################
# Global Routing
##################
## Set global routing layer capacity adjustment
## e.g.) 0.2 -> 20% usage for global routing
## Set for all layers.
## Each layer's adjustment will be overwritten by the per-layer values below. (float)
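## e.g., LAYER_ADJUST = [0.5] with LAYER_ADJUST_M1 = [0.2] caps every layer
## at 0.5 except met1, which is overwritten to 0.2.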
LAYER_ADJUST = [0.5]
LAYER_ADJUST_M1 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M2 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M3 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M4 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M5 = []
LAYER_ADJUST_M6 = []
LAYER_ADJUST_M7 = []
LAYER_ADJUST_M8 = []
LAYER_ADJUST_M9 = []
## Set global routing random seed. (int)
GR_SEED = []
## Allow global routing overflow. 0 = no, 1 = yes, empty = no (default) (int)
# TODO: currently this does not work. Leave it at 0.
GR_OVERFLOW = [0]
##################
# Detailed Routing
##################
## Set detailed routing random seed. (int)
DR_SEED = []
SweepingAttributes = { "PLATFORM_DESIGN": PLATFORM_DESIGN,
"CP": CLK_PERIOD,
"ABC_CP": ABC_CLOCK_PERIOD,
"FLATTEN": FLATTEN,
"UNCERTAINTY": UNCERTAINTY,
"IO_DELAY": IO_DELAY,
"UTIL": CORE_UTIL,
"AR": ASPECT_RATIO,
"GAP": CORE_DIE_MARGIN,
"PINS_DISTANCE": PINS_DISTANCE,
"GP_PAD": GP_PAD,
"DP_PAD": DP_PAD,
"PD": PLACE_DENSITY,
"PD_LB_ADD": PLACE_DENSITY_LB_ADDON,
"CTS_CLUSTER_SIZE": CTS_CLUSTER_SIZE,
"CTS_CLUSTER_DIAMETER": CTS_CLUSTER_DIAMETER,
"LAYER_ADJUST": LAYER_ADJUST,
"M1": LAYER_ADJUST_M1,
"M2": LAYER_ADJUST_M2,
"M3": LAYER_ADJUST_M3,
"M4": LAYER_ADJUST_M4,
"M5": LAYER_ADJUST_M5,
"M6": LAYER_ADJUST_M6,
"M7": LAYER_ADJUST_M7,
"M8": LAYER_ADJUST_M8,
"M9": LAYER_ADJUST_M9,
"GR_SEED": GR_SEED,
"GR_OVERFLOW": GR_OVERFLOW,
"DR_SEED": DR_SEED }
def assignEmptyAttrs(dicts):
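    # Replace empty sweep lists with the sentinel ['empty'] so the cartesian
    # product below still iterates once over that knob.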
knobs = {}
for k, v in dicts.items():
if len(v) == 0:
knobs.setdefault(k, ['empty'])
else:
knobs.setdefault(k,v)
return knobs
def writeDoeLog(dicts, ProductDicts):
fo = open('./doe.log', 'w')
numRuns = 1
for k, v in dicts.items():
if len(v)>0:
print('%s has %s number of values'%(k,len(v)))
fo.write('%s has %s number of values\n'%(k,len(v)))
numRuns = numRuns * len(v)
fo.write('\nTotal Number of Runs = %s\n\n'%numRuns)
print('\nTotal Number of Runs = %s\n\n'%numRuns)
knobValuesList = []
knobNamesList = []
for CurAttrs in ProductAttrs:
knobValues = []
knobNames = []
for k, v in CurAttrs.items():
if v=='empty':
continue
else:
knobNames.append(str(k))
knobValues.append(str(v))
knobValuesList.append(knobValues)
knobNamesList.append(knobNames)
fo.write(str(knobNamesList[0])+'\n')
for knobSet in knobValuesList:
fo.write(str(knobSet)+'\n')
fo.close()
def productDict(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
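# e.g., productDict({'A': [1, 2], 'B': ['x']}) yields {'A': 1, 'B': 'x'}
# and {'A': 2, 'B': 'x'} -- the full cartesian product of all knob values.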
def adjustFastRoute(filedata, adjSet, GrOverflow):
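    # Rewrite the layer capacity adjustments inside the platform fastroute.tcl:
    # adjSet[0] overwrites the global adjustment, adjSet[1..9] append per-layer
    # overrides, and GrOverflow == 1 adds -allow_overflow to global_route.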
if adjSet[0]!='empty':
filedata = re.sub("(set_global_routing_layer_adjustment .* )[0-9\.]+", "\g<1>{:.2f}".format(float(adjSet[0])), filedata)
sep_la_cmds = ""
for i, sep_la in enumerate(adjSet):
if i==0 or sep_la=='empty':
continue
        ## TODO: Currently, only SKY130HD and SKY130HS are supported.
        ## TODO: the user should manually change the layer name to match the tech LEF.
layer_name = 'met%s'%i
sep_la_cmds += "set_global_routing_layer_adjustment " + layer_name + " {:.2f}\n".format(float(sep_la))
filedata = re.sub("set_global_routing_layer_adjustment.*\n", "\g<0>"+sep_la_cmds, filedata)
if int(GrOverflow) == 1:
filedata = re.sub("(global_route.*(\n\s+.*)*)", "\g<1> \\\n -allow_overflow", filedata)
return(filedata)
#def setPlaceDensity(DESIGN, Util, GpPad):
# if DESIGN == "ibex":
# LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01
# elif DESIGN == "aes":
# LB = (Util/100) + (GpPad * (0.5*(Util/100)-0.005))+0.02
# else:
# LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01
# return LB
def writeConfigs(CurAttrs, CurChunkNum):
CurPlatform, CurDesign = CurAttrs.get('PLATFORM_DESIGN').split('-')
CurClkPeriod = CurAttrs.get('CP')
CurAbcClkPeriod = CurAttrs.get('ABC_CP')
CurFlatten = CurAttrs.get('FLATTEN')
CurUncertainty = CurAttrs.get('UNCERTAINTY')
CurIoDelay = CurAttrs.get('IO_DELAY')
CurCoreUtil = CurAttrs.get('UTIL')
CurAspectRatio = CurAttrs.get('AR')
CurCoreDieMargin = CurAttrs.get('GAP')
CurPinsDistance = CurAttrs.get('PINS_DISTANCE')
CurGpPad = CurAttrs.get('GP_PAD')
CurDpPad = CurAttrs.get('DP_PAD')
CurPlaceDensity = CurAttrs.get('PD')
CurPlaceDensityLbAddon = CurAttrs.get('PD_LB_ADD')
CurCtsClusterSize = CurAttrs.get('CTS_CLUSTER_SIZE')
CurCtsClusterDiameter = CurAttrs.get('CTS_CLUSTER_DIAMETER')
CurLayerAdjust = CurAttrs.get('LAYER_ADJUST')
CurLayerAdjustM1 = CurAttrs.get('M1')
CurLayerAdjustM2 = CurAttrs.get('M2')
CurLayerAdjustM3 = CurAttrs.get('M3')
CurLayerAdjustM4 = CurAttrs.get('M4')
CurLayerAdjustM5 = CurAttrs.get('M5')
CurLayerAdjustM6 = CurAttrs.get('M6')
CurLayerAdjustM7 = CurAttrs.get('M7')
CurLayerAdjustM8 = CurAttrs.get('M8')
CurLayerAdjustM9 = CurAttrs.get('M9')
CurGrSeed = CurAttrs.get('GR_SEED')
CurGrOverflow = CurAttrs.get('GR_OVERFLOW')
CurDrSeed = CurAttrs.get('DR_SEED')
if not os.path.isdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign)):
os.mkdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign))
CurDesignDir = './designs/%s/%s'%(CurPlatform,CurDesign)
CurChunkDir = './designs/%s/%s/chunks/chunk%s'%(CurPlatform,CurDesign,CurChunkNum)
if not os.path.isdir(CurChunkDir):
os.mkdir(CurChunkDir)
#print(CurChunkNum)
if MakeArg=='clean':
fileList = glob.glob('%s/*-DoE-*'%(CurChunkDir))
if fileList is not None:
for file in fileList:
os.remove(file)
return
#print(CurPlatform, CurDesign)
#print(CurClkPeriod, CurAbcClkPeriod, CurFlatten, CurCoreUtil)
#print(CurAspectRatio, CurCoreDieMargin, CurGpPad, CurDpPad)
#print(CurCtsClusterSize, CurCtsClusterDiameter, CurLayerAdjust)
#print(CurLayerAdjustM1, CurLayerAdjustM2, CurLayerAdjustM3)
#print(CurLayerAdjustM4, CurLayerAdjustM5, CurLayerAdjustM6)
#print(CurLayerAdjustM7, CurLayerAdjustM8, CurLayerAdjustM9)
#print(CurGrOverflow)
#print(CurAttrs.items())
variantName = ''
for k, v in CurAttrs.items():
if v!='empty' and k!='PLATFORM_DESIGN':
variantName = variantName + '-' + str(k) + '_' + str(v)
variantName = variantName[1:]
#fileName = 'config-%s-%s-'%(CurPlatform, CurDesign)+variantName + '.mk'
fileName = 'config-DoE-'+variantName + '.mk'
fo = open('%s/%s'%(CurChunkDir,fileName), 'w')
fo.write('include $(realpath $(dir $(DESIGN_CONFIG))../../)/config.mk\n')
fo.write('\n')
fo.write('FLOW_VARIANT = %s\n'%(variantName))
fo.write('\n')
if CurClkPeriod != 'empty' or CurUncertainty != 'empty' or CurIoDelay != 'empty':
fOrigSdc = open('%s/%s'%(CurDesignDir,OriginalSDC),'r')
filedata = fOrigSdc.read()
fOrigSdc.close()
if CurClkPeriod != 'empty':
filedata = re.sub("-period [0-9\.]+", "-period " + str(CurClkPeriod), filedata)
#filedata = re.sub("-waveform [{}\s0-9\.]+$}", "\n", filedata)
filedata = re.sub("-waveform [{}\s0-9\.]+[\s|\n]", "", filedata)
if CurUncertainty != 'empty':
filedata = re.sub("set uncertainty [0-9\.]+", "set uncertainty " + str(CurUncertainty), filedata)
if CurIoDelay != 'empty':
filedata = re.sub("set io_delay [0-9\.]+", "set io_delay " + str(CurIoDelay), filedata)
#fOutSdc = open('./designs/%s/%s/constraint-%s-%s-'%(CurPlatform,CurDesign,CurPlatform,CurDesign)+variantName+'.sdc','w')
fOutSdc = open('%s/constraint-DoE-'%(CurChunkDir)+variantName+'.sdc','w')
fOutSdc.write(filedata)
fOutSdc.close()
fo.write('export SDC_FILE = $(dir $(DESIGN_CONFIG))/constraint-DoE-%s.sdc\n'%variantName)
if CurAbcClkPeriod != 'empty':
fo.write('export ABC_CLOCK_PERIOD_IN_PS = %s\n'%CurAbcClkPeriod)
if CurFlatten != 'empty':
if CurFlatten == 0:
fo.write('export SYNTH_ARGS = \n')
if CurCoreUtil != 'empty':
fo.write('export CORE_UTILIZATION = %s\n'%CurCoreUtil)
if CurPlaceDensity != 'empty':
fo.write('export PLACE_DENSITY = %.2f\n'%CurPlaceDensity)
if CurPlaceDensityLbAddon != 'empty':
fo.write('export PLACE_DENSITY_LB_ADDON = %.2f\n'%CurPlaceDensityLbAddon)
if CurAspectRatio != 'empty':
fo.write('export CORE_ASPECT_RATIO = %s\n'%CurAspectRatio)
if CurCoreDieMargin != 'empty':
fo.write('export CORE_MARGIN = %s\n'%CurCoreDieMargin)
if CurPinsDistance != 'empty':
fo.write('export PLACE_PINS_ARGS = -min_distance %s\n'%CurPinsDistance)
if CurGpPad != 'empty':
fo.write('export CELL_PAD_IN_SITES_GLOBAL_PLACEMENT = %s\n'%CurGpPad)
if CurDpPad != 'empty':
fo.write('export CELL_PAD_IN_SITES_DETAIL_PLACEMENT = %s\n'%CurDpPad)
if CurCtsClusterSize != 'empty':
fo.write('export CTS_CLUSTER_SIZE = %s\n'%CurCtsClusterSize)
if CurCtsClusterDiameter != 'empty':
fo.write('export CTS_CLUSTER_DIAMETER = %s\n'%CurCtsClusterDiameter)
if CurDrSeed != 'empty':
fo.write('export OR_K = 1.0\n')
fo.write('export OR_SEED = %s\n'%CurDrSeed)
if CurLayerAdjust != 'empty' or \
CurLayerAdjustM1 != 'empty' or \
CurLayerAdjustM2 != 'empty' or \
CurLayerAdjustM3 != 'empty' or \
CurLayerAdjustM4 != 'empty' or \
CurLayerAdjustM5 != 'empty' or \
CurLayerAdjustM6 != 'empty' or \
CurLayerAdjustM7 != 'empty' or \
CurLayerAdjustM8 != 'empty' or \
CurLayerAdjustM9 != 'empty' or \
CurGrSeed != 'empty':
fo.write('export FASTROUTE_TCL = $(dir $(DESIGN_CONFIG))/fastroute-DoE-%s.tcl'%variantName)
if CurPlatform in PUBLIC:
PLATFORM_DIR = './platforms/%s'%CurPlatform
else:
PLATFORM_DIR = '../../%s'%CurPlatform
fFrIn = open('%s/fastroute.tcl'%PLATFORM_DIR,'r')
filedata = fFrIn.read()
fFrIn.close()
CurLayerAdjustSet = [CurLayerAdjust, \
CurLayerAdjustM1, \
CurLayerAdjustM2, \
CurLayerAdjustM3, \
CurLayerAdjustM4, \
CurLayerAdjustM5, \
CurLayerAdjustM6, \
CurLayerAdjustM7, \
CurLayerAdjustM8, \
CurLayerAdjustM9 ]
filedata = adjustFastRoute(filedata, CurLayerAdjustSet, CurGrOverflow)
FrName = 'fastroute-DoE-'+variantName+'.tcl'
fOutFr = open('%s/%s'%(CurChunkDir,FrName),'w')
fOutFr.write(filedata)
if CurGrSeed != 'empty':
fOutFr.write('set_global_routing_random -seed %s'%CurGrSeed)
fOutFr.close()
fo.close()
frun = open('./%s.sh'%ShellName, 'a')
RunName = 'DESIGN_CONFIG=%s/%s make\n'%(CurChunkDir,fileName)
frun.write(RunName)
frun.close()
fcollect = open('./%s_metrics_collect.sh'%ShellName, 'a')
CollectName = 'python util/genMetrics.py -x -p %s -d %s -v %s -o metrics_%s/%s.json\n'%(CurPlatform, CurDesign, variantName, ShellName, variantName)
fcollect.write(CollectName)
fcollect.close()
MakeArg = sys.argv[1]
if not os.path.isdir('./metrics_%s'%ShellName):
os.mkdir('./metrics_%s'%ShellName)
knobs = assignEmptyAttrs(SweepingAttributes)
ProductAttrs = list(productDict(knobs))
writeDoeLog(SweepingAttributes, ProductAttrs)
if os.path.isfile('./%s.sh'%ShellName):
os.remove('./%s.sh'%ShellName)
if os.path.isfile('./%s_metrics_collect.sh'%ShellName):
os.remove('./%s_metrics_collect.sh'%ShellName)
CurChunkNum = 0
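# Write one config per knob combination, moving to a fresh chunk directory
# after every NumFilesPerChunk combinations.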
for i, CurAttrs in enumerate(ProductAttrs, 1):
if i % NumFilesPerChunk == 0:
writeConfigs(CurAttrs, CurChunkNum)
CurChunkNum = CurChunkNum+1
else:
writeConfigs(CurAttrs, CurChunkNum)
# with open('file.txt') as data:
# line = data.readlines()
#
#for line in lines:
# with open('file.txt') as data:
# for line in file_data:
| 31.703863
| 150
| 0.648707
|
import os, sys
import os.path
import re
import itertools
import glob
PUBLIC = ['nangate45', 'sky130hd', 'sky130hs', 'asap7']
NumFilesPerChunk = 50000
OriginalSDC = 'constraint_doe.sdc'
def writeConfigs(CurAttrs, CurChunkNum):
CurPlatform, CurDesign = CurAttrs.get('PLATFORM_DESIGN').split('-')
CurClkPeriod = CurAttrs.get('CP')
CurAbcClkPeriod = CurAttrs.get('ABC_CP')
CurFlatten = CurAttrs.get('FLATTEN')
CurUncertainty = CurAttrs.get('UNCERTAINTY')
CurIoDelay = CurAttrs.get('IO_DELAY')
CurCoreUtil = CurAttrs.get('UTIL')
CurAspectRatio = CurAttrs.get('AR')
CurCoreDieMargin = CurAttrs.get('GAP')
CurPinsDistance = CurAttrs.get('PINS_DISTANCE')
CurGpPad = CurAttrs.get('GP_PAD')
CurDpPad = CurAttrs.get('DP_PAD')
CurPlaceDensity = CurAttrs.get('PD')
CurPlaceDensityLbAddon = CurAttrs.get('PD_LB_ADD')
CurCtsClusterSize = CurAttrs.get('CTS_CLUSTER_SIZE')
CurCtsClusterDiameter = CurAttrs.get('CTS_CLUSTER_DIAMETER')
CurLayerAdjust = CurAttrs.get('LAYER_ADJUST')
CurLayerAdjustM1 = CurAttrs.get('M1')
CurLayerAdjustM2 = CurAttrs.get('M2')
CurLayerAdjustM3 = CurAttrs.get('M3')
CurLayerAdjustM4 = CurAttrs.get('M4')
CurLayerAdjustM5 = CurAttrs.get('M5')
CurLayerAdjustM6 = CurAttrs.get('M6')
CurLayerAdjustM7 = CurAttrs.get('M7')
CurLayerAdjustM8 = CurAttrs.get('M8')
CurLayerAdjustM9 = CurAttrs.get('M9')
CurGrSeed = CurAttrs.get('GR_SEED')
CurGrOverflow = CurAttrs.get('GR_OVERFLOW')
CurDrSeed = CurAttrs.get('DR_SEED')
if not os.path.isdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign)):
os.mkdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign))
CurDesignDir = './designs/%s/%s'%(CurPlatform,CurDesign)
CurChunkDir = './designs/%s/%s/chunks/chunk%s'%(CurPlatform,CurDesign,CurChunkNum)
if not os.path.isdir(CurChunkDir):
os.mkdir(CurChunkDir)
#print(CurChunkNum)
if MakeArg=='clean':
fileList = glob.glob('%s/*-DoE-*'%(CurChunkDir))
if fileList is not None:
for file in fileList:
os.remove(file)
return
#print(CurPlatform, CurDesign)
#print(CurClkPeriod, CurAbcClkPeriod, CurFlatten, CurCoreUtil)
#print(CurAspectRatio, CurCoreDieMargin, CurGpPad, CurDpPad)
#print(CurCtsClusterSize, CurCtsClusterDiameter, CurLayerAdjust)
#print(CurLayerAdjustM1, CurLayerAdjustM2, CurLayerAdjustM3)
#print(CurLayerAdjustM4, CurLayerAdjustM5, CurLayerAdjustM6)
#print(CurLayerAdjustM7, CurLayerAdjustM8, CurLayerAdjustM9)
#print(CurGrOverflow)
#print(CurAttrs.items())
variantName = ''
for k, v in CurAttrs.items():
if v!='empty' and k!='PLATFORM_DESIGN':
variantName = variantName + '-' + str(k) + '_' + str(v)
variantName = variantName[1:]
#fileName = 'config-%s-%s-'%(CurPlatform, CurDesign)+variantName + '.mk'
fileName = 'config-DoE-'+variantName + '.mk'
fo = open('%s/%s'%(CurChunkDir,fileName), 'w')
fo.write('include $(realpath $(dir $(DESIGN_CONFIG))../../)/config.mk\n')
fo.write('\n')
fo.write('FLOW_VARIANT = %s\n'%(variantName))
fo.write('\n')
if CurClkPeriod != 'empty' or CurUncertainty != 'empty' or CurIoDelay != 'empty':
fOrigSdc = open('%s/%s'%(CurDesignDir,OriginalSDC),'r')
filedata = fOrigSdc.read()
fOrigSdc.close()
if CurClkPeriod != 'empty':
filedata = re.sub("-period [0-9\.]+", "-period " + str(CurClkPeriod), filedata)
#filedata = re.sub("-waveform [{}\s0-9\.]+$}", "\n", filedata)
filedata = re.sub("-waveform [{}\s0-9\.]+[\s|\n]", "", filedata)
if CurUncertainty != 'empty':
filedata = re.sub("set uncertainty [0-9\.]+", "set uncertainty " + str(CurUncertainty), filedata)
if CurIoDelay != 'empty':
filedata = re.sub("set io_delay [0-9\.]+", "set io_delay " + str(CurIoDelay), filedata)
#fOutSdc = open('./designs/%s/%s/constraint-%s-%s-'%(CurPlatform,CurDesign,CurPlatform,CurDesign)+variantName+'.sdc','w')
fOutSdc = open('%s/constraint-DoE-'%(CurChunkDir)+variantName+'.sdc','w')
fOutSdc.write(filedata)
fOutSdc.close()
fo.write('export SDC_FILE = $(dir $(DESIGN_CONFIG))/constraint-DoE-%s.sdc\n'%variantName)
if CurAbcClkPeriod != 'empty':
fo.write('export ABC_CLOCK_PERIOD_IN_PS = %s\n'%CurAbcClkPeriod)
if CurFlatten != 'empty':
if CurFlatten == 0:
fo.write('export SYNTH_ARGS = \n')
if CurCoreUtil != 'empty':
fo.write('export CORE_UTILIZATION = %s\n'%CurCoreUtil)
if CurPlaceDensity != 'empty':
fo.write('export PLACE_DENSITY = %.2f\n'%CurPlaceDensity)
if CurPlaceDensityLbAddon != 'empty':
fo.write('export PLACE_DENSITY_LB_ADDON = %.2f\n'%CurPlaceDensityLbAddon)
if CurAspectRatio != 'empty':
fo.write('export CORE_ASPECT_RATIO = %s\n'%CurAspectRatio)
if CurCoreDieMargin != 'empty':
fo.write('export CORE_MARGIN = %s\n'%CurCoreDieMargin)
if CurPinsDistance != 'empty':
fo.write('export PLACE_PINS_ARGS = -min_distance %s\n'%CurPinsDistance)
if CurGpPad != 'empty':
fo.write('export CELL_PAD_IN_SITES_GLOBAL_PLACEMENT = %s\n'%CurGpPad)
if CurDpPad != 'empty':
fo.write('export CELL_PAD_IN_SITES_DETAIL_PLACEMENT = %s\n'%CurDpPad)
if CurCtsClusterSize != 'empty':
fo.write('export CTS_CLUSTER_SIZE = %s\n'%CurCtsClusterSize)
if CurCtsClusterDiameter != 'empty':
fo.write('export CTS_CLUSTER_DIAMETER = %s\n'%CurCtsClusterDiameter)
if CurDrSeed != 'empty':
fo.write('export OR_K = 1.0\n')
fo.write('export OR_SEED = %s\n'%CurDrSeed)
if CurLayerAdjust != 'empty' or \
CurLayerAdjustM1 != 'empty' or \
CurLayerAdjustM2 != 'empty' or \
CurLayerAdjustM3 != 'empty' or \
CurLayerAdjustM4 != 'empty' or \
CurLayerAdjustM5 != 'empty' or \
CurLayerAdjustM6 != 'empty' or \
CurLayerAdjustM7 != 'empty' or \
CurLayerAdjustM8 != 'empty' or \
CurLayerAdjustM9 != 'empty' or \
CurGrSeed != 'empty':
fo.write('export FASTROUTE_TCL = $(dir $(DESIGN_CONFIG))/fastroute-DoE-%s.tcl'%variantName)
if CurPlatform in PUBLIC:
PLATFORM_DIR = './platforms/%s'%CurPlatform
else:
PLATFORM_DIR = '../../%s'%CurPlatform
fFrIn = open('%s/fastroute.tcl'%PLATFORM_DIR,'r')
filedata = fFrIn.read()
fFrIn.close()
CurLayerAdjustSet = [CurLayerAdjust, \
CurLayerAdjustM1, \
CurLayerAdjustM2, \
CurLayerAdjustM3, \
CurLayerAdjustM4, \
CurLayerAdjustM5, \
CurLayerAdjustM6, \
CurLayerAdjustM7, \
CurLayerAdjustM8, \
CurLayerAdjustM9 ]
filedata = adjustFastRoute(filedata, CurLayerAdjustSet, CurGrOverflow)
FrName = 'fastroute-DoE-'+variantName+'.tcl'
fOutFr = open('%s/%s'%(CurChunkDir,FrName),'w')
fOutFr.write(filedata)
if CurGrSeed != 'empty':
fOutFr.write('set_global_routing_random -seed %s'%CurGrSeed)
fOutFr.close()
fo.close()
frun = open('./%s.sh'%ShellName, 'a')
RunName = 'DESIGN_CONFIG=%s/%s make\n'%(CurChunkDir,fileName)
frun.write(RunName)
frun.close()
fcollect = open('./%s_metrics_collect.sh'%ShellName, 'a')
CollectName = 'python util/genMetrics.py -x -p %s -d %s -v %s -o metrics_%s/%s.json\n'%(CurPlatform, CurDesign, variantName, ShellName, variantName)
fcollect.write(CollectName)
fcollect.close()
MakeArg = sys.argv[1]
if not os.path.isdir('./metrics_%s'%ShellName):
os.mkdir('./metrics_%s'%ShellName)
knobs = assignEmptyAttrs(SweepingAttributes)
ProductAttrs = list(productDict(knobs))
writeDoeLog(SweepingAttributes, ProductAttrs)
if os.path.isfile('./%s.sh'%ShellName):
os.remove('./%s.sh'%ShellName)
if os.path.isfile('./%s_metrics_collect.sh'%ShellName):
os.remove('./%s_metrics_collect.sh'%ShellName)
CurChunkNum = 0
for i, CurAttrs in enumerate(ProductAttrs, 1):
if i % NumFilesPerChunk == 0:
writeConfigs(CurAttrs, CurChunkNum)
CurChunkNum = CurChunkNum+1
else:
writeConfigs(CurAttrs, CurChunkNum)
# with open('file.txt') as data:
# line = data.readlines()
#
#for line in lines:
# with open('file.txt') as data:
# for line in file_data:
| true
| true
|
79025571b267d4cd93217e5a5f47808a16714369
| 525
|
py
|
Python
|
basic/list.py
|
anuragarwalkar/basic-python
|
1de8088b29247a4851c31e1c03fe168945f06951
|
[
"MIT"
] | null | null | null |
basic/list.py
|
anuragarwalkar/basic-python
|
1de8088b29247a4851c31e1c03fe168945f06951
|
[
"MIT"
] | null | null | null |
basic/list.py
|
anuragarwalkar/basic-python
|
1de8088b29247a4851c31e1c03fe168945f06951
|
[
"MIT"
] | null | null | null |
names = ["John", "Bob", "Dell", "python"];
print(names[0])
print(names[-1])
print(names[-2])
names[0] = "Amina"
print(names[0])
print(names[0:3])
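# Slicing returns a new list: names[0:3] is now ["Amina", "Bob", "Dell"].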
# List methods
numbers = [1, 2, 3, 4, 5]
numbers.append(6)
numbers.insert(0, -1)
numbers.remove(3)
is_there = 1 in numbers
numbers.count(3) # it will return count of 3
# numbers.sort() # Ascending order
numbers.reverse() # descending order
numbers = numbers.copy() # To clone original list
print(is_there)
print(numbers)
print(len(numbers))
numbers.clear()
print(numbers)
| 17.5
| 49
| 0.687619
|
names = ["John", "Bob", "Dell", "python"];
print(names[0])
print(names[-1])
print(names[-2])
names[0] = "Amina"
print(names[0])
print(names[0:3])
numbers = [1, 2, 3, 4, 5]
numbers.append(6)
numbers.insert(0, -1)
numbers.remove(3)
is_there = 1 in numbers
numbers.count(3)
numbers.reverse()
numbers = numbers.copy()
print(is_there)
print(numbers)
print(len(numbers))
numbers.clear()
print(numbers)
| true
| true
|
79025697c6f77fd08cdd3bcbcb6e54f0f3cc413f
| 1,745
|
py
|
Python
|
examples/python/helloworld/async_greeter_server.py
|
gmambro/grpc
|
61aee932d72d59ce9d8c47366aeb9df2825103ec
|
[
"Apache-2.0"
] | 2
|
2021-07-13T09:16:08.000Z
|
2021-11-17T11:07:13.000Z
|
examples/python/helloworld/async_greeter_server.py
|
gmambro/grpc
|
61aee932d72d59ce9d8c47366aeb9df2825103ec
|
[
"Apache-2.0"
] | 11
|
2019-10-15T23:03:57.000Z
|
2020-06-14T16:10:12.000Z
|
examples/python/helloworld/async_greeter_server.py
|
gmambro/grpc
|
61aee932d72d59ce9d8c47366aeb9df2825103ec
|
[
"Apache-2.0"
] | 7
|
2019-07-04T14:23:54.000Z
|
2020-04-27T08:52:51.000Z
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python AsyncIO implementation of the GRPC helloworld.Greeter server."""
import logging
import asyncio
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
class Greeter(helloworld_pb2_grpc.GreeterServicer):
async def SayHello(
self, request: helloworld_pb2.HelloRequest,
context: grpc.aio.ServicerContext) -> helloworld_pb2.HelloReply:
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
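# A matching client call (illustrative sketch, not part of this file) would be:
#   async with grpc.aio.insecure_channel('localhost:50051') as channel:
#       stub = helloworld_pb2_grpc.GreeterStub(channel)
#       reply = await stub.SayHello(helloworld_pb2.HelloRequest(name='you'))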
async def serve() -> None:
server = grpc.aio.server()
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
listen_addr = '[::]:50051'
server.add_insecure_port(listen_addr)
logging.info("Starting server on %s", listen_addr)
await server.start()
try:
await server.wait_for_termination()
except KeyboardInterrupt:
        # Shut down the server with 0 seconds of grace. During a non-zero
        # grace period the server stops accepting new connections but lets
        # in-flight RPCs finish before terminating.
await server.stop(0)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.run(serve())
| 34.215686
| 78
| 0.731805
|
import logging
import asyncio
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
class Greeter(helloworld_pb2_grpc.GreeterServicer):
async def SayHello(
self, request: helloworld_pb2.HelloRequest,
context: grpc.aio.ServicerContext) -> helloworld_pb2.HelloReply:
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
async def serve() -> None:
server = grpc.aio.server()
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
listen_addr = '[::]:50051'
server.add_insecure_port(listen_addr)
logging.info("Starting server on %s", listen_addr)
await server.start()
try:
await server.wait_for_termination()
except KeyboardInterrupt:
# existing RPCs to continue within the grace period.
await server.stop(0)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
asyncio.run(serve())
| true
| true
|
790256d6575ab3621056f87be81000386bbba168
| 2,795
|
py
|
Python
|
gym_multigrid/envs/collect_game.py
|
Nikunj-Gupta/gym-multigrid
|
4fabeaf339eb93fa5f614b312a6ac4c961cc303e
|
[
"Apache-2.0"
] | 95
|
2020-04-01T15:59:31.000Z
|
2022-03-27T05:17:22.000Z
|
gym_multigrid/envs/collect_game.py
|
Nikunj-Gupta/gym-multigrid
|
4fabeaf339eb93fa5f614b312a6ac4c961cc303e
|
[
"Apache-2.0"
] | 2
|
2020-07-28T13:56:00.000Z
|
2021-03-25T23:35:48.000Z
|
gym_multigrid/envs/collect_game.py
|
Nikunj-Gupta/gym-multigrid
|
4fabeaf339eb93fa5f614b312a6ac4c961cc303e
|
[
"Apache-2.0"
] | 30
|
2020-04-17T15:15:07.000Z
|
2022-03-17T14:49:19.000Z
|
from gym_multigrid.multigrid import *
class CollectGameEnv(MultiGridEnv):
"""
Environment in which the agents have to collect the balls
"""
def __init__(
self,
size=10,
width=None,
height=None,
num_balls=[],
agents_index = [],
balls_index=[],
balls_reward=[],
zero_sum = False,
view_size=7
):
self.num_balls = num_balls
self.balls_index = balls_index
self.balls_reward = balls_reward
self.zero_sum = zero_sum
self.world = World
agents = []
for i in agents_index:
agents.append(Agent(self.world, i, view_size=view_size))
super().__init__(
grid_size=size,
width=width,
height=height,
            max_steps=10000,
# Set this to True for maximum speed
see_through_walls=False,
agents=agents,
agent_view_size=view_size
)
def _gen_grid(self, width, height):
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(self.world, 0, 0)
self.grid.horz_wall(self.world, 0, height-1)
self.grid.vert_wall(self.world, 0, 0)
self.grid.vert_wall(self.world, width-1, 0)
for number, index, reward in zip(self.num_balls, self.balls_index, self.balls_reward):
for i in range(number):
self.place_obj(Ball(self.world, index, reward))
# Randomize the player start position and orientation
for a in self.agents:
self.place_agent(a)
def _reward(self, i, rewards, reward=1):
"""
Compute the reward to be given upon success
"""
for j,a in enumerate(self.agents):
if a.index==i or a.index==0:
rewards[j]+=reward
if self.zero_sum:
if a.index!=i or a.index==0:
rewards[j] -= reward
def _handle_pickup(self, i, rewards, fwd_pos, fwd_cell):
if fwd_cell:
if fwd_cell.can_pickup():
if fwd_cell.index in [0, self.agents[i].index]:
fwd_cell.cur_pos = np.array([-1, -1])
self.grid.set(*fwd_pos, None)
self._reward(i, rewards, fwd_cell.reward)
def _handle_drop(self, i, rewards, fwd_pos, fwd_cell):
pass
def step(self, actions):
obs, rewards, done, info = MultiGridEnv.step(self, actions)
return obs, rewards, done, info
class CollectGame4HEnv10x10N2(CollectGameEnv):
def __init__(self):
super().__init__(size=10,
num_balls=[5],
agents_index = [1,2,3],
balls_index=[0],
balls_reward=[1],
zero_sum=True)
| 28.232323
| 94
| 0.562433
|
from gym_multigrid.multigrid import *
class CollectGameEnv(MultiGridEnv):
def __init__(
self,
size=10,
width=None,
height=None,
num_balls=[],
agents_index = [],
balls_index=[],
balls_reward=[],
zero_sum = False,
view_size=7
):
self.num_balls = num_balls
self.balls_index = balls_index
self.balls_reward = balls_reward
self.zero_sum = zero_sum
self.world = World
agents = []
for i in agents_index:
agents.append(Agent(self.world, i, view_size=view_size))
super().__init__(
grid_size=size,
width=width,
height=height,
            max_steps=10000,
see_through_walls=False,
agents=agents,
agent_view_size=view_size
)
def _gen_grid(self, width, height):
self.grid = Grid(width, height)
self.grid.horz_wall(self.world, 0, 0)
self.grid.horz_wall(self.world, 0, height-1)
self.grid.vert_wall(self.world, 0, 0)
self.grid.vert_wall(self.world, width-1, 0)
for number, index, reward in zip(self.num_balls, self.balls_index, self.balls_reward):
for i in range(number):
self.place_obj(Ball(self.world, index, reward))
for a in self.agents:
self.place_agent(a)
def _reward(self, i, rewards, reward=1):
for j,a in enumerate(self.agents):
if a.index==i or a.index==0:
rewards[j]+=reward
if self.zero_sum:
if a.index!=i or a.index==0:
rewards[j] -= reward
def _handle_pickup(self, i, rewards, fwd_pos, fwd_cell):
if fwd_cell:
if fwd_cell.can_pickup():
if fwd_cell.index in [0, self.agents[i].index]:
fwd_cell.cur_pos = np.array([-1, -1])
self.grid.set(*fwd_pos, None)
self._reward(i, rewards, fwd_cell.reward)
def _handle_drop(self, i, rewards, fwd_pos, fwd_cell):
pass
def step(self, actions):
obs, rewards, done, info = MultiGridEnv.step(self, actions)
return obs, rewards, done, info
class CollectGame4HEnv10x10N2(CollectGameEnv):
def __init__(self):
super().__init__(size=10,
num_balls=[5],
agents_index = [1,2,3],
balls_index=[0],
balls_reward=[1],
zero_sum=True)
| true
| true
|
790256fa3652e1957e6a614ecbebd0beda2d6df5
| 11,041
|
py
|
Python
|
preprocessing/make_dataset_new.py
|
ziqi123/AutoParking
|
bc2c86fe93892c0502cc7cf689d8ec072d2974d1
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/make_dataset_new.py
|
ziqi123/AutoParking
|
bc2c86fe93892c0502cc7cf689d8ec072d2974d1
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/make_dataset_new.py
|
ziqi123/AutoParking
|
bc2c86fe93892c0502cc7cf689d8ec072d2974d1
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os
import cv2
from PIL import Image
import numpy as np
import random
import itertools
import matplotlib.pyplot as plt  # plt is used to display images
from tqdm import tqdm
# Annotation file data processing
def read_pslot(annt_file):
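    # Parse one annotation file: keep 'line' records with 13 fields whose
    # type flag is '0' or '1' (skip '3') and collect their eight corner coords.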
# print(annt_file)
with open(annt_file, "r") as f:
annt = f.readlines()
# print("annt", annt)
l = []
l_ign = []
for line in annt:
line_annt = line.strip('\n').split(' ')
# print(line_annt)
if len(line_annt) != 13 or line_annt[0] != 'line' or line_annt[-4] == '3':
continue
if line_annt[-4] in ['0', '1']:
l.append(np.array([int(line_annt[i + 1]) for i in range(8)]))
# continue
# if line_annt[-4] in ['1', '5']:
# l_ign.append(np.array([int(line_annt[i + 1]) for i in range(8)]))
# continue
return l, l_ign
# Draw labeled points
def colorize(points_list, img, save_path, item, line, point_color):
save_path = os.path.join(save_path, str(
item.strip('.jpg'))+"_"+str(line)+".jpg")
img2 = img.copy()
# print(save_path)
# points_list = 384 * np.abs(np.array(outputs[0], dtype=np.float))
point_size = 1
    thickness = 4  # can be 0, 4, or 8
for i in range(4):
cv2.circle(img2, (int(points_list[i][0]), int(points_list[i][1])),
point_size, point_color, thickness)
# print(save_path)
cv2.imwrite(save_path, img2)
# Draw the slot outline
def paint_line(img, dst, cropimg_path, num):
img2 = img.copy()
cv2.line(img2, (int(dst[0][0]), int(dst[0][1])), (int(
dst[1][0]), int(dst[1][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[1][0]), int(dst[1][1])), (int(
dst[2][0]), int(dst[2][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[2][0]), int(dst[2][1])), (int(
dst[3][0]), int(dst[3][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[3][0]), int(dst[3][1])), (int(
dst[0][0]), int(dst[0][1])), (255, 0, 0), 5)
cropimg_path1 = os.path.join(
cropimg_path, i.strip('.jpg')+'_'+str(num)+'.jpg')
cv2.imwrite(cropimg_path1, img2)
def Crop_pic(ps, img_path, cropimg_path, perspective_path, txt_file, i, trans_path, save_path1, save_path2):
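    # For each labeled slot: jitter four target points, fit the affine warp
    # between them and the slot corners, crop/warp the slot to 384x384, then
    # save the warped image, the warped corner points and the inverse warp.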
# single pic
img = cv2.imread(img_path)
perspective3 = np.float32([[0, 0], [383, 0], [383, 383], [0, 383]])
perspective3_ = np.float32([[0, 0], [383, 0], [383, 383]])
num = 0
for line in ps:
num = num + 1
        # Randomly generate 4 coordinates
arr0 = random.randint(80, 120)
arr1 = random.randint(80, 120)
arr2 = random.randint(263, 303)
arr3 = random.randint(80, 120)
arr4 = random.randint(263, 303)
arr5 = random.randint(263, 303)
arr6 = random.randint(80, 120)
arr7 = random.randint(263, 303)
perspective0 = np.float32([[line[0], line[1]], [line[2], line[3]], [
line[4], line[5]], [line[6], line[7]]])
perspective0_ = np.float32([[line[0], line[1]], [line[2], line[3]], [
line[4], line[5]]])
colorize(perspective0, img, save_path1, i, num, (0, 255, 0))
perspective1 = np.float32(
[[arr0, arr1], [arr2, arr3], [arr4, arr5], [arr6, arr7]])
perspective1_ = np.float32(
[[arr0, arr1], [arr2, arr3], [arr4, arr5]])
        # Compute the inverse transform matrix
# trans_inv = cv2.getPerspectiveTransform(perspective1, perspective0)
trans_inv = cv2.getAffineTransform(perspective1_, perspective0_)
        # Compute the point coordinates after the inverse transform
dst = []
# mat = np.array(
# [[[0, 0], [383, 0], [383, 383], [0, 383]]], dtype=np.float32)
mat = np.array(
[[0, 0, 1], [383, 0, 1], [383, 383, 1], [0, 383, 1]], dtype=np.float32)
mat = mat.transpose()
# dst = cv2.perspectiveTransform(mat, trans_inv)
dst = np.dot(trans_inv, mat)
dst = dst.transpose()
        # Draw the warped slot outline
paint_line(img, dst, cropimg_path, num)
        # Warp the parking slot to obtain its image at 384*384 resolution
# perspective2 = np.float32([[dst[0][0][0], dst[0][0][1]], [dst[0][1][0], dst[0][1][1]], [
# dst[0][2][0], dst[0][2][1]], [dst[0][3][0], dst[0][3][1]]])
perspective2_ = np.float32([[dst[0][0], dst[0][1]], [dst[1][0], dst[1][1]], [
dst[2][0], dst[2][1]]])
# trans = cv2.getPerspectiveTransform(perspective2, perspective3)
# dst2 = cv2.warpPerspective(img, trans, (384, 384))
trans = cv2.getAffineTransform(perspective2_, perspective3_)
dst2 = cv2.warpAffine(img, trans, (384, 384))
        # Save the coordinates of the original four inner corner points on the 384*384 image
# mat2 = np.array([[[line[0], line[1]], [line[2], line[3]], [
# line[4], line[5]], [line[6], line[7]]]], dtype=np.float32)
mat2 = np.array([[line[0], line[1], 1], [line[2], line[3], 1], [
line[4], line[5], 1], [line[6], line[7], 1]], dtype=np.float32)
mat2 = mat2.transpose()
point = np.dot(trans, mat2)
point = point.transpose()
# point = cv2.perspectiveTransform(mat2, trans)
# point = np.dot(mat2, trans)
perspective_path1 = os.path.join(
perspective_path, i.strip('.jpg')+'_'+str(num)+'.jpg')
# print(perspective_path)
cv2.imwrite(perspective_path1, dst2)
colorize(point, dst2, save_path2, i, num, (0, 255, 0))
        # Record the four corner coordinates
txt_file1 = os.path.join(
txt_file, i.strip('.jpg')+'_'+str(num)+'_OA.txt')
with open(txt_file1, "w") as f:
for j in range(4):
f.write(str(point[j][0]))
f.write(' ')
f.write(str(point[j][1]))
f.write('\n')
        # Record the transform matrix
trans_path1 = os.path.join(
trans_path, i.strip('.jpg')+'_'+str(num)+'.txt')
with open(trans_path1, "w") as ff:
for j in range(2):
for k in range(3):
ff.write(str(trans_inv[j][k]))
ff.write(" ")
# Compute the error between the predicted and ground-truth positions of the four points
def get_acc(y, y_hat, dis):
    total = 0
for i in range(4):
total += ((y[i][0]-y_hat[i][0])**2 + (y[i][1]-y_hat[i][1])**2)**0.5
total /= 4
if total < dis:
return 1
else:
return 0
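# e.g., with illustrative values: if every predicted corner lies 5 px from its
# ground-truth corner, get_acc(y, y_hat, 6) == 1 and get_acc(y, y_hat, 4) == 0.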
def output_pic(img_path, output_path, trans_path, fina_path, ps2, pix, point_path):
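    # Map predicted and ground-truth points back to the source image with the
    # saved inverse transform, draw both (green = GT, red = prediction), and
    # score the result with get_acc.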
img_pred = cv2.imread(img_path)
point_pred = []
trans_inv = []
point_pred = np.loadtxt(output_path)
point_pred = 384*np.expand_dims(point_pred, axis=0)
trans_inv = np.loadtxt(trans_path)
trans_inv = trans_inv.reshape(3, 3)
trans_inv = np.mat(trans_inv)
point_ground = np.loadtxt(point_path)
point_ground = np.expand_dims(point_ground, axis=0)
point_ground2 = cv2.perspectiveTransform(point_ground, trans_inv)
point_size = 1
thickness = 4
for i in range(4):
cv2.circle(img_pred, (int(point_ground2[0][i][0]), int(point_ground2[0][i][1])),
point_size, (0, 255, 0), thickness)
cv2.imwrite(fina_path, img_pred)
point_pred2 = cv2.perspectiveTransform(point_pred, trans_inv)
    # red
point_color = (0, 0, 255)
point_color2 = (0, 255, 0)
for i in range(4):
cv2.circle(img_pred, (int(point_pred2[0][i][0]), int(point_pred2[0][i][1])),
point_size, point_color, thickness)
cv2.imwrite(fina_path, img_pred)
point_pred3 = point_pred2[0]
ps2 = ps2[0].reshape(4, 2)
tmp = get_acc(point_pred3, point_ground2[0], pix)
return tmp
# Accuracy
def output(pix):
accuracy = 0
for i in os.listdir(test_dir):
output_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/CNN/output", i.strip('.jpg')+'.txt')
img_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/img", i)
trans_inv = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/trans_inv", i.strip('.jpg')+'.txt')
fina_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/fina", i)
annt_path2 = os.path.join(
'./Ps_locate_dataset/annt', i.strip('.jpg')+'_OA.txt')
point_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/point", i.strip('.jpg')+'_OA.txt')
# print(fina_path)
ps2, _ = read_pslot(annt_path2)
tmp = output_pic(img_path, output_path,
trans_inv, fina_path, ps2, pix, point_path)
accuracy += tmp
return accuracy
if __name__ == "__main__":
data_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/pic'
label_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/annotation'
crop_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/crop_img'
perspective_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/perspective_img'
txt_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/point'
cnt = 0
f1 = open(
"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/train_list.txt", "w")
# f2 = open("./Ps_locate_dataset/val_list.txt", "w")
test_dir = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/test_img"
trans_path = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/trans_inv"
save_path1 = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/src_img"
save_path2 = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/perspective2_img"
pbar = tqdm(total=len(os.listdir(data_dir)))
for i in os.listdir(data_dir):
# print(i)
annt_file = os.path.join(label_dir, i.strip('.jpg')+'_OA.txt')
img_path = os.path.join(data_dir, i)
ps, _ = read_pslot(annt_file)
Crop_pic(ps, img_path, crop_dir,
perspective_dir, txt_dir, i, trans_path, save_path1, save_path2)
pbar.update(1)
pbar.close()
# acc = []
# for k in range(31):
# print("k", k)
# x1 = output(k)
# x1 = 100 * x1 / 743
# acc.append(x1)
# x1 = round(x1, 3)
# print(acc)
# print(len(acc))
    # # Set the figure size
    # plt.figure(figsize=(30, 15))
    # # Title
    # plt.title("accuracy distribution")
    # # Data
    # plt.bar(range(len(acc)), acc)
    # # X-axis label
    # plt.xlabel('pixel')
    # # Y-axis label
    # plt.ylabel('accuracy')
    # # # Set numeric labels
# # for a, b in zip(x, acc):
# # plt.text(a, b, b, ha='center', va='bottom', fontsize=10)
# plt.savefig(
# "/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/accuracy.png")
    # Save the filenames of the training data
filenames = os.listdir(perspective_dir)
filenames.sort()
print(filenames[0])
for i in os.listdir(perspective_dir):
perspective_path = os.path.join(perspective_dir, i)
f1.write(perspective_path)
f1.write('\n')
f1.close()
| 33.156156
| 116
| 0.580926
|
import numpy as np
import os
import cv2
from PIL import Image
import numpy as np
import random
import itertools
import matplotlib.pyplot as plt
from tqdm import tqdm
def read_pslot(annt_file):
with open(annt_file, "r") as f:
annt = f.readlines()
l = []
l_ign = []
for line in annt:
line_annt = line.strip('\n').split(' ')
if len(line_annt) != 13 or line_annt[0] != 'line' or line_annt[-4] == '3':
continue
if line_annt[-4] in ['0', '1']:
l.append(np.array([int(line_annt[i + 1]) for i in range(8)]))
return l, l_ign
def colorize(points_list, img, save_path, item, line, point_color):
save_path = os.path.join(save_path, str(
item.strip('.jpg'))+"_"+str(line)+".jpg")
img2 = img.copy()
point_size = 1
thickness = 4
for i in range(4):
cv2.circle(img2, (int(points_list[i][0]), int(points_list[i][1])),
point_size, point_color, thickness)
cv2.imwrite(save_path, img2)
def paint_line(img, dst, cropimg_path, num):
img2 = img.copy()
cv2.line(img2, (int(dst[0][0]), int(dst[0][1])), (int(
dst[1][0]), int(dst[1][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[1][0]), int(dst[1][1])), (int(
dst[2][0]), int(dst[2][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[2][0]), int(dst[2][1])), (int(
dst[3][0]), int(dst[3][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[3][0]), int(dst[3][1])), (int(
dst[0][0]), int(dst[0][1])), (255, 0, 0), 5)
cropimg_path1 = os.path.join(
cropimg_path, i.strip('.jpg')+'_'+str(num)+'.jpg')
cv2.imwrite(cropimg_path1, img2)
def Crop_pic(ps, img_path, cropimg_path, perspective_path, txt_file, i, trans_path, save_path1, save_path2):
img = cv2.imread(img_path)
perspective3 = np.float32([[0, 0], [383, 0], [383, 383], [0, 383]])
perspective3_ = np.float32([[0, 0], [383, 0], [383, 383]])
num = 0
for line in ps:
num = num + 1
arr0 = random.randint(80, 120)
arr1 = random.randint(80, 120)
arr2 = random.randint(263, 303)
arr3 = random.randint(80, 120)
arr4 = random.randint(263, 303)
arr5 = random.randint(263, 303)
arr6 = random.randint(80, 120)
arr7 = random.randint(263, 303)
perspective0 = np.float32([[line[0], line[1]], [line[2], line[3]], [
line[4], line[5]], [line[6], line[7]]])
perspective0_ = np.float32([[line[0], line[1]], [line[2], line[3]], [
line[4], line[5]]])
colorize(perspective0, img, save_path1, i, num, (0, 255, 0))
perspective1 = np.float32(
[[arr0, arr1], [arr2, arr3], [arr4, arr5], [arr6, arr7]])
perspective1_ = np.float32(
[[arr0, arr1], [arr2, arr3], [arr4, arr5]])
trans_inv = cv2.getAffineTransform(perspective1_, perspective0_)
dst = []
mat = np.array(
[[0, 0, 1], [383, 0, 1], [383, 383, 1], [0, 383, 1]], dtype=np.float32)
mat = mat.transpose()
dst = np.dot(trans_inv, mat)
dst = dst.transpose()
paint_line(img, dst, cropimg_path, num)
perspective2_ = np.float32([[dst[0][0], dst[0][1]], [dst[1][0], dst[1][1]], [
dst[2][0], dst[2][1]]])
trans = cv2.getAffineTransform(perspective2_, perspective3_)
dst2 = cv2.warpAffine(img, trans, (384, 384))
mat2 = np.array([[line[0], line[1], 1], [line[2], line[3], 1], [
line[4], line[5], 1], [line[6], line[7], 1]], dtype=np.float32)
mat2 = mat2.transpose()
point = np.dot(trans, mat2)
point = point.transpose()
perspective_path1 = os.path.join(
perspective_path, i.strip('.jpg')+'_'+str(num)+'.jpg')
cv2.imwrite(perspective_path1, dst2)
colorize(point, dst2, save_path2, i, num, (0, 255, 0))
txt_file1 = os.path.join(
txt_file, i.strip('.jpg')+'_'+str(num)+'_OA.txt')
with open(txt_file1, "w") as f:
for j in range(4):
f.write(str(point[j][0]))
f.write(' ')
f.write(str(point[j][1]))
f.write('\n')
trans_path1 = os.path.join(
trans_path, i.strip('.jpg')+'_'+str(num)+'.txt')
with open(trans_path1, "w") as ff:
for j in range(2):
for k in range(3):
ff.write(str(trans_inv[j][k]))
ff.write(" ")
def get_acc(y, y_hat, dis):
    total = 0
for i in range(4):
total += ((y[i][0]-y_hat[i][0])**2 + (y[i][1]-y_hat[i][1])**2)**0.5
total /= 4
if total < dis:
return 1
else:
return 0
def output_pic(img_path, output_path, trans_path, fina_path, ps2, pix, point_path):
img_pred = cv2.imread(img_path)
point_pred = []
trans_inv = []
point_pred = np.loadtxt(output_path)
point_pred = 384*np.expand_dims(point_pred, axis=0)
trans_inv = np.loadtxt(trans_path)
trans_inv = trans_inv.reshape(3, 3)
trans_inv = np.mat(trans_inv)
point_ground = np.loadtxt(point_path)
point_ground = np.expand_dims(point_ground, axis=0)
point_ground2 = cv2.perspectiveTransform(point_ground, trans_inv)
point_size = 1
thickness = 4
for i in range(4):
cv2.circle(img_pred, (int(point_ground2[0][i][0]), int(point_ground2[0][i][1])),
point_size, (0, 255, 0), thickness)
cv2.imwrite(fina_path, img_pred)
point_pred2 = cv2.perspectiveTransform(point_pred, trans_inv)
point_color = (0, 0, 255)
point_color2 = (0, 255, 0)
for i in range(4):
cv2.circle(img_pred, (int(point_pred2[0][i][0]), int(point_pred2[0][i][1])),
point_size, point_color, thickness)
cv2.imwrite(fina_path, img_pred)
point_pred3 = point_pred2[0]
ps2 = ps2[0].reshape(4, 2)
tmp = get_acc(point_pred3, point_ground2[0], pix)
return tmp
def output(pix):
accuracy = 0
for i in os.listdir(test_dir):
output_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/CNN/output", i.strip('.jpg')+'.txt')
img_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/img", i)
trans_inv = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/trans_inv", i.strip('.jpg')+'.txt')
fina_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/fina", i)
annt_path2 = os.path.join(
'./Ps_locate_dataset/annt', i.strip('.jpg')+'_OA.txt')
point_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/point", i.strip('.jpg')+'_OA.txt')
ps2, _ = read_pslot(annt_path2)
tmp = output_pic(img_path, output_path,
trans_inv, fina_path, ps2, pix, point_path)
accuracy += tmp
return accuracy
if __name__ == "__main__":
data_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/pic'
label_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/annotation'
crop_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/crop_img'
perspective_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/perspective_img'
txt_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/point'
cnt = 0
f1 = open(
"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/train_list.txt", "w")
test_dir = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/test_img"
trans_path = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/trans_inv"
save_path1 = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/src_img"
save_path2 = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/perspective2_img"
pbar = tqdm(total=len(os.listdir(data_dir)))
for i in os.listdir(data_dir):
annt_file = os.path.join(label_dir, i.strip('.jpg')+'_OA.txt')
img_path = os.path.join(data_dir, i)
ps, _ = read_pslot(annt_file)
Crop_pic(ps, img_path, crop_dir,
perspective_dir, txt_dir, i, trans_path, save_path1, save_path2)
pbar.update(1)
pbar.close()
    filenames = os.listdir(perspective_dir)
    filenames.sort()
    print(filenames[0])
for i in os.listdir(perspective_dir):
perspective_path = os.path.join(perspective_dir, i)
f1.write(perspective_path)
f1.write('\n')
f1.close()
| true
| true
|
790258ec7df84e57acc4bee76d98d0e80bce2354
| 8,895
|
py
|
Python
|
products_and_services_client/api/invoice_financings_api.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | 2
|
2021-02-07T23:58:36.000Z
|
2021-02-08T01:03:25.000Z
|
products_and_services_client/api/invoice_financings_api.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | null | null | null |
products_and_services_client/api/invoice_financings_api.py
|
pitzer42/opbk-br-quickstart
|
b3f86b2e5f82a6090aaefb563614e174a452383c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
    Open Banking Brasil OpenData APIs
    The APIs described in this document are the APIs of the OpenData phase of Open Banking Brasil.  # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from products_and_services_client.api_client import ApiClient
class InvoiceFinancingsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_business_invoice_financings(self, **kwargs): # noqa: E501
"""Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_business_invoice_financings(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponseBusinessInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_business_invoice_financings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_business_invoice_financings_with_http_info(**kwargs) # noqa: E501
return data
def get_business_invoice_financings_with_http_info(self, **kwargs): # noqa: E501
"""Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Jurídica. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_business_invoice_financings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the value of the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponseBusinessInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_business_invoice_financings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page-size', params['page_size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/business-invoice-financings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseBusinessInvoiceFinancings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_personal_invoice_financings(self, **kwargs): # noqa: E501
"""Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_personal_invoice_financings(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the value of the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponsePersonalInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_personal_invoice_financings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_personal_invoice_financings_with_http_info(**kwargs) # noqa: E501
return data
def get_personal_invoice_financings_with_http_info(self, **kwargs): # noqa: E501
"""Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
Obtém a lista de Adiantamento de Recebíveis de Pessoa Natural. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_personal_invoice_financings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the value of the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponsePersonalInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_personal_invoice_financings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page-size', params['page_size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/personal-invoice-financings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponsePersonalInvoiceFinancings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
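# Example usage (illustrative sketch, not emitted by swagger-codegen):
#
#     api = InvoiceFinancingsApi()
#     business = api.get_business_invoice_financings(page=1, page_size=25)
#     personal = api.get_personal_invoice_financings(page=1, page_size=25)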
| 39.709821
| 117
| 0.636875
|
from __future__ import absolute_import
import re
import six
from products_and_services_client.api_client import ApiClient
class InvoiceFinancingsApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_business_invoice_financings(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_business_invoice_financings_with_http_info(**kwargs)
else:
(data) = self.get_business_invoice_financings_with_http_info(**kwargs)
return data
def get_business_invoice_financings_with_http_info(self, **kwargs):
all_params = ['page', 'page_size']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_business_invoice_financings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page']))
if 'page_size' in params:
query_params.append(('page-size', params['page_size']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/business-invoice-financings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseBusinessInvoiceFinancings',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_personal_invoice_financings(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_personal_invoice_financings_with_http_info(**kwargs)
else:
(data) = self.get_personal_invoice_financings_with_http_info(**kwargs)
return data
def get_personal_invoice_financings_with_http_info(self, **kwargs):
all_params = ['page', 'page_size']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_personal_invoice_financings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page']))
if 'page_size' in params:
query_params.append(('page-size', params['page_size']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/personal-invoice-financings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponsePersonalInvoiceFinancings',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true
| true
|
790259649967d8ddd4f5c68d708457d5ec448fed
| 501
|
py
|
Python
|
setup.py
|
wallarelvo/rover
|
004d150107932a72b99a4c7421e5ea94678355ed
|
[
"Apache-2.0"
] | 1
|
2015-03-12T16:52:01.000Z
|
2015-03-12T16:52:01.000Z
|
setup.py
|
wallarelvo/rover
|
004d150107932a72b99a4c7421e5ea94678355ed
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
wallarelvo/rover
|
004d150107932a72b99a4c7421e5ea94678355ed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
from setuptools import setup
from setuptools import find_packages
setup(
name="rover",
version="0.1",
description="Algorithm for risk and sensor quality aware sensor" +
"coverage for quadrotors",
author="Alex Wallar",
author_email="wallarelvo@gmail.com",
packages=find_packages(),
install_requires=[
"numpy",
"scipy"
],
data_files=[
(
'config',
['configs/config.json'],
)
]
)
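# Typical installation commands (illustrative, not part of the original file):
#   pip install .      # regular install
#   pip install -e .   # editable/development install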
| 20.04
| 70
| 0.59481
|
from setuptools import setup
from setuptools import find_packages
setup(
name="rover",
version="0.1",
description="Algorithm for risk and sensor quality aware sensor" +
"coverage for quadrotors",
author="Alex Wallar",
author_email="wallarelvo@gmail.com",
packages=find_packages(),
install_requires=[
"numpy",
"scipy"
],
data_files=[
(
'config',
['configs/config.json'],
)
]
)
| true
| true
|
79025a1a17fd5280bcb34dcda3cfb3739e221c57
| 756
|
py
|
Python
|
LeetCode/953. Verifying an Alien Dictionary.py
|
QinganZhao/LXXtCode
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
[
"MIT"
] | 3
|
2019-01-08T02:53:44.000Z
|
2021-07-26T07:03:27.000Z
|
LeetCode/953. Verifying an Alien Dictionary.py
|
QinganZhao/LXXtCode
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
[
"MIT"
] | null | null | null |
LeetCode/953. Verifying an Alien Dictionary.py
|
QinganZhao/LXXtCode
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
[
"MIT"
] | null | null | null |
from typing import List


class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
if len(words) <= 1:
return True
self.dic = {}
for i, char in enumerate(order):
self.dic[char] = i
for i in range(1, len(words)):
if self.cmp(words[i], words[i-1]) == -1:
return False
return True
def cmp(self, word1, word2):
for i in range(min(len(word1), len(word2))):
if self.dic[word1[i]] > self.dic[word2[i]]:
return 1
if self.dic[word1[i]] < self.dic[word2[i]]:
return -1
if len(word1) > len(word2):
return 1
if len(word1) < len(word2):
return -1
return 0
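# Quick local check (hypothetical driver; LeetCode normally supplies the harness):
#
#     s = Solution()
#     assert s.isAlienSorted(["hello", "leetcode"], "hlabcdefgijkmnopqrstuvwxyz")
#     assert not s.isAlienSorted(["word", "world", "row"], "worldabcefghijkmnpqstuvxyz")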
| 31.5
| 66
| 0.478836
|
from typing import List


class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
if len(words) <= 1:
return True
self.dic = {}
for i, char in enumerate(order):
self.dic[char] = i
for i in range(1, len(words)):
if self.cmp(words[i], words[i-1]) == -1:
return False
return True
def cmp(self, word1, word2):
for i in range(min(len(word1), len(word2))):
if self.dic[word1[i]] > self.dic[word2[i]]:
return 1
if self.dic[word1[i]] < self.dic[word2[i]]:
return -1
if len(word1) > len(word2):
return 1
if len(word1) < len(word2):
return -1
return 0
| true
| true
|
79025a8644243c33198fb578fdfeea7a5eead1c0
| 1,208
|
py
|
Python
|
pymcuprog/deviceinfo/devices/pic16f1768.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 28
|
2021-05-08T19:28:33.000Z
|
2022-03-23T06:23:13.000Z
|
pymcuprog/deviceinfo/devices/pic16f1768.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 20
|
2021-05-24T19:20:39.000Z
|
2022-03-12T20:10:30.000Z
|
pymcuprog/deviceinfo/devices/pic16f1768.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 11
|
2021-06-24T20:59:16.000Z
|
2022-03-23T23:59:38.000Z
|
"""
Required device info for the PIC16F1768 devices
"""
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
DEVICE_INFO = {
'name': 'pic16f1768',
'architecture': 'PIC16',
# Will erase Flash, User ID and Config words
'default_bulk_erase_address_word': 0x8000,
# Flash
'flash_address_word': 0,
'flash_size_words': 4*1024, # 4KW
'flash_page_size_words': 32,
'flash_write_size_words': 1,
'flash_read_size_words': 1,
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'flash_isolated_erase': False,
# User ID
'user_id_address_word': 0x8000,
'user_id_size_words': 4,
'user_id_page_size_words': 1,
'user_id_write_size_words': 1,
'user_id_read_size_words': 1,
'user_id_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'user_id_isolated_erase': False,
# Config words
'config_words_address_word': 0x8007,
'config_words_size_words': 2,
'config_words_page_size_words': 1,
'config_words_write_size_words': 1,
'config_words_read_size_words': 1,
'config_words_erase_address_word': 0,
'config_words_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'config_words_isolated_erase': False,
}
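# Note (added for clarity, not in the original file): PIC16 flash sizes are
# expressed in 14-bit program words, so the 4K-word flash above corresponds
# to 4096 instructions (7 KiB of raw program data).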
| 30.2
| 67
| 0.725993
|
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
DEVICE_INFO = {
'name': 'pic16f1768',
'architecture': 'PIC16',
'default_bulk_erase_address_word': 0x8000,
'flash_address_word': 0,
'flash_size_words': 4*1024,
'flash_page_size_words': 32,
'flash_write_size_words': 1,
'flash_read_size_words': 1,
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'flash_isolated_erase': False,
'user_id_address_word': 0x8000,
'user_id_size_words': 4,
'user_id_page_size_words': 1,
'user_id_write_size_words': 1,
'user_id_read_size_words': 1,
'user_id_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'user_id_isolated_erase': False,
'config_words_address_word': 0x8007,
'config_words_size_words': 2,
'config_words_page_size_words': 1,
'config_words_write_size_words': 1,
'config_words_read_size_words': 1,
'config_words_erase_address_word': 0,
'config_words_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'config_words_isolated_erase': False,
}
| true
| true
|
79025b40e4ba467028712a47e95bf96742a70417
| 1,343
|
py
|
Python
|
examples/slash_commands.py
|
Astrea49/enhanced-discord.py
|
b92fc324c8bf426a7cba68372e1193d21093ff87
|
[
"MIT"
] | 1,126
|
2021-08-28T12:09:26.000Z
|
2022-03-31T16:37:54.000Z
|
examples/slash_commands.py
|
Sherlock-FH/enhanced-discord.py
|
3a4a6a51b15edf890f697377920651e24c417ea5
|
[
"MIT"
] | 89
|
2021-08-28T14:46:11.000Z
|
2022-03-04T11:19:11.000Z
|
examples/slash_commands.py
|
Sherlock-FH/enhanced-discord.py
|
3a4a6a51b15edf890f697377920651e24c417ea5
|
[
"MIT"
] | 111
|
2021-08-28T02:04:22.000Z
|
2022-03-05T17:48:31.000Z
|
import discord
from discord.ext import commands
# Set slash commands=True when constructing your bot to enable all slash commands
# if your bot is only for a couple of servers, you can use the parameter
# `slash_command_guilds=[list, of, guild, ids]` to specify this,
# then the commands will be much faster to upload.
bot = commands.Bot("!", intents=discord.Intents(guilds=True, messages=True), slash_commands=True)
@bot.event
async def on_ready():
print(f"Logged in as {bot.user} (ID: {bot.user.id})")
print("------")
@bot.command()
# You can use commands.Option to define descriptions for your options, and converters will still work fine.
async def ping(
ctx: commands.Context, emoji: bool = commands.Option(description="whether to use an emoji when responding")
):
# This command can be used with slash commands or message commands
if emoji:
await ctx.send("\U0001f3d3")
else:
await ctx.send("Pong!")
@bot.command(message_command=False)
async def only_slash(ctx: commands.Context):
# This command can only be used with slash commands
await ctx.send("Hello from slash commands!")
@bot.command(slash_command=False)
async def only_message(ctx: commands.Context):
# This command can only be used with a message
await ctx.send("Hello from message commands!")
bot.run("token")
| 31.97619
| 111
| 0.724497
|
import discord
from discord.ext import commands
bot = commands.Bot("!", intents=discord.Intents(guilds=True, messages=True), slash_commands=True)
@bot.event
async def on_ready():
print(f"Logged in as {bot.user} (ID: {bot.user.id})")
print("------")
@bot.command()
async def ping(
ctx: commands.Context, emoji: bool = commands.Option(description="whether to use an emoji when responding")
):
if emoji:
await ctx.send("\U0001f3d3")
else:
await ctx.send("Pong!")
@bot.command(message_command=False)
async def only_slash(ctx: commands.Context):
await ctx.send("Hello from slash commands!")
@bot.command(slash_command=False)
async def only_message(ctx: commands.Context):
await ctx.send("Hello from message commands!")
bot.run("token")
| true
| true
|
79025c313021adf45529934a895fb1d10db299ec
| 5,198
|
py
|
Python
|
src/coolc.py
|
harry1911/CoolCompiler
|
0eb4636bb50341d94f757b36d2362e9d03959046
|
[
"MIT"
] | null | null | null |
src/coolc.py
|
harry1911/CoolCompiler
|
0eb4636bb50341d94f757b36d2362e9d03959046
|
[
"MIT"
] | null | null | null |
src/coolc.py
|
harry1911/CoolCompiler
|
0eb4636bb50341d94f757b36d2362e9d03959046
|
[
"MIT"
] | null | null | null |
# source ./venv/bin/activate
# ===============================================================
# =============================COOL==============================
# ===============================================================
import sys
from general import errors
# import os
# basedir = os.path.abspath(os.path.dirname(__file__))
# ===============================================================
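# Usage (inferred from the argument handling in main() below):
#   python coolc.py [-o fileout.asm] program1.cl [program2.cl ...]
# If -o is omitted, the output name is derived from the first .cl file.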
def main():
# TAKE THE INPUT
programs = sys.argv[1:]
# CHECK IF AT LEAST ONE FILE IS GIVEN
if len(programs) == 0:
errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
# CHECK IF FILEOUT IS GIVEN
if programs[0] == '-o':
if len(programs) == 1:
errors.throw_error(errors.CompilerError(text="No fileout is given to coolc compiler."))
fileout = programs[1]
if not str(fileout).endswith(".asm"):
errors.throw_error(errors.CompilerError(text="Fileout must end with .asm extension."))
if len(programs) == 2:
errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
programs = programs[2:]
else:
fileout = programs[0].split(".cl")[0] + ".asm"
# Check all programs have the *.cl extension.
for program in programs:
if not str(program).endswith(".cl"):
errors.throw_error(errors.CompilerError(text="Cool program files must end with a .cl extension."))
code = ""
# Read all program source codes.
for program in programs:
try:
with open(program, encoding="utf-8") as file:
code += file.read() + '\n'
except (IOError, FileNotFoundError):
errors.throw_error(errors.CompilerError(text=f'File "{program}" was not found.'))
except Exception:
errors.throw_error(errors.CompilerError(text="An unexpected error occurred!"))
print(f"Compiling file '{fileout}'...")
# ===============================================================
# ==================ANALISIS-LEXICOGRAFICO=======================
# ===============================================================
from lexicography.lexer_rules import CoolLex
# BUILD THE LEXER
lexer = CoolLex()
lexer.build()
# ===============================================================
# ===============================================================
# =====================ANALISIS-SINTACTICO=======================
# ===============================================================
from lexicography.grammar_rules import CoolParse
# BUILD THE PARSER
parser = CoolParse(lexer)
parser.build()
program_ast = parser.parse(code)
# ===============================================================
# ===============================================================
# ======================ANALISIS-SEMANTICO=======================
# ===============================================================
from semantic.type_collector import TypeCollectorVisitor
from semantic.type_builder import TypeBuilderVisitor
from semantic.type_checker import TypeCheckerVisitor
# from semantic.ast_types_painter import Painter
typeCollector = TypeCollectorVisitor()
typeCollector.visit(program_ast)
typeBuilder = TypeBuilderVisitor(typeCollector.enviroment)
typeBuilder.visit(program_ast)
    ## CHECK SEMANTIC ERRORS IN THE ENVIRONMENT (check_main, cycles and inheritance rules)
final_enviroment = typeBuilder.enviroment
final_enviroment.build_types_graph()
type_checker = TypeCheckerVisitor()
type_checker.visit(program_ast, typeBuilder.enviroment)
typed_ast = program_ast
# ast_painter = Painter()
# print(ast_painter.visit(typed_ast, 0))
# ===============================================================
# ===============================================================
# ========================CODE-GENERATION========================
# ===============================================================
# COOL --> CIL
from generation.cil.cil_generator import CilGeneratorVisitor
# from general.cil_hierarchy import get_formatter
cil_code_generator = CilGeneratorVisitor(typed_ast, typeBuilder.enviroment)
ast_cil = cil_code_generator.generate_code()
# cil_painter = get_formatter()
# print(cil_painter(ast_cil))
# CIL --> MIPS
from generation.mips.mips_writer import MIPSWriterVisitor
from operator import itemgetter
types_ids = typeBuilder.enviroment.types_dict
hierarchy = [0]*len(types_ids)
for _type in typeBuilder.enviroment.types_list[1:]:
hierarchy[types_ids[_type.name]] = types_ids[_type.parent]
# tag_names = sorted(types_ids.items(), key=itemgetter(1))
ast_cil.typesHierarchy = hierarchy
# ast_cil.tag_names = tag_names
mips_code_generator = MIPSWriterVisitor(ast_cil, fileout)
mips_code_generator.generate_Mips()
if __name__ == '__main__':
main()
| 34.423841
| 111
| 0.50808
|
import sys
from general import errors
def main():
programs = sys.argv[1:]
if len(programs) == 0:
errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
if programs[0] == '-o':
if len(programs) == 1:
errors.throw_error(errors.CompilerError(text="No fileout is given to coolc compiler."))
fileout = programs[1]
if not str(fileout).endswith(".asm"):
errors.throw_error(errors.CompilerError(text="Fileout must end with .asm extension."))
if len(programs) == 2:
errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
programs = programs[2:]
else:
fileout = programs[0].split(".cl")[0] + ".asm"
for program in programs:
if not str(program).endswith(".cl"):
errors.throw_error(errors.CompilerError(text="Cool program files must end with a .cl extension."))
code = ""
for program in programs:
try:
with open(program, encoding="utf-8") as file:
code += file.read() + '\n'
except (IOError, FileNotFoundError):
errors.throw_error(errors.CompilerError(text=f'File "{program}" was not found.'))
except Exception:
errors.throw_error(errors.CompilerError(text="An unexpected error occurred!"))
print(f"Compiling file '{fileout}'...")
from lexicography.lexer_rules import CoolLex
lexer = CoolLex()
lexer.build()
from lexicography.grammar_rules import CoolParse
parser = CoolParse(lexer)
parser.build()
program_ast = parser.parse(code)
from semantic.type_collector import TypeCollectorVisitor
from semantic.type_builder import TypeBuilderVisitor
from semantic.type_checker import TypeCheckerVisitor
typeCollector = TypeCollectorVisitor()
typeCollector.visit(program_ast)
typeBuilder = TypeBuilderVisitor(typeCollector.enviroment)
typeBuilder.visit(program_ast)
    final_enviroment = typeBuilder.enviroment
    final_enviroment.build_types_graph()
type_checker = TypeCheckerVisitor()
type_checker.visit(program_ast, typeBuilder.enviroment)
typed_ast = program_ast
from generation.cil.cil_generator import CilGeneratorVisitor
cil_code_generator = CilGeneratorVisitor(typed_ast, typeBuilder.enviroment)
ast_cil = cil_code_generator.generate_code()
from generation.mips.mips_writer import MIPSWriterVisitor
from operator import itemgetter
types_ids = typeBuilder.enviroment.types_dict
hierarchy = [0]*len(types_ids)
for _type in typeBuilder.enviroment.types_list[1:]:
hierarchy[types_ids[_type.name]] = types_ids[_type.parent]
ast_cil.typesHierarchy = hierarchy
mips_code_generator = MIPSWriterVisitor(ast_cil, fileout)
mips_code_generator.generate_Mips()
if __name__ == '__main__':
main()
| true
| true
|
79025e0d15530e6af1198aa86e4145c77319b67b
| 244
|
py
|
Python
|
demo/graphQLDemos/spacy/schema.py
|
jehalladay/React-Playground
|
b461917b9e608b6913d2f03b8e18f838f43927bd
|
[
"MIT"
] | null | null | null |
demo/graphQLDemos/spacy/schema.py
|
jehalladay/React-Playground
|
b461917b9e608b6913d2f03b8e18f838f43927bd
|
[
"MIT"
] | 4
|
2021-05-11T17:43:14.000Z
|
2022-02-27T06:38:05.000Z
|
demo/graphQLDemos/spacy/schema.py
|
jehalladay/React-Playground
|
b461917b9e608b6913d2f03b8e18f838f43927bd
|
[
"MIT"
] | null | null | null |
from graphene import ObjectType, String, Schema
class ExampleQuery(ObjectType):
hello = String()
    def resolve_hello(self, info):
return "Hello"
class RootQuery(ExampleQuery, ObjectType):
pass
schema = Schema(query=RootQuery)
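# Quick smoke test (illustrative, not part of the original demo):
#
#     result = schema.execute('{ hello }')
#     print(result.data)  # -> {'hello': 'Hello'}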
| 16.266667
| 47
| 0.717213
|
from graphene import ObjectType, String, Schema
class ExampleQuery(ObjectType):
hello = String()
    def resolve_hello(self, info):
return "Hello"
class RootQuery(ExampleQuery, ObjectType):
pass
schema = Schema(query=RootQuery)
| true
| true
|
79025f59da49987a19188780dfff74ad653228c4
| 6,118
|
py
|
Python
|
booster/logging/logger.py
|
vlievin/booster-pytorch
|
a8f447160c30224112731a25f90f6f97126a34b2
|
[
"MIT"
] | 4
|
2019-12-10T06:41:29.000Z
|
2021-08-06T13:34:59.000Z
|
booster/logging/logger.py
|
vlievin/booster-pytorch
|
a8f447160c30224112731a25f90f6f97126a34b2
|
[
"MIT"
] | null | null | null |
booster/logging/logger.py
|
vlievin/booster-pytorch
|
a8f447160c30224112731a25f90f6f97126a34b2
|
[
"MIT"
] | 1
|
2020-08-20T16:12:53.000Z
|
2020-08-20T16:12:53.000Z
|
import logging
import os
import sys
import warnings
from collections import namedtuple
from typing import *
import matplotlib.image
import matplotlib.pyplot as plt
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from booster import Diagnostic
from .datatracker import DataTracker
BestScore = namedtuple('BestScore', ['step', 'epoch', 'value', 'summary'])
class BaseLogger():
def __init__(self, key, logdir):
self.key = key
self.logdir = logdir
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
raise NotImplementedError
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
raise NotImplementedError
class TensorboardLogger(BaseLogger):
def __init__(self, *args, **kwargs):
super().__init__(*args)
self.writer = SummaryWriter(os.path.join(self.logdir, self.key))
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
summary.log(self.writer, global_step)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
self.writer.add_image(key, img_tensor, global_step=global_step)
class LoggingLogger(BaseLogger):
def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
super().__init__(*args)
self.logger = logging.getLogger(self.key)
# logFormatter = logging.Formatter('%(asctime)s %(name)-4s %(levelname)-4s %(message)s')
#
# fileHandler = logging.FileHandler(os.path.join(self.logdir, 'run.log'))
# fileHandler.setFormatter(logFormatter)
# self.logger.addHandler(fileHandler)
#
# consoleHandler = logging.StreamHandler(sys.stdout)
# consoleHandler.setFormatter(logFormatter)
# self.logger.addHandler(consoleHandler)
self.logger.setLevel(logging.INFO)
self.diagnostic_keys = diagnostic_keys
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, best_score: Optional[BestScore] = None,
**kwargs):
for stats_key in self.diagnostic_keys:
if not stats_key in summary.keys():
self.logger.warning('key ' + str(stats_key) + ' not in summary.')
else:
message = f'[{global_step} / {epoch}] '
message += ''.join([f'{k} {v:6.2f} ' for k, v in summary.get(stats_key).items()])
if "info" in summary.keys() and "elapsed-time" in summary["info"].keys():
message += f'({summary["info"]["elapsed-time"]:.2f}s /iter)'
else:
warnings.warn(
f"Summary does not contain the key info/elapsed-time. The elapsed time won't be displayed.")
if best_score is not None:
message += f' (best: {best_score.value:6.2f} [{best_score.step} | {best_score.epoch}])'
self.logger.info(message)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
pass
class PlotLogger(BaseLogger):
def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
super().__init__(*args)
self.diagnostic_keys = diagnostic_keys
self.tracker = DataTracker(label=self.key)
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
for key in self.diagnostic_keys:
self.tracker.append(global_step, summary[key])
def plot(self, *args, **kwargs):
self.tracker.plot(*args, **kwargs)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
img = img_tensor.data.permute(1, 2, 0).cpu().numpy()
matplotlib.image.imsave(os.path.join(self.logdir, f"{key}.png"), img)
class PlotHandler(List):
def __init__(self, logdir, *args, **kwargs):
super().__init__(*args, **kwargs)
self.path = os.path.join(logdir, "curves.png")
def plot(self):
if len(self):
logger = self[0]
keys = logger.tracker.data.keys()
plt.figure(figsize=(4 * len(keys), 3))
for i, key in enumerate(keys):
plt.subplot(1, len(keys), i + 1)
plt.title(key)
for logger in self:
logger.plot(key)
plt.legend()
plt.savefig(self.path)
class Logger(BaseLogger):
def __init__(self, key, logdir, tensorboard=True, logging=True, plot=True, **kwargs):
super().__init__(key, logdir)
self.loggers = []
if tensorboard:
self.loggers += [TensorboardLogger(key, logdir, **kwargs)]
if logging:
self.loggers += [LoggingLogger(key, logdir, **kwargs)]
if plot:
self.loggers += [PlotLogger(key, logdir, **kwargs)]
def log_diagnostic(self, *args, **kwargs):
for logger in self.loggers:
logger.log_diagnostic(*args, **kwargs)
def log_image(self, *args, **kwargs):
for logger in self.loggers:
logger.log_image(*args, **kwargs)
class LoggerManager():
def __init__(self, logdir, **kwargs):
self.logdir = logdir
self.kwargs = kwargs
self.loggers = {}
self.plot_handler = PlotHandler(self.logdir)
def init_logger(self, key):
self.loggers[key] = Logger(key, self.logdir, **self.kwargs)
        # append each PlotLogger instance to the PlotHandler
for logger in self.loggers[key].loggers:
if isinstance(logger, PlotLogger):
self.plot_handler.append(logger)
def log_diagnostic(self, key, step, epoch, summary, **kwargs):
if key not in self.loggers:
self.init_logger(key)
self.loggers[key].log_diagnostic(step, epoch, summary, **kwargs)
self.plot_handler.plot()
def log_image(self, key, image_key, step, epoch, img_tensor, **kwargs):
if key not in self.loggers:
self.init_logger(key)
self.loggers[key].log_image(image_key, step, epoch, img_tensor, **kwargs)
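# Example wiring (sketch; assumes a booster Diagnostic named `summary` built elsewhere):
#
#     manager = LoggerManager(logdir='./runs', diagnostic_keys=['loss'])
#     manager.log_diagnostic('train', global_step, epoch, summary)
#     manager.log_image('train', 'reconstruction', global_step, epoch, img_tensor)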
| 33.801105
| 119
| 0.61932
|
import logging
import os
import sys
import warnings
from collections import namedtuple
from typing import *
import matplotlib.image
import matplotlib.pyplot as plt
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from booster import Diagnostic
from .datatracker import DataTracker
BestScore = namedtuple('BestScore', ['step', 'epoch', 'value', 'summary'])
class BaseLogger():
def __init__(self, key, logdir):
self.key = key
self.logdir = logdir
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
raise NotImplementedError
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
raise NotImplementedError
class TensorboardLogger(BaseLogger):
def __init__(self, *args, **kwargs):
super().__init__(*args)
self.writer = SummaryWriter(os.path.join(self.logdir, self.key))
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
summary.log(self.writer, global_step)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
self.writer.add_image(key, img_tensor, global_step=global_step)
class LoggingLogger(BaseLogger):
def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
super().__init__(*args)
self.logger = logging.getLogger(self.key)
self.logger.setLevel(logging.INFO)
self.diagnostic_keys = diagnostic_keys
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, best_score: Optional[BestScore] = None,
**kwargs):
for stats_key in self.diagnostic_keys:
if not stats_key in summary.keys():
self.logger.warning('key ' + str(stats_key) + ' not in summary.')
else:
message = f'[{global_step} / {epoch}] '
message += ''.join([f'{k} {v:6.2f} ' for k, v in summary.get(stats_key).items()])
if "info" in summary.keys() and "elapsed-time" in summary["info"].keys():
message += f'({summary["info"]["elapsed-time"]:.2f}s /iter)'
else:
warnings.warn(
f"Summary does not contain the key info/elapsed-time. The elapsed time won't be displayed.")
if best_score is not None:
message += f' (best: {best_score.value:6.2f} [{best_score.step} | {best_score.epoch}])'
self.logger.info(message)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
pass
class PlotLogger(BaseLogger):
def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
super().__init__(*args)
self.diagnostic_keys = diagnostic_keys
self.tracker = DataTracker(label=self.key)
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
for key in self.diagnostic_keys:
self.tracker.append(global_step, summary[key])
def plot(self, *args, **kwargs):
self.tracker.plot(*args, **kwargs)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
img = img_tensor.data.permute(1, 2, 0).cpu().numpy()
matplotlib.image.imsave(os.path.join(self.logdir, f"{key}.png"), img)
class PlotHandler(List):
def __init__(self, logdir, *args, **kwargs):
super().__init__(*args, **kwargs)
self.path = os.path.join(logdir, "curves.png")
def plot(self):
if len(self):
logger = self[0]
keys = logger.tracker.data.keys()
plt.figure(figsize=(4 * len(keys), 3))
for i, key in enumerate(keys):
plt.subplot(1, len(keys), i + 1)
plt.title(key)
for logger in self:
logger.plot(key)
plt.legend()
plt.savefig(self.path)
class Logger(BaseLogger):
def __init__(self, key, logdir, tensorboard=True, logging=True, plot=True, **kwargs):
super().__init__(key, logdir)
self.loggers = []
if tensorboard:
self.loggers += [TensorboardLogger(key, logdir, **kwargs)]
if logging:
self.loggers += [LoggingLogger(key, logdir, **kwargs)]
if plot:
self.loggers += [PlotLogger(key, logdir, **kwargs)]
def log_diagnostic(self, *args, **kwargs):
for logger in self.loggers:
logger.log_diagnostic(*args, **kwargs)
def log_image(self, *args, **kwargs):
for logger in self.loggers:
logger.log_image(*args, **kwargs)
class LoggerManager():
def __init__(self, logdir, **kwargs):
self.logdir = logdir
self.kwargs = kwargs
self.loggers = {}
self.plot_handler = PlotHandler(self.logdir)
def init_logger(self, key):
self.loggers[key] = Logger(key, self.logdir, **self.kwargs)
        # append each PlotLogger instance to the PlotHandler
for logger in self.loggers[key].loggers:
if isinstance(logger, PlotLogger):
self.plot_handler.append(logger)
def log_diagnostic(self, key, step, epoch, summary, **kwargs):
if key not in self.loggers:
self.init_logger(key)
self.loggers[key].log_diagnostic(step, epoch, summary, **kwargs)
self.plot_handler.plot()
def log_image(self, key, image_key, step, epoch, img_tensor, **kwargs):
if key not in self.loggers:
self.init_logger(key)
self.loggers[key].log_image(image_key, step, epoch, img_tensor, **kwargs)
| true
| true
|
79025fd0ea72e3aa1a18d07f90810ce291ac8d40
| 149,012
|
py
|
Python
|
yt_dlp/extractor/generic.py
|
king-millez/yt-dlp
|
ff2751ac9cc7d4150797d3207da9b566396bc796
|
[
"Unlicense"
] | null | null | null |
yt_dlp/extractor/generic.py
|
king-millez/yt-dlp
|
ff2751ac9cc7d4150797d3207da9b566396bc796
|
[
"Unlicense"
] | null | null | null |
yt_dlp/extractor/generic.py
|
king-millez/yt-dlp
|
ff2751ac9cc7d4150797d3207da9b566396bc796
|
[
"Unlicense"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
int_or_none,
is_html,
js_to_json,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_timestamp,
unsmuggle_url,
UnsupportedError,
url_or_none,
xpath_attr,
xpath_text,
xpath_with_ns,
)
from .commonprotocols import RtmpIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
from .mofosex import MofosexEmbedIE
from .spankwire import SpankwireIE
from .youporn import YouPornIE
from .vimeo import (
VimeoIE,
VHXEmbedIE,
)
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudEmbedIE
from .tunein import TuneInBaseIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
from .piksel import PikselIE
from .videa import VideaIE
from .twentymin import TwentyMinutenIE
from .ustream import UstreamIE
from .arte import ArteTVEmbedIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE
from .wistia import WistiaIE
from .mediaset import MediasetIE
from .joj import JojIE
from .megaphone import MegaphoneIE
from .vzaar import VzaarIE
from .channel9 import Channel9IE
from .vshare import VShareIE
from .mediasite import MediasiteIE
from .springboardplatform import SpringboardPlatformIE
from .yapfiles import YapFilesIE
from .vice import ViceIE
from .xfileshare import XFileShareIE
from .cloudflarestream import CloudflareStreamIE
from .peertube import PeerTubeIE
from .teachable import TeachableIE
from .indavideo import IndavideoEmbedIE
from .apa import APAIE
from .foxnews import FoxNewsIE
from .viqeo import ViqeoIE
from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
from .gedidigital import GediDigitalIE
from .rcs import RCSEmbedsIE
from .bitchute import BitChuteIE
from .rumble import RumbleEmbedIE
from .arcpublishing import ArcPublishingIE
from .medialaan import MedialaanIE
from .simplecast import SimplecastIE
from .wimtv import WimTVIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
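    # GenericIE is deliberately greedy: _VALID_URL matches any URL, so yt-dlp
    # only falls back to this extractor after every specialized extractor has
    # declined the URL.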
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'http://podcastfeeds.nbcnews.com/nbcnews/video/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'title': 'MSNBC Rachel Maddow (video)',
'description': 're:.*her unique approach to storytelling.*',
},
'playlist': [{
'info_dict': {
'ext': 'mov',
'id': 'pdv_maddow_netcast_mov-12-03-2020-223726',
'title': 'MSNBC Rachel Maddow (video) - 12-03-2020-223726',
'description': 're:.*her unique approach to storytelling.*',
'upload_date': '20201204',
},
}],
},
# RSS feed with item with description and thumbnails
{
'url': 'https://anchor.fm/s/dd00e14/podcast/rss',
'info_dict': {
'id': 'https://anchor.fm/s/dd00e14/podcast/rss',
'title': 're:.*100% Hydrogen.*',
'description': 're:.*In this episode.*',
},
'playlist': [{
'info_dict': {
'ext': 'm4a',
'id': 'c1c879525ce2cb640b344507e682c36d',
'title': 're:Hydrogen!',
'description': 're:.*In this episode we are going.*',
'timestamp': 1567977776,
'upload_date': '20190908',
'duration': 459,
'thumbnail': r're:^https?://.*\.jpg$',
'episode_number': 1,
'season_number': 1,
'age_limit': 0,
},
}],
'params': {
'skip_download': True,
},
},
# RSS feed with enclosures and unsupported link URLs
{
'url': 'http://www.hellointernet.fm/podcast?format=rss',
'info_dict': {
'id': 'http://www.hellointernet.fm/podcast?format=rss',
'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
'title': 'Hello Internet',
},
'playlist_mincount': 100,
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': r're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
{
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer'
# in the http requests
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# embedded with itemprop embedURL and video id spelled as `idVideo`
'add_id': ['BrightcoveLegacy'],
'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/',
'info_dict': {
'id': '5255628253001',
'ext': 'mp4',
'title': 'md5:37c519b1128915607601e75a87995fc0',
'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26',
'uploader': 'BFM BUSINESS',
'uploader_id': '876450612001',
'timestamp': 1482255315,
'upload_date': '20161220',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
{
# Brightcove video in <iframe>
'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724',
'md5': '36d74ef5e37c8b4a2ce92880d208b968',
'info_dict': {
'id': '5360463607001',
'ext': 'mp4',
'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活',
'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。',
'uploader': 'United Nations',
'uploader_id': '1362235914001',
'timestamp': 1489593889,
'upload_date': '20170315',
},
'add_ie': ['BrightcoveLegacy'],
},
{
# Brightcove with alternative playerID key
'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html',
'info_dict': {
'id': 'nmeth.2062_SV1',
'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research',
},
'playlist': [{
'info_dict': {
'id': '2228375078001',
'ext': 'mp4',
'title': 'nmeth.2062-sv1',
'description': 'nmeth.2062-sv1',
'timestamp': 1363357591,
'upload_date': '20130315',
'uploader': 'Nature Publishing Group',
'uploader_id': '1964492299001',
},
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
'skip': 'video rotates...weekly?',
},
{
# Brightcove:new type [2].
'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis',
'md5': '2b35148fcf48da41c9fb4591650784f3',
'info_dict': {
'id': '5348741021001',
'ext': 'mp4',
'upload_date': '20170306',
'uploader_id': '4191638492001',
'timestamp': 1488769918,
'title': 'VIDEO: St. Thomas More earns first trip to basketball semis',
},
},
{
# Alternative brightcove <video> attributes
'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/',
'info_dict': {
'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs",
},
'playlist': [{
'md5': '732d22ba3d33f2f3fc253c39f8f36523',
'info_dict': {
'id': '5311302538001',
'ext': 'mp4',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche",
'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)",
'timestamp': 1486321708,
'upload_date': '20170205',
'uploader_id': '800000640001',
},
'only_matching': True,
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
# ooyala video embedded with http://player.ooyala.com/static/v4/production/latest/core.min.js
{
'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/',
'info_dict': {
'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2',
'ext': 'mp4',
'title': 'Steampunk Fest Comes to Honesdale',
'duration': 43.276,
},
'params': {
'skip_download': True,
}
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
# HEAD requests lead to endless 301, while GET is OK
'expected_warnings': ['301'],
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
# This forum does not allow <iframe> syntaxes anymore
# Now HTML tags are displayed as-is
'skip': 'No videos on this page',
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# DailyMail embed
{
'url': 'http://www.bumm.sk/krimi/2017/07/05/biztonsagi-kamera-buktatta-le-az-agg-ferfit-utlegelo-apolot',
'info_dict': {
'id': '1495629',
'ext': 'mp4',
'title': 'Care worker punches elderly dementia patient in head 11 times',
'description': 'md5:3a743dee84e57e48ec68bf67113199a5',
},
'add_ie': ['DailyMail'],
'params': {
'skip_download': True,
},
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
# MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# YouTube <object> embed
{
'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/',
'md5': '516718101ec834f74318df76259fb3cc',
'info_dict': {
'id': 'msN87y-iEx0',
'ext': 'webm',
'title': 'Feynman: Mirrors FUN TO IMAGINE 6',
'upload_date': '20080526',
'description': 'md5:0ffc78ea3f01b2e2c247d5f8d1d3c18d',
'uploader': 'Christopher Sykes',
'uploader_id': 'ChristopherJSykes',
},
'add_ie': ['Youtube'],
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/ytdl-org/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# TuneIn station embed
{
'url': 'http://radiocnrv.com/promouvoir-radio-cnrv/',
'info_dict': {
'id': '204146',
'ext': 'mp3',
'title': 'CNRV',
'location': 'Paris, France',
'is_live': True,
},
'params': {
# Live stream
'skip_download': True,
},
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'https://skiplagged.com/',
'info_dict': {
'id': 'skiplagged',
'title': 'Skiplagged: The smart way to find cheap flights',
},
'playlist_mincount': 1,
'add_ie': ['Youtube'],
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:8078af856dca76edc42910b61273dbbf',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# jwplayer rtmp
{
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
'uploader': 'www.suffolk.edu',
},
'params': {
'skip_download': True,
},
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
},
# Complex jwplayer
{
'url': 'http://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos',
'ext': 'mp4',
'title': 'king machine trailer 1',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# JWPlayer config passed as variable
'url': 'http://www.txxx.com/videos/3326530/ariele/',
'info_dict': {
'id': '3326530_hq',
'ext': 'mp4',
'title': 'ARIELE | Tube Cup',
'uploader': 'www.txxx.com',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
},
{
# JWPlatform iframe
'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
'info_dict': {
'id': 'O0c5JcKT',
'ext': 'mp4',
'upload_date': '20171122',
'timestamp': 1511366290,
'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
},
'add_ie': [JWPlatformIE.ie_key()],
},
{
# Video.js embed, multiple formats
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': 'PROstoe3D',
},
'params': {
'skip_download': True,
},
},
{
# Video.js embed, single format
'url': 'https://www.vooplayer.com/v3/watch/watch.php?v=NzgwNTg=',
'info_dict': {
'id': 'watch',
'ext': 'mp4',
'title': 'Step 1 - Good Foundation',
'description': 'md5:d1e7ff33a29fc3eb1673d6c270d344f4',
},
'params': {
'skip_download': True,
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'cplapp@learn360.com',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
{
# Kaltura embedded, some fileExt broken (#11480)
'url': 'http://www.cornell.edu/video/nima-arkani-hamed-standard-models-of-particle-physics',
'info_dict': {
'id': '1_sgtvehim',
'ext': 'mp4',
'title': 'Our "Standard Models" of particle physics and cosmology',
'description': 'md5:67ea74807b8c4fea92a6f38d6d323861',
'timestamp': 1321158993,
'upload_date': '20111113',
'uploader_id': 'kps1',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed
'url': 'http://www.gsd.harvard.edu/event/i-m-pei-a-centennial-celebration/',
'md5': 'ae5ace8eb09dc1a35d03b579a9c2cc44',
'info_dict': {
'id': '0_f2cfbpwy',
'ext': 'mp4',
'title': 'I. M. Pei: A Centennial Celebration',
'description': 'md5:1db8f40c69edc46ca180ba30c567f37c',
'upload_date': '20170403',
'uploader_id': 'batchUser',
'timestamp': 1491232186,
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed, more sophisticated
'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
'info_dict': {
'id': '1_9gzouybz',
'ext': 'mp4',
'title': 'lecture-05sep2017',
'description': 'md5:40f347d91fd4ba047e511c5321064b49',
'upload_date': '20170913',
'uploader_id': 'eps2',
'timestamp': 1505340777,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
{
# meta twitter:player
'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
'info_dict': {
'id': '0_01b42zps',
'ext': 'mp4',
'title': 'Main Twerk (Video)',
'upload_date': '20171208',
'uploader_id': 'sebastian.salinas@thechive.com',
'timestamp': 1512713057,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
# referrer protected EaglePlatform embed
{
'url': 'https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/',
'info_dict': {
'id': '582306',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3382,
'view_count': int,
},
'params': {
'skip_download': True,
},
},
# ClipYou (EaglePlatform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is unavailable.',
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
'skip': 'HTTP Error 404: Not Found',
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
'description': 'Amazon updates Fire TV line, Tesla\'s Model X spotted in the wild',
'timestamp': 1427237531,
'uploader': 'Crunch Report',
'upload_date': '20150324',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
'skip': 'Invalid Page URL',
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': 'x_dtl_oa_LettermanliftPR_160608',
'ext': 'mp4',
'title': 'David Letterman: A Preview',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
'upload_date': '20160609',
'timestamp': 1465431544,
'uploader': 'NBCU-NEWS',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON Expecting value'],
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Kinja embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '106351',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'description': 'Migrated from OnionStudios',
'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'clickhole',
'upload_date': '20150527',
'timestamp': 1432744860,
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
    # This video can't be played in browsers if Flash is disabled and the UA is set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
{
# Brightcove embed with whitespace around attribute names
'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill',
'info_dict': {
'id': '3167554373001',
'ext': 'mp4',
'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill",
'description': 'md5:57bacb0e0f29349de4972bfda3191713',
'uploader_id': '1079349493',
'upload_date': '20140207',
'timestamp': 1391810548,
},
'params': {
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': '7619da8c820e835bef21a1efa2a0fc71',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Another LiveLeak embed pattern (#13336)
{
'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/',
'info_dict': {
'id': '2eb_1496309988',
'ext': 'mp4',
'title': 'Thief robs place where everyone was armed',
'description': 'md5:694d73ee79e535953cf2488562288eee',
'uploader': 'brazilwtf',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook <iframe> embed, plugin video
{
'url': 'http://5pillarsuk.com/2017/06/07/tariq-ramadan-disagrees-with-pr-exercise-by-imams-refusing-funeral-prayers-for-london-attackers/',
'info_dict': {
'id': '1754168231264132',
'ext': 'mp4',
'title': 'About the Imams and Religious leaders refusing to perform funeral prayers for...',
'uploader': 'Tariq Ramadan (official)',
'timestamp': 1496758379,
'upload_date': '20170606',
},
'params': {
'skip_download': True,
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': 'videos.expansion@el-mundo.net',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# multiple kaltura embeds, nsfw
'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
'info_dict': {
'id': 'kamila-avec-video-jaime-sadomie',
'title': "Kamila avec vídeo “J'aime sadomie”",
},
'playlist_count': 8,
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
# generic vimeo embed that requires original URL passed as Referer
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
# DBTV embeds
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
{
# Videa embeds
'url': 'http://forum.dvdtalk.com/movie-talk/623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style.html',
'info_dict': {
'id': '623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style',
'title': 'Deleted Magic - Star Wars: OT Deleted / Alt. Scenes Docu. Style - DVD Talk Forum',
},
'playlist_mincount': 2,
},
{
# 20 minuten embed
'url': 'http://www.20min.ch/schweiz/news/story/So-kommen-Sie-bei-Eis-und-Schnee-sicher-an-27032552',
'info_dict': {
'id': '523629',
'ext': 'mp4',
'title': 'So kommen Sie bei Eis und Schnee sicher an',
'description': 'md5:117c212f64b25e3d95747e5276863f7d',
},
'params': {
'skip_download': True,
},
'add_ie': [TwentyMinutenIE.ie_key()],
},
{
# VideoPress embed
'url': 'https://en.support.wordpress.com/videopress/',
'info_dict': {
'id': 'OcobLTqC',
'ext': 'm4v',
'title': 'IMG_5786',
'timestamp': 1435711927,
'upload_date': '20150701',
},
'params': {
'skip_download': True,
},
'add_ie': [VideoPressIE.ie_key()],
},
{
# Rutube embed
'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2',
'info_dict': {
'id': '9b3d5bee0a8740bf70dfd29d3ea43541',
'ext': 'flv',
'title': 'Магаззино: Казань 2',
'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a',
'uploader': 'Магаззино',
'upload_date': '20170228',
'uploader_id': '996642',
},
'params': {
'skip_download': True,
},
'add_ie': [RutubeIE.ie_key()],
},
{
# ThePlatform embedded with whitespaces in URLs
'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm',
'only_matching': True,
},
{
# Senate ISVP iframe https
'url': 'https://www.hsgac.senate.gov/hearings/canadas-fast-track-refugee-plan-unanswered-questions-and-implications-for-us-national-security',
'md5': 'fb8c70b0b515e5037981a2492099aab8',
'info_dict': {
'id': 'govtaff020316',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
},
'add_ie': [SenateISVPIE.ie_key()],
},
{
# Limelight embeds (1 channel embed + 4 media embeds)
'url': 'http://www.sedona.com/FacilitatorTraining2017',
'info_dict': {
'id': 'FacilitatorTraining2017',
'title': 'Facilitator Training 2017',
},
'playlist_mincount': 5,
},
{
# Limelight embed (LimelightPlayerUtil.embed)
'url': 'https://tv5.ca/videos?v=xuu8qowr291ri',
'info_dict': {
'id': '95d035dc5c8a401588e9c0e6bd1e9c92',
'ext': 'mp4',
'title': '07448641',
'timestamp': 1499890639,
'upload_date': '20170712',
},
'params': {
'skip_download': True,
},
'add_ie': ['LimelightMedia'],
},
{
'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/',
'info_dict': {
'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest',
'title': 'Standoff with Walnut Creek murder suspect ends',
'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788',
},
'playlist_mincount': 4,
},
{
# WashingtonPost embed
'url': 'http://www.vanityfair.com/hollywood/2017/04/donald-trump-tv-pitches',
'info_dict': {
'id': '8caf6e88-d0ec-11e5-90d3-34c2c42653ac',
'ext': 'mp4',
'title': "No one has seen the drama series based on Trump's life \u2014 until now",
'description': 'Donald Trump wanted a weekly TV drama based on his life. It never aired. But The Washington Post recently obtained a scene from the pilot script — and enlisted actors.',
'timestamp': 1455216756,
'uploader': 'The Washington Post',
'upload_date': '20160211',
},
'add_ie': [WashingtonPostIE.ie_key()],
},
{
# Mediaset embed
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
'info_dict': {
'id': '720642',
'ext': 'mp4',
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
},
'params': {
'skip_download': True,
},
'add_ie': [MediasetIE.ie_key()],
},
{
# JOJ.sk embeds
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'info_dict': {
'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'title': 'Slovenskom sa prehnala vlna silných búrok',
},
'playlist_mincount': 5,
'add_ie': [JojIE.ie_key()],
},
{
# AMP embed (see https://www.ampproject.org/docs/reference/components/amp-video)
'url': 'https://tvrain.ru/amp/418921/',
'md5': 'cc00413936695987e8de148b67d14f1d',
'info_dict': {
'id': '418921',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
},
},
{
# vzaar embed
'url': 'http://help.vzaar.com/article/165-embedding-video',
'md5': '7e3919d9d2620b89e3e00bec7fe8c9d4',
'info_dict': {
'id': '8707641',
'ext': 'mp4',
'title': 'Building A Business Online: Principal Chairs Q & A',
},
},
{
# multiple HTML5 videos on one page
'url': 'https://www.paragon-software.com/home/rk-free/keyscenarios.html',
'info_dict': {
'id': 'keyscenarios',
'title': 'Rescue Kit 14 Free Edition - Getting started',
},
'playlist_count': 4,
},
{
# vshare embed
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
'info_dict': {
'id': '0f64ce6',
'title': 'vl14062007715967',
'ext': 'mp4',
}
},
{
'url': 'http://www.heidelberg-laureate-forum.org/blog/video/lecture-friday-september-23-2016-sir-c-antony-r-hoare/',
'md5': 'aecd089f55b1cb5a59032cb049d3a356',
'info_dict': {
'id': '90227f51a80c4d8f86c345a7fa62bd9a1d',
'ext': 'mp4',
'title': 'Lecture: Friday, September 23, 2016 - Sir Tony Hoare',
'description': 'md5:5a51db84a62def7b7054df2ade403c6c',
'timestamp': 1474354800,
'upload_date': '20160920',
}
},
{
'url': 'http://www.kidzworld.com/article/30935-trolls-the-beat-goes-on-interview-skylar-astin-and-amanda-leighton',
'info_dict': {
'id': '1731611',
'ext': 'mp4',
'title': 'Official Trailer | TROLLS: THE BEAT GOES ON!',
'description': 'md5:eb5f23826a027ba95277d105f248b825',
'timestamp': 1516100691,
'upload_date': '20180116',
},
'params': {
'skip_download': True,
},
'add_ie': [SpringboardPlatformIE.ie_key()],
},
{
'url': 'https://www.yapfiles.ru/show/1872528/690b05d3054d2dbe1e69523aa21bb3b1.mp4.html',
'info_dict': {
'id': 'vMDE4NzI1Mjgt690b',
'ext': 'mp4',
'title': 'Котята',
},
'add_ie': [YapFilesIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# CloudflareStream embed
'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
'info_dict': {
'id': '31c9291ab41fac05471db4e73aa11717',
'ext': 'mp4',
'title': '31c9291ab41fac05471db4e73aa11717',
},
'add_ie': [CloudflareStreamIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# PeerTube embed
'url': 'https://joinpeertube.org/fr/home/',
'info_dict': {
'id': 'home',
'title': 'Reprenez le contrôle de vos vidéos ! #JoinPeertube',
},
'playlist_count': 2,
},
{
# Indavideo embed
'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
'info_dict': {
'id': '1693903',
'ext': 'mp4',
'title': 'Így kell otthon hamburgert sütni',
'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
'timestamp': 1426330212,
'upload_date': '20150314',
'uploader': 'StreetKitchen',
'uploader_id': '546363',
},
'add_ie': [IndavideoEmbedIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# APA embed via JWPlatform embed
'url': 'http://www.vol.at/blue-man-group/5593454',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://share-videos.se/auto/video/83645793?uid=13',
'md5': 'b68d276de422ab07ee1d49388103f457',
'info_dict': {
'id': '83645793',
'title': 'Lock up and get excited',
'ext': 'mp4'
},
'skip': 'TODO: fix nested playlists processing in tests',
},
{
# Viqeo embeds
'url': 'https://viqeo.tv/',
'info_dict': {
'id': 'viqeo',
'title': 'All-new video platform',
},
'playlist_count': 6,
},
{
# Squarespace video embed, 2019-08-28
'url': 'http://ootboxford.com',
'info_dict': {
'id': 'Tc7b_JGdZfw',
'title': 'Out of the Blue, at Childish Things 10',
'ext': 'mp4',
'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
'uploader_id': 'helendouglashouse',
'uploader': 'Helen & Douglas House',
'upload_date': '20140328',
},
'params': {
'skip_download': True,
},
},
# {
# # Zype embed
# 'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
# 'info_dict': {
# 'id': '5b400b834b32992a310622b9',
# 'ext': 'mp4',
# 'title': 'Smoky Barbecue Favorites',
# 'thumbnail': r're:^https?://.*\.jpe?g',
# 'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
# 'upload_date': '20170909',
# 'timestamp': 1504915200,
# },
# 'add_ie': [ZypeIE.ie_key()],
# 'params': {
# 'skip_download': True,
# },
# },
{
# videojs embed
'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
'info_dict': {
'id': 'shell',
'ext': 'mp4',
'title': 'Доставщик пиццы спросил разрешения сыграть на фортепиано',
'description': 'md5:89209cdc587dab1e4a090453dbaa2cb1',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest'],
},
{
# DailyMotion embed with DM.player
'url': 'https://www.beinsports.com/us/copa-del-rey/video/the-locker-room-valencia-beat-barca-in-copa/1203804',
'info_dict': {
'id': 'k6aKkGHd9FJs4mtJN39',
'ext': 'mp4',
'title': 'The Locker Room: Valencia Beat Barca In Copa del Rey Final',
'description': 'This video is private.',
'uploader_id': 'x1jf30l',
'uploader': 'beIN SPORTS USA',
'upload_date': '20190528',
'timestamp': 1559062971,
},
'params': {
'skip_download': True,
},
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# },
{
# VHX Embed
'url': 'https://demo.vhx.tv/category-c/videos/file-example-mp4-480-1-5mg-copy',
'info_dict': {
'id': '858208',
'ext': 'mp4',
'title': 'Untitled',
'uploader_id': 'user80538407',
'uploader': 'OTT Videos',
},
},
{
# ArcPublishing PoWa video player
'url': 'https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/',
'md5': 'b03b2fac8680e1e5a7cc81a5c27e71b3',
'info_dict': {
'id': '8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'ext': 'mp4',
'title': 'Senate candidates wave to voters on Anchorage streets',
'description': 'md5:91f51a6511f090617353dc720318b20e',
'timestamp': 1604378735,
'upload_date': '20201103',
'duration': 1581,
},
},
{
# MyChannels SDK embed
# https://www.24kitchen.nl/populair/deskundige-dit-waarom-sommigen-gevoelig-zijn-voor-voedselallergieen
'url': 'https://www.demorgen.be/nieuws/burgemeester-rotterdam-richt-zich-in-videoboodschap-tot-relschoppers-voelt-het-goed~b0bcfd741/',
'md5': '90c0699c37006ef18e198c032d81739c',
'info_dict': {
'id': '194165',
'ext': 'mp4',
'title': 'Burgemeester Aboutaleb spreekt relschoppers toe',
'timestamp': 1611740340,
'upload_date': '20210127',
'duration': 159,
},
},
{
# Simplecast player embed
'url': 'https://www.bio.org/podcast',
'info_dict': {
'id': 'podcast',
'title': 'I AM BIO Podcast | BIO',
},
'playlist_mincount': 52,
},
{
# WimTv embed player
'url': 'http://www.msmotor.tv/wearefmi-pt-2-2021/',
'info_dict': {
'id': 'wearefmi-pt-2-2021',
'title': '#WEAREFMI – PT.2 – 2021 – MsMotorTV',
},
'playlist_count': 1,
},
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
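        """Build a playlist from an RSS/podcast feed.

        Prefers <enclosure> URLs over <link> and maps common <itunes:*> tags
        (duration, explicit, episode/season numbers) to metadata fields.
        """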
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
NS_MAP = {
'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
}
entries = []
for it in doc.findall('./channel/item'):
next_url = None
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
continue
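            # Helper for reading <itunes:*> children of the current feed item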
def itunes(key):
return xpath_text(
it, xpath_with_ns('./itunes:%s' % key, NS_MAP),
default=None)
duration = itunes('duration')
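            # Map the <itunes:explicit> flag onto an age limit:
            # 'true'/'yes' -> 18, 'false'/'no' -> 0, anything else -> unknown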
explicit = (itunes('explicit') or '').lower()
if explicit in ('true', 'yes'):
age_limit = 18
elif explicit in ('false', 'no'):
age_limit = 0
else:
age_limit = None
entries.append({
'_type': 'url_transparent',
'url': next_url,
'title': it.find('title').text,
'description': xpath_text(it, 'description', default=None),
'timestamp': unified_timestamp(
xpath_text(it, 'pubDate', default=None)),
'duration': int_or_none(duration) or parse_duration(duration),
'thumbnail': url_or_none(xpath_attr(it, xpath_with_ns('./itunes:image', NS_MAP), 'href')),
'episode': itunes('title'),
'episode_number': int_or_none(itunes('episode')),
'season_number': int_or_none(itunes('season')),
'age_limit': age_limit,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
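        # Each child of the fileset (e.g. a main screen capture and a PIP camera
        # stream) becomes its own playlist entry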
        for n in fileset_node:
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
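        # Rough flow: normalize scheme-less URLs and search queries, probe the URL
        # with a HEAD request (redirects, direct media links), then fetch the page
        # and try structured documents (RSS, SMIL, XSPF, MPD, f4m, Camtasia) before
        # falling back to the long chain of per-site embed checks below.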
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
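            # Scheme-less input is either a bare domain/path (retried with http://)
            # or free text dispatched according to --default-search, e.g.
            # 'example.com/video' vs. 'cute kittens' (a hypothetical search query)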
default_search = self.get_param('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run yt-dlp "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
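        # Callers can smuggle extra data when delegating to the generic extractor,
        # e.g. a forced video id or a flag that the handoff was intentional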
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
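        # Probe with a HEAD request first: following redirects and inspecting
        # Content-Type is enough to catch direct media links without a body download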
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'timestamp': unified_timestamp(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
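        # Matches the Content-Type of direct media links, e.g. 'video/mp4',
        # 'audio/mpeg', 'application/x-mpegurl' (HLS) or 'application/ogg'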
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
format_id = compat_str(m.group('format_id'))
subtitles = {}
if format_id.endswith('mpegurl'):
formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': format_id,
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
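            # 'direct' marks the URL itself as the media file rather than a page
            # embedding it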
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
info_dict['subtitles'] = subtitles
return info_dict
if not self.get_param('test', False) and not is_intentional:
force = self.get_param('force_generic_extractor', False)
self.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
            # Some web servers may serve compressed content of rather big size (e.g. gzipped flac)
            # making it impossible to download only a chunk of the file (yet we need only 512kB to
            # test whether it's HTML or not). With yt-dlp's default Accept-Encoding
            # this would always result in downloading the whole file, which is not desirable.
            # Therefore, for the extraction pass we have to override Accept-Encoding to any in order
            # to accept raw bytes and be able to download only a chunk.
            # It may be better to solve this by checking Content-Type for application/octet-stream
            # after the HEAD request finishes, but it is unclear whether we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
if '<title>DPG Media Privacy Gate</title>' in webpage:
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
        # Is it an RSS feed, a SMIL file, an XSPF playlist or an MPD manifest?
try:
try:
doc = compat_etree_fromstring(webpage)
except compat_xml_parse_error:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
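            # A Microsoft Smooth Streaming (ISM) manifest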
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'], info_dict['subtitles'] = self._parse_ism_formats_and_subtitles(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(
self._parse_xspf(
doc, video_id, xspf_url=url,
xspf_base_url=full_response.geturl()),
video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'], info_dict['subtitles'] = self._parse_mpd_formats_and_subtitles(
doc,
mpd_base_url=full_response.geturl().rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
        # Sometimes the embedded video player is hidden behind percent encoding
        # (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
        # Unescaping the whole page would allow handling those cases in a generic way
        # FIXME: unescaping the whole page may break URLs, commenting out for now.
        # There should probably be a second run of the generic extractor on the unescaped webpage.
        # webpage = compat_urllib_parse_unquote(webpage)

        # Unescape Squarespace embeds so they can be detected by the generic extractor,
        # see https://github.com/ytdl-org/youtube-dl/issues/21294
webpage = re.sub(
r'<div[^>]+class=[^>]*?\bsqs-video-wrapper\b[^>]*>',
lambda x: unescapeHTML(x.group(0)), webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
info_dict.update({
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
})
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
if bc_urls:
return self.playlist_from_matches(
bc_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'referrer': url}),
ie='BrightcoveNew')
# Look for Nexx embeds
nexx_urls = NexxIE._extract_urls(webpage)
if nexx_urls:
return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key())
# Look for Nexx iFrame embeds
nexx_embed_urls = NexxEmbedIE._extract_urls(webpage)
if nexx_embed_urls:
return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key())
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')
arc_urls = ArcPublishingIE._extract_urls(webpage)
if arc_urls:
return self.playlist_from_matches(arc_urls, video_id, video_title, ie=ArcPublishingIE.ie_key())
mychannels_urls = MedialaanIE._extract_urls(webpage)
if mychannels_urls:
return self.playlist_from_matches(
mychannels_urls, video_id, video_title, ie=MedialaanIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl')
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key())
vhx_url = VHXEmbedIE._extract_url(webpage)
if vhx_url:
return self.url_result(vhx_url, VHXEmbedIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Invidious Instances
# https://github.com/yt-dlp/yt-dlp/issues/195
# https://github.com/iv-org/invidious/pull/1730
youtube_url = self._search_regex(
r'<link rel="alternate" href="(https://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})"',
webpage, 'youtube link', default=None)
if youtube_url:
return self.url_result(youtube_url, YoutubeIE.ie_key())
# Look for YouTube embeds
youtube_urls = YoutubeIE._extract_urls(webpage)
if youtube_urls:
return self.playlist_from_matches(
youtube_urls, video_id, video_title, ie=YoutubeIE.ie_key())
matches = DailymotionIE._extract_urls(webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title)
# Look for embedded Dailymotion playlist player (#3822)
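        # The jukebox widget lists its playlists as repeated 'list[]=/playlist/<id>/'
        # query parameters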
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return self.playlist_from_matches(
playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for DailyMail embeds
dailymail_urls = DailyMailIE._extract_urls(webpage)
if dailymail_urls:
return self.playlist_from_matches(
dailymail_urls, video_id, video_title, ie=DailyMailIE.ie_key())
# Look for Teachable embeds, must be before Wistia
teachable_url = TeachableIE._extract_url(webpage, url)
if teachable_url:
return self.url_result(teachable_url)
# Look for embedded Wistia player
wistia_urls = WistiaIE._extract_urls(webpage)
if wistia_urls:
playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
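            # Wistia embeds carry no uploader of their own, so propagate the
            # page-level uploader through url_transparent entries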
for entry in playlist['entries']:
entry.update({
'_type': 'url_transparent',
'uploader': video_uploader,
})
return playlist
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
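        # Ooyala embed codes are 32-character ids that may appear in player URLs,
        # OO.Player.create(...) calls, SBN helpers or data-ooyala-video-id attributes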
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
return OoyalaIE._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return self.playlist_from_matches(
embeds, video_id, video_title,
getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded Facebook player
facebook_urls = FacebookIE._extract_urls(webpage)
if facebook_urls:
return self.playlist_from_matches(facebook_urls, video_id, video_title)
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
odnoklassniki_url = OdnoklassnikiIE._extract_url(webpage)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return self.playlist_from_matches(
matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie')
# Look for Simplecast embeds
simplecast_urls = SimplecastIE._extract_urls(webpage)
if simplecast_urls:
return self.playlist_from_matches(
simplecast_urls, video_id, video_title)
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxIE._extract_urls(webpage)
if sportbox_urls:
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie=SportBoxIE.ie_key())
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key())
# Look for embedded Tube8 player
tube8_urls = Tube8IE._extract_urls(webpage)
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
# Look for embedded Mofosex player
mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
if mofosex_urls:
return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
# Look for embedded Spankwire player
spankwire_urls = SpankwireIE._extract_urls(webpage)
if spankwire_urls:
return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
# Look for embedded YouPorn player
youporn_urls = YouPornIE._extract_urls(webpage)
if youporn_urls:
return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
arte_urls = ArteTVEmbedIE._extract_urls(webpage)
if arte_urls:
return self.playlist_from_matches(arte_urls, video_id, video_title)
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudEmbedIE._extract_urls(webpage)
if soundcloud_urls:
return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return self.playlist_from_matches(tunein_urls, video_id, video_title)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
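        # Look for embedded MLB player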
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m\.mlb\.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
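        # Look for embedded CondeNast player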
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
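        # Look for embedded Livestream player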
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_urls = KalturaIE._extract_urls(webpage)
if kaltura_urls:
return self.playlist_from_matches(
kaltura_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'source_url': url}),
ie=KalturaIE.ie_key())
# Look for EaglePlatform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(smuggle_url(eagleplatform_url, {'referrer': url}), EaglePlatformIE.ie_key())
# Look for ClipYou (uses EaglePlatform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Kinja embeds
kinja_embed_urls = KinjaEmbedIE._extract_urls(webpage, url)
if kinja_embed_urls:
return self.playlist_from_matches(
kinja_embed_urls, video_id, video_title)
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_urls = JWPlatformIE._extract_urls(webpage)
if jwplatform_urls:
return self.playlist_from_matches(jwplatform_urls, video_id, video_title, ie=JWPlatformIE.ie_key())
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
limelight_urls = LimelightBaseIE._extract_urls(webpage, url)
if limelight_urls:
return self.playlist_result(
limelight_urls, video_id, video_title, video_description)
# Look for Anvato embeds
anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id)
if anvato_urls:
return self.playlist_result(
anvato_urls, video_id, video_title, video_description)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//
(?:
admin\.mangomolo\.com/analytics/index\.php/customers/embed|
player\.mangomolo\.com/v1
)/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
(?:index|live)\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_urls = LiveLeakIE._extract_urls(webpage)
if liveleak_urls:
return self.playlist_from_matches(liveleak_urls, video_id, video_title)
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return self.playlist_from_matches(
twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return self.playlist_from_matches(
videopress_urls, video_id, video_title, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return self.playlist_from_matches(
rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
# Look for WashingtonPost embeds
wapo_urls = WashingtonPostIE._extract_urls(webpage)
if wapo_urls:
return self.playlist_from_matches(
wapo_urls, video_id, video_title, ie=WashingtonPostIE.ie_key())
# Look for Mediaset embeds
mediaset_urls = MediasetIE._extract_urls(self, webpage)
if mediaset_urls:
return self.playlist_from_matches(
mediaset_urls, video_id, video_title, ie=MediasetIE.ie_key())
# Look for JOJ.sk embeds
joj_urls = JojIE._extract_urls(webpage)
if joj_urls:
return self.playlist_from_matches(
joj_urls, video_id, video_title, ie=JojIE.ie_key())
# Look for megaphone.fm embeds
mpfn_urls = MegaphoneIE._extract_urls(webpage)
if mpfn_urls:
return self.playlist_from_matches(
mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key())
# Look for vzaar embeds
vzaar_urls = VzaarIE._extract_urls(webpage)
if vzaar_urls:
return self.playlist_from_matches(
vzaar_urls, video_id, video_title, ie=VzaarIE.ie_key())
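        # Look for Channel 9 embeds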
channel9_urls = Channel9IE._extract_urls(webpage)
if channel9_urls:
return self.playlist_from_matches(
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
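        # Look for VShare embeds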
vshare_urls = VShareIE._extract_urls(webpage)
if vshare_urls:
return self.playlist_from_matches(
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
# Look for Mediasite embeds
mediasite_urls = MediasiteIE._extract_urls(webpage)
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
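        # Look for SpringboardPlatform embeds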
springboardplatform_urls = SpringboardPlatformIE._extract_urls(webpage)
if springboardplatform_urls:
return self.playlist_from_matches(
springboardplatform_urls, video_id, video_title,
ie=SpringboardPlatformIE.ie_key())
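        # Look for YapFiles embeds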
yapfiles_urls = YapFilesIE._extract_urls(webpage)
if yapfiles_urls:
return self.playlist_from_matches(
yapfiles_urls, video_id, video_title, ie=YapFilesIE.ie_key())
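        # Look for Vice embeds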
vice_urls = ViceIE._extract_urls(webpage)
if vice_urls:
return self.playlist_from_matches(
vice_urls, video_id, video_title, ie=ViceIE.ie_key())
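        # Look for XFileShare embeds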
xfileshare_urls = XFileShareIE._extract_urls(webpage)
if xfileshare_urls:
return self.playlist_from_matches(
xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
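        # Look for Cloudflare Stream embeds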
cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
if cloudflarestream_urls:
return self.playlist_from_matches(
cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
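        # Look for PeerTube embeds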
peertube_urls = PeerTubeIE._extract_urls(webpage, url)
if peertube_urls:
return self.playlist_from_matches(
peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
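        # Look for Indavideo embeds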
indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
if indavideo_urls:
return self.playlist_from_matches(
indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
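        # Look for APA embeds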
apa_urls = APAIE._extract_urls(webpage)
if apa_urls:
return self.playlist_from_matches(
apa_urls, video_id, video_title, ie=APAIE.ie_key())
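        # Look for Fox News embeds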
foxnews_urls = FoxNewsIE._extract_urls(webpage)
if foxnews_urls:
return self.playlist_from_matches(
foxnews_urls, video_id, video_title, ie=FoxNewsIE.ie_key())
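        # Look for share-videos.se embeds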
sharevideos_urls = [sharevideos_mobj.group('url') for sharevideos_mobj in re.finditer(
r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
webpage)]
if sharevideos_urls:
return self.playlist_from_matches(
sharevideos_urls, video_id, video_title)
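        # Look for Viqeo embeds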
viqeo_urls = ViqeoIE._extract_urls(webpage)
if viqeo_urls:
return self.playlist_from_matches(
viqeo_urls, video_id, video_title, ie=ViqeoIE.ie_key())
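        # Look for Expressen embeds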
expressen_urls = ExpressenIE._extract_urls(webpage)
if expressen_urls:
return self.playlist_from_matches(
expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
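        # Look for Zype embeds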
zype_urls = ZypeIE._extract_urls(webpage)
if zype_urls:
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
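        # Look for Gedi Digital embeds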
gedi_urls = GediDigitalIE._extract_urls(webpage)
if gedi_urls:
return self.playlist_from_matches(
gedi_urls, video_id, video_title, ie=GediDigitalIE.ie_key())
# Look for RCS media group embeds
rcs_urls = RCSEmbedsIE._extract_urls(webpage)
if rcs_urls:
return self.playlist_from_matches(
rcs_urls, video_id, video_title, ie=RCSEmbedsIE.ie_key())
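        # Look for WimTV embeds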
wimtv_urls = WimTVIE._extract_urls(webpage)
if wimtv_urls:
return self.playlist_from_matches(
wimtv_urls, video_id, video_title, ie=WimTVIE.ie_key())
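        # Look for BitChute embeds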
bitchute_urls = BitChuteIE._extract_urls(webpage)
if bitchute_urls:
return self.playlist_from_matches(
bitchute_urls, video_id, video_title, ie=BitChuteIE.ie_key())
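        # Look for Rumble embeds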
rumble_urls = RumbleEmbedIE._extract_urls(webpage)
if len(rumble_urls) == 1:
return self.url_result(rumble_urls[0], RumbleEmbedIE.ie_key())
if rumble_urls:
return self.playlist_from_matches(
rumble_urls, video_id, video_title, ie=RumbleEmbedIE.ie_key())
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
if len(entries) == 1:
entries[0].update({
'id': video_id,
'title': video_title,
})
else:
for num, entry in enumerate(entries, start=1):
entry.update({
'id': '%s-%s' % (video_id, num),
'title': '%s (%d)' % (video_title, num),
})
for entry in entries:
self._sort_formats(entry['formats'])
return self.playlist_result(entries, video_id, video_title)
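        # Look for generic JWPlayer embed data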
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
if jwplayer_data:
try:
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=url)
return merge_dicts(info, info_dict)
except ExtractorError:
# See https://github.com/ytdl-org/youtube-dl/pull/16735
pass
# Video.js embed
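        # Illustrative only: the regex below targets inline setups shaped like
        #   videojs('player').src([{src: 'https://example.com/v.m3u8', type: 'application/x-mpegurl'}]);
        # (hypothetical markup, not taken from a real page)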
mobj = re.search(
r'(?s)\bvideojs\s*\(.+?\.src\s*\(\s*((?:\[.+?\]|{.+?}))\s*\)\s*;',
webpage)
if mobj is not None:
sources = self._parse_json(
mobj.group(1), video_id, transform_source=js_to_json,
fatal=False) or []
if not isinstance(sources, list):
sources = [sources]
formats = []
for source in sources:
src = source.get('src')
if not src or not isinstance(src, compat_str):
continue
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
return self.url_result(src, YoutubeIE.ie_key())
if src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
elif src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': src,
'ext': (mimetype2ext(src_type)
or (ext if ext in KNOWN_EXTENSIONS else 'mp4')),
})
if formats:
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
return merge_dicts(json_ld, info_dict)
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
if RtmpIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
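        # e.g. check_video('http://example.com/clip.mp4') -> True (hypothetical URL),
        # while image ('.png', '.jpg'), subtitle ('.srt', '.vtt') and script ('.js')
        # URLs are rejected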
def filter_video(urls):
return list(filter(check_video, urls))
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number of spaces between elements, as some sites try to align them (e.g. statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type:  # re.findall returns a list, never None, so test truthiness
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
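        # matches meta refresh payloads such as 0;URL='http://example.com/next' (illustrative)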
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
else:
found = None
if not found:
# twitter:player is an https URL to an iframe player that may or may not
# be supported by yt-dlp, so it is checked last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url and embed_url != url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# strip the file extension from the video id
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update({
'_type': 'url_transparent',
'ie_key': RtmpIE.ie_key(),
'url': video_url,
})
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
| 40.959868
| 432
| 0.519898
|
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
int_or_none,
is_html,
js_to_json,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_timestamp,
unsmuggle_url,
UnsupportedError,
url_or_none,
xpath_attr,
xpath_text,
xpath_with_ns,
)
from .commonprotocols import RtmpIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
from .mofosex import MofosexEmbedIE
from .spankwire import SpankwireIE
from .youporn import YouPornIE
from .vimeo import (
VimeoIE,
VHXEmbedIE,
)
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudEmbedIE
from .tunein import TuneInBaseIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
from .piksel import PikselIE
from .videa import VideaIE
from .twentymin import TwentyMinutenIE
from .ustream import UstreamIE
from .arte import ArteTVEmbedIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE
from .wistia import WistiaIE
from .mediaset import MediasetIE
from .joj import JojIE
from .megaphone import MegaphoneIE
from .vzaar import VzaarIE
from .channel9 import Channel9IE
from .vshare import VShareIE
from .mediasite import MediasiteIE
from .springboardplatform import SpringboardPlatformIE
from .yapfiles import YapFilesIE
from .vice import ViceIE
from .xfileshare import XFileShareIE
from .cloudflarestream import CloudflareStreamIE
from .peertube import PeerTubeIE
from .teachable import TeachableIE
from .indavideo import IndavideoEmbedIE
from .apa import APAIE
from .foxnews import FoxNewsIE
from .viqeo import ViqeoIE
from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
from .gedidigital import GediDigitalIE
from .rcs import RCSEmbedsIE
from .bitchute import BitChuteIE
from .rumble import RumbleEmbedIE
from .arcpublishing import ArcPublishingIE
from .medialaan import MedialaanIE
from .simplecast import SimplecastIE
from .wimtv import WimTVIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True,
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'http://podcastfeeds.nbcnews.com/nbcnews/video/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'title': 'MSNBC Rachel Maddow (video)',
'description': 're:.*her unique approach to storytelling.*',
},
'playlist': [{
'info_dict': {
'ext': 'mov',
'id': 'pdv_maddow_netcast_mov-12-03-2020-223726',
'title': 'MSNBC Rachel Maddow (video) - 12-03-2020-223726',
'description': 're:.*her unique approach to storytelling.*',
'upload_date': '20201204',
},
}],
},
{
'url': 'https://anchor.fm/s/dd00e14/podcast/rss',
'info_dict': {
'id': 'https://anchor.fm/s/dd00e14/podcast/rss',
'title': 're:.*100% Hydrogen.*',
'description': 're:.*In this episode.*',
},
'playlist': [{
'info_dict': {
'ext': 'm4a',
'id': 'c1c879525ce2cb640b344507e682c36d',
'title': 're:Hydrogen!',
'description': 're:.*In this episode we are going.*',
'timestamp': 1567977776,
'upload_date': '20190908',
'duration': 459,
'thumbnail': r're:^https?://.*\.jpg$',
'episode_number': 1,
'season_number': 1,
'age_limit': 0,
},
}],
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.hellointernet.fm/podcast?format=rss',
'info_dict': {
'id': 'http://www.hellointernet.fm/podcast?format=rss',
'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
'title': 'Hello Internet',
},
'playlist_mincount': 100,
},
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
'skip_download': True,
},
'skip': 'video gone',
},
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
'skip_download': True,
},
'skip': 'video gone',
},
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': r're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
{
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
'add_ie': ['BrightcoveLegacy'],
'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/',
'info_dict': {
'id': '5255628253001',
'ext': 'mp4',
'title': 'md5:37c519b1128915607601e75a87995fc0',
'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26',
'uploader': 'BFM BUSINESS',
'uploader_id': '876450612001',
'timestamp': 1482255315,
'upload_date': '20161220',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724',
'md5': '36d74ef5e37c8b4a2ce92880d208b968',
'info_dict': {
'id': '5360463607001',
'ext': 'mp4',
'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活',
'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。',
'uploader': 'United Nations',
'uploader_id': '1362235914001',
'timestamp': 1489593889,
'upload_date': '20170315',
},
'add_ie': ['BrightcoveLegacy'],
},
{
'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html',
'info_dict': {
'id': 'nmeth.2062_SV1',
'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research',
},
'playlist': [{
'info_dict': {
'id': '2228375078001',
'ext': 'mp4',
'title': 'nmeth.2062-sv1',
'description': 'nmeth.2062-sv1',
'timestamp': 1363357591,
'upload_date': '20130315',
'uploader': 'Nature Publishing Group',
'uploader_id': '1964492299001',
},
}],
},
{
'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis',
'md5': '2b35148fcf48da41c9fb4591650784f3',
'info_dict': {
'id': '5348741021001',
'ext': 'mp4',
'upload_date': '20170306',
'uploader_id': '4191638492001',
'timestamp': 1488769918,
'title': 'VIDEO: St. Thomas More earns first trip to basketball semis',
},
},
{
'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/',
'info_dict': {
'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs",
},
'playlist': [{
'md5': '732d22ba3d33f2f3fc253c39f8f36523',
'info_dict': {
'id': '5311302538001',
'ext': 'mp4',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche",
'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)",
'timestamp': 1486321708,
'upload_date': '20170205',
'uploader_id': '800000640001',
},
'only_matching': True,
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
'skip': 'video rotates...weekly?',
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
{
'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/',
'info_dict': {
'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2',
'ext': 'mp4',
'title': 'Steampunk Fest Comes to Honesdale',
'duration': 43.276,
},
'params': {
'skip_download': True,
}
},
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
'expected_warnings': ['301'],
},
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 153,
}
},
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
'skip': 'No videos on this page',
},
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
{
'url': 'http://www.bumm.sk/krimi/2017/07/05/biztonsagi-kamera-buktatta-le-az-agg-ferfit-utlegelo-apolot',
'info_dict': {
'id': '1495629',
'ext': 'mp4',
'title': 'Care worker punches elderly dementia patient in head 11 times',
'description': 'md5:3a743dee84e57e48ec68bf67113199a5',
},
'add_ie': ['DailyMail'],
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
{
'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/',
'md5': '516718101ec834f74318df76259fb3cc',
'info_dict': {
'id': 'msN87y-iEx0',
'ext': 'webm',
'title': 'Feynman: Mirrors FUN TO IMAGINE 6',
'upload_date': '20080526',
'description': 'md5:0ffc78ea3f01b2e2c247d5f8d1d3c18d',
'uploader': 'Christopher Sykes',
'uploader_id': 'ChristopherJSykes',
},
'add_ie': ['Youtube'],
},
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
{
'url': 'http://radiocnrv.com/promouvoir-radio-cnrv/',
'info_dict': {
'id': '204146',
'ext': 'mp3',
'title': 'CNRV',
'location': 'Paris, France',
'is_live': True,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://skiplagged.com/',
'info_dict': {
'id': 'skiplagged',
'title': 'Skiplagged: The smart way to find cheap flights',
},
'playlist_mincount': 1,
'add_ie': ['Youtube'],
},
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:8078af856dca76edc42910b61273dbbf',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# jwplayer rtmp
{
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
'uploader': 'www.suffolk.edu',
},
'params': {
'skip_download': True,
},
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
},
# Complex jwplayer
{
'url': 'http://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos',
'ext': 'mp4',
'title': 'king machine trailer 1',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# JWPlayer config passed as variable
'url': 'http://www.txxx.com/videos/3326530/ariele/',
'info_dict': {
'id': '3326530_hq',
'ext': 'mp4',
'title': 'ARIELE | Tube Cup',
'uploader': 'www.txxx.com',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
},
{
# JWPlatform iframe
'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
'info_dict': {
'id': 'O0c5JcKT',
'ext': 'mp4',
'upload_date': '20171122',
'timestamp': 1511366290,
'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
},
'add_ie': [JWPlatformIE.ie_key()],
},
{
# Video.js embed, multiple formats
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': 'PROstoe3D',
},
'params': {
'skip_download': True,
},
},
{
# Video.js embed, single format
'url': 'https://www.vooplayer.com/v3/watch/watch.php?v=NzgwNTg=',
'info_dict': {
'id': 'watch',
'ext': 'mp4',
'title': 'Step 1 - Good Foundation',
'description': 'md5:d1e7ff33a29fc3eb1673d6c270d344f4',
},
'params': {
'skip_download': True,
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'cplapp@learn360.com',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
{
# Kaltura embedded, some fileExt broken (#11480)
'url': 'http://www.cornell.edu/video/nima-arkani-hamed-standard-models-of-particle-physics',
'info_dict': {
'id': '1_sgtvehim',
'ext': 'mp4',
'title': 'Our "Standard Models" of particle physics and cosmology',
'description': 'md5:67ea74807b8c4fea92a6f38d6d323861',
'timestamp': 1321158993,
'upload_date': '20111113',
'uploader_id': 'kps1',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed
'url': 'http://www.gsd.harvard.edu/event/i-m-pei-a-centennial-celebration/',
'md5': 'ae5ace8eb09dc1a35d03b579a9c2cc44',
'info_dict': {
'id': '0_f2cfbpwy',
'ext': 'mp4',
'title': 'I. M. Pei: A Centennial Celebration',
'description': 'md5:1db8f40c69edc46ca180ba30c567f37c',
'upload_date': '20170403',
'uploader_id': 'batchUser',
'timestamp': 1491232186,
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed, more sophisticated
'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
'info_dict': {
'id': '1_9gzouybz',
'ext': 'mp4',
'title': 'lecture-05sep2017',
'description': 'md5:40f347d91fd4ba047e511c5321064b49',
'upload_date': '20170913',
'uploader_id': 'eps2',
'timestamp': 1505340777,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
{
# meta twitter:player
'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
'info_dict': {
'id': '0_01b42zps',
'ext': 'mp4',
'title': 'Main Twerk (Video)',
'upload_date': '20171208',
'uploader_id': 'sebastian.salinas@thechive.com',
'timestamp': 1512713057,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
# referrer protected EaglePlatform embed
{
'url': 'https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/',
'info_dict': {
'id': '582306',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3382,
'view_count': int,
},
'params': {
'skip_download': True,
},
},
# ClipYou (EaglePlatform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is unavailable.',
},
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
'skip': 'HTTP Error 404: Not Found',
},
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
'description': 'Amazon updates Fire TV line, Tesla\'s Model X spotted in the wild',
'timestamp': 1427237531,
'uploader': 'Crunch Report',
'upload_date': '20150324',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
'skip': 'Invalid Page URL',
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': 'x_dtl_oa_LettermanliftPR_160608',
'ext': 'mp4',
'title': 'David Letterman: A Preview',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
'upload_date': '20160609',
'timestamp': 1465431544,
'uploader': 'NBCU-NEWS',
},
},
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON Expecting value'],
},
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '106351',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'description': 'Migrated from OnionStudios',
'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'clickhole',
'upload_date': '20150527',
'timestamp': 1432744860,
}
},
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill',
'info_dict': {
'id': '3167554373001',
'ext': 'mp4',
'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill",
'description': 'md5:57bacb0e0f29349de4972bfda3191713',
'uploader_id': '1079349493',
'upload_date': '20140207',
'timestamp': 1391810548,
},
'params': {
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': '7619da8c820e835bef21a1efa2a0fc71',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Another LiveLeak embed pattern (#13336)
{
'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/',
'info_dict': {
'id': '2eb_1496309988',
'ext': 'mp4',
'title': 'Thief robs place where everyone was armed',
'description': 'md5:694d73ee79e535953cf2488562288eee',
'uploader': 'brazilwtf',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
                'title': 'Facebook video #599637780109885',
},
},
# Facebook <iframe> embed, plugin video
{
'url': 'http://5pillarsuk.com/2017/06/07/tariq-ramadan-disagrees-with-pr-exercise-by-imams-refusing-funeral-prayers-for-london-attackers/',
'info_dict': {
'id': '1754168231264132',
'ext': 'mp4',
'title': 'About the Imams and Religious leaders refusing to perform funeral prayers for...',
'uploader': 'Tariq Ramadan (official)',
'timestamp': 1496758379,
'upload_date': '20170606',
},
'params': {
'skip_download': True,
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
                'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': 'videos.expansion@el-mundo.net',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# multiple kaltura embeds, nsfw
'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
'info_dict': {
'id': 'kamila-avec-video-jaime-sadomie',
'title': "Kamila avec vídeo “J'aime sadomie”",
},
'playlist_count': 8,
},
{
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
{
'url': 'http://forum.dvdtalk.com/movie-talk/623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style.html',
'info_dict': {
'id': '623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style',
'title': 'Deleted Magic - Star Wars: OT Deleted / Alt. Scenes Docu. Style - DVD Talk Forum',
},
'playlist_mincount': 2,
},
{
'url': 'http://www.20min.ch/schweiz/news/story/So-kommen-Sie-bei-Eis-und-Schnee-sicher-an-27032552',
'info_dict': {
'id': '523629',
'ext': 'mp4',
'title': 'So kommen Sie bei Eis und Schnee sicher an',
'description': 'md5:117c212f64b25e3d95747e5276863f7d',
},
'params': {
'skip_download': True,
},
'add_ie': [TwentyMinutenIE.ie_key()],
},
{
'url': 'https://en.support.wordpress.com/videopress/',
'info_dict': {
'id': 'OcobLTqC',
'ext': 'm4v',
'title': 'IMG_5786',
'timestamp': 1435711927,
'upload_date': '20150701',
},
'params': {
'skip_download': True,
},
'add_ie': [VideoPressIE.ie_key()],
},
{
'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2',
'info_dict': {
'id': '9b3d5bee0a8740bf70dfd29d3ea43541',
'ext': 'flv',
'title': 'Магаззино: Казань 2',
'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a',
'uploader': 'Магаззино',
'upload_date': '20170228',
'uploader_id': '996642',
},
'params': {
'skip_download': True,
},
'add_ie': [RutubeIE.ie_key()],
},
{
'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm',
'only_matching': True,
},
{
'url': 'https://www.hsgac.senate.gov/hearings/canadas-fast-track-refugee-plan-unanswered-questions-and-implications-for-us-national-security',
'md5': 'fb8c70b0b515e5037981a2492099aab8',
'info_dict': {
'id': 'govtaff020316',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
},
'add_ie': [SenateISVPIE.ie_key()],
},
{
'url': 'http://www.sedona.com/FacilitatorTraining2017',
'info_dict': {
'id': 'FacilitatorTraining2017',
'title': 'Facilitator Training 2017',
},
'playlist_mincount': 5,
},
{
'url': 'https://tv5.ca/videos?v=xuu8qowr291ri',
'info_dict': {
'id': '95d035dc5c8a401588e9c0e6bd1e9c92',
'ext': 'mp4',
'title': '07448641',
'timestamp': 1499890639,
'upload_date': '20170712',
},
'params': {
'skip_download': True,
},
'add_ie': ['LimelightMedia'],
},
{
'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/',
'info_dict': {
'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest',
'title': 'Standoff with Walnut Creek murder suspect ends',
'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788',
},
'playlist_mincount': 4,
},
{
'url': 'http://www.vanityfair.com/hollywood/2017/04/donald-trump-tv-pitches',
'info_dict': {
'id': '8caf6e88-d0ec-11e5-90d3-34c2c42653ac',
'ext': 'mp4',
'title': "No one has seen the drama series based on Trump's life \u2014 until now",
'description': 'Donald Trump wanted a weekly TV drama based on his life. It never aired. But The Washington Post recently obtained a scene from the pilot script — and enlisted actors.',
'timestamp': 1455216756,
'uploader': 'The Washington Post',
'upload_date': '20160211',
},
'add_ie': [WashingtonPostIE.ie_key()],
},
{
# Mediaset embed
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
'info_dict': {
'id': '720642',
'ext': 'mp4',
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
},
'params': {
'skip_download': True,
},
'add_ie': [MediasetIE.ie_key()],
},
{
# JOJ.sk embeds
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'info_dict': {
'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'title': 'Slovenskom sa prehnala vlna silných búrok',
},
'playlist_mincount': 5,
'add_ie': [JojIE.ie_key()],
},
{
# AMP embed (see https://www.ampproject.org/docs/reference/components/amp-video)
'url': 'https://tvrain.ru/amp/418921/',
'md5': 'cc00413936695987e8de148b67d14f1d',
'info_dict': {
'id': '418921',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
},
},
{
# vzaar embed
'url': 'http://help.vzaar.com/article/165-embedding-video',
'md5': '7e3919d9d2620b89e3e00bec7fe8c9d4',
'info_dict': {
'id': '8707641',
'ext': 'mp4',
'title': 'Building A Business Online: Principal Chairs Q & A',
},
},
{
# multiple HTML5 videos on one page
'url': 'https://www.paragon-software.com/home/rk-free/keyscenarios.html',
'info_dict': {
'id': 'keyscenarios',
'title': 'Rescue Kit 14 Free Edition - Getting started',
},
'playlist_count': 4,
},
{
# vshare embed
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
'info_dict': {
'id': '0f64ce6',
'title': 'vl14062007715967',
'ext': 'mp4',
}
},
{
'url': 'http://www.heidelberg-laureate-forum.org/blog/video/lecture-friday-september-23-2016-sir-c-antony-r-hoare/',
'md5': 'aecd089f55b1cb5a59032cb049d3a356',
'info_dict': {
'id': '90227f51a80c4d8f86c345a7fa62bd9a1d',
'ext': 'mp4',
'title': 'Lecture: Friday, September 23, 2016 - Sir Tony Hoare',
'description': 'md5:5a51db84a62def7b7054df2ade403c6c',
'timestamp': 1474354800,
'upload_date': '20160920',
}
},
{
'url': 'http://www.kidzworld.com/article/30935-trolls-the-beat-goes-on-interview-skylar-astin-and-amanda-leighton',
'info_dict': {
'id': '1731611',
'ext': 'mp4',
'title': 'Official Trailer | TROLLS: THE BEAT GOES ON!',
'description': 'md5:eb5f23826a027ba95277d105f248b825',
'timestamp': 1516100691,
'upload_date': '20180116',
},
'params': {
'skip_download': True,
},
'add_ie': [SpringboardPlatformIE.ie_key()],
},
{
'url': 'https://www.yapfiles.ru/show/1872528/690b05d3054d2dbe1e69523aa21bb3b1.mp4.html',
'info_dict': {
'id': 'vMDE4NzI1Mjgt690b',
'ext': 'mp4',
'title': 'Котята',
},
'add_ie': [YapFilesIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# CloudflareStream embed
'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
'info_dict': {
'id': '31c9291ab41fac05471db4e73aa11717',
'ext': 'mp4',
'title': '31c9291ab41fac05471db4e73aa11717',
},
'add_ie': [CloudflareStreamIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# PeerTube embed
'url': 'https://joinpeertube.org/fr/home/',
'info_dict': {
'id': 'home',
                'title': 'Reprenez le contrôle de vos vidéos ! #stopalgorithmes',
},
'playlist_count': 2,
},
{
# Indavideo embed
'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
'info_dict': {
'id': '1693903',
'ext': 'mp4',
'title': 'Így kell otthon hamburgert sütni',
'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
'timestamp': 1426330212,
'upload_date': '20150314',
'uploader': 'StreetKitchen',
'uploader_id': '546363',
},
'add_ie': [IndavideoEmbedIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# APA embed via JWPlatform embed
'url': 'http://www.vol.at/blue-man-group/5593454',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://share-videos.se/auto/video/83645793?uid=13',
'md5': 'b68d276de422ab07ee1d49388103f457',
'info_dict': {
'id': '83645793',
'title': 'Lock up and get excited',
'ext': 'mp4'
},
'skip': 'TODO: fix nested playlists processing in tests',
},
{
# Viqeo embeds
'url': 'https://viqeo.tv/',
'info_dict': {
'id': 'viqeo',
'title': 'All-new video platform',
},
'playlist_count': 6,
},
{
# Squarespace video embed, 2019-08-28
'url': 'http://ootboxford.com',
'info_dict': {
'id': 'Tc7b_JGdZfw',
'title': 'Out of the Blue, at Childish Things 10',
'ext': 'mp4',
'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
'uploader_id': 'helendouglashouse',
'uploader': 'Helen & Douglas House',
'upload_date': '20140328',
},
'params': {
'skip_download': True,
},
},
# {
# # Zype embed
# 'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
# 'info_dict': {
# 'id': '5b400b834b32992a310622b9',
# 'ext': 'mp4',
# 'title': 'Smoky Barbecue Favorites',
# 'thumbnail': r're:^https?://.*\.jpe?g',
# 'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
# 'upload_date': '20170909',
# 'timestamp': 1504915200,
# },
# 'add_ie': [ZypeIE.ie_key()],
# 'params': {
# 'skip_download': True,
# },
# },
{
# videojs embed
'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
'info_dict': {
'id': 'shell',
'ext': 'mp4',
'title': 'Доставщик пиццы спросил разрешения сыграть на фортепиано',
'description': 'md5:89209cdc587dab1e4a090453dbaa2cb1',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest'],
},
{
# DailyMotion embed with DM.player
'url': 'https://www.beinsports.com/us/copa-del-rey/video/the-locker-room-valencia-beat-barca-in-copa/1203804',
'info_dict': {
'id': 'k6aKkGHd9FJs4mtJN39',
'ext': 'mp4',
'title': 'The Locker Room: Valencia Beat Barca In Copa del Rey Final',
'description': 'This video is private.',
'uploader_id': 'x1jf30l',
'uploader': 'beIN SPORTS USA',
'upload_date': '20190528',
'timestamp': 1559062971,
},
'params': {
'skip_download': True,
},
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
        #         'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# },
{
# VHX Embed
'url': 'https://demo.vhx.tv/category-c/videos/file-example-mp4-480-1-5mg-copy',
'info_dict': {
'id': '858208',
'ext': 'mp4',
'title': 'Untitled',
'uploader_id': 'user80538407',
'uploader': 'OTT Videos',
},
},
{
# ArcPublishing PoWa video player
'url': 'https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/',
'md5': 'b03b2fac8680e1e5a7cc81a5c27e71b3',
'info_dict': {
'id': '8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'ext': 'mp4',
'title': 'Senate candidates wave to voters on Anchorage streets',
'description': 'md5:91f51a6511f090617353dc720318b20e',
'timestamp': 1604378735,
'upload_date': '20201103',
'duration': 1581,
},
},
{
# MyChannels SDK embed
# https://www.24kitchen.nl/populair/deskundige-dit-waarom-sommigen-gevoelig-zijn-voor-voedselallergieen
'url': 'https://www.demorgen.be/nieuws/burgemeester-rotterdam-richt-zich-in-videoboodschap-tot-relschoppers-voelt-het-goed~b0bcfd741/',
'md5': '90c0699c37006ef18e198c032d81739c',
'info_dict': {
'id': '194165',
'ext': 'mp4',
'title': 'Burgemeester Aboutaleb spreekt relschoppers toe',
'timestamp': 1611740340,
'upload_date': '20210127',
'duration': 159,
},
},
{
# Simplecast player embed
'url': 'https://www.bio.org/podcast',
'info_dict': {
'id': 'podcast',
'title': 'I AM BIO Podcast | BIO',
},
'playlist_mincount': 52,
},
{
# WimTv embed player
'url': 'http://www.msmotor.tv/wearefmi-pt-2-2021/',
'info_dict': {
'id': 'wearefmi-pt-2-2021',
                'title': '#wearefmi pt.2 – 2021',
},
'playlist_count': 1,
},
]
def report_following_redirect(self, new_url):
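        """Report that a redirect will be followed."""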
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
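        """Build a playlist from an RSS feed.

        Each <item> becomes a url_transparent entry pointing at its
        <enclosure> URL (falling back to <link>), with iTunes podcast
        tags mapped onto episode metadata.
        """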
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
NS_MAP = {
'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
}
entries = []
for it in doc.findall('./channel/item'):
next_url = None
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
continue
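            # Read an optional <itunes:KEY> child of the current item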
def itunes(key):
return xpath_text(
it, xpath_with_ns('./itunes:%s' % key, NS_MAP),
default=None)
duration = itunes('duration')
explicit = (itunes('explicit') or '').lower()
if explicit in ('true', 'yes'):
age_limit = 18
elif explicit in ('false', 'no'):
age_limit = 0
else:
age_limit = None
entries.append({
'_type': 'url_transparent',
'url': next_url,
'title': it.find('title').text,
'description': xpath_text(it, 'description', default=None),
'timestamp': unified_timestamp(
xpath_text(it, 'pubDate', default=None)),
'duration': int_or_none(duration) or parse_duration(duration),
'thumbnail': url_or_none(xpath_attr(it, xpath_with_ns('./itunes:image', NS_MAP), 'href')),
'episode': itunes('title'),
'episode_number': int_or_none(itunes('episode')),
'season_number': int_or_none(itunes('season')),
'age_limit': age_limit,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
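        """Extract a Camtasia Studio project as a playlist, or return None
        when the page does not reference a csConfigFile."""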
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
        for n in fileset_node:  # Element.getchildren() was removed in Python 3.9
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
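        # Protocol-relative URLs are completed with the preferred scheme first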
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self.get_param('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run yt-dlp "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
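        # Other extractors may smuggle extra data (e.g. a forced video id) into
        # the URL before delegating to the generic extractor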
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen('%s: Requesting header' % video_id)
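        # A HEAD request is cheap: it exposes redirects and the Content-Type of
        # direct media links without downloading the response body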
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
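        # Some servers reject HEAD requests; fall back to a regular GET and
        # reuse that response for the extraction pass below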
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'timestamp': unified_timestamp(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
format_id = compat_str(m.group('format_id'))
subtitles = {}
if format_id.endswith('mpegurl'):
formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': format_id,
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
info_dict['subtitles'] = subtitles
return info_dict
if not self.get_param('test', False) and not is_intentional:
force = self.get_param('force_generic_extractor', False)
self.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
            # Some webservers may serve compressed content of rather big size
            # (e.g. gzipped flac), making it impossible to download only a chunk
            # of the file (yet we need only the first 512 bytes to test whether
            # it's HTML or not). With yt-dlp's default Accept-Encoding that
            # would always result in downloading the whole file, which is not
            # desirable. Therefore, for the extraction pass we override
            # Accept-Encoding to any in order to accept raw bytes and be able
            # to download only a chunk.
            # It may be better to solve this by checking Content-Type for
            # application/octet-stream after the HEAD request finishes, but it
            # is unclear whether we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
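        # DPG Media sites may first serve a consent interstitial; re-downloading
        # the page usually yields the real content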
if '<title>DPG Media Privacy Gate</title>' in webpage:
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
try:
doc = compat_etree_fromstring(webpage)
except compat_xml_parse_error:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'], info_dict['subtitles'] = self._parse_ism_formats_and_subtitles(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(
self._parse_xspf(
doc, video_id, xspf_url=url,
xspf_base_url=full_response.geturl()),
video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'], info_dict['subtitles'] = self._parse_mpd_formats_and_subtitles(
doc,
mpd_base_url=full_response.geturl().rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
# Unescaping the whole page allows to handle those cases in a generic way
# FIXME: unescaping the whole page may break URLs, commenting out for now.
# There probably should be a second run of generic extractor on unescaped webpage.
# webpage = compat_urllib_parse_unquote(webpage)
# Unescape squarespace embeds to be detected by generic extractor,
# see https://github.com/ytdl-org/youtube-dl/issues/21294
webpage = re.sub(
r'<div[^>]+class=[^>]*?\bsqs-video-wrapper\b[^>]*>',
lambda x: unescapeHTML(x.group(0)), webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
info_dict.update({
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
})
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
if bc_urls:
return self.playlist_from_matches(
bc_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'referrer': url}),
ie='BrightcoveNew')
# Look for Nexx embeds
nexx_urls = NexxIE._extract_urls(webpage)
if nexx_urls:
return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key())
# Look for Nexx iFrame embeds
nexx_embed_urls = NexxEmbedIE._extract_urls(webpage)
if nexx_embed_urls:
return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key())
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')
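        # Look for Arc Publishing embeds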
arc_urls = ArcPublishingIE._extract_urls(webpage)
if arc_urls:
return self.playlist_from_matches(arc_urls, video_id, video_title, ie=ArcPublishingIE.ie_key())
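        # Look for Medialaan (MyChannels SDK) embeds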
mychannels_urls = MedialaanIE._extract_urls(webpage)
if mychannels_urls:
return self.playlist_from_matches(
mychannels_urls, video_id, video_title, ie=MedialaanIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl')
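        # Look for Vimeo embeds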
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key())
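        # Look for VHX embeds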
vhx_url = VHXEmbedIE._extract_url(webpage)
if vhx_url:
return self.url_result(vhx_url, VHXEmbedIE.ie_key())
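        # Look for embedded vid.me player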
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
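        # Look for YouTube embeds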
youtube_url = self._search_regex(
r'<link rel="alternate" href="(https://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})"',
webpage, 'youtube link', default=None)
if youtube_url:
return self.url_result(youtube_url, YoutubeIE.ie_key())
youtube_urls = YoutubeIE._extract_urls(webpage)
if youtube_urls:
return self.playlist_from_matches(
youtube_urls, video_id, video_title, ie=YoutubeIE.ie_key())
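        # Look for Dailymotion embeds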
matches = DailymotionIE._extract_urls(webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title)
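        # Look for embedded Dailymotion playlist (jukebox) player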
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return self.playlist_from_matches(
playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for DailyMail embeds
dailymail_urls = DailyMailIE._extract_urls(webpage)
if dailymail_urls:
return self.playlist_from_matches(
dailymail_urls, video_id, video_title, ie=DailyMailIE.ie_key())
# Look for Teachable embeds, must be before Wistia
teachable_url = TeachableIE._extract_url(webpage, url)
if teachable_url:
return self.url_result(teachable_url)
# Look for embedded Wistia player
wistia_urls = WistiaIE._extract_urls(webpage)
if wistia_urls:
playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
for entry in playlist['entries']:
entry.update({
'_type': 'url_transparent',
'uploader': video_uploader,
})
return playlist
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
return OoyalaIE._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
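        # Look for multiple Ooyala embeds on SBN network websites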
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return self.playlist_from_matches(
embeds, video_id, video_title,
getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
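        # Look for Aparat videos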
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
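        # Look for embedded Facebook player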
facebook_urls = FacebookIE._extract_urls(webpage)
if facebook_urls:
return self.playlist_from_matches(facebook_urls, video_id, video_title)
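        # Look for embedded VK player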
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
odnoklassniki_url = OdnoklassnikiIE._extract_url(webpage)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
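        # Look for embedded Huffington Post player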
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return self.playlist_from_matches(
matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie')
# Look for Simplecast embeds
simplecast_urls = SimplecastIE._extract_urls(webpage)
if simplecast_urls:
return self.playlist_from_matches(
simplecast_urls, video_id, video_title)
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxIE._extract_urls(webpage)
if sportbox_urls:
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie=SportBoxIE.ie_key())
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key())
# Look for embedded Tube8 player
tube8_urls = Tube8IE._extract_urls(webpage)
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
# Look for embedded Mofosex player
mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
if mofosex_urls:
return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
# Look for embedded Spankwire player
spankwire_urls = SpankwireIE._extract_urls(webpage)
if spankwire_urls:
return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
# Look for embedded YouPorn player
youporn_urls = YouPornIE._extract_urls(webpage)
if youporn_urls:
return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
arte_urls = ArteTVEmbedIE._extract_urls(webpage)
if arte_urls:
return self.playlist_from_matches(arte_urls, video_id, video_title)
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudEmbedIE._extract_urls(webpage)
if soundcloud_urls:
return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return self.playlist_from_matches(tunein_urls, video_id, video_title)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
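        # Look for embedded mlb.com player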
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m\.mlb\.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
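        # Look for Condé Nast embeds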
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
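        # Look for Livestream embeds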
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_urls = KalturaIE._extract_urls(webpage)
if kaltura_urls:
return self.playlist_from_matches(
kaltura_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'source_url': url}),
ie=KalturaIE.ie_key())
# Look for EaglePlatform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(smuggle_url(eagleplatform_url, {'referrer': url}), EaglePlatformIE.ie_key())
# Look for ClipYou (uses EaglePlatform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Kinja embeds
kinja_embed_urls = KinjaEmbedIE._extract_urls(webpage, url)
if kinja_embed_urls:
return self.playlist_from_matches(
kinja_embed_urls, video_id, video_title)
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_urls = JWPlatformIE._extract_urls(webpage)
if jwplatform_urls:
return self.playlist_from_matches(jwplatform_urls, video_id, video_title, ie=JWPlatformIE.ie_key())
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
limelight_urls = LimelightBaseIE._extract_urls(webpage, url)
if limelight_urls:
return self.playlist_result(
limelight_urls, video_id, video_title, video_description)
# Look for Anvato embeds
anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id)
if anvato_urls:
return self.playlist_result(
anvato_urls, video_id, video_title, video_description)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//
(?:
admin\.mangomolo\.com/analytics/index\.php/customers/embed|
player\.mangomolo\.com/v1
)/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
(?:index|live)\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_urls = LiveLeakIE._extract_urls(webpage)
if liveleak_urls:
return self.playlist_from_matches(liveleak_urls, video_id, video_title)
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return self.playlist_from_matches(
twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return self.playlist_from_matches(
videopress_urls, video_id, video_title, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return self.playlist_from_matches(
rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
# Look for WashingtonPost embeds
wapo_urls = WashingtonPostIE._extract_urls(webpage)
if wapo_urls:
return self.playlist_from_matches(
wapo_urls, video_id, video_title, ie=WashingtonPostIE.ie_key())
# Look for Mediaset embeds
mediaset_urls = MediasetIE._extract_urls(self, webpage)
if mediaset_urls:
return self.playlist_from_matches(
mediaset_urls, video_id, video_title, ie=MediasetIE.ie_key())
# Look for JOJ.sk embeds
joj_urls = JojIE._extract_urls(webpage)
if joj_urls:
return self.playlist_from_matches(
joj_urls, video_id, video_title, ie=JojIE.ie_key())
# Look for megaphone.fm embeds
mpfn_urls = MegaphoneIE._extract_urls(webpage)
if mpfn_urls:
return self.playlist_from_matches(
mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key())
# Look for vzaar embeds
vzaar_urls = VzaarIE._extract_urls(webpage)
if vzaar_urls:
return self.playlist_from_matches(
vzaar_urls, video_id, video_title, ie=VzaarIE.ie_key())
channel9_urls = Channel9IE._extract_urls(webpage)
if channel9_urls:
return self.playlist_from_matches(
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
vshare_urls = VShareIE._extract_urls(webpage)
if vshare_urls:
return self.playlist_from_matches(
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
# Look for Mediasite embeds
mediasite_urls = MediasiteIE._extract_urls(webpage)
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
springboardplatform_urls = SpringboardPlatformIE._extract_urls(webpage)
if springboardplatform_urls:
return self.playlist_from_matches(
springboardplatform_urls, video_id, video_title,
ie=SpringboardPlatformIE.ie_key())
yapfiles_urls = YapFilesIE._extract_urls(webpage)
if yapfiles_urls:
return self.playlist_from_matches(
yapfiles_urls, video_id, video_title, ie=YapFilesIE.ie_key())
vice_urls = ViceIE._extract_urls(webpage)
if vice_urls:
return self.playlist_from_matches(
vice_urls, video_id, video_title, ie=ViceIE.ie_key())
xfileshare_urls = XFileShareIE._extract_urls(webpage)
if xfileshare_urls:
return self.playlist_from_matches(
xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
if cloudflarestream_urls:
return self.playlist_from_matches(
cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
peertube_urls = PeerTubeIE._extract_urls(webpage, url)
if peertube_urls:
return self.playlist_from_matches(
peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
if indavideo_urls:
return self.playlist_from_matches(
indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
apa_urls = APAIE._extract_urls(webpage)
if apa_urls:
return self.playlist_from_matches(
apa_urls, video_id, video_title, ie=APAIE.ie_key())
foxnews_urls = FoxNewsIE._extract_urls(webpage)
if foxnews_urls:
return self.playlist_from_matches(
foxnews_urls, video_id, video_title, ie=FoxNewsIE.ie_key())
sharevideos_urls = [sharevideos_mobj.group('url') for sharevideos_mobj in re.finditer(
r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
webpage)]
if sharevideos_urls:
return self.playlist_from_matches(
sharevideos_urls, video_id, video_title)
viqeo_urls = ViqeoIE._extract_urls(webpage)
if viqeo_urls:
return self.playlist_from_matches(
viqeo_urls, video_id, video_title, ie=ViqeoIE.ie_key())
expressen_urls = ExpressenIE._extract_urls(webpage)
if expressen_urls:
return self.playlist_from_matches(
expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
zype_urls = ZypeIE._extract_urls(webpage)
if zype_urls:
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
gedi_urls = GediDigitalIE._extract_urls(webpage)
if gedi_urls:
return self.playlist_from_matches(
gedi_urls, video_id, video_title, ie=GediDigitalIE.ie_key())
# Look for RCS media group embeds
rcs_urls = RCSEmbedsIE._extract_urls(webpage)
if rcs_urls:
return self.playlist_from_matches(
rcs_urls, video_id, video_title, ie=RCSEmbedsIE.ie_key())
wimtv_urls = WimTVIE._extract_urls(webpage)
if wimtv_urls:
return self.playlist_from_matches(
wimtv_urls, video_id, video_title, ie=WimTVIE.ie_key())
bitchute_urls = BitChuteIE._extract_urls(webpage)
if bitchute_urls:
return self.playlist_from_matches(
bitchute_urls, video_id, video_title, ie=BitChuteIE.ie_key())
rumble_urls = RumbleEmbedIE._extract_urls(webpage)
if len(rumble_urls) == 1:
return self.url_result(rumble_urls[0], RumbleEmbedIE.ie_key())
if rumble_urls:
return self.playlist_from_matches(
rumble_urls, video_id, video_title, ie=RumbleEmbedIE.ie_key())
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
if len(entries) == 1:
entries[0].update({
'id': video_id,
'title': video_title,
})
else:
for num, entry in enumerate(entries, start=1):
entry.update({
'id': '%s-%s' % (video_id, num),
'title': '%s (%d)' % (video_title, num),
})
for entry in entries:
self._sort_formats(entry['formats'])
return self.playlist_result(entries, video_id, video_title)
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
if jwplayer_data:
try:
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=url)
return merge_dicts(info, info_dict)
except ExtractorError:
# See https://github.com/ytdl-org/youtube-dl/pull/16735
pass
# Video.js embed
mobj = re.search(
r'(?s)\bvideojs\s*\(.+?\.src\s*\(\s*((?:\[.+?\]|{.+?}))\s*\)\s*;',
webpage)
if mobj is not None:
sources = self._parse_json(
mobj.group(1), video_id, transform_source=js_to_json,
fatal=False) or []
if not isinstance(sources, list):
sources = [sources]
formats = []
for source in sources:
src = source.get('src')
if not src or not isinstance(src, compat_str):
continue
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
return self.url_result(src, YoutubeIE.ie_key())
if src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
elif src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': src,
'ext': (mimetype2ext(src_type)
or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
})
if formats:
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
return merge_dicts(json_ld, info_dict)
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
if RtmpIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
def filter_video(urls):
            return list(filter(check_video, urls))
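        # Illustration (not in the original source): with the heuristic
        # above, 'http://x/clip.mp4' passes check_video, while
        # 'http://x/logo.png' and extension-less paths such as
        # 'http://x/watch' are rejected.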
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
            found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
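            # Illustrative note (not in the original source): the regex below
            # matches redirect values such as
            # content="0; URL='http://example.com/v'" in
            # <meta http-equiv="refresh"> tags.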
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
else:
found = None
if not found:
            embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url and embed_url != url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update({
'_type': 'url_transparent',
'ie_key': RtmpIE.ie_key(),
'url': video_url,
})
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
                # whether it's actually an ISM manifest or some other streaming manifest.
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
| true
| true
|
79026015ec7f4683525737974a3905f4f262c182
| 8,728
|
py
|
Python
|
CNN/src/models.py
|
thunlp/AMNRE
|
16970d9887561c75886123828960943b7efc9c62
|
[
"MIT"
] | 25
|
2018-06-05T11:20:36.000Z
|
2021-07-22T11:43:39.000Z
|
CNN/src/models.py
|
thunlp/AMNRE
|
16970d9887561c75886123828960943b7efc9c62
|
[
"MIT"
] | 9
|
2018-06-27T05:02:41.000Z
|
2021-04-28T08:47:58.000Z
|
CNN/src/models.py
|
thunlp/AMNRE
|
16970d9887561c75886123828960943b7efc9c62
|
[
"MIT"
] | 7
|
2018-07-03T19:45:21.000Z
|
2019-04-14T11:07:33.000Z
|
from __future__ import print_function
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from constant import *
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderGRU(nn.Module):
def __init__(self,
vocab_size,emb_dim,emb,
hidden_dim,
nlayers,
pad_token,
bidir=False):
#emb---np wordVec vocab_size=len(emb)
super(EncoderGRU,self).__init__()
        self.word_emb=nn.Embedding(vocab_size,emb_dim,pad_token)
        self.word_emb.weight.data.copy_(torch.from_numpy(emb))
        self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
        self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
self.hidden_dim=hidden_dim
self.emb_dim=emb_dim+dimWPE*2
self.nlayers=nlayers
self.bidir=bidir
#using gru
self.gru=nn.GRU(
self.emb_dim//2 if bidir else self.emb_dim,
self.hidden_dim,
self.nlayers,
bidirectional=bidir,
batch_first=True
)
def forward(self,input_,pos1,pos2):
embd=self.word_emb(input_)
pos1=self.pos1_emb(pos1)
pos2=self.pos2_emb(pos2)
embd=torch.cat((embd,pos1,pos2),2)
#using gru
        _,h_t_=self.gru(embd)
h_t=torch.cat((h_t_[-1],h_t_[-2]),1)if self.bidir else h_t_[-1]
return h_t
class EncoderCNN(nn.Module):
def __init__(self,
vocab_size,emb,emb_dim=dimWE,
hidden_dim=dimC,lang=0):
#emb---np wordVec vocab_size=len(emb)
super(EncoderCNN,self).__init__()
self.lang=lang
self.word_emb=nn.Embedding(vocab_size,emb_dim)
self.word_emb.weight.data.copy_(torch.from_numpy(emb))
self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
self.maxPooling=nn.MaxPool1d(SenLen[self.lang]-2)
self.emb_dim=emb_dim+dimWPE*2
self.hidden_dim=hidden_dim
#using CNN
self.tanh=nn.Tanh()
self.conv=nn.Conv1d(self.emb_dim,hidden_dim,filter_size)
self.dropout=nn.Dropout(p=CNNDropout)
def forward(self,inp,pos1,pos2):
Len=inp.size(0)
embd=self.word_emb(inp)
pos1=self.pos1_emb(pos1)
pos2=self.pos2_emb(pos2)
embd=torch.cat((embd,pos1,pos2),2).transpose(1,2)
conved=self.conv(embd)
pooled=self.maxPooling(conved).view(Len,dimC)
out=self.tanh(pooled)
return self.dropout(out)
class CNNEncoder(nn.Module):
def __init__(self,vocab_en,emb_en,vocab_zh,emb_zh):
super(CNNEncoder,self).__init__()
self.encoder_en=EncoderCNN(vocab_en,emb_en,dimWE,dimC,0)
self.encoder_zh=EncoderCNN(vocab_zh,emb_zh,dimWE,dimC,1)
def forward(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
return self.encoder_en(wordsEn,pos1En,pos2En),self.encoder_zh(wordsZh,pos1Zh,pos2Zh)
class Discriminator(nn.Module):
def __init__(self,
dis_input_dim=Encodered_dim,
nlayers=dis_layers,
hidden_dim=dis_hidden_dim,
input_dropout=dis_input_dropout,
dropout=dis_dropout):
super(Discriminator,self).__init__()
self.dis_input=dis_input_dim
layers=[nn.Dropout(input_dropout)]
for i in range(0,nlayers+1):
input_dim=self.dis_input if i==0 else hidden_dim
output_dim=1 if i==nlayers else hidden_dim
layers.append(nn.Linear(input_dim,output_dim))
if i<nlayers:
layers.append(nn.LeakyReLU(0.2))
layers.append(nn.Dropout(dropout))
layers.append(nn.Sigmoid())
self.layers=nn.Sequential(*layers)
def forward(self,inp):
assert inp.dim()==2 and inp.size(1)==self.dis_input
return self.layers(inp).view(-1)
class MultiRE(nn.Module):
def __init__(self):
super(MultiRE,self).__init__()
self.relation_emb=nn.Embedding(dimR,Encodered_dim)
self.dropout=nn.Dropout(p=Att_dropout)
#self.softmax=nn.Softmax()
#self.logsoftmax=nn.LogSoftmax()
self.M=nn.Linear(Encodered_dim,dimR)
def forward(self,inp_en,r_en,l_en,inp_zh,r_zh,l_zh,re_mask):
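        # Descriptive note (not in the original source): l_en/l_zh hold the
        # per-instance bag sizes; for each instance, the sentences of both
        # languages are pooled with relation-query attention into S, then
        # every relation is scored with a log-softmax over dimR.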
NumRe=r_en.size(0)
NumIn=l_zh.size(0)
relation_en=self.relation_emb(r_en)
relation_zh=self.relation_emb(r_zh)
attn_en=torch.sum(relation_en*inp_en,2)
attn_zh=torch.sum(relation_zh*inp_zh,2)
p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0))
L_en=0
L_zh=0
R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
for i in range(0,NumIn):
R_en=L_en+l_en[i].data[0]
R_zh=L_zh+l_zh[i].data[0]
if R_en>L_en and R_zh>L_zh:
Att=F.softmax(torch.cat((attn_en[:,L_en:R_en],attn_zh[:,L_zh:R_zh]),1),1)
S[i]=self.dropout(torch.matmul(Att,torch.cat((inp_en[L_en:R_en],inp_zh[L_zh:R_zh]),0)))
R_vec[i]=relation_en[:,L_en,:]
elif R_en>L_en:
Att=F.softmax(attn_en[:,L_en:R_en],1)
S[i]=self.dropout(torch.matmul(Att,inp_en[L_en:R_en]))
R_vec[i]=relation_en[:,L_en,:]
elif R_zh>L_zh:
Att=F.softmax(attn_zh[:,L_zh:R_zh],1)
S[i]=self.dropout(torch.matmul(Att,inp_zh[L_zh:R_zh]))
R_vec[i]=relation_zh[:,L_zh,:]
else:
print("ERR NO sentences")
exit()
L_en=R_en
L_zh=R_zh
p_n=F.log_softmax(self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1),2).view(NumIn,NumRe,dimR)
return p_n[re_mask].view(NumIn,NumRe)
class MonoRE(nn.Module):
def __init__(self):
super(MonoRE,self).__init__()
self.relation_emb=nn.Embedding(dimR,Encodered_dim)
self.dropout=nn.Dropout(p=Att_dropout)
#self.softmax=nn.Softmax()
#self.logsoftmax=nn.LogSoftmax()
self.M=nn.Linear(Encodered_dim,dimR)
def forward(self,inp,r,l,re_mask):
NumRe=r.size(0)
NumIn=l.size(0)
relation=self.relation_emb(r)
attn=torch.sum(relation*inp,2)
p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0))
L=0
R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
for i in range(0,NumIn):
R=L+l[i].data[0]
if R>L:
Att=F.softmax(attn[:,L:R],1)
S[i]=self.dropout(torch.matmul(Att,inp[L:R]))
R_vec[i]=relation[:,L,:]
L=R
p_n=F.log_softmax((self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1)),2).view(NumIn,NumRe,dimR)
return p_n[re_mask].view(NumIn,NumRe)
class AMRE(nn.Module):
def __init__(self,emb_en,emb_zh):
super(AMRE,self).__init__()
self.encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda()
self.enRE=MonoRE().cuda()
self.zhRE=MonoRE().cuda()
def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask):
inp_en,inp_zh=self.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
return self.enRE(inp_en,rEn,lEn,re_mask)+self.zhRE(inp_zh,rZh,lZh,re_mask)
class MARE(nn.Module):
def __init__(self,emb_en,emb_zh):
super(MARE,self).__init__()
self.D=Discriminator().cuda()
self.share_encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda()
self.multiRE=MultiRE().cuda()
self.monoRE=AMRE(emb_en,emb_zh)
def Orth_con(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
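        # Descriptive note (not in the original source): orthogonality
        # constraint between shared and language-specific encodings; center
        # each set, L2-normalize, then penalize the mean squared
        # cross-correlation between the two feature spaces.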
share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
mono_en,mono_zh=self.monoRE.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
share=torch.cat((share_en,share_zh),0)
mono=torch.cat((mono_en,mono_zh),0)
share-=torch.mean(share,0)
mono-=torch.mean(mono,0)
share=F.normalize(share,2,1)
mono=F.normalize(mono,2,1)
correlation_mat=torch.matmul(share.transpose(0,1),mono)
cost=torch.mean(correlation_mat*correlation_mat)
return cost
def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask):
share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
return self.monoRE(wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask)+self.multiRE(share_en,rEn,lEn,share_zh,rZh,lZh,re_mask)
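# --- Illustrative usage sketch, not part of the original file ---
# Assumes the dimensional constants (e.g. Encodered_dim, dis_layers)
# imported from `constant.py` above; the Discriminator here runs on CPU,
# unlike the .cuda() instances built inside MARE.
if __name__ == '__main__':
    D = Discriminator()
    # A batch of 8 encoded sentences -> 8 domain scores in (0, 1).
    scores = D(torch.zeros(8, Encodered_dim))
    print(scores.shape)  # torch.Size([8])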
| 43.422886
| 151
| 0.635655
|
from __future__ import print_function
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from constant import *
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderGRU(nn.Module):
def __init__(self,
vocab_size,emb_dim,emb,
hidden_dim,
nlayers,
pad_token,
bidir=False):
        super(EncoderGRU,self).__init__()
        self.word_emb=nn.Embedding(vocab_size,emb_dim,pad_token)
        self.word_emb.weight.data.copy_(torch.from_numpy(emb))
        self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
        self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
self.hidden_dim=hidden_dim
self.emb_dim=emb_dim+dimWPE*2
self.nlayers=nlayers
self.bidir=bidir
self.gru=nn.GRU(
self.emb_dim//2 if bidir else self.emb_dim,
self.hidden_dim,
self.nlayers,
bidirectional=bidir,
batch_first=True
)
def forward(self,input_,pos1,pos2):
embd=self.word_emb(input_)
pos1=self.pos1_emb(pos1)
pos2=self.pos2_emb(pos2)
embd=torch.cat((embd,pos1,pos2),2)
        _,h_t_=self.gru(embd)
h_t=torch.cat((h_t_[-1],h_t_[-2]),1)if self.bidir else h_t_[-1]
return h_t
class EncoderCNN(nn.Module):
def __init__(self,
vocab_size,emb,emb_dim=dimWE,
hidden_dim=dimC,lang=0):
super(EncoderCNN,self).__init__()
self.lang=lang
self.word_emb=nn.Embedding(vocab_size,emb_dim)
self.word_emb.weight.data.copy_(torch.from_numpy(emb))
self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
self.maxPooling=nn.MaxPool1d(SenLen[self.lang]-2)
self.emb_dim=emb_dim+dimWPE*2
self.hidden_dim=hidden_dim
self.tanh=nn.Tanh()
self.conv=nn.Conv1d(self.emb_dim,hidden_dim,filter_size)
self.dropout=nn.Dropout(p=CNNDropout)
def forward(self,inp,pos1,pos2):
Len=inp.size(0)
embd=self.word_emb(inp)
pos1=self.pos1_emb(pos1)
pos2=self.pos2_emb(pos2)
embd=torch.cat((embd,pos1,pos2),2).transpose(1,2)
conved=self.conv(embd)
pooled=self.maxPooling(conved).view(Len,dimC)
out=self.tanh(pooled)
return self.dropout(out)
class CNNEncoder(nn.Module):
def __init__(self,vocab_en,emb_en,vocab_zh,emb_zh):
super(CNNEncoder,self).__init__()
self.encoder_en=EncoderCNN(vocab_en,emb_en,dimWE,dimC,0)
self.encoder_zh=EncoderCNN(vocab_zh,emb_zh,dimWE,dimC,1)
def forward(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
return self.encoder_en(wordsEn,pos1En,pos2En),self.encoder_zh(wordsZh,pos1Zh,pos2Zh)
class Discriminator(nn.Module):
def __init__(self,
dis_input_dim=Encodered_dim,
nlayers=dis_layers,
hidden_dim=dis_hidden_dim,
input_dropout=dis_input_dropout,
dropout=dis_dropout):
super(Discriminator,self).__init__()
self.dis_input=dis_input_dim
layers=[nn.Dropout(input_dropout)]
for i in range(0,nlayers+1):
input_dim=self.dis_input if i==0 else hidden_dim
output_dim=1 if i==nlayers else hidden_dim
layers.append(nn.Linear(input_dim,output_dim))
if i<nlayers:
layers.append(nn.LeakyReLU(0.2))
layers.append(nn.Dropout(dropout))
layers.append(nn.Sigmoid())
self.layers=nn.Sequential(*layers)
def forward(self,inp):
assert inp.dim()==2 and inp.size(1)==self.dis_input
return self.layers(inp).view(-1)
class MultiRE(nn.Module):
def __init__(self):
super(MultiRE,self).__init__()
self.relation_emb=nn.Embedding(dimR,Encodered_dim)
self.dropout=nn.Dropout(p=Att_dropout)
self.M=nn.Linear(Encodered_dim,dimR)
def forward(self,inp_en,r_en,l_en,inp_zh,r_zh,l_zh,re_mask):
NumRe=r_en.size(0)
NumIn=l_zh.size(0)
relation_en=self.relation_emb(r_en)
relation_zh=self.relation_emb(r_zh)
attn_en=torch.sum(relation_en*inp_en,2)
attn_zh=torch.sum(relation_zh*inp_zh,2)
p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0))
L_en=0
L_zh=0
R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
for i in range(0,NumIn):
R_en=L_en+l_en[i].data[0]
R_zh=L_zh+l_zh[i].data[0]
if R_en>L_en and R_zh>L_zh:
Att=F.softmax(torch.cat((attn_en[:,L_en:R_en],attn_zh[:,L_zh:R_zh]),1),1)
S[i]=self.dropout(torch.matmul(Att,torch.cat((inp_en[L_en:R_en],inp_zh[L_zh:R_zh]),0)))
R_vec[i]=relation_en[:,L_en,:]
elif R_en>L_en:
Att=F.softmax(attn_en[:,L_en:R_en],1)
S[i]=self.dropout(torch.matmul(Att,inp_en[L_en:R_en]))
R_vec[i]=relation_en[:,L_en,:]
elif R_zh>L_zh:
Att=F.softmax(attn_zh[:,L_zh:R_zh],1)
S[i]=self.dropout(torch.matmul(Att,inp_zh[L_zh:R_zh]))
R_vec[i]=relation_zh[:,L_zh,:]
else:
print("ERR NO sentences")
exit()
L_en=R_en
L_zh=R_zh
p_n=F.log_softmax(self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1),2).view(NumIn,NumRe,dimR)
return p_n[re_mask].view(NumIn,NumRe)
class MonoRE(nn.Module):
def __init__(self):
super(MonoRE,self).__init__()
self.relation_emb=nn.Embedding(dimR,Encodered_dim)
self.dropout=nn.Dropout(p=Att_dropout)
self.M=nn.Linear(Encodered_dim,dimR)
def forward(self,inp,r,l,re_mask):
NumRe=r.size(0)
NumIn=l.size(0)
relation=self.relation_emb(r)
attn=torch.sum(relation*inp,2)
p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0))
L=0
R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
for i in range(0,NumIn):
R=L+l[i].data[0]
if R>L:
Att=F.softmax(attn[:,L:R],1)
S[i]=self.dropout(torch.matmul(Att,inp[L:R]))
R_vec[i]=relation[:,L,:]
L=R
p_n=F.log_softmax((self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1)),2).view(NumIn,NumRe,dimR)
return p_n[re_mask].view(NumIn,NumRe)
class AMRE(nn.Module):
def __init__(self,emb_en,emb_zh):
super(AMRE,self).__init__()
self.encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda()
self.enRE=MonoRE().cuda()
self.zhRE=MonoRE().cuda()
def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask):
inp_en,inp_zh=self.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
return self.enRE(inp_en,rEn,lEn,re_mask)+self.zhRE(inp_zh,rZh,lZh,re_mask)
class MARE(nn.Module):
def __init__(self,emb_en,emb_zh):
super(MARE,self).__init__()
self.D=Discriminator().cuda()
self.share_encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda()
self.multiRE=MultiRE().cuda()
self.monoRE=AMRE(emb_en,emb_zh)
def Orth_con(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
mono_en,mono_zh=self.monoRE.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
share=torch.cat((share_en,share_zh),0)
mono=torch.cat((mono_en,mono_zh),0)
share-=torch.mean(share,0)
mono-=torch.mean(mono,0)
share=F.normalize(share,2,1)
mono=F.normalize(mono,2,1)
correlation_mat=torch.matmul(share.transpose(0,1),mono)
cost=torch.mean(correlation_mat*correlation_mat)
return cost
def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask):
share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
return self.monoRE(wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask)+self.multiRE(share_en,rEn,lEn,share_zh,rZh,lZh,re_mask)
| true
| true
|
790260919ac0f5bfddec0cc2a99dc20d566fefef
| 1,358
|
py
|
Python
|
modoboa/admin/forms/import_.py
|
antoniotrento/modoboa
|
98eea782a080a3cdfea5abea7d288ff3d49595c6
|
[
"ISC"
] | 1
|
2019-06-12T19:24:42.000Z
|
2019-06-12T19:24:42.000Z
|
modoboa/admin/forms/import_.py
|
antoniotrento/modoboa
|
98eea782a080a3cdfea5abea7d288ff3d49595c6
|
[
"ISC"
] | null | null | null |
modoboa/admin/forms/import_.py
|
antoniotrento/modoboa
|
98eea782a080a3cdfea5abea7d288ff3d49595c6
|
[
"ISC"
] | 1
|
2020-11-20T00:25:23.000Z
|
2020-11-20T00:25:23.000Z
|
"""Forms related to import operations."""
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy
class ImportDataForm(forms.Form):
"""Base form to import objects."""
sourcefile = forms.FileField(label=ugettext_lazy("Select a file"))
sepchar = forms.CharField(
label=ugettext_lazy("Separator"),
max_length=1,
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
continue_if_exists = forms.BooleanField(
label=ugettext_lazy("Continue on error"), required=False,
help_text=ugettext_lazy("Don't treat duplicated objects as error")
)
def __init__(self, *args, **kwargs):
super(ImportDataForm, self).__init__(*args, **kwargs)
self.fields["sepchar"].widget.attrs = {"class": "col-md-1 form-control"}
def clean_sepchar(self):
if self.cleaned_data["sepchar"] == "":
return ";"
return self.cleaned_data["sepchar"]
class ImportIdentitiesForm(ImportDataForm):
"""A form to import identities."""
crypt_password = forms.BooleanField(
label=ugettext_lazy("Crypt passwords"), required=False,
help_text=ugettext_lazy(
"Check this option if passwords contained in your file "
"are not crypted"
)
)
| 29.521739
| 80
| 0.662003
|
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy
class ImportDataForm(forms.Form):
sourcefile = forms.FileField(label=ugettext_lazy("Select a file"))
sepchar = forms.CharField(
label=ugettext_lazy("Separator"),
max_length=1,
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
continue_if_exists = forms.BooleanField(
label=ugettext_lazy("Continue on error"), required=False,
help_text=ugettext_lazy("Don't treat duplicated objects as error")
)
def __init__(self, *args, **kwargs):
super(ImportDataForm, self).__init__(*args, **kwargs)
self.fields["sepchar"].widget.attrs = {"class": "col-md-1 form-control"}
def clean_sepchar(self):
if self.cleaned_data["sepchar"] == "":
return ";"
return self.cleaned_data["sepchar"]
class ImportIdentitiesForm(ImportDataForm):
crypt_password = forms.BooleanField(
label=ugettext_lazy("Crypt passwords"), required=False,
help_text=ugettext_lazy(
"Check this option if passwords contained in your file "
"are not crypted"
)
)
| true
| true
|
7902613ce459589bf5ff191f2e31f4823aa204cf
| 5,576
|
py
|
Python
|
tsl/data/mixin.py
|
TorchSpatiotemporal/tsl
|
da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0
|
[
"MIT"
] | 4
|
2022-03-21T09:16:33.000Z
|
2022-03-30T12:24:30.000Z
|
tsl/data/mixin.py
|
TorchSpatiotemporal/tsl
|
da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0
|
[
"MIT"
] | null | null | null |
tsl/data/mixin.py
|
TorchSpatiotemporal/tsl
|
da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0
|
[
"MIT"
] | null | null | null |
from typing import Optional, Union, Tuple, Mapping, List
from torch import Tensor
from torch_geometric.data.storage import recursive_apply
from torch_geometric.typing import Adj
from torch_sparse import SparseTensor
from tsl.ops.connectivity import convert_torch_connectivity
from tsl.typing import DataArray, SparseTensArray, ScipySparseMatrix
from . import utils
class DataParsingMixin:
def _parse_data(self, obj: DataArray) -> Tensor:
assert obj is not None
obj = utils.copy_to_tensor(obj)
obj = utils.to_steps_nodes_channels(obj)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_mask(self, mask: Optional[DataArray]) -> Optional[Tensor]:
if mask is None:
return None
mask = utils.copy_to_tensor(mask)
mask = utils.to_steps_nodes_channels(mask)
self._check_same_dim(mask.size(0), 'n_steps', 'mask')
self._check_same_dim(mask.size(1), 'n_nodes', 'mask')
if mask.size(-1) > 1:
self._check_same_dim(mask.size(-1), 'n_channels', 'mask')
mask = utils.cast_tensor(mask)
return mask
def _parse_exogenous(self, obj: DataArray, name: str,
node_level: bool) -> Tensor:
obj = utils.copy_to_tensor(obj)
if node_level:
obj = utils.to_steps_nodes_channels(obj)
self._check_same_dim(obj.shape[1], 'n_nodes', name)
else:
obj = utils.to_steps_channels(obj)
self._check_same_dim(obj.shape[0], 'n_steps', name)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_attribute(self, obj: DataArray, name: str,
node_level: bool) -> Tensor:
obj = utils.copy_to_tensor(obj)
if node_level:
obj = utils.to_nodes_channels(obj)
self._check_same_dim(obj.shape[0], 'n_nodes', name)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_adj(self, connectivity: Union[SparseTensArray, Tuple[DataArray]],
target_layout: Optional[str] = None
) -> Tuple[Optional[Adj], Optional[Tensor]]:
        # target_layout is one of 'sparse', 'edge_index' or None; None keeps
        # the connectivity in its input layout
if connectivity is None:
return None, None
# Convert to torch
# from np.ndarray, pd.DataFrame or torch.Tensor
if isinstance(connectivity, DataArray.__args__):
connectivity = utils.copy_to_tensor(connectivity)
elif isinstance(connectivity, (list, tuple)):
connectivity = recursive_apply(connectivity, utils.copy_to_tensor)
# from scipy sparse matrix
elif isinstance(connectivity, ScipySparseMatrix):
connectivity = SparseTensor.from_scipy(connectivity)
elif not isinstance(connectivity, SparseTensor):
raise TypeError("`connectivity` must be a dense matrix or in "
"COO format (i.e., an `edge_index`).")
if target_layout is not None:
connectivity = convert_torch_connectivity(connectivity,
target_layout,
num_nodes=self.n_nodes)
if isinstance(connectivity, (list, tuple)):
edge_index, edge_weight = connectivity
if edge_weight is not None:
edge_weight = utils.cast_tensor(edge_weight, self.precision)
else:
edge_index, edge_weight = connectivity, None
self._check_same_dim(edge_index.size(0), 'n_nodes', 'connectivity')
return edge_index, edge_weight
def _check_same_dim(self, dim: int, attr: str, name: str):
dim_data = getattr(self, attr)
if dim != dim_data:
raise ValueError("Cannot assign {0} with {1}={2}: data has {1}={3}"
.format(name, attr, dim, dim_data))
def _check_name(self, name: str):
if name.startswith('edge_'):
raise ValueError(f"Cannot set attribute with name '{name}' in this "
f"way, consider adding edge attributes as "
f"{self.name}.{name} = value.")
# name cannot be an attribute of self, nor a key in get
invalid_names = set(dir(self)).union(self.keys)
if name in invalid_names:
raise ValueError(f"Cannot set attribute with name '{name}', there "
f"is already an attribute named '{name}' in the "
"dataset.")
def _value_to_kwargs(self, value: Union[DataArray, List, Tuple, Mapping],
keys: Optional[Union[List, Tuple]] = None):
if isinstance(value, DataArray.__args__):
return dict(value=value)
if isinstance(value, (list, tuple)):
return dict(zip(keys, value))
elif isinstance(value, Mapping):
return value
else:
raise TypeError('Invalid type for value "{}"'.format(type(value)))
def _exog_value_to_kwargs(self,
value: Union[DataArray, List, Tuple, Mapping]):
keys = ['value', 'node_level', 'add_to_input_map', 'synch_mode',
'preprocess']
return self._value_to_kwargs(value, keys)
def _attr_value_to_kwargs(self,
value: Union[DataArray, List, Tuple, Mapping]):
keys = ['value', 'node_level', 'add_to_batch']
return self._value_to_kwargs(value, keys)
| 43.224806
| 80
| 0.603659
|
from typing import Optional, Union, Tuple, Mapping, List
from torch import Tensor
from torch_geometric.data.storage import recursive_apply
from torch_geometric.typing import Adj
from torch_sparse import SparseTensor
from tsl.ops.connectivity import convert_torch_connectivity
from tsl.typing import DataArray, SparseTensArray, ScipySparseMatrix
from . import utils
class DataParsingMixin:
def _parse_data(self, obj: DataArray) -> Tensor:
assert obj is not None
obj = utils.copy_to_tensor(obj)
obj = utils.to_steps_nodes_channels(obj)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_mask(self, mask: Optional[DataArray]) -> Optional[Tensor]:
if mask is None:
return None
mask = utils.copy_to_tensor(mask)
mask = utils.to_steps_nodes_channels(mask)
self._check_same_dim(mask.size(0), 'n_steps', 'mask')
self._check_same_dim(mask.size(1), 'n_nodes', 'mask')
if mask.size(-1) > 1:
self._check_same_dim(mask.size(-1), 'n_channels', 'mask')
mask = utils.cast_tensor(mask)
return mask
def _parse_exogenous(self, obj: DataArray, name: str,
node_level: bool) -> Tensor:
obj = utils.copy_to_tensor(obj)
if node_level:
obj = utils.to_steps_nodes_channels(obj)
self._check_same_dim(obj.shape[1], 'n_nodes', name)
else:
obj = utils.to_steps_channels(obj)
self._check_same_dim(obj.shape[0], 'n_steps', name)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_attribute(self, obj: DataArray, name: str,
node_level: bool) -> Tensor:
obj = utils.copy_to_tensor(obj)
if node_level:
obj = utils.to_nodes_channels(obj)
self._check_same_dim(obj.shape[0], 'n_nodes', name)
obj = utils.cast_tensor(obj, self.precision)
return obj
def _parse_adj(self, connectivity: Union[SparseTensArray, Tuple[DataArray]],
target_layout: Optional[str] = None
) -> Tuple[Optional[Adj], Optional[Tensor]]:
if connectivity is None:
return None, None
if isinstance(connectivity, DataArray.__args__):
connectivity = utils.copy_to_tensor(connectivity)
elif isinstance(connectivity, (list, tuple)):
connectivity = recursive_apply(connectivity, utils.copy_to_tensor)
elif isinstance(connectivity, ScipySparseMatrix):
connectivity = SparseTensor.from_scipy(connectivity)
elif not isinstance(connectivity, SparseTensor):
raise TypeError("`connectivity` must be a dense matrix or in "
"COO format (i.e., an `edge_index`).")
if target_layout is not None:
connectivity = convert_torch_connectivity(connectivity,
target_layout,
num_nodes=self.n_nodes)
if isinstance(connectivity, (list, tuple)):
edge_index, edge_weight = connectivity
if edge_weight is not None:
edge_weight = utils.cast_tensor(edge_weight, self.precision)
else:
edge_index, edge_weight = connectivity, None
self._check_same_dim(edge_index.size(0), 'n_nodes', 'connectivity')
return edge_index, edge_weight
def _check_same_dim(self, dim: int, attr: str, name: str):
dim_data = getattr(self, attr)
if dim != dim_data:
raise ValueError("Cannot assign {0} with {1}={2}: data has {1}={3}"
.format(name, attr, dim, dim_data))
def _check_name(self, name: str):
if name.startswith('edge_'):
raise ValueError(f"Cannot set attribute with name '{name}' in this "
f"way, consider adding edge attributes as "
f"{self.name}.{name} = value.")
invalid_names = set(dir(self)).union(self.keys)
if name in invalid_names:
raise ValueError(f"Cannot set attribute with name '{name}', there "
f"is already an attribute named '{name}' in the "
"dataset.")
def _value_to_kwargs(self, value: Union[DataArray, List, Tuple, Mapping],
keys: Optional[Union[List, Tuple]] = None):
if isinstance(value, DataArray.__args__):
return dict(value=value)
if isinstance(value, (list, tuple)):
return dict(zip(keys, value))
elif isinstance(value, Mapping):
return value
else:
raise TypeError('Invalid type for value "{}"'.format(type(value)))
def _exog_value_to_kwargs(self,
value: Union[DataArray, List, Tuple, Mapping]):
keys = ['value', 'node_level', 'add_to_input_map', 'synch_mode',
'preprocess']
return self._value_to_kwargs(value, keys)
def _attr_value_to_kwargs(self,
value: Union[DataArray, List, Tuple, Mapping]):
keys = ['value', 'node_level', 'add_to_batch']
return self._value_to_kwargs(value, keys)
| true
| true
|
79026154de1f814d7e362c3cba2da3815404e7fb
| 9,211
|
py
|
Python
|
manila/hacking/checks.py
|
scality/manila
|
b4a67d033cdcbc1389ae52f35ad281be7a18c9ae
|
[
"Apache-2.0"
] | 1
|
2015-05-28T22:28:08.000Z
|
2015-05-28T22:28:08.000Z
|
manila/hacking/checks.py
|
scality/manila
|
b4a67d033cdcbc1389ae52f35ad281be7a18c9ae
|
[
"Apache-2.0"
] | 5
|
2015-08-13T15:17:28.000Z
|
2016-08-02T02:55:01.000Z
|
manila/hacking/checks.py
|
scality/manila
|
b4a67d033cdcbc1389ae52f35ad281be7a18c9ae
|
[
"Apache-2.0"
] | 2
|
2015-08-29T08:19:58.000Z
|
2016-08-02T02:46:10.000Z
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Manila specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to manila/tests/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
log_translation = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")")
log_translation_LC = re.compile(
r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")")
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
CHECK_DESC = 'No check message specified'
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
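# Illustrative sketch (not part of the original file): a minimal
# BaseASTChecker subclass following the contract described in the class
# docstring above. The M3xx number is hypothetical and unallocated.
class ExampleCheckForEval(BaseASTChecker):
    """Flags any direct call to eval() (illustration only)."""

    CHECK_DESC = 'M3xx example: direct calls to eval() are flagged'

    def visit_Call(self, node):
        # Record an error for the offending node, then keep walking the tree.
        if self._check_call_names(node, ['eval']):
            self.add_error(node)
        super(ExampleCheckForEval, self).generic_visit(node)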
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
M319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "M319 Don't translate debug level logs")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test and tempest
# directories.
if ("manila/tests" in filename or "manila_tempest_tests" in filename or
"contrib/tempest" in filename):
return
if pep8.noqa(physical_line):
return
msg = "M327: LOG.critical messages require translations `_LC()`!"
if log_translation_LC.match(logical_line):
yield (0, msg)
msg = ("M328: LOG.error and LOG.exception messages require translations "
"`_LE()`!")
if log_translation_LE.match(logical_line):
yield (0, msg)
msg = "M329: LOG.info messages require translations `_LI()`!"
if log_translation_LI.match(logical_line):
yield (0, msg)
msg = "M330: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
msg = "M331: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrExc(BaseASTChecker):
"""Checks for the use of str() on an exception.
This currently only handles the case where str() is used in
the scope of an exception handler. If the exception is passed
into a function, returned from an assertRaises, or used on an
exception created in the same scope, this does not catch it.
"""
CHECK_DESC = ('M325 str() cannot be used on an exception. '
'Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
"""Checks for the use of concatenation on a translated string.
Translations should not be concatenated with other strings, but
should instead include the string being added to the translated
string to give the translators the most information.
"""
CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
'String should be included in translated message.')
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if self._check_call_names(node.left, self.TRANS_FUNC):
self.add_error(node.left)
elif self._check_call_names(node.right, self.TRANS_FUNC):
self.add_error(node.right)
super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
if pep8.noqa(physical_line):
return
if re.match(oslo_namespace_imports, logical_line):
msg = ("M333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def dict_constructor_with_list_copy(logical_line):
msg = ("M336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def factory(register):
register(validate_log_translations)
register(check_explicit_underscore_import)
register(no_translate_debug_logs)
register(CheckForStrExc)
register(CheckForTransAdd)
register(check_oslo_namespace_imports)
register(dict_constructor_with_list_copy)
| 36.551587
| 78
| 0.670828
|
import ast
import re
import pep8
UNDERSCORE_IMPORT_FILES = []
log_translation = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")")
log_translation_LC = re.compile(
r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")")
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
class BaseASTChecker(ast.NodeVisitor):
CHECK_DESC = 'No check message specified'
def __init__(self, tree, filename):
self._tree = tree
self._errors = []
def run(self):
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
def no_translate_debug_logs(logical_line, filename):
if logical_line.startswith("LOG.debug(_("):
yield(0, "M319 Don't translate debug level logs")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test and tempest
# directories.
if ("manila/tests" in filename or "manila_tempest_tests" in filename or
"contrib/tempest" in filename):
return
if pep8.noqa(physical_line):
return
msg = "M327: LOG.critical messages require translations `_LC()`!"
if log_translation_LC.match(logical_line):
yield (0, msg)
msg = ("M328: LOG.error and LOG.exception messages require translations "
"`_LE()`!")
if log_translation_LE.match(logical_line):
yield (0, msg)
msg = "M329: LOG.info messages require translations `_LI()`!"
if log_translation_LI.match(logical_line):
yield (0, msg)
msg = "M330: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
msg = "M331: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrExc(BaseASTChecker):
CHECK_DESC = ('M325 str() cannot be used on an exception. '
'Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
'String should be included in translated message.')
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if self._check_call_names(node.left, self.TRANS_FUNC):
self.add_error(node.left)
elif self._check_call_names(node.right, self.TRANS_FUNC):
self.add_error(node.right)
super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
if pep8.noqa(physical_line):
return
if re.match(oslo_namespace_imports, logical_line):
msg = ("M333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def dict_constructor_with_list_copy(logical_line):
msg = ("M336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def factory(register):
register(validate_log_translations)
register(check_explicit_underscore_import)
register(no_translate_debug_logs)
register(CheckForStrExc)
register(CheckForTransAdd)
register(check_oslo_namespace_imports)
register(dict_constructor_with_list_copy)
| true
| true
|
7902638628bb0064f61e0251eaef00315f00d3ba
| 2,027
|
py
|
Python
|
GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP300_MDP20_Ctau500mm_Pythia8_13TeV_cff.py
|
jwill24/Timing
|
99c5712eae960646e02bbb796e91b584a9a96132
|
[
"MIT"
] | 2
|
2017-10-19T12:28:53.000Z
|
2019-05-22T14:36:05.000Z
|
GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP300_MDP20_Ctau500mm_Pythia8_13TeV_cff.py
|
jwill24/Timing
|
99c5712eae960646e02bbb796e91b584a9a96132
|
[
"MIT"
] | null | null | null |
GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP300_MDP20_Ctau500mm_Pythia8_13TeV_cff.py
|
jwill24/Timing
|
99c5712eae960646e02bbb796e91b584a9a96132
|
[
"MIT"
] | 6
|
2017-09-13T13:16:10.000Z
|
2019-01-28T17:39:51.000Z
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'HiddenValley:ffbar2Zv = on',
'HiddenValley:Ngauge = 3',
'4900023:mWidth = 0.01',
'HiddenValley:pTminFSR = .1',
'HiddenValley:alphaFSR = .8',
'HiddenValley:FSR = on',
'HiddenValley:fragment = on',
'HiddenValley:probVector = 0',
'PartonLevel:MPI = on',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'HadronLevel:Hadronize = on',
'4900023:onMode = off',
'4900023:onIfAny = 4900101',
'4900023:m0 = 300', #Z' mass
'4900101:m0 = .5',
'4900111:m0 = 20', #Dark Pion Mass
'4900111:mayDecay = on',
'4900111:addChannel 1 1. 0 22 22', #force dark pion to decay to diphotons
'4900111:tau0 = 500', #Dark pion lifetime in mm
'4900211:mayDecay = off',
'-4900211:mayDecay = off'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| 44.065217
| 93
| 0.513567
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'HiddenValley:ffbar2Zv = on',
'HiddenValley:Ngauge = 3',
'4900023:mWidth = 0.01',
'HiddenValley:pTminFSR = .1',
'HiddenValley:alphaFSR = .8',
'HiddenValley:FSR = on',
'HiddenValley:fragment = on',
'HiddenValley:probVector = 0',
'PartonLevel:MPI = on',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'HadronLevel:Hadronize = on',
'4900023:onMode = off',
'4900023:onIfAny = 4900101',
'4900023:m0 = 300',
'4900101:m0 = .5',
'4900111:m0 = 20', #Dark Pion Mass
'4900111:mayDecay = on',
'4900111:addChannel 1 1. 0 22 22', #force dark pion to decay to diphotons
'4900111:tau0 = 500', #Dark pion lifetime in mm
'4900211:mayDecay = off',
'-4900211:mayDecay = off'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| true
| true
|
790264a7803ec3eb2265392d8e40373ae6a727be
| 2,162
|
py
|
Python
|
benchmark/startPyquil1068.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil1068.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil1068.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=48
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += Z(2) # number=28
prog += H(1) # number=4
prog += RX(2.664070570244145,1) # number=39
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(3) # number=40
prog += Y(4) # number=35
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=25
prog += CZ(1,0) # number=26
prog += H(0) # number=27
prog += H(0) # number=36
prog += CZ(1,0) # number=37
prog += H(0) # number=38
prog += CNOT(1,0) # number=41
prog += CNOT(1,0) # number=45
prog += X(0) # number=46
prog += CNOT(1,0) # number=47
prog += CNOT(1,0) # number=43
prog += CNOT(1,0) # number=34
prog += CNOT(1,0) # number=24
prog += CNOT(0,1) # number=29
prog += CNOT(2,3) # number=44
prog += X(1) # number=30
prog += CNOT(0,1) # number=31
prog += X(2) # number=11
prog += X(3) # number=12
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1068.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.139535
| 64
| 0.546253
|
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program()
prog += H(0)
prog += Z(2)
prog += H(1)
prog += RX(2.664070570244145,1)
prog += H(2)
prog += H(3)
prog += H(4)
prog += H(0)
prog += H(3)
prog += Y(4)
prog += H(1)
prog += H(2)
prog += H(3)
prog += H(0)
prog += CZ(1,0)
prog += H(0)
prog += H(0)
prog += CZ(1,0)
prog += H(0)
prog += CNOT(1,0)
prog += CNOT(1,0)
prog += X(0)
prog += CNOT(1,0)
prog += CNOT(1,0)
prog += CNOT(1,0)
prog += CNOT(1,0)
prog += CNOT(0,1)
prog += CNOT(2,3)
prog += X(1)
prog += CNOT(0,1)
prog += X(2)
prog += X(3)
prog += X(0)
prog += X(1)
prog += X(2)
prog += X(3)
prog += H(0)
prog += H(1)
prog += H(2)
prog += H(3)
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1068.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| false
| true
|
790264f04656fc7c67e61664be259576d99e75f7
| 3,495
|
py
|
Python
|
xsocs/gui/project/ScanPositionsItem.py
|
omserta/xsocs
|
5e1cf1352233498c48f0566e0b819e18373e95e5
|
[
"MIT"
] | null | null | null |
xsocs/gui/project/ScanPositionsItem.py
|
omserta/xsocs
|
5e1cf1352233498c48f0566e0b819e18373e95e5
|
[
"MIT"
] | null | null | null |
xsocs/gui/project/ScanPositionsItem.py
|
omserta/xsocs
|
5e1cf1352233498c48f0566e0b819e18373e95e5
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "15/09/2016"
from ...io.XsocsH5 import ScanPositions
from .ProjectItem import ProjectItem
from .ProjectDef import ItemClassDef
@ItemClassDef('ScanPositionsItem')
class ScanPositionsItem(ProjectItem):
def _createItem(self):
with self.xsocsH5 as h5f:
entries = h5f.entries()
entry = entries[0]
scan_positions = h5f.scan_positions(entry)
pathTpl = self.path + '/' + '{0}'
with self:
itemPath = pathTpl.format('pos_0')
self._set_array_data(itemPath, scan_positions.pos_0)
itemPath = pathTpl.format('pos_1')
self._set_array_data(itemPath, scan_positions.pos_1)
itemPath = pathTpl.format('motor_0')
self._set_scalar_data(itemPath, scan_positions.motor_0)
itemPath = pathTpl.format('motor_1')
self._set_scalar_data(itemPath, scan_positions.motor_1)
itemPath = pathTpl.format('n_0')
self._set_scalar_data(itemPath, scan_positions.shape[0])
itemPath = pathTpl.format('n_1')
self._set_scalar_data(itemPath, scan_positions.shape[1])
def positions(self):
pathTpl = self.path + '/' + '{0}'
with self:
itemPath = pathTpl.format('pos_0')
pos_0 = self._get_array_data(itemPath)
itemPath = pathTpl.format('pos_1')
pos_1 = self._get_array_data(itemPath)
itemPath = pathTpl.format('motor_0')
motor_0 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('motor_1')
motor_1 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('n_0')
n_0 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('n_1')
n_1 = self._get_scalar_data(itemPath)
return ScanPositions(motor_0=motor_0,
pos_0=pos_0,
motor_1=motor_1,
pos_1=pos_1,
shape=(n_0, n_1))
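

# --- Editor's illustration (hedged, not part of the original module) ---
# Rough shape of reading scan positions back from a project file; the
# ProjectItem constructor signature is an assumption, not confirmed here:
#
#     item = ScanPositionsItem(projectFile, '/Positions')
#     pos = item.positions()
#     print(pos.shape, pos.pos_0[:5], pos.pos_1[:5])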
| 43.148148
| 79
| 0.621173
| true
| true
|
|
790265f034101904ace24c8650fb26d3abb22b2c
| 763
|
py
|
Python
|
tests/test_inventory_generation.py
|
matthewlchambers/standardizedinventories
|
153526d34850820ee12bf5601e2703c583a07288
|
[
"CC0-1.0"
] | null | null | null |
tests/test_inventory_generation.py
|
matthewlchambers/standardizedinventories
|
153526d34850820ee12bf5601e2703c583a07288
|
[
"CC0-1.0"
] | null | null | null |
tests/test_inventory_generation.py
|
matthewlchambers/standardizedinventories
|
153526d34850820ee12bf5601e2703c583a07288
|
[
"CC0-1.0"
] | null | null | null |
"""Test the creation of all inventories."""
import stewi
from stewi.globals import paths, STEWI_VERSION, config
year = 2018
def test_inventory_generation():
# Create new local path
paths.local_path = paths.local_path + "_" + STEWI_VERSION
error_list = []
for inventory in config()['databases']:
        # skip RCRAInfo due to browser download
if inventory in ['RCRAInfo']:
continue
df = stewi.getInventory(inventory, year)
error = df is None
if not error:
error = len(df) == 0
if error:
error_list.append(inventory)
assert len(error_list) == 0, f"Generation of {','.join(error_list)} unsuccessful"
if __name__ == "__main__":
test_inventory_generation()
| 24.612903
| 85
| 0.63827
|
import stewi
from stewi.globals import paths, STEWI_VERSION, config
year = 2018
def test_inventory_generation():
paths.local_path = paths.local_path + "_" + STEWI_VERSION
error_list = []
for inventory in config()['databases']:
if inventory in ['RCRAInfo']:
continue
df = stewi.getInventory(inventory, year)
error = df is None
if not error:
error = len(df) == 0
if error:
error_list.append(inventory)
assert len(error_list) == 0, f"Generation of {','.join(error_list)} unsuccessful"
if __name__ == "__main__":
test_inventory_generation()
| true
| true
|
790266504aada4afbc51ccf3f2bedc285c45444a
| 10,945
|
py
|
Python
|
Qshop/Buyer/views.py
|
songdanlee/DjangoWorkSpace
|
5dea8601f21f5408797a8801f74b76c696a33d83
|
[
"MIT"
] | null | null | null |
Qshop/Buyer/views.py
|
songdanlee/DjangoWorkSpace
|
5dea8601f21f5408797a8801f74b76c696a33d83
|
[
"MIT"
] | 1
|
2021-05-10T11:45:52.000Z
|
2021-05-10T11:45:52.000Z
|
Qshop/Buyer/views.py
|
songdanlee/DjangoWorkSpace
|
5dea8601f21f5408797a8801f74b76c696a33d83
|
[
"MIT"
] | null | null | null |
from alipay import AliPay
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render
from django.utils.http import urlquote
from Qshop.settings import alipay_private_key_string, alipay_public_key_string
from Seller.views import getPassword
# Create your views here.
def loginValid(func):
"""
:desc 闭包函数校验是否登录
:param func:
:return:
"""
def inner(request, *args, **kwargs):
email = request.COOKIES.get("user")
s_email = request.session.get("user")
if email and s_email and email == s_email:
user = LoginUser.objects.filter(email=email).first()
if user:
return func(request, *args, **kwargs)
return HttpResponseRedirect("/Buyer/login/")
return inner
def login(request):
if request.method == "POST":
erremail = ""
email = request.POST.get("email")
pwd = request.POST.get("pwd")
user = LoginUser.objects.filter(email=email).first()
if user:
db_password = user.password
pwd = getPassword(pwd)
if db_password == pwd:
response = HttpResponseRedirect("/Buyer/index/", locals())
response.set_cookie("user", user.email)
response.set_cookie("user_id", user.id)
response.set_cookie("username", urlquote(user.username))
request.session["user"] = user.email
return response
else:
                errpwd = "密码不匹配"  # "password does not match"
        else:
            erremail = "该邮箱未注册"  # "this email is not registered"
return render(request, "buyer/login.html", locals())
def register(request):
errmsg = ""
if request.method == "POST":
username = request.POST.get("user_name")
pwd = request.POST.get("pwd")
email = request.POST.get("email")
db_email = LoginUser.objects.filter(email=email).first()
db_username = LoginUser.objects.filter(username=username).first()
if not db_email:
if not db_username:
user = LoginUser()
user.username = username
user.password = getPassword(pwd)
user.email = email
user.save()
return HttpResponseRedirect("/Buyer/login/", locals())
else:
                errmsg = "用户名已存在"  # "username already exists"
        else:
            errmsg = "邮箱已注册"  # "email already registered"
return render(request, "buyer/register.html", {"errmsg": errmsg})
def index(request):
types = GoodsType.objects.all()
goods_result = []
for type in types:
goods = type.goods_set.order_by("goods_pro_date")[0:4]
if len(goods) >= 4:
goods_result.append({type: goods})
return render(request, "buyer/index.html", locals())
def logout(request):
url = request.META.get("HTTP_REFERER", "/Buyer/index/")
response = HttpResponseRedirect(url)
cookies = request.COOKIES.keys()
for cookie in cookies:
response.delete_cookie(cookie)
if request.session.get("user"):
del request.session['user']
return response
@loginValid
def user_info(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
return render(request, "buyer/user_center_info.html", locals())
@loginValid
def user_site(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
return render(request, "buyer/user_center_site.html", locals())
@loginValid
def user_order(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
order_lists = PayOrder.objects.filter(order_user=user).order_by("-order_date")
return render(request, "buyer/user_center_order.html", locals())
"""
def good_list(request):
type = request.GET.get("type")
keyword = request.GET.get("keyword")
goods_list = []
if type == 'byid':
if keyword:
types = GoodsType.objects.get(id=keyword)
goods_list = types.goods_set.order_by("goods_pro_date")
elif type == 'bykey':
if keyword:
goods_list = Goods.objects.filter(goods_name__contains=keyword).order_by("goods_pro_date")
if goods_list:
nums = goods_list.count()
nums = int(math.ceil(nums / 5))
recommon_list = goods_list[:nums]
return render(request, "buyer/goods_list.html", locals())
"""
def good_list(request, page):
page = int(page)
type = request.GET.get("type")
keyword = request.GET.get("keyword")
goods_list = []
    if type == 'byid':  # look up goods by type id
if keyword:
types = GoodsType.objects.get(id=int(keyword))
goods_list = types.goods_set.order_by("goods_pro_date")
    elif type == 'bykey':  # search goods by name
if keyword:
goods_list = Goods.objects.filter(goods_name__contains=keyword).order_by("goods_pro_date")
if goods_list:
        # pagination
page_list = Paginator(goods_list, 15)
goods_list = page_list.page(page)
pages = page_list.page_range
        # recommended goods
nums = len(goods_list)
nums = int(math.ceil(nums / 5))
recommon_list = goods_list[:nums]
return render(request, "buyer/goods_list.html", locals())
def good_detail(request, id):
good = Goods.objects.filter(id=int(id)).first()
return render(request, "buyer/detail.html", locals())
import math
import time
import datetime
from Buyer.models import *
@loginValid
def pay_order(request):
"""
get请求 商品详情页购买单个商品。传入商品id,数量。
post请求 购物车购买多个商品。
"""
if request.method == "GET":
num = request.GET.get("num")
id = request.GET.get("id")
if num and id:
num = int(num)
id = int(id)
            order = PayOrder()  # the order
order.order_number = str(time.time()).replace(".", "")
order.order_date = datetime.datetime.now()
order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
order.save()
good = Goods.objects.get(id=id)
            order_info = OrderInfo()  # the order detail row
order_info.order_id = order
order_info.goods_id = good.id
order_info.goods_picture = good.goods_picture
order_info.goods_name = good.goods_name
order_info.goods_count = num
order_info.goods_price = good.goods_price
order_info.goods_total_price = round(good.goods_price * num, 3)
order_info.store_id = good.goods_store
            order_info.order_status = 0  # status
order_info.save()
order.order_total = order_info.goods_total_price
order.save()
elif request.method == "POST":
request_data = []
data = request.POST
data_item = request.POST.items()
for key, value in data_item:
if key.startswith("check_"):
id = int(key.split("_", 1)[1])
num = int(data.get("count_" + str(id)))
request_data.append((id, num))
if request_data:
            order = PayOrder()  # create the order
order.order_number = str(time.time()).replace(".", "")
order.order_date = datetime.datetime.now()
order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
order.order_total = 0.0
order.goods_number = 0
order.save()
for id, num in request_data:
good = Goods.objects.get(id=id)
                order_info = OrderInfo()  # the order detail row
order_info.order_id = order
order_info.goods_id = good.id
order_info.goods_picture = good.goods_picture
order_info.goods_name = good.goods_name
order_info.goods_count = num
order_info.goods_price = good.goods_price
order_info.goods_total_price = round(good.goods_price * num, 3)
order_info.store_id = good.goods_store
order_info.order_status = 0
order_info.save()
                order.order_total += order_info.goods_total_price  # running order total
                order.goods_number += 1  # number of distinct goods in the order
order.save()
return render(request, "buyer/place_order.html", locals())
@loginValid
def alipayOrder(request):
"""
阿里支付,传入交易订单号,总金额
"""
order_number = request.GET.get("order_number")
total = request.GET.get("total")
    # instantiate the Alipay payment client
alipay = AliPay(
appid="2016101200667714",
app_notify_url=None,
app_private_key_string=alipay_private_key_string,
alipay_public_key_string=alipay_public_key_string,
sign_type="RSA2"
)
    order_string = alipay.api_alipay_trade_page_pay(
        out_trade_no=order_number,  # order number
        total_amount=str(total),  # amount, passed as a string
        subject="生鲜交易",  # "fresh produce transaction"
        return_url="http://127.0.0.1:8000/Buyer/pay_result/",  # page to redirect to after payment
        notify_url="http://127.0.0.1:8000/Buyer/pay_result/",
)
result = "https://openapi.alipaydev.com/gateway.do?" + order_string
return HttpResponseRedirect(result)
@loginValid
def pay_result(request):
"""
支付结果页
如果有out_trade_no,支付成功,修改订单状态
"""
out_trade_no = request.GET.get("out_trade_no")
if out_trade_no:
payorder = PayOrder.objects.get(order_number=out_trade_no)
payorder.orderinfo_set.all().update(order_status=1)
return render(request, "buyer/pay_result.html", locals())
@loginValid
def delgood(request):
sendData = {
"code": 200,
"data": ""
}
id = request.GET.get("id")
if id:
cart = Cart.objects.get(id=id)
cart.delete()
sendData["data"] = "删除编号%s成功"%id
return JsonResponse(sendData)
@loginValid
def add_cart(request):
"""
处理ajax 请求,添加商品到购物车 ,成功保存到数据库。
传入商品id,数量
"""
sendData = {
"code": 200,
"data": ""
}
if request.method == "POST":
id = int(request.POST.get("goods_id"))
count = int(request.POST.get("count", 1))
goods = Goods.objects.get(id=id)
cart = Cart()
cart.goods_name = goods.goods_name
cart.goods_num = count
cart.goods_price = goods.goods_price
cart.goods_picture = goods.goods_picture
cart.goods_total = round(goods.goods_price * count, 3)
cart.goods_id = goods.id
cart.cart_user = request.COOKIES.get("user_id")
cart.save()
        sendData['data'] = "加入购物车成功"  # "added to the cart successfully"
    else:
        sendData["code"] = 500
        sendData["data"] = "请求方式错误"  # "wrong request method"
return JsonResponse(sendData)
@loginValid
def mycart(request):
id = request.COOKIES.get("user_id")
carts = Cart.objects.filter(cart_user=id).order_by("-id")
number = carts.count()
return render(request, "buyer/cart.html", locals())
| 29.986301
| 102
| 0.604568
|
from alipay import AliPay
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render
from django.utils.http import urlquote
from Qshop.settings import alipay_private_key_string, alipay_public_key_string
from Seller.views import getPassword
def loginValid(func):
def inner(request, *args, **kwargs):
email = request.COOKIES.get("user")
s_email = request.session.get("user")
if email and s_email and email == s_email:
user = LoginUser.objects.filter(email=email).first()
if user:
return func(request, *args, **kwargs)
return HttpResponseRedirect("/Buyer/login/")
return inner
def login(request):
if request.method == "POST":
erremail = ""
email = request.POST.get("email")
pwd = request.POST.get("pwd")
user = LoginUser.objects.filter(email=email).first()
if user:
db_password = user.password
pwd = getPassword(pwd)
if db_password == pwd:
response = HttpResponseRedirect("/Buyer/index/", locals())
response.set_cookie("user", user.email)
response.set_cookie("user_id", user.id)
response.set_cookie("username", urlquote(user.username))
request.session["user"] = user.email
return response
else:
errpwd = "密码不匹配"
else:
erremail = "该邮箱未注册"
return render(request, "buyer/login.html", locals())
def register(request):
errmsg = ""
if request.method == "POST":
username = request.POST.get("user_name")
pwd = request.POST.get("pwd")
email = request.POST.get("email")
db_email = LoginUser.objects.filter(email=email).first()
db_username = LoginUser.objects.filter(username=username).first()
if not db_email:
if not db_username:
user = LoginUser()
user.username = username
user.password = getPassword(pwd)
user.email = email
user.save()
return HttpResponseRedirect("/Buyer/login/", locals())
else:
errmsg = "用户名已存在"
else:
errmsg = "邮箱已注册"
return render(request, "buyer/register.html", {"errmsg": errmsg})
def index(request):
types = GoodsType.objects.all()
goods_result = []
for type in types:
goods = type.goods_set.order_by("goods_pro_date")[0:4]
if len(goods) >= 4:
goods_result.append({type: goods})
return render(request, "buyer/index.html", locals())
def logout(request):
url = request.META.get("HTTP_REFERER", "/Buyer/index/")
response = HttpResponseRedirect(url)
cookies = request.COOKIES.keys()
for cookie in cookies:
response.delete_cookie(cookie)
if request.session.get("user"):
del request.session['user']
return response
@loginValid
def user_info(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
return render(request, "buyer/user_center_info.html", locals())
@loginValid
def user_site(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
return render(request, "buyer/user_center_site.html", locals())
@loginValid
def user_order(request):
id = request.COOKIES.get("user_id")
if id:
user = LoginUser.objects.filter(id=id).first()
order_lists = PayOrder.objects.filter(order_user=user).order_by("-order_date")
return render(request, "buyer/user_center_order.html", locals())
def good_list(request, page):
page = int(page)
type = request.GET.get("type")
keyword = request.GET.get("keyword")
goods_list = []
if type == 'byid':
if keyword:
types = GoodsType.objects.get(id=int(keyword))
goods_list = types.goods_set.order_by("goods_pro_date")
elif type == 'bykey':
if keyword:
goods_list = Goods.objects.filter(goods_name__contains=keyword).order_by("goods_pro_date")
if goods_list:
page_list = Paginator(goods_list, 15)
goods_list = page_list.page(page)
pages = page_list.page_range
nums = len(goods_list)
nums = int(math.ceil(nums / 5))
recommon_list = goods_list[:nums]
return render(request, "buyer/goods_list.html", locals())
def good_detail(request, id):
good = Goods.objects.filter(id=int(id)).first()
return render(request, "buyer/detail.html", locals())
import math
import time
import datetime
from Buyer.models import *
@loginValid
def pay_order(request):
if request.method == "GET":
num = request.GET.get("num")
id = request.GET.get("id")
if num and id:
num = int(num)
id = int(id)
order = PayOrder()
order.order_number = str(time.time()).replace(".", "")
order.order_date = datetime.datetime.now()
order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
order.save()
good = Goods.objects.get(id=id)
order_info = OrderInfo()
order_info.order_id = order
order_info.goods_id = good.id
order_info.goods_picture = good.goods_picture
order_info.goods_name = good.goods_name
order_info.goods_count = num
order_info.goods_price = good.goods_price
order_info.goods_total_price = round(good.goods_price * num, 3)
order_info.store_id = good.goods_store
order_info.order_status = 0
order_info.save()
order.order_total = order_info.goods_total_price
order.save()
elif request.method == "POST":
request_data = []
data = request.POST
data_item = request.POST.items()
for key, value in data_item:
if key.startswith("check_"):
id = int(key.split("_", 1)[1])
num = int(data.get("count_" + str(id)))
request_data.append((id, num))
if request_data:
order = PayOrder()
order.order_number = str(time.time()).replace(".", "")
order.order_date = datetime.datetime.now()
order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
order.order_total = 0.0
order.goods_number = 0
order.save()
for id, num in request_data:
good = Goods.objects.get(id=id)
order_info = OrderInfo()
order_info.order_id = order
order_info.goods_id = good.id
order_info.goods_picture = good.goods_picture
order_info.goods_name = good.goods_name
order_info.goods_count = num
order_info.goods_price = good.goods_price
order_info.goods_total_price = round(good.goods_price * num, 3)
order_info.store_id = good.goods_store
order_info.order_status = 0
order_info.save()
order.order_total += order_info.goods_total_price
order.goods_number += 1
order.save()
return render(request, "buyer/place_order.html", locals())
@loginValid
def alipayOrder(request):
order_number = request.GET.get("order_number")
total = request.GET.get("total")
alipay = AliPay(
appid="2016101200667714",
app_notify_url=None,
app_private_key_string=alipay_private_key_string,
alipay_public_key_string=alipay_public_key_string,
sign_type="RSA2"
)
order_string = alipay.api_alipay_trade_page_pay(
out_trade_no=order_number,
total_amount=str(total),
subject="生鲜交易",
return_url="http://127.0.0.1:8000/Buyer/pay_result/",
notify_url="http://127.0.0.1:8000/Buyer/pay_result/",
)
result = "https://openapi.alipaydev.com/gateway.do?" + order_string
return HttpResponseRedirect(result)
@loginValid
def pay_result(request):
out_trade_no = request.GET.get("out_trade_no")
if out_trade_no:
payorder = PayOrder.objects.get(order_number=out_trade_no)
payorder.orderinfo_set.all().update(order_status=1)
return render(request, "buyer/pay_result.html", locals())
@loginValid
def delgood(request):
sendData = {
"code": 200,
"data": ""
}
id = request.GET.get("id")
if id:
cart = Cart.objects.get(id=id)
cart.delete()
sendData["data"] = "删除编号%s成功"%id
return JsonResponse(sendData)
@loginValid
def add_cart(request):
sendData = {
"code": 200,
"data": ""
}
if request.method == "POST":
id = int(request.POST.get("goods_id"))
count = int(request.POST.get("count", 1))
goods = Goods.objects.get(id=id)
cart = Cart()
cart.goods_name = goods.goods_name
cart.goods_num = count
cart.goods_price = goods.goods_price
cart.goods_picture = goods.goods_picture
cart.goods_total = round(goods.goods_price * count, 3)
cart.goods_id = goods.id
cart.cart_user = request.COOKIES.get("user_id")
cart.save()
sendData['data'] = "加入购物车成功"
else:
sendData["code"] = 500
sendData["data"] = "请求方式错误"
return JsonResponse(sendData)
@loginValid
def mycart(request):
id = request.COOKIES.get("user_id")
carts = Cart.objects.filter(cart_user=id).order_by("-id")
number = carts.count()
return render(request, "buyer/cart.html", locals())
| true
| true
|
790266e9a7bcf554bd70851b9a13216ab9f797e3
| 11,530
|
py
|
Python
|
src/gdata/spreadsheets/data.py
|
Cloudlock/gdata-python3
|
a6481a13590bfa225f91a97b2185cca9aacd1403
|
[
"Apache-2.0"
] | 19
|
2017-06-09T13:38:03.000Z
|
2020-12-12T07:45:48.000Z
|
src/gdata/spreadsheets/data.py
|
AlexxIT/gdata-python3
|
5cc5a83a469d87f804d1fda8760ec76bcb6050c9
|
[
"Apache-1.1"
] | 11
|
2017-07-22T07:09:54.000Z
|
2020-12-02T15:08:48.000Z
|
src/gdata/spreadsheets/data.py
|
AlexxIT/gdata-python3
|
5cc5a83a469d87f804d1fda8760ec76bcb6050c9
|
[
"Apache-1.1"
] | 25
|
2017-07-03T11:30:39.000Z
|
2020-10-01T02:21:13.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License 2.0;
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
# __author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
'/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
class Error(Exception):
pass
class FieldMissing(Exception):
pass
class HeaderNotSet(Error):
"""The desired column header had no value for the row in the list feed."""
class Cell(atom.core.XmlElement):
"""The gs:cell element.
A cell in the worksheet. The <gs:cell> element can appear only as a child
of <atom:entry>.
"""
_qname = GS_TEMPLATE % 'cell'
col = 'col'
input_value = 'inputValue'
numeric_value = 'numericValue'
row = 'row'
class ColCount(atom.core.XmlElement):
"""The gs:colCount element.
Indicates the number of columns in the worksheet, including columns that
contain only empty cells. The <gs:colCount> element can appear as a child
of <atom:entry> or <atom:feed>
"""
_qname = GS_TEMPLATE % 'colCount'
class Field(atom.core.XmlElement):
"""The gs:field element.
  A single-cell field within a record. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'field'
index = 'index'
name = 'name'
class Column(Field):
"""The gs:column element."""
_qname = GS_TEMPLATE % 'column'
class Data(atom.core.XmlElement):
"""The gs:data element.
A data region of a table. Contained in an <atom:entry> element.
"""
_qname = GS_TEMPLATE % 'data'
column = [Column]
insertion_mode = 'insertionMode'
num_rows = 'numRows'
start_row = 'startRow'
class Header(atom.core.XmlElement):
"""The gs:header element.
Indicates which row is the header row. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'header'
row = 'row'
class RowCount(atom.core.XmlElement):
"""The gs:rowCount element.
Indicates the number of total rows in the worksheet, including rows that
contain only empty cells. The <gs:rowCount> element can appear as a
child of <atom:entry> or <atom:feed>.
"""
_qname = GS_TEMPLATE % 'rowCount'
class Worksheet(atom.core.XmlElement):
"""The gs:worksheet element.
The worksheet where the table lives.Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'worksheet'
name = 'name'
class Spreadsheet(gdata.data.GDEntry):
"""An Atom entry which represents a Google Spreadsheet."""
def find_worksheets_feed(self):
return self.find_url(WORKSHEETS_REL)
FindWorksheetsFeed = find_worksheets_feed
def get_spreadsheet_key(self):
"""Extracts the spreadsheet key unique to this spreadsheet."""
return self.get_id().split('/')[-1]
GetSpreadsheetKey = get_spreadsheet_key
class SpreadsheetsFeed(gdata.data.GDFeed):
"""An Atom feed listing a user's Google Spreadsheets."""
entry = [Spreadsheet]
class WorksheetEntry(gdata.data.GDEntry):
"""An Atom entry representing a single worksheet in a spreadsheet."""
row_count = RowCount
col_count = ColCount
def get_worksheet_id(self):
"""The worksheet ID identifies this worksheet in its spreadsheet."""
return self.get_id().split('/')[-1]
GetWorksheetId = get_worksheet_id
class WorksheetsFeed(gdata.data.GDFeed):
"""A feed containing the worksheets in a single spreadsheet."""
entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
"""An Atom entry that represents a subsection of a worksheet.
  A table allows you to treat part or all of a worksheet somewhat like a
  table in a database, that is, as a set of structured data items. Tables
  don't exist until you explicitly create them; before you can use a table
  feed, you have to explicitly define where the table data comes from.
"""
data = Data
header = Header
worksheet = Worksheet
def get_table_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
GetTableId = get_table_id
class TablesFeed(gdata.data.GDFeed):
"""An Atom feed containing the tables defined within a worksheet."""
entry = [Table]
class Record(gdata.data.GDEntry):
"""An Atom entry representing a single record in a table.
Note that the order of items in each record is the same as the order of
columns in the table definition, which may not match the order of
columns in the GUI.
"""
field = [Field]
def value_for_index(self, column_index):
for field in self.field:
if field.index == column_index:
return field.text
raise FieldMissing('There is no field for %s' % column_index)
ValueForIndex = value_for_index
def value_for_name(self, name):
for field in self.field:
if field.name == name:
return field.text
raise FieldMissing('There is no field for %s' % name)
ValueForName = value_for_name
def get_record_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
class RecordsFeed(gdata.data.GDFeed):
"""An Atom feed containing the individuals records in a table."""
entry = [Record]
class ListRow(atom.core.XmlElement):
"""A gsx column value within a row.
The local tag in the _qname is blank and must be set to the column
name. For example, when adding to a ListEntry, do:
col_value = ListRow(text='something')
col_value._qname = col_value._qname % 'mycolumnname'
"""
_qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
class ListEntry(gdata.data.GDEntry):
"""An Atom entry representing a worksheet row in the list feed.
The values for a particular column can be get and set using
x.get_value('columnheader') and x.set_value('columnheader', 'value').
See also the explanation of column names in the ListFeed class.
"""
def get_value(self, column_name):
"""Returns the displayed text for the desired column in this row.
    The formula or input which generated the displayed value is not accessible
    through the list feed; to see the user's input, use the cells feed.
If a column is not present in this spreadsheet, or there is no value
for a column in this row, this method will return None.
"""
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) == 0:
return None
return values[0].text
def set_value(self, column_name, value):
"""Changes the value of cell in this row under the desired column name.
Warning: if the cell contained a formula, it will be wiped out by setting
the value using the list feed since the list feed only works with
displayed values.
No client side checking is performed on the column_name, you need to
ensure that the column_name is the local tag name in the gsx tag for the
column. For example, the column_name will not contain special characters,
spaces, uppercase letters, etc.
"""
# Try to find the column in this row to change an existing value.
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) > 0:
values[0].text = value
else:
# There is no value in this row for the desired column, so add a new
# gsx:column_name element.
new_value = ListRow(text=value)
new_value._qname = new_value._qname % (column_name,)
self._other_elements.append(new_value)
def to_dict(self):
"""Converts this row to a mapping of column names to their values."""
result = {}
values = self.get_elements(namespace=GSX_NAMESPACE)
for item in values:
result[item._get_tag()] = item.text
return result
def from_dict(self, values):
"""Sets values for this row from the dictionary.
Old values which are already in the entry will not be removed unless
they are overwritten with new values from the dict.
"""
for column, value in values.items():
self.set_value(column, value)
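

# --- Editor's illustration (hedged, not part of the original module) ---
# Round-tripping a row value through the gsx elements; constructing a bare
# ListEntry() outside a parsed feed is assumed to work for this sketch:
#
#     row = ListEntry()
#     row.set_value('cost', '2000')
#     assert row.get_value('cost') == '2000'
#     assert row.to_dict() == {'cost': '2000'}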
class ListsFeed(gdata.data.GDFeed):
"""An Atom feed in which each entry represents a row in a worksheet.
The first row in the worksheet is used as the column names for the values
in each row. If a header cell is empty, then a unique column ID is used
for the gsx element name.
Spaces in a column name are removed from the name of the corresponding
gsx element.
Caution: The columnNames are case-insensitive. For example, if you see
a <gsx:e-mail> element in a feed, you can't know whether the column
heading in the original worksheet was "e-mail" or "E-Mail".
Note: If two or more columns have the same name, then subsequent columns
of the same name have _n appended to the columnName. For example, if the
first column name is "e-mail", followed by columns named "E-Mail" and
"E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
gsx:e-mail_3 respectively.
"""
entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
"""An Atom entry representing a single cell in a worksheet."""
cell = Cell
class CellsFeed(gdata.data.BatchFeed):
"""An Atom feed contains one entry per cell in a worksheet.
  The cell feed supports batch operations; you can send multiple cell
  operations in one HTTP request.
"""
entry = [CellEntry]
def add_set_cell(self, row, col, input_value):
"""Adds a request to change the contents of a cell to this batch request.
Args:
row: int, The row number for this cell. Numbering starts at 1.
col: int, The column number for this cell. Starts at 1.
input_value: str, The desired formula/content this cell should contain.
"""
self.add_update(CellEntry(
id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % (
self.id.text, row, col)),
cell=Cell(col=str(col), row=str(row), input_value=input_value)))
return self
AddSetCell = add_set_cell
def build_batch_cells_update(spreadsheet_key, worksheet_id):
"""Creates an empty cells feed for adding batch cell updates to.
  Call add_set_cell on the resulting CellsFeed instance, then send the batch
  request. TODO: fill in
Args:
spreadsheet_key: The ID of the spreadsheet
worksheet_id:
"""
feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
return CellsFeed(
id=atom.data.Id(text=feed_id_text),
link=[atom.data.Link(
rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))])
BuildBatchCellsUpdate = build_batch_cells_update
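

# --- Editor's illustration (hedged sketch, not part of the original module) ---
# Assembling the batch cell update that build_batch_cells_update's docstring
# describes. 'spreadsheet_key' and 'od6' are placeholders, and the final send
# step (a batch call on an authenticated spreadsheets client) is assumed.
if __name__ == "__main__":
  feed = build_batch_cells_update("spreadsheet_key", "od6")
  feed.add_set_cell(row=1, col=1, input_value="hello")
  feed.add_set_cell(row=1, col=2, input_value="=SUM(B1:B5)")
  print(feed.id.text)  # .../cells/spreadsheet_key/od6/private/full
  # client.batch(feed)  # hypothetical send through a client object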
| 31.162162
| 82
| 0.674761
|
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
'/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
class Error(Exception):
pass
class FieldMissing(Exception):
pass
class HeaderNotSet(Error):
  pass
class Cell(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'cell'
col = 'col'
input_value = 'inputValue'
numeric_value = 'numericValue'
row = 'row'
class ColCount(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'colCount'
class Field(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'field'
index = 'index'
name = 'name'
class Column(Field):
_qname = GS_TEMPLATE % 'column'
class Data(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'data'
column = [Column]
insertion_mode = 'insertionMode'
num_rows = 'numRows'
start_row = 'startRow'
class Header(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'header'
row = 'row'
class RowCount(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'rowCount'
class Worksheet(atom.core.XmlElement):
_qname = GS_TEMPLATE % 'worksheet'
name = 'name'
class Spreadsheet(gdata.data.GDEntry):
def find_worksheets_feed(self):
return self.find_url(WORKSHEETS_REL)
FindWorksheetsFeed = find_worksheets_feed
def get_spreadsheet_key(self):
return self.get_id().split('/')[-1]
GetSpreadsheetKey = get_spreadsheet_key
class SpreadsheetsFeed(gdata.data.GDFeed):
entry = [Spreadsheet]
class WorksheetEntry(gdata.data.GDEntry):
row_count = RowCount
col_count = ColCount
def get_worksheet_id(self):
return self.get_id().split('/')[-1]
GetWorksheetId = get_worksheet_id
class WorksheetsFeed(gdata.data.GDFeed):
entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
data = Data
header = Header
worksheet = Worksheet
def get_table_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
GetTableId = get_table_id
class TablesFeed(gdata.data.GDFeed):
entry = [Table]
class Record(gdata.data.GDEntry):
field = [Field]
def value_for_index(self, column_index):
for field in self.field:
if field.index == column_index:
return field.text
raise FieldMissing('There is no field for %s' % column_index)
ValueForIndex = value_for_index
def value_for_name(self, name):
for field in self.field:
if field.name == name:
return field.text
raise FieldMissing('There is no field for %s' % name)
ValueForName = value_for_name
def get_record_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
class RecordsFeed(gdata.data.GDFeed):
entry = [Record]
class ListRow(atom.core.XmlElement):
_qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
class ListEntry(gdata.data.GDEntry):
def get_value(self, column_name):
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) == 0:
return None
return values[0].text
def set_value(self, column_name, value):
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) > 0:
values[0].text = value
else:
new_value = ListRow(text=value)
new_value._qname = new_value._qname % (column_name,)
self._other_elements.append(new_value)
def to_dict(self):
result = {}
values = self.get_elements(namespace=GSX_NAMESPACE)
for item in values:
result[item._get_tag()] = item.text
return result
def from_dict(self, values):
for column, value in values.items():
self.set_value(column, value)
class ListsFeed(gdata.data.GDFeed):
entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
cell = Cell
class CellsFeed(gdata.data.BatchFeed):
entry = [CellEntry]
def add_set_cell(self, row, col, input_value):
self.add_update(CellEntry(
id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % (
self.id.text, row, col)),
cell=Cell(col=str(col), row=str(row), input_value=input_value)))
return self
AddSetCell = add_set_cell
def build_batch_cells_update(spreadsheet_key, worksheet_id):
feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
return CellsFeed(
id=atom.data.Id(text=feed_id_text),
link=[atom.data.Link(
rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))])
BuildBatchCellsUpdate = build_batch_cells_update
| true
| true
|
790267a642be841e70d1a8236f4049183b14d8ba
| 541
|
py
|
Python
|
quiz/migrations/0003_auto_20191120_2238.py
|
chgo19/KnowledgeAppOne
|
7a61bba49d984e9736a39ddc31737eb58eba1e23
|
[
"MIT"
] | null | null | null |
quiz/migrations/0003_auto_20191120_2238.py
|
chgo19/KnowledgeAppOne
|
7a61bba49d984e9736a39ddc31737eb58eba1e23
|
[
"MIT"
] | null | null | null |
quiz/migrations/0003_auto_20191120_2238.py
|
chgo19/KnowledgeAppOne
|
7a61bba49d984e9736a39ddc31737eb58eba1e23
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-11-20 17:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0002_question_image'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='answer',
),
migrations.AddField(
model_name='choice',
name='is_correct',
field=models.BooleanField(default=False, help_text='Mark right if this is the right choice'),
),
]
| 23.521739
| 105
| 0.5878
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0002_question_image'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='answer',
),
migrations.AddField(
model_name='choice',
name='is_correct',
field=models.BooleanField(default=False, help_text='Mark right if this is the right choice'),
),
]
| true
| true
|
79026b31bd5e8b437c6118231c44de5923735f31
| 638
|
py
|
Python
|
DCS311 Artificial Intelligence/KNN/lab1_code/M3/dist.py
|
Lan-Jing/Courses
|
540db9499b8725ca5b82a2c4e7a3da09f73c0efa
|
[
"MIT"
] | 1
|
2021-12-17T23:09:00.000Z
|
2021-12-17T23:09:00.000Z
|
DCS311 Artificial Intelligence/KNN/lab1_code/M3/dist.py
|
Lan-Jing/Courses
|
540db9499b8725ca5b82a2c4e7a3da09f73c0efa
|
[
"MIT"
] | null | null | null |
DCS311 Artificial Intelligence/KNN/lab1_code/M3/dist.py
|
Lan-Jing/Courses
|
540db9499b8725ca5b82a2c4e7a3da09f73c0efa
|
[
"MIT"
] | 1
|
2021-08-03T23:42:06.000Z
|
2021-08-03T23:42:06.000Z
|
# module for distance computation;
import numpy as np
def dist(arraya, arrayb, mode):
    if mode == 0:  # Manhattan (L1) distance
        dis = np.sum(np.abs(np.subtract(arraya, arrayb)))
    elif mode == 1:  # Euclidean (L2) distance
        dis = np.sqrt(np.sum(np.power(np.subtract(arraya, arrayb), 2)))
    else:  # cosine distance
dis = 1 - np.dot(arraya, arrayb) / np.sqrt(np.sum(np.power(arraya, 2)) * np.sum(np.power(arrayb, 2)))
return dis
def corr(arraya, arrayb, show):  # Pearson correlation; 'show' is accepted but unused
a = np.subtract(arraya, np.mean(arraya))
b = np.subtract(arrayb, np.mean(arrayb))
corr = np.sum(np.multiply(a, b)) / np.sqrt(np.multiply(np.sum(np.power(a, 2)), np.sum(np.power(b, 2))))
return corr
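

# --- Editor's illustration (not part of the original module) ---
# Quick sanity check of the three distance modes and the correlation helper.
if __name__ == "__main__":
    a = np.array([1.0, 0.0])
    b = np.array([0.0, 1.0])
    print(dist(a, b, 0))       # Manhattan  -> 2.0
    print(dist(a, b, 1))       # Euclidean  -> 1.414...
    print(dist(a, b, 2))       # cosine     -> 1.0
    print(corr(a, b, show=0))  # Pearson    -> -1.0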
| 35.444444
| 109
| 0.619122
|
import numpy as np
def dist(arraya, arrayb, mode):
if mode == 0:
dis = np.sum(np.abs(np.subtract(arraya, arrayb)))
elif mode == 1:
dis = np.sqrt(np.sum(np.power(np.subtract(arraya, arrayb), 2)))
else:
dis = 1 - np.dot(arraya, arrayb) / np.sqrt(np.sum(np.power(arraya, 2)) * np.sum(np.power(arrayb, 2)))
return dis
def corr(arraya, arrayb, show):
a = np.subtract(arraya, np.mean(arraya))
b = np.subtract(arrayb, np.mean(arrayb))
corr = np.sum(np.multiply(a, b)) / np.sqrt(np.multiply(np.sum(np.power(a, 2)), np.sum(np.power(b, 2))))
return corr
| true
| true
|
79026c237d69a77f4bba9d757a7640288ac10253
| 9,207
|
py
|
Python
|
thor_client/experiment_client.py
|
JamesBrofos/Thor-Python-Client
|
2cf1c2a876cf1dcce391ad2f33c8572cb0acd2e7
|
[
"MIT"
] | null | null | null |
thor_client/experiment_client.py
|
JamesBrofos/Thor-Python-Client
|
2cf1c2a876cf1dcce391ad2f33c8572cb0acd2e7
|
[
"MIT"
] | 2
|
2017-08-04T15:59:35.000Z
|
2021-06-01T22:06:25.000Z
|
thor_client/experiment_client.py
|
JamesBrofos/Thor-Python-Client
|
2cf1c2a876cf1dcce391ad2f33c8572cb0acd2e7
|
[
"MIT"
] | 2
|
2017-06-23T18:46:03.000Z
|
2019-11-05T19:27:47.000Z
|
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
"""Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
"""
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
"""Initialize parameters of the experiment client object."""
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
def submit_observation(self, config, target):
"""Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v)
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
"""Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
that serve the object of maximizing the metric of interest.
Parameters:
            rand_prob (optional, float): The probability that a random
                point in the input space is chosen instead of selecting a
                configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
def best_configuration(self):
"""Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
def pending_recommendations(self):
"""Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
of "limbo" state in which is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
"""
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
"""Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
"""
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
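

# --- Editor's illustration (hedged, not part of the original module) ---
# The optimization loop the docstrings above describe. `f` is a hypothetical
# objective, and the recommendation's `config` attribute is an assumption
# about RecommendationClient, not something this file confirms:
#
#     exp = ExperimentClient(identifier, name, date, dims)
#     for _ in range(30):
#         rec = exp.create_recommendation()
#         exp.submit_observation(rec.config, f(rec.config["x"], rec.config["y"]))
#     print(exp.best_configuration())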
| 44.912195
| 80
| 0.641686
|
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
def __init__(self, identifier, name, date, dims, auth_token=auth_token,
base_url=base_url):
self.experiment_id = identifier
self.name = name
self.date = date
self.dims = dims
self.auth_token = auth_token
self.base_url = base_url
def submit_observation(self, config, target):
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"configuration": json.dumps(config),
"target": target
}
result = requests.post(
url=self.base_url.format("submit_observation"),
json=post_data
)
return json_parser(result, self.auth_token)
def create_recommendation(
self,
rand_prob=0.,
n_models=5,
description="",
acq_func="expected_improvement",
integrate_acq=True
):
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id,
"n_models": n_models,
"rand_prob": rand_prob,
"description": description,
"acq_func": acq_func,
"integrate_acq": integrate_acq
}
result = requests.post(
url=self.base_url.format("create_recommendation"),
json=post_data
)
recs = json_parser(result, self.auth_token, RecommendationClient)
return recs[0] if len(recs) == 1 else recs
def best_configuration(self):
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("best_configuration"),
json=post_data
)
return json_parser(result, self.auth_token)
def pending_recommendations(self):
post_data = {
"auth_token": self.auth_token,
"experiment_id": self.experiment_id
}
result = requests.post(
url=self.base_url.format("pending_recommendations"),
json=post_data
)
return json_parser(result, self.auth_token, RecommendationClient)
@classmethod
def from_dict(cls, dictionary, auth_token):
return cls(
identifier=dictionary["id"],
name=dictionary["name"],
date=dictionary["date"],
dims=dictionary["dimensions"],
auth_token=auth_token
)
| true
| true
|
79026ddcc57a39b498a7d5c819166bc735607d4c
| 1,757
|
py
|
Python
|
digital_image_processing/filters/gaussian_filter.py
|
czuo0303/Python
|
4b71e2647b38f146e795a645957b311905c6c33f
|
[
"MIT"
] | 7
|
2020-03-29T08:20:07.000Z
|
2022-03-19T06:41:19.000Z
|
digital_image_processing/filters/gaussian_filter.py
|
Mathewsmusukuma/Python
|
4866b1330bc7c77c0ed0e050e6b99efdeb026448
|
[
"MIT"
] | 8
|
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
digital_image_processing/filters/gaussian_filter.py
|
Mathewsmusukuma/Python
|
4866b1330bc7c77c0ed0e050e6b99efdeb026448
|
[
"MIT"
] | 6
|
2020-05-31T20:40:50.000Z
|
2021-12-23T04:52:15.000Z
|
"""
Implementation of gaussian filter algorithm
"""
from cv2 import imread, cvtColor, COLOR_BGR2GRAY, imshow, waitKey
from numpy import pi, mgrid, exp, square, zeros, ravel, dot, uint8
from itertools import product
def gen_gaussian_kernel(k_size, sigma):
center = k_size // 2
x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    # 2D Gaussian: 1 / (2*pi*sigma^2) * exp(-(x^2 + y^2) / (2*sigma^2))
    g = 1 / (2 * pi * square(sigma)) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
return g
def gaussian_filter(image, k_size, sigma):
height, width = image.shape[0], image.shape[1]
# dst image height and width
dst_height = height - k_size + 1
dst_width = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
image_array = zeros((dst_height * dst_width, k_size * k_size))
row = 0
for i, j in product(range(dst_height), range(dst_width)):
window = ravel(image[i : i + k_size, j : j + k_size])
image_array[row, :] = window
row += 1
# turn the kernel into shape(k*k, 1)
gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
filter_array = ravel(gaussian_kernel)
# reshape and get the dst image
dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
return dst
if __name__ == "__main__":
# read original image
img = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
gray = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussian3x3)
imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
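# A hedged alternative (assumes numpy >= 1.20 for sliding_window_view): the
# im2col loop in gaussian_filter can be vectorised; this sketch is an
# illustration, not the original implementation.
from numpy.lib.stride_tricks import sliding_window_view
def gaussian_filter_vectorised(image, k_size, sigma):
    # windows has shape (H - k + 1, W - k + 1, k, k)
    windows = sliding_window_view(image, (k_size, k_size))
    kernel = gen_gaussian_kernel(k_size, sigma)
    return (windows * kernel).sum(axis=(-2, -1)).astype(uint8)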
| 32.537037
| 85
| 0.669892
|
from cv2 import imread, cvtColor, COLOR_BGR2GRAY, imshow, waitKey
from numpy import pi, mgrid, exp, square, zeros, ravel, dot, uint8
from itertools import product
def gen_gaussian_kernel(k_size, sigma):
center = k_size // 2
x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * square(sigma)) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
return g
def gaussian_filter(image, k_size, sigma):
height, width = image.shape[0], image.shape[1]
dst_height = height - k_size + 1
dst_width = width - k_size + 1
image_array = zeros((dst_height * dst_width, k_size * k_size))
row = 0
for i, j in product(range(dst_height), range(dst_width)):
window = ravel(image[i : i + k_size, j : j + k_size])
image_array[row, :] = window
row += 1
gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
filter_array = ravel(gaussian_kernel)
dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
return dst
if __name__ == "__main__":
img = imread(r"../image_data/lena.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
imshow("gaussian filter with 3x3 mask", gaussian3x3)
imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| true
| true
|
79026e40c9b60495f76ac27b1488c09fb6a0b276
| 102
|
py
|
Python
|
UniGrammarRuntime/backends/__init__.py
|
UniGrammar/UniGrammarRuntime.py
|
58097f1d03f35c346a0534d1eb821b98edd25ad5
|
[
"Unlicense"
] | null | null | null |
UniGrammarRuntime/backends/__init__.py
|
UniGrammar/UniGrammarRuntime.py
|
58097f1d03f35c346a0534d1eb821b98edd25ad5
|
[
"Unlicense"
] | null | null | null |
UniGrammarRuntime/backends/__init__.py
|
UniGrammar/UniGrammarRuntime.py
|
58097f1d03f35c346a0534d1eb821b98edd25ad5
|
[
"Unlicense"
] | null | null | null |
from .multilanguage import antlr4, waxeye
from .python import TatSu, arpeggio, parglare, parsimonious
| 34
| 59
| 0.823529
|
from .multilanguage import antlr4, waxeye
from .python import TatSu, arpeggio, parglare, parsimonious
| true
| true
|
79026f8d54ffa067cedca51853fd88f5cc6dbbb2
| 1,392
|
py
|
Python
|
src/schemathesis/cli/options.py
|
PrayagS/schemathesis
|
80eb0a689ca197a0999e80b35d5dcbbbd88ddf4b
|
[
"MIT"
] | 1
|
2021-03-24T08:55:10.000Z
|
2021-03-24T08:55:10.000Z
|
src/schemathesis/cli/options.py
|
PrayagS/schemathesis
|
80eb0a689ca197a0999e80b35d5dcbbbd88ddf4b
|
[
"MIT"
] | null | null | null |
src/schemathesis/cli/options.py
|
PrayagS/schemathesis
|
80eb0a689ca197a0999e80b35d5dcbbbd88ddf4b
|
[
"MIT"
] | 1
|
2021-07-13T11:01:38.000Z
|
2021-07-13T11:01:38.000Z
|
from enum import Enum
from typing import List, Optional, Type, Union
import click
from ..types import NotSet
class CSVOption(click.Choice):
def __init__(self, choices: Type[Enum]):
self.enum = choices
super().__init__(tuple(choices.__members__))
def convert(
self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]
) -> List[Enum]:
items = [item for item in value.split(",") if item]
invalid_options = set(items) - set(self.choices)
if not invalid_options and items:
return [self.enum[item] for item in items]
# Sort to keep the error output consistent with the passed values
sorted_options = ", ".join(sorted(invalid_options, key=items.index))
available_options = ", ".join(self.choices)
self.fail(f"invalid choice(s): {sorted_options}. Choose from {available_options}")
not_set = NotSet()
class OptionalInt(click.types.IntRange):
def convert( # type: ignore
self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]
) -> Union[int, NotSet]:
if value == "None":
return not_set
try:
int(value)
return super().convert(value, param, ctx)
except ValueError:
self.fail("%s is not a valid integer or None" % value, param, ctx)
| 33.95122
| 98
| 0.640805
|
from enum import Enum
from typing import List, Optional, Type, Union
import click
from ..types import NotSet
class CSVOption(click.Choice):
def __init__(self, choices: Type[Enum]):
self.enum = choices
super().__init__(tuple(choices.__members__))
def convert(
self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]
) -> List[Enum]:
items = [item for item in value.split(",") if item]
invalid_options = set(items) - set(self.choices)
if not invalid_options and items:
return [self.enum[item] for item in items]
sorted_options = ", ".join(sorted(invalid_options, key=items.index))
available_options = ", ".join(self.choices)
self.fail(f"invalid choice(s): {sorted_options}. Choose from {available_options}")
not_set = NotSet()
class OptionalInt(click.types.IntRange):
def convert(
self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]
) -> Union[int, NotSet]:
if value == "None":
return not_set
try:
int(value)
return super().convert(value, param, ctx)
except ValueError:
self.fail("%s is not a valid integer or None" % value, param, ctx)
| true
| true
|
79026fabd03fab0abe84bbc68e27af2a6cc9bee9
| 1,866
|
py
|
Python
|
tests/basics/Unpacking35.py
|
Mortal/Nuitka
|
5150eeff7ff845ed4993c773449cd81b7f127c6b
|
[
"Apache-2.0"
] | null | null | null |
tests/basics/Unpacking35.py
|
Mortal/Nuitka
|
5150eeff7ff845ed4993c773449cd81b7f127c6b
|
[
"Apache-2.0"
] | null | null | null |
tests/basics/Unpacking35.py
|
Mortal/Nuitka
|
5150eeff7ff845ed4993c773449cd81b7f127c6b
|
[
"Apache-2.0"
] | 1
|
2018-12-16T23:51:18.000Z
|
2018-12-16T23:51:18.000Z
|
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def tupleUnpacking():
return (*a, b, *c)
def listUnpacking():
return [*a, b, *c]
def setUnpacking():
return {*a, b, *c}
def dictUnpacking():
return {"a" : 1, **d}
a = range(3)
b = 5
c = range(8,10)
d = {"a" : 2}
print("Tuple unpacked", tupleUnpacking())
print("List unpacked", listUnpacking())
print("Set unpacked", setUnpacking())
print("Dict unpacked", dictUnpacking())
non_iterable = 2.0
def tupleUnpackingError():
try:
return (*a,*non_iterable,*c)
except Exception as e:
return e
def listUnpackingError():
try:
return [*a,*non_iterable,*c]
except Exception as e:
return e
def setUnpackingError():
try:
return {*a,*non_iterable,*c}
except Exception as e:
return e
def dictUnpackingError():
try:
return {"a" : 1, **non_iterable}
except Exception as e:
return e
print("Tuple unpacked error:", tupleUnpackingError())
print("List unpacked error:", listUnpackingError())
print("Set unpacked error:", setUnpackingError())
print("Dict unpacked error:", dictUnpackingError())
| 25.561644
| 79
| 0.665595
|
def tupleUnpacking():
return (*a, b, *c)
def listUnpacking():
return [*a, b, *c]
def setUnpacking():
return {*a, b, *c}
def dictUnpacking():
return {"a" : 1, **d}
a = range(3)
b = 5
c = range(8,10)
d = {"a" : 2}
print("Tuple unpacked", tupleUnpacking())
print("List unpacked", listUnpacking())
print("Set unpacked", setUnpacking())
print("Dict unpacked", dictUnpacking())
non_iterable = 2.0
def tupleUnpackingError():
try:
return (*a,*non_iterable,*c)
except Exception as e:
return e
def listUnpackingError():
try:
return [*a,*non_iterable,*c]
except Exception as e:
return e
def setUnpackingError():
try:
return {*a,*non_iterable,*c}
except Exception as e:
return e
def dictUnpackingError():
try:
return {"a" : 1, **non_iterable}
except Exception as e:
return e
print("Tuple unpacked error:", tupleUnpackingError())
print("List unpacked error:", listUnpackingError())
print("Set unpacked error:", setUnpackingError())
print("Dict unpacked error:", dictUnpackingError())
| true
| true
|
7902723262687509437cdd68aacb60a0299cb577
| 3,773
|
py
|
Python
|
pub/permission/resource.py
|
DASTUDIO/MyVHost
|
b9eda56a67c2df9236b7866087bc7f465542f951
|
[
"MIT"
] | 2
|
2021-07-27T10:38:57.000Z
|
2021-10-10T20:42:56.000Z
|
pub/permission/resource.py
|
DASTUDIO/MyVHost
|
b9eda56a67c2df9236b7866087bc7f465542f951
|
[
"MIT"
] | null | null | null |
pub/permission/resource.py
|
DASTUDIO/MyVHost
|
b9eda56a67c2df9236b7866087bc7f465542f951
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from pub.tables.resources import *
from pub.tables.user import *
import pub.client.login as login
from pub.permission.user import is_logged,is_owner
def is_valid_key(key, r_type):
    # A key is valid only when no existing resource record already uses it.
    try:
        resource_type.objects.get(key=key)
        return False
    except Exception:
        pass
    try:
        resource_info.objects.get(key=key)
        return False
    except Exception:
        pass
    if r_type == -1:
        return True
    try:
        if r_type == s.RESOURCE_TYPE_CUSTOMED:
            resource_customed.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_TEMPLATED:
            resource_templated.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_RESTFUL_API:
            resource_restful.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_IFRAME:
            resource_iframe.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_SHORT_LINK:
            resource_link.objects.get(key=key)
            return False
        else:
            return False
    except Exception:
        # .get() raised, so no record of this type uses the key.
        return True
def set_permission(key, readable, writeable, modifiable, token=''):
    # Drop any existing permission row for this key, then recreate it; the
    # explicit raise routes both the "found" and "not found" cases into the
    # except branch that creates the fresh row.
    try:
        res = resource_permission.objects.get(key=key)
        res.delete()
        raise Exception()
    except Exception:
        resource_permission.objects.create(key=key, readable=readable, writeable=writeable, modifiable=modifiable, token=token)
def can_read(request, key, token=''):
    try:
        readable, _, _, verify_token = __get_resource_permission(key)
        return __accessibility_verify(readable, request, key, token, verify_token)
    except Exception:
        return False
def can_write(request, key, token=''):
    try:
        _, writeable, _, verify_token = __get_resource_permission(key)
        return __accessibility_verify(writeable, request, key, token, verify_token)
    except Exception:
        return False
def can_modify(request, key, token=''):
    try:
        _, _, modifiable, verify_token = __get_resource_permission(key)
        return __accessibility_verify(modifiable, request, key, token, verify_token)
    except Exception:
        return False
def can_create(request, r_type):
if not is_logged(request):
return False
return True
#
# try:
# user = login.get_user_by_session(request,request.session.get(s.SESSION_LOGIN))
# except:
# return False
#
# p = user_permission.objects.get(user_id=user, type=r_type).volume
#
# if p>0:
# return True
#
# return False
def did_create(request,r_type):
if is_logged(request):
user = login.get_user_by_session(request,request.session.get(s.SESSION_LOGIN))
p = user_permission.objects.get(user_id=user, type=r_type)
p.volume = p.volume - 1
p.save()
def __get_resource_permission(key):
p = resource_permission.objects.get(key=key)
readable = p.readable
writeable = p.writeable
modifiable = p.modifiable
token = p.token
return readable, writeable, modifiable, token
def __accessibility_verify(accessibility, request, key, token, verify_token):
if accessibility == s.ACCESSIBILITY_PUBLIC:
return True
elif accessibility == s.ACCESSIBILITY_LOGIN or accessibility == s.ACCESSIBILITY_LOGIN_OR_TOKEN:
if is_logged(request):
return True
else:
if token != '':
if token == verify_token:
return True
elif accessibility == s.ACCESSIBILITY_PRIVATE:
if is_logged(request):
if is_owner(request, key):
return True
return False
elif accessibility == s.ACCESSIBILITY_TOKEN:
if token != '':
if token == verify_token:
return True
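# A hedged usage sketch: a Django view gating reads with can_read(). The view
# function and response texts are illustrative assumptions, not part of this
# module.
from django.http import HttpResponse, HttpResponseForbidden
def view_resource_example(request, key):
    token = request.GET.get('token', '')
    if not can_read(request, key, token=token):
        return HttpResponseForbidden('no read permission')
    return HttpResponse('ok')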
| 26.384615
| 123
| 0.640074
|
from pub.tables.resources import *
from pub.tables.user import *
import pub.client.login as login
from pub.permission.user import is_logged,is_owner
def is_valid_key(key, r_type):
    try:
        resource_type.objects.get(key=key)
        return False
    except Exception:
        pass
    try:
        resource_info.objects.get(key=key)
        return False
    except Exception:
        pass
    if r_type == -1:
        return True
    try:
        if r_type == s.RESOURCE_TYPE_CUSTOMED:
            resource_customed.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_TEMPLATED:
            resource_templated.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_RESTFUL_API:
            resource_restful.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_IFRAME:
            resource_iframe.objects.get(key=key)
            return False
        elif r_type == s.RESOURCE_TYPE_SHORT_LINK:
            resource_link.objects.get(key=key)
            return False
        else:
            return False
    except Exception:
        return True
def set_permission(key, readable, writeable, modifiable, token=''):
    try:
        res = resource_permission.objects.get(key=key)
        res.delete()
        raise Exception()
    except Exception:
        resource_permission.objects.create(key=key, readable=readable, writeable=writeable, modifiable=modifiable, token=token)
def can_read(request, key, token=''):
    try:
        readable, _, _, verify_token = __get_resource_permission(key)
        return __accessibility_verify(readable, request, key, token, verify_token)
    except Exception:
        return False
def can_write(request, key, token=''):
    try:
        _, writeable, _, verify_token = __get_resource_permission(key)
        return __accessibility_verify(writeable, request, key, token, verify_token)
    except Exception:
        return False
def can_modify(request, key, token=''):
    try:
        _, _, modifiable, verify_token = __get_resource_permission(key)
        return __accessibility_verify(modifiable, request, key, token, verify_token)
    except Exception:
        return False
def can_create(request, r_type):
if not is_logged(request):
return False
return True
def did_create(request,r_type):
if is_logged(request):
user = login.get_user_by_session(request,request.session.get(s.SESSION_LOGIN))
p = user_permission.objects.get(user_id=user, type=r_type)
p.volume = p.volume - 1
p.save()
def __get_resource_permission(key):
p = resource_permission.objects.get(key=key)
readable = p.readable
writeable = p.writeable
modifiable = p.modifiable
token = p.token
return readable, writeable, modifiable, token
def __accessibility_verify(accessibility, request, key, token, verify_token):
if accessibility == s.ACCESSIBILITY_PUBLIC:
return True
elif accessibility == s.ACCESSIBILITY_LOGIN or accessibility == s.ACCESSIBILITY_LOGIN_OR_TOKEN:
if is_logged(request):
return True
else:
if token != '':
if token == verify_token:
return True
elif accessibility == s.ACCESSIBILITY_PRIVATE:
if is_logged(request):
if is_owner(request, key):
return True
return False
elif accessibility == s.ACCESSIBILITY_TOKEN:
if token != '':
if token == verify_token:
return True
| true
| true
|
79027255f2380da67f8d9c7da58a3bd5dccd5411
| 11,619
|
py
|
Python
|
MetamorphicTests/all_mutants/sales_forecasting_file/273.py
|
anuragbms/Sales-forecasting-with-RNNs
|
22b4639ecbb48381af53326ace94a3538201b586
|
[
"Apache-2.0"
] | null | null | null |
MetamorphicTests/all_mutants/sales_forecasting_file/273.py
|
anuragbms/Sales-forecasting-with-RNNs
|
22b4639ecbb48381af53326ace94a3538201b586
|
[
"Apache-2.0"
] | null | null | null |
MetamorphicTests/all_mutants/sales_forecasting_file/273.py
|
anuragbms/Sales-forecasting-with-RNNs
|
22b4639ecbb48381af53326ace94a3538201b586
|
[
"Apache-2.0"
] | 1
|
2022-02-06T14:59:43.000Z
|
2022-02-06T14:59:43.000Z
|
def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
                    print('the forecast of first 5 original scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
                print('the forecast of first 5 original scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
                print('the forecast of first 5 original scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis')
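# A hedged sketch isolating the windowing logic used above: it builds
# (TIME_STEPS -> NUMBER_OF_DAYS_TO_FORECAST) training pairs from a 1-D series.
# The function name and the toy series are assumptions for illustration only.
import numpy as np
def make_windows(series, time_steps, horizon):
    n = len(series) - time_steps - horizon + 1
    inputs = np.zeros((n, time_steps, 1))
    targets = np.zeros((n, horizon))
    for start in range(n):
        inputs[start, :, 0] = series[start:start + time_steps]
        targets[start] = series[start + time_steps:start + time_steps + horizon]
    return inputs, targets
# e.g. make_windows(np.arange(15, dtype=float), time_steps=10, horizon=1)
# yields inputs of shape (5, 10, 1) and targets of shape (5, 1).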
| 31.150134
| 232
| 0.62897
|
def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
                    print('the forecast of first 5 original scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
                print('the forecast of first 5 original scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
                print('the forecast of first 5 original scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis')
| true
| true
|
790272a3ad93584e9707d0533955430526f5f09d
| 154
|
py
|
Python
|
src/enquiries/apps.py
|
kkamara/django-app
|
5e6e3e3200c7f01d1c3a67e3bcb5835d8db1ffff
|
[
"BSD-3-Clause"
] | null | null | null |
src/enquiries/apps.py
|
kkamara/django-app
|
5e6e3e3200c7f01d1c3a67e3bcb5835d8db1ffff
|
[
"BSD-3-Clause"
] | null | null | null |
src/enquiries/apps.py
|
kkamara/django-app
|
5e6e3e3200c7f01d1c3a67e3bcb5835d8db1ffff
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class EnquiriesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'src.enquiries'
| 22
| 56
| 0.766234
|
from django.apps import AppConfig
class EnquiriesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'src.enquiries'
| true
| true
|
790274b8ec9a71f78ebd00280c297b3198699f34
| 7,718
|
py
|
Python
|
tests/test_format_detection.py
|
jenhaoyang/datumaro
|
add81ddb59502362fa65fa07e5bc4d8c9f61afde
|
[
"MIT"
] | null | null | null |
tests/test_format_detection.py
|
jenhaoyang/datumaro
|
add81ddb59502362fa65fa07e5bc4d8c9f61afde
|
[
"MIT"
] | null | null | null |
tests/test_format_detection.py
|
jenhaoyang/datumaro
|
add81ddb59502362fa65fa07e5bc4d8c9f61afde
|
[
"MIT"
] | 1
|
2021-12-15T22:15:59.000Z
|
2021-12-15T22:15:59.000Z
|
from unittest import TestCase
import os.path as osp
from datumaro.components.format_detection import (
FormatDetectionConfidence, FormatRequirementsUnmet, apply_format_detector,
)
from datumaro.util.test_utils import TestDir
from tests.requirements import Requirements, mark_requirement
class FormatDetectionTest(TestCase):
def setUp(self) -> None:
test_dir_context = TestDir()
self._dataset_root = test_dir_context.__enter__()
self.addCleanup(test_dir_context.__exit__, None, None, None)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_empty_detector(self):
result = apply_format_detector(self._dataset_root, lambda c: None)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_custom_confidence(self):
result = apply_format_detector(self._dataset_root,
lambda c: FormatDetectionConfidence.LOW)
self.assertEqual(result, FormatDetectionConfidence.LOW)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_root_path(self):
provided_root = None
def detect(context):
nonlocal provided_root
provided_root = context.root_path
apply_format_detector(self._dataset_root, detect)
self.assertEqual(provided_root, self._dataset_root)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_fail(self):
def detect(context):
context.fail('abcde')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives, ('abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_success(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
selected_file = None
def detect(context):
nonlocal selected_file
selected_file = context.require_file('**/[fg]oo*.t?t')
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
self.assertEqual(selected_file, 'foobar.txt')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_failure(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
context.require_file('*/*')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('*/*', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_exclude_fname_one(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
context.require_file('foobar.*', exclude_fnames='*.txt')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('foobar.*', result.exception.failed_alternatives[0])
self.assertIn('*.txt', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_exclude_fname_many(self):
for ext in ('txt', 'lst'):
with open(osp.join(self._dataset_root, f'foobar.{ext}'), 'w'):
pass
def detect(context):
context.require_file('foobar.*', exclude_fnames=('*.txt', '*.lst'))
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('foobar.*', result.exception.failed_alternatives[0])
self.assertIn('*.txt', result.exception.failed_alternatives[0])
self.assertIn('*.lst', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_success(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w') as f:
print('123', file=f)
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde') as f:
if next(f) != '123\n':
raise Exception
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_failure_bad_file(self):
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
pass
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('foobar.txt: abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_failure_exception(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
raise Exception
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('foobar.txt: abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_nested_req(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
context.fail('abcde')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_any_success(self):
alternatives_executed = set()
def detect(context):
nonlocal alternatives_executed
with context.require_any():
with context.alternative():
alternatives_executed.add(1)
context.fail('bad alternative 1')
with context.alternative():
alternatives_executed.add(2)
# good alternative 2
with context.alternative():
alternatives_executed.add(3)
context.fail('bad alternative 3')
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
self.assertEqual(alternatives_executed, {1, 2, 3})
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_any_failure(self):
def detect(context):
with context.require_any():
with context.alternative():
context.fail('bad alternative 1')
with context.alternative():
context.fail('bad alternative 2')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('bad alternative 1', 'bad alternative 2'))
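# A hedged sketch of a detector built from the context API exercised above
# (require_file, require_any/alternative); the "format" it detects is
# hypothetical and only for illustration.
def detect_hypothetical_format(context):
    context.require_file('**/annotations.txt')
    with context.require_any():
        with context.alternative():
            context.require_file('images/*.png')
        with context.alternative():
            context.require_file('images/*.jpg')
# apply_format_detector(some_root, detect_hypothetical_format) returns
# FormatDetectionConfidence.MEDIUM when these requirements hold, and raises
# FormatRequirementsUnmet otherwise.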
| 38.207921
| 79
| 0.671029
|
from unittest import TestCase
import os.path as osp
from datumaro.components.format_detection import (
FormatDetectionConfidence, FormatRequirementsUnmet, apply_format_detector,
)
from datumaro.util.test_utils import TestDir
from tests.requirements import Requirements, mark_requirement
class FormatDetectionTest(TestCase):
def setUp(self) -> None:
test_dir_context = TestDir()
self._dataset_root = test_dir_context.__enter__()
self.addCleanup(test_dir_context.__exit__, None, None, None)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_empty_detector(self):
result = apply_format_detector(self._dataset_root, lambda c: None)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_custom_confidence(self):
result = apply_format_detector(self._dataset_root,
lambda c: FormatDetectionConfidence.LOW)
self.assertEqual(result, FormatDetectionConfidence.LOW)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_root_path(self):
provided_root = None
def detect(context):
nonlocal provided_root
provided_root = context.root_path
apply_format_detector(self._dataset_root, detect)
self.assertEqual(provided_root, self._dataset_root)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_fail(self):
def detect(context):
context.fail('abcde')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives, ('abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_success(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
selected_file = None
def detect(context):
nonlocal selected_file
selected_file = context.require_file('**/[fg]oo*.t?t')
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
self.assertEqual(selected_file, 'foobar.txt')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_failure(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
context.require_file('*/*')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('*/*', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_exclude_fname_one(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
context.require_file('foobar.*', exclude_fnames='*.txt')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('foobar.*', result.exception.failed_alternatives[0])
self.assertIn('*.txt', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_exclude_fname_many(self):
for ext in ('txt', 'lst'):
with open(osp.join(self._dataset_root, f'foobar.{ext}'), 'w'):
pass
def detect(context):
context.require_file('foobar.*', exclude_fnames=('*.txt', '*.lst'))
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('foobar.*', result.exception.failed_alternatives[0])
self.assertIn('*.txt', result.exception.failed_alternatives[0])
self.assertIn('*.lst', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_success(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w') as f:
print('123', file=f)
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde') as f:
if next(f) != '123\n':
raise Exception
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_failure_bad_file(self):
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
pass
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('foobar.txt: abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_failure_exception(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
raise Exception
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('foobar.txt: abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_nested_req(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
context.fail('abcde')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_any_success(self):
alternatives_executed = set()
def detect(context):
nonlocal alternatives_executed
with context.require_any():
with context.alternative():
alternatives_executed.add(1)
context.fail('bad alternative 1')
with context.alternative():
alternatives_executed.add(2)
with context.alternative():
alternatives_executed.add(3)
context.fail('bad alternative 3')
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
self.assertEqual(alternatives_executed, {1, 2, 3})
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_any_failure(self):
def detect(context):
with context.require_any():
with context.alternative():
context.fail('bad alternative 1')
with context.alternative():
context.fail('bad alternative 2')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('bad alternative 1', 'bad alternative 2'))
| true
| true
|
790274cf702c209f94130afcab2123d5017ee04d
| 511
|
py
|
Python
|
tests/v0x04/test_controller2switch/test_port_stats.py
|
smythtech/python-openflow-legacy
|
f4ddb06ac8c98f074c04f027df4b52542e41c123
|
[
"MIT"
] | null | null | null |
tests/v0x04/test_controller2switch/test_port_stats.py
|
smythtech/python-openflow-legacy
|
f4ddb06ac8c98f074c04f027df4b52542e41c123
|
[
"MIT"
] | null | null | null |
tests/v0x04/test_controller2switch/test_port_stats.py
|
smythtech/python-openflow-legacy
|
f4ddb06ac8c98f074c04f027df4b52542e41c123
|
[
"MIT"
] | null | null | null |
"""Config Port Stats message tests."""
from pyof.v0x04.controller2switch.common import PortStats
from tests.test_struct import TestStruct
class TestPortStats(TestStruct):
"""Config Port Stats message tests."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats')
super().set_raw_dump_object(PortStats)
super().set_minimum_size(112)
| 31.9375
| 75
| 0.702544
|
from pyof.v0x04.controller2switch.common import PortStats
from tests.test_struct import TestStruct
class TestPortStats(TestStruct):
@classmethod
def setUpClass(cls):
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats')
super().set_raw_dump_object(PortStats)
super().set_minimum_size(112)
| true
| true
|
790275099919bc1e4a9c1f07d185a1ed91e196d3
| 38,309
|
py
|
Python
|
src/m1_Line.py
|
jarskijr/10-MoreImplementingClasses
|
b21f0b5c6380c65b474eb6cba8bed55870d9809b
|
[
"MIT"
] | null | null | null |
src/m1_Line.py
|
jarskijr/10-MoreImplementingClasses
|
b21f0b5c6380c65b474eb6cba8bed55870d9809b
|
[
"MIT"
] | null | null | null |
src/m1_Line.py
|
jarskijr/10-MoreImplementingClasses
|
b21f0b5c6380c65b474eb6cba8bed55870d9809b
|
[
"MIT"
] | null | null | null |
"""
A simple Line class.
NOTE: This is NOT rosegraphics -- it is your OWN Line class.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Jacob Jarski.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
import m1t_test_Line as m1t
###############################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
###############################################################################
# -----------------------------------------------------------------------------
# DONE: 2. Right-click on the src folder and
# Mark Directory as ... Sources Root,
# if you have not already done so.
#
# Then, with your instructor, READ THE INSTRUCTIONS in file
# m0_INSTRUCTIONS.txt
# asking questions as needed. Once you understand the instructions,
# mark this _TODO_ as DONE.
# -----------------------------------------------------------------------------
###############################################################################
# NOTE: For ALL of the methods that you implement, the method is allowed
# to have additional side effects as needed by it and/or other methods.
###############################################################################
def main():
"""
Calls the TEST functions in this module, but ONLY if the method
to be tested has at least a partial implementation. That is,
a TEST function will not be called until you begin work
on the code that it is testing.
"""
if m1t.is_implemented('__init__'):
run_test_init()
if m1t.is_implemented('clone'):
run_test_clone()
if m1t.is_implemented('reverse'):
run_test_reverse()
if m1t.is_implemented('slope'):
run_test_slope()
if m1t.is_implemented('length'):
run_test_length()
if m1t.is_implemented('get_number_of_clones'):
run_test_get_number_of_clones()
if m1t.is_implemented('line_plus'):
run_test_line_plus()
if m1t.is_implemented('line_minus'):
run_test_line_minus()
if m1t.is_implemented('midpoint'):
run_test_midpoint()
if m1t.is_implemented('is_parallel'):
run_test_is_parallel()
if m1t.is_implemented('reset'):
run_test_reset()
###############################################################################
# Students:
# Do NOT touch the following Point class - it has no TO DO.
# Do NOT copy code from the methods in this Point class.
#
# DO ** READ ** this Point class,
# asking questions about any of it that you do not understand.
#
# DO ** CALL ** methods in this Point class as needed
# in implementing and testing the methods of the ** Line ** class.
#
# IMPORTANT, IMPORTANT, IMPORTANT:
# *** In your ** Line ** class methods, you should NEVER have code
# *** that a ** Point ** class method could do for you.
###############################################################################
# The Point class (and its methods) begins here.
###############################################################################
class Point(object):
""" Represents a point in 2-dimensional space. """
def __init__(self, x, y):
""" Sets instance variables x and y to the given coordinates. """
self.x = x
self.y = y
def __repr__(self):
"""
Returns a string representation of this Point.
For each coordinate (x and y), the representation:
- Uses no decimal points if the number is close to an integer,
- Else it uses 2 decimal places after the decimal point.
Examples:
Point(10, 3.14)
Point(3.01, 2.99)
"""
decimal_places = 2 # Use 2 places after the decimal point
formats = []
numbers = []
for coordinate in (self.x, self.y):
if abs(coordinate - round(coordinate)) < (10 ** -decimal_places):
# Treat it as an integer:
formats.append('{}')
numbers.append(round(coordinate))
else:
# Treat it as a float to decimal_places decimal places:
formats.append('{:.' + str(decimal_places) + 'f}')
numbers.append(round(coordinate, decimal_places))
format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')'
return format_string.format(numbers[0], numbers[1])
def __eq__(self, p2):
"""
Defines == for Points: a == b is equivalent to a.__eq__(b).
Treats two numbers as "equal" if they are within 6 decimal
places of each other for both x and y coordinates.
"""
return (round(self.x, 6) == round(p2.x, 6) and
round(self.y, 6) == round(p2.y, 6))
def clone(self):
""" Returns a new Point at the same (x, y) as this Point. """
return Point(self.x, self.y)
def distance_from(self, p2):
""" Returns the distance this Point is from the given Point. """
dx_squared = (self.x - p2.x) ** 2
dy_squared = (self.y - p2.y) ** 2
return math.sqrt(dx_squared + dy_squared)
def halfway_to(self, p2):
"""
Given another Point object p2, returns a new Point
that is half-way between this Point and the given Point (p2).
"""
return Point((self.x + p2.x) / 2,
(self.y + p2.y) / 2)
def plus(self, p2):
"""
Returns a Point whose coordinates are those of this Point
PLUS the given Point. For example:
p1 = Point(500, 20)
p2 = Point(100, 13)
p3 = p1.plus(p2)
print(p3)
would print: Point(600, 33)
"""
return Point(self.x + p2.x, self.y + p2.y)
def minus(self, p2):
"""
Returns a Point whose coordinates are those of this Point
MINUS the given Point. For example:
p1 = Point(500, 20)
p2 = Point(100, 13)
p3 = p1.minus(p2)
print(p3)
would print: Point(400, 7)
"""
return Point(self.x - p2.x, self.y - p2.y)
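# -----------------------------------------------------------------------------
# A hedged illustration (not required by the exercise): the Point methods
# above compose exactly as their doc-string examples show, e.g.:
#     Point(500, 20).plus(Point(100, 13))        # -> Point(600, 33)
#     Point(500, 20).minus(Point(100, 13))       # -> Point(400, 7)
#     Point(500, 20).halfway_to(Point(100, 13))  # -> Point(300, 16.5)
# -----------------------------------------------------------------------------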
###############################################################################
# The Line class (and its methods) begins here.
###############################################################################
class Line(object):
""" Represents a line segment in 2-dimensional space. """
    def __init__(self, start, end):
        """
        What comes in:
          -- self
          -- a Point object named start
          -- a Point object named end
        where the two Points are to be the initial start and end points,
        respectively, of this Line.
        What goes out: Nothing (i.e., None).
        Side effects: MUTATEs this Line by setting two instance
        variables named:
          -- start
          -- end
        to CLONES of the two Point arguments, respectively.
        Other methods must maintain those instance variables as needed
        so that they always indicate the CURRENT start and end points
        of this Line.
        Also, initializes other instance variables as needed
        by other Line methods.
        Example: This __init__ method runs when one constructs
        a Line. So the 3rd of the following statements
        invokes the __init__ method of this Line class:
             p1 = Point(30, 17)
             p2 = Point(50, 80)
             line = Line(p1, p2)  # Causes __init__ to run
             print(line.start)  # Should print Point(30, 17)
             print(line.end)  # Should print Point(50, 80)
             print(line.start == p1)  # Should print True
             print(line.start is p1)  # Should print False
        Type hints:
          :type start: Point
          :type end: Point
        """
        self.start = start.clone()
        self.end = end.clone()
        # Extra state used by other methods: reset() restores these
        # originals, and get_number_of_clones() reports timescloned.
        self.originalstart = start.clone()
        self.originalend = end.clone()
        self.timescloned = 0
# ---------------------------------------------------------------------
# DONE: 3.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def __repr__(self):
"""
What comes in:
-- self
What goes out: Returns a string representation of this Line,
in the form:
Line[(x1, y1), (x2, y2)]
Side effects: None.
Note: print(BLAH) causes BLAH's __repr__ to be called.
BLAH's __repr__ returns a string,
which the print function then prints.
Example: Since the print function calls __repr__ on the
object to be printed:
p1 = Point(30, 17)
p2 = Point(50, 80)
line = Line(p1, p2) # Causes __init__ to run
# The following statement causes __repr__ to run,
# hence should print: Line[(30, 17), (50, 80)]
print(line)
Type hints:
:rtype: str
"""
# ---------------------------------------------------------------------
# We have already implemented this __repr__ function for you.
# Do NOT modify it.
# ---------------------------------------------------------------------
start = repr(self.start).replace('Point', '')
end = repr(self.end).replace('Point', '')
return 'Line[{}, {}]'.format(start, end)
def __eq__(self, line2):
"""
What comes in:
-- self
-- a Line object
What goes out: Returns True if:
this Line's start point is equal to line2's start point AND
this Line's end point is equal to line2's end point.
Returns False otherwise.
Side effects: None.
Note: a == b is equivalent to a.__eq__(b).
Examples:
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = Line(p1, p2)
line3 = Line(p2, p1)
print(line1 == line1) # Should print: True
print(line1 == line2) # Should print: True
print(line1 == line3) # Should print: False
line1.start = Point(0, 0)
print(line1 == line2) # Should now print: False
Type hints:
:type line2: Line
:rtype: bool
"""
# ---------------------------------------------------------------------
# We have already implemented this __eq__ function for you.
# Do NOT modify it.
# ---------------------------------------------------------------------
return (self.start == line2.start) and (self.end == line2.end)
    def clone(self):
        """
        What comes in:
          -- self
        What goes out: Returns a new Line whose START is a clone of
        this Line's START and whose END is a clone of this Line's END.
        Side effects: None.
        Example:
            p1 = Point(30, 17)
            p2 = Point(50, 80)
            line1 = Line(p1, p2)
            line2 = line1.clone()
            print(line1)  # Should print: Line[(30, 17), (50, 80)]
            print(line2)  # Should print: Line[(30, 17), (50, 80)]
            print(line1 == line2)  # Should print: True
            print(line1 is line2)  # Should print: False
            print(line1.start is line2.start)  # Should print: False
            print(line1.end is line2.end)  # Should print: False
            line1.start = Point(11, 12)
            print(line1)  # Should print: Line[(11, 12), (50, 80)]
            print(line2)  # Should print: Line[(30, 17), (50, 80)]
            print(line1 == line2)  # Should now print: False
        Type hints:
          :rtype: Line
        """
        self.timescloned = self.timescloned + 1
        # Line's __init__ clones the two Points, as the spec requires.
        return Line(self.start, self.end)
# ---------------------------------------------------------------------
# DONE: 4.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
    def reverse(self):
        """
        What comes in:
          -- self
        What goes out: Nothing (i.e., None).
        Side effects: MUTATES this Line so that its direction is reversed
        (that is, its start and end points are swapped).
        ** Must NOT mutate its start and end points -- just SWAP them. **
        Examples:
            p1 = Point(30, 17)
            p2 = Point(50, 80)
            line1 = Line(p1, p2)
            line2 = line1.clone()
            print(line1)  # Should print: Line[(30, 17), (50, 80)]
            line1.reverse()
            print(line1)  # Should print: Line[(50, 80), (30, 17)]
            print(line1 == line2)  # Should print: False
            line1.reverse()
            print(line1 == line2)  # Should now print: True
        """
        # Swap the endpoint references; the Points themselves are not mutated.
        self.start, self.end = self.end, self.start
# ---------------------------------------------------------------------
# DONE: 5.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
    def slope(self):
        """
        What comes in:
          -- self
        What goes out: Returns the slope of this Line, or
           math.inf
        if the line is vertical (i.e., has "infinite" slope).
        Side effects: None.
        Examples:
            p1 = Point(30, 3)
            p2 = Point(50, 8)
            line1 = Line(p1, p2)
            # Since the slope is (8 - 3) / (50 - 30) , which is 0.25:
            print(line1.slope())  # Should print [approximately]: 0.25
            line2 = Line(Point(10, 10), Point(10, 5))
            print(line2.slope())  # Should print: inf
            # math.inf is NOT the STRING 'inf', so:
            print(line2.slope() == 'inf')  # Should print False
        Type hints:
          :rtype: float
        """
        rise = self.end.y - self.start.y
        run = self.end.x - self.start.x
        if run == 0:
            return math.inf
        return rise / run
# ---------------------------------------------------------------------
# DONE: 6.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
    def length(self):
        """
        What comes in:
          -- self
        What goes out: Returns the length of this Line.
        Side effects: None.
        Example:
            p1 = Point(166, 10)
            p2 = Point(100, 10)
            line1 = Line(p1, p2)
            # Since the distance from p1 to p2 is 66:
            print(line1.length())  # Should print: 66.0
            p3 = Point(0, 0)
            p4 = Point(3, 4)
            line2 = Line(p3, p4)
            print(line2.length())  # Should print about 5.0
        Type hints:
          :rtype: float
        """
        # Let the Point class do the work, per the class instructions above.
        return self.start.distance_from(self.end)
# ---------------------------------------------------------------------
# DONE: 7.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
    def get_number_of_clones(self):
        """
        What comes in:
          -- self
        What goes out:
          -- Returns the number of times that this Line has been cloned
             (via the clone method).
        Side effects: None.
        Example:
            line1 = Line(Point(500, 20), Point(100, 8))
            line2 = line1.clone()
            line3 = line1.clone()
            line4 = line3.clone()
            line5 = line1.clone()
            print(line1.get_number_of_clones())
            print(line2.get_number_of_clones())
            print(line3.get_number_of_clones())
            print(line4.get_number_of_clones())
            print(line5.get_number_of_clones())
        would print:
           3  [since there are three line1.clone() statements]
           0  [since there are no line2.clone() statements]
           1  [since there is one line3.clone() statement]
           0  [since there are no line4.clone() statements]
           0  [since there are no line5.clone() statements]
        Type hints:
          :rtype: int:
        """
        return self.timescloned
# ---------------------------------------------------------------------
# DONE: 8.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def line_plus(self, other_line):
"""
What comes in:
-- self
-- another Line object
What goes out:
-- Returns a Line whose:
-- start is the sum of this Line's start (a Point)
and the other_line's start (another Point).
-- end is the sum of this Line's end (a Point)
and the other_line's end (another Point).
Side effects: None.
Example:
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_plus(line2)
print(line3)
would print: Line[(600, 33), (500, 16)]
Type hints:
:type other_line: Line
:rtype: Line:
"""
# ---------------------------------------------------------------------
# DONE: 9.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
        # Let Point.plus do the work, per the class instructions above.
        return Line(self.start.plus(other_line.start),
                    self.end.plus(other_line.end))
def line_minus(self, other_line):
"""
What comes in:
-- self
-- another Line object
What goes out:
-- Returns a Line whose:
-- start is this Line's start (a Point)
minus the other_line's start (another Point).
-- end is this Line's end (a Point)
minus the other_line's end (another Point).
Side effects: None.
Example:
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_minus(line2)
print(line3)
would print: Line[(400, 7), (-300, 0)]
Type hints:
:type other_line: Line
:rtype: Line:
"""
# ---------------------------------------------------------------------
# DONE: 10.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
        # Let Point.minus do the work, per the class instructions above.
        return Line(self.start.minus(other_line.start),
                    self.end.minus(other_line.end))
def midpoint(self):
"""
What comes in:
-- self
What goes out: returns a Point at the midpoint of this Line.
Side effects: None.
Example:
p1 = Point(3, 10)
p2 = Point(9, 20)
line1 = Line(p1, p2)
print(line1.midpoint()) # Should print: Point(6, 15)
Type hints:
:rtype: Point
"""
# ---------------------------------------------------------------------
# DONE: 11.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
        # Let Point.halfway_to do the work, per the class instructions above.
        return self.start.halfway_to(self.end)
def is_parallel(self, line2):
"""
What comes in:
-- self
-- another Line object (line2)
What goes out: Returns True if this Line is parallel to the
given Line (line2). Returns False otherwise.
*** SEE THE IMPORTANT NOTE BELOW, re ROUNDING numbers.
Side effects: None.
Examples:
line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0
line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0
            line3 = Line(Point(10, 10), Point(80, 80))  # slope is 1.0
line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf
print(line1.is_parallel(line2)) # Should print: True
print(line2.is_parallel(line1)) # Should print: True
print(line1.is_parallel(line3)) # Should print: False
print(line1.is_parallel(line4)) # Should print: False
print(line1.is_parallel(line1)) # Should print: True
print(line4.is_parallel(line4)) # Should print: True
Type hints:
:type line2: Line
:rtype: bool
"""
        # Parallel lines have equal slopes. slope() returns math.inf for a
        # vertical line, so two vertical lines also compare as parallel.
        # Round to 12 decimal places, per the floating-point note below.
        return round(self.slope(), 12) == round(line2.slope(), 12)
# ---------------------------------------------------------------------
# DONE: 12.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
#######################################################################
#
# IMPORTANT: When you test whether two FLOATING POINT numbers
# are "equal", you must deal with the imprecision
# of floating-point arithmetic. For example, in REAL arithmetic,
# 1 / (24 * math.pi - 20 * math.pi)
# and
# 3 / (72 * math.pi - 60 * math.pi)
# are equal. But in FLOATING point arithmetic, they are:
# 0.07957747154594767
# and
# 0.07957747154594765
# respectively (hence NOT equal).
# Try it out if you don't believe me!
#
#######################################################################
# IMPORTANT BOTTOM-LINE: When you want to test whether two
# FLOATING POINT numbers a and b are the same, as in this method,
# DON'T use: a == b
# INSTEAD use: round(a, 12) == round(b, 12)
########################################################################
#
# The latter compares the numbers rounded to 12 decimal places.
# In the context of this exercise, doing so is adequate to ignore
# floating-point errors while distinguishing numbers that really
# are different from each other.
#######################################################################
    def reset(self):
        """
        What comes in:
          -- self
        What goes out: Nothing (i.e., None).
        Side effects: MUTATES this Line so that its start and end points
        revert to what they were when this Line was constructed.
        Examples:
            p1 = Point(-3, -4)
            p2 = Point(3, 4)
            line1 = Line(p1, p2)
            line2 = Line(Point(0, 1), Point(10, 20))
            ... [various actions, including some like these:]
            line1.start = Point(100, 300)
            line2.end = Point(99, 4)
            line1.reverse()
            # Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and
            # (x2, y2) are the CURRENT coordinates of line1's endpoints.
            print(line1)
            print(line2)  # Similarly for line2
            line1.reset()
            line2.reset()
            print(line1)  # Should print: Line[(-3, -4), (3, 4)]
            print(line2)  # Should print: Line[(0, 1), (10, 20)]
        """
        # Restore CLONES of the originals, so that later mutations of
        # start/end cannot corrupt the saved construction-time Points.
        self.start = self.originalstart.clone()
        self.end = self.originalend.clone()
# ---------------------------------------------------------------------
# DONE: 13.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
###############################################################################
# The TEST functions for the Line class begin here.
#
# We have already written the TEST functions. They all take the form:
# -- m1t.run_test_BLAH() # This runs OUR tests.
# -- One more test (or set of tests) that came directly from the Example
# in the specification.
###############################################################################
def run_test_init():
""" Tests the __init__ method of the Line class. """
m1t.run_test_init() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 17)
p2 = Point(50, 80)
line = Line(p1, p2) # Causes __init__ to run
print(line.start) # Should print Point(30, 17)
print(line.end) # Should print Point(50, 80)
print(line.start == p1) # Should print True
print(line.start is p1) # Should print False
print('The above should print:')
print(' Point(30, 17)')
print(' Point(50, 80)')
print(' True')
print(' False')
def run_test_clone():
""" Tests the clone method of the Line class. """
m1t.run_test_clone() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = line1.clone()
print(line1) # Should print: Line[(30, 17), (50, 80)]
print(line2) # Should print: Line[(30, 17), (50, 80)]
print(line1 == line2) # Should print: True
print(line1 is line2) # Should print: False
print(line1.start is line2.start) # Should print: False
print(line1.end is line2.end) # Should print: False
line1.start = Point(11, 12)
print(line1) # Should print: Line[(11, 12), (50, 80)]
print(line2) # Should print: Line[(30, 17), (50, 80)]
print(line1 == line2) # Should now print: False
print('The above should print:')
print(' Line[(30, 17), (50, 80)]')
print(' Line[(30, 17), (50, 80)]')
print(' True')
print(' False')
print(' False')
print(' False')
print(' Line[(11, 12), (50, 80)]')
    print('  Line[(30, 17), (50, 80)]')
print(' False')
def run_test_reverse():
""" Tests the reverse method of the Line class. """
m1t.run_test_reverse() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = line1.clone()
print(line1) # Should print: Line[(30, 17), (50, 80)]
line1.reverse()
print(line1) # Should print: Line[(50, 80), (30, 17)]
print(line1 == line2) # Should print: False
line1.reverse()
print(line1 == line2) # Should now print: True
print('The above should print:')
print(' Line[(30, 17), (50, 80)]')
    print('  Line[(50, 80), (30, 17)]')
print(' False')
print(' True')
def run_test_slope():
""" Tests the slope method of the Line class. """
m1t.run_test_slope() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 3)
p2 = Point(50, 8)
line1 = Line(p1, p2)
# Since the slope is (8 - 3) / (50 - 30) , which is 0.25:
print(line1.slope()) # Should print [approximately]: 0.25
line2 = Line(Point(10, 10), Point(10, 5))
print(line2.slope()) # Should print: inf
# math.inf is NOT the STRING 'inf', so:
print(line2.slope() == 'inf') # Should print False
print('The above should print:')
print(' 0.25 (approximately)')
print(' inf')
print(' False')
def run_test_length():
""" Tests the length method of the Line class. """
m1t.run_test_length() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(166, 10)
p2 = Point(100, 10)
line1 = Line(p1, p2)
# Since the distance from p1 to p2 is 66:
print(line1.length()) # Should print: 66.0
p3 = Point(0, 0)
p4 = Point(3, 4)
line2 = Line(p3, p4)
print(line2.length()) # Should print about 5.0
print('The above should print:')
print(' 66.0')
print(' 5.0 (approximately)')
def run_test_get_number_of_clones():
""" Tests the get_number_of_clones method of the Line class. """
m1t.run_test_get_number_of_clones() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(500, 20), Point(100, 8))
line2 = line1.clone()
line3 = line1.clone()
line4 = line3.clone()
line5 = line1.clone()
print(line1.get_number_of_clones())
print(line2.get_number_of_clones())
print(line3.get_number_of_clones())
print(line4.get_number_of_clones())
print(line5.get_number_of_clones())
print('The above should print 3, then 0, then 1, then 0, then 0.')
def run_test_line_plus():
""" Tests the line_plus method of the Line class. """
m1t.run_test_line_plus() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_plus(line2)
print(line3)
print('The above should print: Line[(600, 33), (500, 16)]')
def run_test_line_minus():
""" Tests the line_minus method of the Line class. """
m1t.run_test_line_minus() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_minus(line2)
print(line3)
print('The above should print: Line[(400, 7), (-300, 0)]')
def run_test_midpoint():
""" Tests the midpoint method of the Line class. """
m1t.run_test_midpoint() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(3, 10)
p2 = Point(9, 20)
line1 = Line(p1, p2)
print(line1.midpoint()) # Should print: Point(6, 15)
print('The above should print: Point(6, 15)')
def run_test_is_parallel():
""" Tests the is_parallel method of the Line class. """
m1t.run_test_is_parallel() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0
line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0
    line3 = Line(Point(10, 10), Point(80, 80))  # slope is 1.0
line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf
print(line1.is_parallel(line2)) # Should print: True
print(line2.is_parallel(line1)) # Should print: True
print(line1.is_parallel(line3)) # Should print: False
print(line1.is_parallel(line4)) # Should print: False
print(line1.is_parallel(line1)) # Should print: True
print(line4.is_parallel(line4)) # Should print: True
print('The above should print:')
print(' True, True, False, False, True, True')
def run_test_reset():
""" Tests the reset method of the Line class. """
m1t.run_test_reset() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(-3, -4)
p2 = Point(3, 4)
line1 = Line(p1, p2)
line2 = Line(Point(0, 1), Point(10, 20))
line1.start = Point(100, 300)
line2.end = Point(99, 4)
line1.reverse()
# Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and
# (x2, y2) are the CURRENT coordinates of line1's endpoints.
print(line1)
print(line2) # Similarly for line2
line1.reset()
line2.reset()
print(line1) # Should print: Line[(-3, -4), (3, 4)]
print(line2) # Should print: Line[(0, 1), (10, 20)]
print('The above should print:')
print(' Line[(3, 4), (100, 300)]')
print(' Line[(0, 1), (99, 4)]')
print(' Line[(-3, -4), (3, 4)]')
print(' Line[(0, 1), (10, 20)]')
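###############################################################################
# Illustrative sketch (NOT part of the graded exercise): why is_parallel
# compares slopes with round(a, 12) == round(b, 12). The two expressions
# below are equal in REAL arithmetic but typically differ in FLOATING POINT
# arithmetic, exactly as the note above is_parallel explains.
###############################################################################
def demo_floating_point_rounding():
    a = 1 / (24 * math.pi - 20 * math.pi)
    b = 3 / (72 * math.pi - 60 * math.pi)
    print(a == b)                        # Typically prints: False
    print(round(a, 12) == round(b, 12))  # Should print: True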
# -----------------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# It is necessary here to enable the automatic testing in m1t_test_Line.py.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
main()
| 38.270729
| 98
| 0.479757
|
import math
import m1t_test_Line as m1t
| true
| true
|
7902764de6bc314f57e26cb5be2bf447033bf77b
| 1,601
|
py
|
Python
|
test/rules/outputs/test_name.py
|
j0lly/cfn-python-lint
|
3032bab8fe190763bd0df1c34905c3528ceb411f
|
[
"MIT-0"
] | 1
|
2019-03-19T22:49:38.000Z
|
2019-03-19T22:49:38.000Z
|
test/rules/outputs/test_name.py
|
j0lly/cfn-python-lint
|
3032bab8fe190763bd0df1c34905c3528ceb411f
|
[
"MIT-0"
] | null | null | null |
test/rules/outputs/test_name.py
|
j0lly/cfn-python-lint
|
3032bab8fe190763bd0df1c34905c3528ceb411f
|
[
"MIT-0"
] | 1
|
2020-05-04T16:32:19.000Z
|
2020-05-04T16:32:19.000Z
|
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.outputs.Name import Name # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestName(BaseRuleTestCase):
"""Test template outputs Names"""
def setUp(self):
"""Setup"""
super(TestName, self).setUp()
self.collection.register(Name())
self.success_templates = [
'fixtures/templates/good/outputs/name.yaml'
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('fixtures/templates/bad/outputs/name.yaml', 1)
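# Sketch: a single-case invocation (the dotted module path below is an
# assumption about how the test package is laid out on disk):
#   python -m unittest test.rules.outputs.test_name -v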
| 42.131579
| 87
| 0.723923
|
from cfnlint.rules.outputs.Name import Name
from .. import BaseRuleTestCase
class TestName(BaseRuleTestCase):
def setUp(self):
super(TestName, self).setUp()
self.collection.register(Name())
self.success_templates = [
'fixtures/templates/good/outputs/name.yaml'
]
def test_file_positive(self):
self.helper_file_positive()
def test_file_negative(self):
self.helper_file_negative('fixtures/templates/bad/outputs/name.yaml', 1)
| true
| true
|
79027676c5ca108e115bc437519299eb7bbd02f4
| 1,713
|
py
|
Python
|
coffee-maturation/src/models/non_maximum.py
|
dahem/coffe-images
|
2af526c57c08317829e0b99af83b11c9fb9182da
|
[
"MIT"
] | null | null | null |
coffee-maturation/src/models/non_maximum.py
|
dahem/coffe-images
|
2af526c57c08317829e0b99af83b11c9fb9182da
|
[
"MIT"
] | null | null | null |
coffee-maturation/src/models/non_maximum.py
|
dahem/coffe-images
|
2af526c57c08317829e0b99af83b11c9fb9182da
|
[
"MIT"
] | null | null | null |
import numpy as np
def non_max_suppression_fast(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the boxes are integers, convert them to floats (due to divisions)
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
scores = boxes[:,4]
    # compute the area of the boxes and sort the indexes by score, ascending,
    # so that the LAST index always refers to the highest-scoring box
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(scores)
# keep looking while some indexes still remain in the indexes list
while len(idxs) > 0:
        # grab the last index (the highest-scoring remaining box) and add its value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest coordinates for the start of the overlap area and the smallest coordinates for the end
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the overlap
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the list that have an overlap over the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the boxes that were picked
return boxes[pick].astype("float")
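if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): three heavily
    # overlapping detections plus one distinct box; column 4 is the score.
    candidates = np.array([
        [10, 10, 60, 60, 0.9],
        [12, 12, 62, 62, 0.8],      # near-duplicate of the first box
        [11, 9, 61, 59, 0.7],       # near-duplicate of the first box
        [200, 200, 260, 260, 0.6],
    ])
    kept = non_max_suppression_fast(candidates, overlapThresh=0.5)
    print(kept)  # keeps the 0.9 box and the non-overlapping 0.6 box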
| 24.126761
| 108
| 0.615879
|
import numpy as np
def non_max_suppression_fast(boxes, overlapThresh):
if len(boxes) == 0:
return []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
pick = []
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
scores = boxes[:,4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(scores)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
return boxes[pick].astype("float")
| true
| true
|
79027720c502b22e5dd0c8ed61a61b7ef0298deb
| 3,095
|
py
|
Python
|
pypureclient/flasharray/FA_2_2/models/username.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_2/models/username.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_2/models/username.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class Username(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'username': 'str'
}
attribute_map = {
'username': 'username'
}
required_args = {
}
def __init__(
self,
username=None, # type: str
):
"""
Keyword args:
username (str): The username of the user.
"""
if username is not None:
self.username = username
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Username, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Username):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
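if __name__ == "__main__":
    # Illustrative usage (a sketch; not part of the generated client code).
    user = Username(username="pureuser")
    print(user.to_dict())  # {'username': 'pureuser'}
    print(user)            # pretty-printed dict via __repr__ -> to_str()
    try:
        user.nickname = "p"  # not in attribute_map, so __setattr__ rejects it
    except KeyError as error:
        print(error)  # 'Invalid key `nickname` for `Username`'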
| 27.633929
| 105
| 0.53958
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_2 import models
class Username(object):
swagger_types = {
'username': 'str'
}
attribute_map = {
'username': 'username'
}
required_args = {
}
def __init__(
self,
username=None,
):
if username is not None:
self.username = username
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Username`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Username, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Username):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790277628e358d097d7c6420008ccc2991c4275a
| 4,646
|
py
|
Python
|
autolens/aggregator/fit_imaging.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | null | null | null |
autolens/aggregator/fit_imaging.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | 10
|
2017-12-22T11:39:33.000Z
|
2018-01-30T09:13:16.000Z
|
autolens/aggregator/fit_imaging.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | null | null | null |
from typing import Optional, List
import autofit as af
import autoarray as aa
import autogalaxy as ag
from autogalaxy.aggregator.imaging import _imaging_from
from autogalaxy.aggregator.abstract import AbstractAgg
from autolens.imaging.fit_imaging import FitImaging
from autolens.analysis.preloads import Preloads
from autolens.aggregator.tracer import _tracer_from
def _fit_imaging_from(
fit: af.Fit,
galaxies: List[ag.Galaxy],
settings_imaging: aa.SettingsImaging = None,
settings_pixelization: aa.SettingsPixelization = None,
settings_inversion: aa.SettingsInversion = None,
use_preloaded_grid: bool = True,
use_hyper_scaling: bool = True,
) -> FitImaging:
"""
Returns a `FitImaging` object from a PyAutoFit database `Fit` object and an instance of galaxies from a non-linear
search model-fit.
This function adds the `hyper_model_image` and `hyper_galaxy_image_path_dict` to the galaxies before performing the
fit, if they were used.
Parameters
----------
fit
A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.
galaxies
A list of galaxies corresponding to a sample of a non-linear search and model-fit.
Returns
-------
FitImaging
The fit to the imaging dataset computed via an instance of galaxies.
"""
imaging = _imaging_from(fit=fit, settings_imaging=settings_imaging)
tracer = _tracer_from(fit=fit, galaxies=galaxies)
settings_pixelization = settings_pixelization or fit.value(
name="settings_pixelization"
)
settings_inversion = settings_inversion or fit.value(name="settings_inversion")
preloads = Preloads(use_w_tilde=False)
if use_preloaded_grid:
sparse_grids_of_planes = fit.value(name="preload_sparse_grids_of_planes")
if sparse_grids_of_planes is not None:
preloads = Preloads(
sparse_image_plane_grid_pg_list=sparse_grids_of_planes,
use_w_tilde=False,
)
if len(preloads.sparse_image_plane_grid_pg_list) == 2:
if type(preloads.sparse_image_plane_grid_pg_list[1]) != list:
preloads.sparse_image_plane_grid_pg_list[1] = [
preloads.sparse_image_plane_grid_pg_list[1]
]
return FitImaging(
dataset=imaging,
tracer=tracer,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
preloads=preloads,
use_hyper_scaling=use_hyper_scaling,
)
class FitImagingAgg(AbstractAgg):
def __init__(
self,
aggregator: af.Aggregator,
settings_imaging: Optional[aa.SettingsImaging] = None,
settings_pixelization: Optional[aa.SettingsPixelization] = None,
settings_inversion: Optional[aa.SettingsInversion] = None,
use_preloaded_grid: bool = True,
use_hyper_scaling: bool = True,
):
"""
Wraps a PyAutoFit aggregator in order to create generators of fits to imaging data, corresponding to the
results of a non-linear search model-fit.
"""
super().__init__(aggregator=aggregator)
self.settings_imaging = settings_imaging
self.settings_pixelization = settings_pixelization
self.settings_inversion = settings_inversion
self.use_preloaded_grid = use_preloaded_grid
self.use_hyper_scaling = use_hyper_scaling
def make_object_for_gen(self, fit, galaxies) -> FitImaging:
"""
Creates a `FitImaging` object from a `ModelInstance` that contains the galaxies of a sample from a non-linear
search.
Parameters
----------
fit
A PyAutoFit database Fit object containing the generators of the results of PyAutoGalaxy model-fits.
galaxies
A list of galaxies corresponding to a sample of a non-linear search and model-fit.
Returns
-------
FitImaging
A fit to imaging data whose galaxies are a sample of a PyAutoFit non-linear search.
"""
return _fit_imaging_from(
fit=fit,
galaxies=galaxies,
settings_imaging=self.settings_imaging,
settings_pixelization=self.settings_pixelization,
settings_inversion=self.settings_inversion,
use_preloaded_grid=self.use_preloaded_grid,
use_hyper_scaling=self.use_hyper_scaling,
)
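# Illustrative wiring (a sketch; `from_database` and `max_log_likelihood_gen`
# are assumptions about the PyAutoFit / AbstractAgg interfaces, and the
# database path is hypothetical):
#
#   agg = af.Aggregator.from_database("result.sqlite")
#   fit_agg = FitImagingAgg(aggregator=agg)
#   for fit in fit_agg.max_log_likelihood_gen():
#       print(fit.log_likelihood)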
| 35.738462
| 120
| 0.667456
|
from typing import Optional, List
import autofit as af
import autoarray as aa
import autogalaxy as ag
from autogalaxy.aggregator.imaging import _imaging_from
from autogalaxy.aggregator.abstract import AbstractAgg
from autolens.imaging.fit_imaging import FitImaging
from autolens.analysis.preloads import Preloads
from autolens.aggregator.tracer import _tracer_from
def _fit_imaging_from(
fit: af.Fit,
galaxies: List[ag.Galaxy],
settings_imaging: aa.SettingsImaging = None,
settings_pixelization: aa.SettingsPixelization = None,
settings_inversion: aa.SettingsInversion = None,
use_preloaded_grid: bool = True,
use_hyper_scaling: bool = True,
) -> FitImaging:
imaging = _imaging_from(fit=fit, settings_imaging=settings_imaging)
tracer = _tracer_from(fit=fit, galaxies=galaxies)
settings_pixelization = settings_pixelization or fit.value(
name="settings_pixelization"
)
settings_inversion = settings_inversion or fit.value(name="settings_inversion")
preloads = Preloads(use_w_tilde=False)
if use_preloaded_grid:
sparse_grids_of_planes = fit.value(name="preload_sparse_grids_of_planes")
if sparse_grids_of_planes is not None:
preloads = Preloads(
sparse_image_plane_grid_pg_list=sparse_grids_of_planes,
use_w_tilde=False,
)
if len(preloads.sparse_image_plane_grid_pg_list) == 2:
if type(preloads.sparse_image_plane_grid_pg_list[1]) != list:
preloads.sparse_image_plane_grid_pg_list[1] = [
preloads.sparse_image_plane_grid_pg_list[1]
]
return FitImaging(
dataset=imaging,
tracer=tracer,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
preloads=preloads,
use_hyper_scaling=use_hyper_scaling,
)
class FitImagingAgg(AbstractAgg):
def __init__(
self,
aggregator: af.Aggregator,
settings_imaging: Optional[aa.SettingsImaging] = None,
settings_pixelization: Optional[aa.SettingsPixelization] = None,
settings_inversion: Optional[aa.SettingsInversion] = None,
use_preloaded_grid: bool = True,
use_hyper_scaling: bool = True,
):
super().__init__(aggregator=aggregator)
self.settings_imaging = settings_imaging
self.settings_pixelization = settings_pixelization
self.settings_inversion = settings_inversion
self.use_preloaded_grid = use_preloaded_grid
self.use_hyper_scaling = use_hyper_scaling
def make_object_for_gen(self, fit, galaxies) -> FitImaging:
return _fit_imaging_from(
fit=fit,
galaxies=galaxies,
settings_imaging=self.settings_imaging,
settings_pixelization=self.settings_pixelization,
settings_inversion=self.settings_inversion,
use_preloaded_grid=self.use_preloaded_grid,
use_hyper_scaling=self.use_hyper_scaling,
)
| true
| true
|
79027783b2b2077d0cd6769e7b381925dd69689c
| 8,138
|
py
|
Python
|
nova/conductor/tasks/live_migrate.py
|
alvarolopez/nova
|
97a97205a980459bae1f61aec3d4c7e0bec1e9c2
|
[
"Apache-2.0"
] | null | null | null |
nova/conductor/tasks/live_migrate.py
|
alvarolopez/nova
|
97a97205a980459bae1f61aec3d4c7e0bec1e9c2
|
[
"Apache-2.0"
] | null | null | null |
nova/conductor/tasks/live_migrate.py
|
alvarolopez/nova
|
97a97205a980459bae1f61aec3d4c7e0bec1e9c2
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.i18n import _
from nova import image
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
migrate_opt = cfg.IntOpt('migrate_max_retries',
default=-1,
help='Number of times to retry live-migration before failing. '
'If == -1, try until out of hosts. '
'If == 0, only try once, no retries.')
CONF = cfg.CONF
CONF.register_opt(migrate_opt)
class LiveMigrationTask(object):
def __init__(self, context, instance, destination,
block_migration, disk_over_commit):
self.context = context
self.instance = instance
self.destination = destination
self.block_migration = block_migration
self.disk_over_commit = disk_over_commit
self.source = instance.host
self.migrate_data = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.image_api = image.API()
def execute(self):
self._check_instance_is_active()
self._check_host_is_up(self.source)
if not self.destination:
self.destination = self._find_destination()
else:
self._check_requested_destination()
# TODO(johngarbutt) need to move complexity out of compute manager
# TODO(johngarbutt) disk_over_commit?
return self.compute_rpcapi.live_migration(self.context,
host=self.source,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=self.migrate_data)
def rollback(self):
# TODO(johngarbutt) need to implement the clean up operation
# but this will make sense only once we pull in the compute
# calls, since this class currently makes no state changes,
# except to call the compute method, that has no matching
# rollback call right now.
raise NotImplementedError()
def _check_instance_is_active(self):
if self.instance.power_state not in (power_state.RUNNING,
power_state.PAUSED):
raise exception.InstanceInvalidState(
                instance_uuid=self.instance.uuid,
                attr='power_state',
                state=self.instance.power_state,
                method='live migrate')
def _check_host_is_up(self, host):
try:
service = objects.Service.get_by_compute_host(self.context, host)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=host)
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
self._check_compatible_with_source_hypervisor(self.destination)
self._call_livem_checks_on_host(self.destination)
def _check_destination_is_not_source(self):
if self.destination == self.source:
raise exception.UnableToMigrateToSelf(
instance_id=self.instance.uuid, host=self.destination)
def _check_destination_has_enough_memory(self):
avail = self._get_compute_info(self.destination)['free_ram_mb']
mem_inst = self.instance.memory_mb
if not mem_inst or avail <= mem_inst:
instance_uuid = self.instance.uuid
dest = self.destination
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationPreCheckError(reason=reason % dict(
instance_uuid=instance_uuid, dest=dest, avail=avail,
mem_inst=mem_inst))
def _get_compute_info(self, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, host)
def _check_compatible_with_source_hypervisor(self, destination):
source_info = self._get_compute_info(self.source)
destination_info = self._get_compute_info(destination)
source_type = source_info['hypervisor_type']
destination_type = destination_info['hypervisor_type']
if source_type != destination_type:
raise exception.InvalidHypervisorType()
source_version = source_info['hypervisor_version']
destination_version = destination_info['hypervisor_version']
if source_version > destination_version:
raise exception.DestinationHypervisorTooOld()
def _call_livem_checks_on_host(self, destination):
self.migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(self.context, self.instance,
destination, self.block_migration, self.disk_over_commit)
def _find_destination(self):
# TODO(johngarbutt) this retry loop should be shared
attempted_hosts = [self.source]
image = utils.get_image_from_system_metadata(
self.instance.system_metadata)
request_spec = scheduler_utils.build_request_spec(self.context, image,
[self.instance])
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
filter_properties = {'ignore_hosts': attempted_hosts}
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_properties)
host = self.scheduler_client.select_destinations(self.context,
request_spec, filter_properties)[0]['host']
try:
self._check_compatible_with_source_hypervisor(host)
self._call_livem_checks_on_host(host)
except exception.Invalid as e:
LOG.debug("Skipping host: %(host)s because: %(e)s",
{"host": host, "e": e})
attempted_hosts.append(host)
host = None
return host
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
return
retries = len(attempted_hosts) - 1
if retries > CONF.migrate_max_retries:
msg = (_('Exceeded max scheduling retries %(max_retries)d for '
'instance %(instance_uuid)s during live migration')
% {'max_retries': retries,
'instance_uuid': self.instance.uuid})
raise exception.NoValidHost(reason=msg)
def execute(context, instance, destination,
block_migration, disk_over_commit):
task = LiveMigrationTask(context, instance,
destination,
block_migration,
disk_over_commit)
# TODO(johngarbutt) create a superclass that contains a safe_execute call
return task.execute()
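# Illustrative call path (a sketch): nova-conductor invokes the module-level
# `execute` above with a request context and an Instance object, e.g.
#
#   live_migrate.execute(context, instance, destination='compute-2',
#                        block_migration=False, disk_over_commit=False)
#
# The task then schedules a destination (if none was given), runs the
# pre-checks, and hands off to the compute RPC API.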
| 41.948454
| 78
| 0.652249
|
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.i18n import _
from nova import image
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
migrate_opt = cfg.IntOpt('migrate_max_retries',
default=-1,
help='Number of times to retry live-migration before failing. '
'If == -1, try until out of hosts. '
'If == 0, only try once, no retries.')
CONF = cfg.CONF
CONF.register_opt(migrate_opt)
class LiveMigrationTask(object):
def __init__(self, context, instance, destination,
block_migration, disk_over_commit):
self.context = context
self.instance = instance
self.destination = destination
self.block_migration = block_migration
self.disk_over_commit = disk_over_commit
self.source = instance.host
self.migrate_data = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.image_api = image.API()
def execute(self):
self._check_instance_is_active()
self._check_host_is_up(self.source)
if not self.destination:
self.destination = self._find_destination()
else:
self._check_requested_destination()
return self.compute_rpcapi.live_migration(self.context,
host=self.source,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migrate_data=self.migrate_data)
def rollback(self):
raise NotImplementedError()
def _check_instance_is_active(self):
if self.instance.power_state not in (power_state.RUNNING,
power_state.PAUSED):
raise exception.InstanceInvalidState(
instance_uuid = self.instance.uuid,
attr = 'power_state',
state = self.instance.power_state,
method = 'live migrate')
def _check_host_is_up(self, host):
try:
service = objects.Service.get_by_compute_host(self.context, host)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=host)
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
self._check_destination_is_not_source()
self._check_host_is_up(self.destination)
self._check_destination_has_enough_memory()
self._check_compatible_with_source_hypervisor(self.destination)
self._call_livem_checks_on_host(self.destination)
def _check_destination_is_not_source(self):
if self.destination == self.source:
raise exception.UnableToMigrateToSelf(
instance_id=self.instance.uuid, host=self.destination)
def _check_destination_has_enough_memory(self):
avail = self._get_compute_info(self.destination)['free_ram_mb']
mem_inst = self.instance.memory_mb
if not mem_inst or avail <= mem_inst:
instance_uuid = self.instance.uuid
dest = self.destination
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationPreCheckError(reason=reason % dict(
instance_uuid=instance_uuid, dest=dest, avail=avail,
mem_inst=mem_inst))
def _get_compute_info(self, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, host)
def _check_compatible_with_source_hypervisor(self, destination):
source_info = self._get_compute_info(self.source)
destination_info = self._get_compute_info(destination)
source_type = source_info['hypervisor_type']
destination_type = destination_info['hypervisor_type']
if source_type != destination_type:
raise exception.InvalidHypervisorType()
source_version = source_info['hypervisor_version']
destination_version = destination_info['hypervisor_version']
if source_version > destination_version:
raise exception.DestinationHypervisorTooOld()
def _call_livem_checks_on_host(self, destination):
self.migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(self.context, self.instance,
destination, self.block_migration, self.disk_over_commit)
def _find_destination(self):
attempted_hosts = [self.source]
image = utils.get_image_from_system_metadata(
self.instance.system_metadata)
request_spec = scheduler_utils.build_request_spec(self.context, image,
[self.instance])
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
filter_properties = {'ignore_hosts': attempted_hosts}
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_properties)
host = self.scheduler_client.select_destinations(self.context,
request_spec, filter_properties)[0]['host']
try:
self._check_compatible_with_source_hypervisor(host)
self._call_livem_checks_on_host(host)
except exception.Invalid as e:
LOG.debug("Skipping host: %(host)s because: %(e)s",
{"host": host, "e": e})
attempted_hosts.append(host)
host = None
return host
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
return
retries = len(attempted_hosts) - 1
if retries > CONF.migrate_max_retries:
msg = (_('Exceeded max scheduling retries %(max_retries)d for '
'instance %(instance_uuid)s during live migration')
% {'max_retries': retries,
'instance_uuid': self.instance.uuid})
raise exception.NoValidHost(reason=msg)
def execute(context, instance, destination,
block_migration, disk_over_commit):
task = LiveMigrationTask(context, instance,
destination,
block_migration,
disk_over_commit)
return task.execute()
| true
| true
|
790277bbc3a59e973b680871d6476a0e4df1eb85
| 7,731
|
py
|
Python
|
mangum/protocols/lifespan.py
|
tasn/mangum
|
6da7e51ca8e7979f41291ab3f0e698882f219814
|
[
"MIT"
] | 661
|
2020-06-02T01:06:35.000Z
|
2022-03-30T22:40:47.000Z
|
mangum/protocols/lifespan.py
|
tasn/mangum
|
6da7e51ca8e7979f41291ab3f0e698882f219814
|
[
"MIT"
] | 116
|
2020-06-02T02:14:14.000Z
|
2022-03-25T11:54:38.000Z
|
mangum/protocols/lifespan.py
|
tasn/mangum
|
6da7e51ca8e7979f41291ab3f0e698882f219814
|
[
"MIT"
] | 55
|
2020-06-02T02:01:26.000Z
|
2022-03-16T16:13:09.000Z
|
import asyncio
import logging
import types
import typing
import enum
from dataclasses import dataclass
from ..types import ASGIApp, Message
from ..exceptions import LifespanUnsupported, LifespanFailure, UnexpectedMessage
class LifespanCycleState(enum.Enum):
"""
The state of the ASGI `lifespan` connection.
* **CONNECTING** - Initial state. The ASGI application instance will be run with
the connection scope containing the `lifespan` type.
* **STARTUP** - The lifespan startup event has been pushed to the queue to be
received by the application.
* **SHUTDOWN** - The lifespan shutdown event has been pushed to the queue to be
received by the application.
* **FAILED** - A lifespan failure has been detected, and the connection will be
closed with an error.
* **UNSUPPORTED** - An application attempted to send a message before receiving
    the lifespan startup event. If the lifespan argument is "on", then the connection
will be closed with an error.
"""
CONNECTING = enum.auto()
STARTUP = enum.auto()
SHUTDOWN = enum.auto()
FAILED = enum.auto()
UNSUPPORTED = enum.auto()
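# Illustrative flow (a sketch): the handler drives this cycle as a context
# manager, so startup is awaited on entry and shutdown runs on exit:
#
#   with LifespanCycle(app, lifespan="auto"):
#       ...  # handle the event while the application is "started up"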
@dataclass
class LifespanCycle:
"""
Manages the application cycle for an ASGI `lifespan` connection.
* **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
specification. This will usually be an ASGI framework application instance.
* **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
and `off`. Default is `auto`.
* **state** - An enumerated `LifespanCycleState` type that indicates the state of
the ASGI connection.
* **app_queue** - An asyncio queue (FIFO) containing messages to be received by the
application.
* **startup_event** - An asyncio event object used to control the application
startup flow.
* **shutdown_event** - An asyncio event object used to control the application
shutdown flow.
* **exception** - An exception raised while handling the ASGI event. This may or
may not be raised depending on the state.
"""
app: ASGIApp
lifespan: str
state: LifespanCycleState = LifespanCycleState.CONNECTING
exception: typing.Optional[BaseException] = None
def __post_init__(self) -> None:
self.logger = logging.getLogger("mangum.lifespan")
self.loop = asyncio.get_event_loop()
self.app_queue: asyncio.Queue = asyncio.Queue()
self.startup_event: asyncio.Event = asyncio.Event()
self.shutdown_event: asyncio.Event = asyncio.Event()
def __enter__(self) -> None:
"""
Runs the event loop for application startup.
"""
self.loop.create_task(self.run())
self.loop.run_until_complete(self.startup())
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_value: typing.Optional[BaseException],
traceback: typing.Optional[types.TracebackType],
) -> None:
"""
Runs the event loop for application shutdown.
"""
self.loop.run_until_complete(self.shutdown())
async def run(self) -> None:
"""
Calls the application with the `lifespan` connection scope.
"""
try:
await self.app({"type": "lifespan"}, self.receive, self.send)
except LifespanUnsupported:
self.logger.info("ASGI 'lifespan' protocol appears unsupported.")
except (LifespanFailure, UnexpectedMessage) as exc:
self.exception = exc
except BaseException as exc:
self.logger.error("Exception in 'lifespan' protocol.", exc_info=exc)
finally:
self.startup_event.set()
self.shutdown_event.set()
async def receive(self) -> Message:
"""
Awaited by the application to receive ASGI `lifespan` events.
"""
if self.state is LifespanCycleState.CONNECTING:
# Connection established. The next event returned by the queue will be
# `lifespan.startup` to inform the application that the connection is
            # ready to receive lifespan messages.
self.state = LifespanCycleState.STARTUP
elif self.state is LifespanCycleState.STARTUP:
# Connection shutting down. The next event returned by the queue will be
# `lifespan.shutdown` to inform the application that the connection is now
# closing so that it may perform cleanup.
self.state = LifespanCycleState.SHUTDOWN
return await self.app_queue.get()
async def send(self, message: Message) -> None:
"""
Awaited by the application to send ASGI `lifespan` events.
"""
message_type = message["type"]
self.logger.info(
"%s: '%s' event received from application.", self.state, message_type
)
if self.state is LifespanCycleState.CONNECTING:
if self.lifespan == "on":
raise LifespanFailure(
"Lifespan connection failed during startup and lifespan is 'on'."
)
# If a message is sent before the startup event is received by the
# application, then assume that lifespan is unsupported.
self.state = LifespanCycleState.UNSUPPORTED
raise LifespanUnsupported("Lifespan protocol appears unsupported.")
if message_type not in (
"lifespan.startup.complete",
"lifespan.shutdown.complete",
"lifespan.startup.failed",
"lifespan.shutdown.failed",
):
self.state = LifespanCycleState.FAILED
raise UnexpectedMessage(f"Unexpected '{message_type}' event received.")
if self.state is LifespanCycleState.STARTUP:
if message_type == "lifespan.startup.complete":
self.startup_event.set()
elif message_type == "lifespan.startup.failed":
self.state = LifespanCycleState.FAILED
self.startup_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan startup failure. {message}")
elif self.state is LifespanCycleState.SHUTDOWN:
if message_type == "lifespan.shutdown.complete":
self.shutdown_event.set()
elif message_type == "lifespan.shutdown.failed":
self.state = LifespanCycleState.FAILED
self.shutdown_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan shutdown failure. {message}")
async def startup(self) -> None:
"""
Pushes the `lifespan` startup event to application queue and handles errors.
"""
self.logger.info("Waiting for application startup.")
await self.app_queue.put({"type": "lifespan.startup"})
await self.startup_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
if not self.exception:
self.logger.info("Application startup complete.")
else:
self.logger.info("Application startup failed.")
async def shutdown(self) -> None:
"""
Pushes the `lifespan` shutdown event to application queue and handles errors.
"""
self.logger.info("Waiting for application shutdown.")
await self.app_queue.put({"type": "lifespan.shutdown"})
await self.shutdown_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
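# A minimal usage sketch for LifespanCycle: the toy ASGI app below is
# hypothetical, but it exercises exactly the startup/shutdown flow above.
async def toy_app(scope, receive, send):
    assert scope["type"] == "lifespan"
    while True:
        message = await receive()
        if message["type"] == "lifespan.startup":
            await send({"type": "lifespan.startup.complete"})
        elif message["type"] == "lifespan.shutdown":
            await send({"type": "lifespan.shutdown.complete"})
            return
with LifespanCycle(toy_app, lifespan="auto"):
    pass  # handle requests here while the application is "running"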
| 39.443878
| 87
| 0.644289
|
import asyncio
import logging
import types
import typing
import enum
from dataclasses import dataclass
from ..types import ASGIApp, Message
from ..exceptions import LifespanUnsupported, LifespanFailure, UnexpectedMessage
class LifespanCycleState(enum.Enum):
CONNECTING = enum.auto()
STARTUP = enum.auto()
SHUTDOWN = enum.auto()
FAILED = enum.auto()
UNSUPPORTED = enum.auto()
@dataclass
class LifespanCycle:
app: ASGIApp
lifespan: str
state: LifespanCycleState = LifespanCycleState.CONNECTING
exception: typing.Optional[BaseException] = None
def __post_init__(self) -> None:
self.logger = logging.getLogger("mangum.lifespan")
self.loop = asyncio.get_event_loop()
self.app_queue: asyncio.Queue = asyncio.Queue()
self.startup_event: asyncio.Event = asyncio.Event()
self.shutdown_event: asyncio.Event = asyncio.Event()
def __enter__(self) -> None:
self.loop.create_task(self.run())
self.loop.run_until_complete(self.startup())
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_value: typing.Optional[BaseException],
traceback: typing.Optional[types.TracebackType],
) -> None:
self.loop.run_until_complete(self.shutdown())
async def run(self) -> None:
try:
await self.app({"type": "lifespan"}, self.receive, self.send)
except LifespanUnsupported:
self.logger.info("ASGI 'lifespan' protocol appears unsupported.")
except (LifespanFailure, UnexpectedMessage) as exc:
self.exception = exc
except BaseException as exc:
self.logger.error("Exception in 'lifespan' protocol.", exc_info=exc)
finally:
self.startup_event.set()
self.shutdown_event.set()
async def receive(self) -> Message:
if self.state is LifespanCycleState.CONNECTING:
self.state = LifespanCycleState.STARTUP
elif self.state is LifespanCycleState.STARTUP:
self.state = LifespanCycleState.SHUTDOWN
return await self.app_queue.get()
async def send(self, message: Message) -> None:
message_type = message["type"]
self.logger.info(
"%s: '%s' event received from application.", self.state, message_type
)
if self.state is LifespanCycleState.CONNECTING:
if self.lifespan == "on":
raise LifespanFailure(
"Lifespan connection failed during startup and lifespan is 'on'."
)
self.state = LifespanCycleState.UNSUPPORTED
raise LifespanUnsupported("Lifespan protocol appears unsupported.")
if message_type not in (
"lifespan.startup.complete",
"lifespan.shutdown.complete",
"lifespan.startup.failed",
"lifespan.shutdown.failed",
):
self.state = LifespanCycleState.FAILED
raise UnexpectedMessage(f"Unexpected '{message_type}' event received.")
if self.state is LifespanCycleState.STARTUP:
if message_type == "lifespan.startup.complete":
self.startup_event.set()
elif message_type == "lifespan.startup.failed":
self.state = LifespanCycleState.FAILED
self.startup_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan startup failure. {message}")
elif self.state is LifespanCycleState.SHUTDOWN:
if message_type == "lifespan.shutdown.complete":
self.shutdown_event.set()
elif message_type == "lifespan.shutdown.failed":
self.state = LifespanCycleState.FAILED
self.shutdown_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan shutdown failure. {message}")
async def startup(self) -> None:
self.logger.info("Waiting for application startup.")
await self.app_queue.put({"type": "lifespan.startup"})
await self.startup_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
if not self.exception:
self.logger.info("Application startup complete.")
else:
self.logger.info("Application startup failed.")
async def shutdown(self) -> None:
self.logger.info("Waiting for application shutdown.")
await self.app_queue.put({"type": "lifespan.shutdown"})
await self.shutdown_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
| true
| true
|
790278d542d109c0e271928ce9ea3f02744bf76c
| 7,278
|
py
|
Python
|
ostap/fitting/tests/test_fitting_efficiency.py
|
TatianaOvsiannikova/ostap
|
a005a78b4e2860ac8f4b618e94b4b563b2eddcf1
|
[
"BSD-3-Clause"
] | 14
|
2017-03-24T12:38:08.000Z
|
2022-02-21T05:00:57.000Z
|
ostap/fitting/tests/test_fitting_efficiency.py
|
TatianaOvsiannikova/ostap
|
a005a78b4e2860ac8f4b618e94b4b563b2eddcf1
|
[
"BSD-3-Clause"
] | 10
|
2019-03-08T18:48:42.000Z
|
2022-03-22T11:59:48.000Z
|
ostap/fitting/tests/test_fitting_efficiency.py
|
TatianaOvsiannikova/ostap
|
a005a78b4e2860ac8f4b618e94b4b563b2eddcf1
|
[
"BSD-3-Clause"
] | 11
|
2017-03-23T15:29:58.000Z
|
2022-02-21T05:03:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file test_fitting_efficiency.py
# Test module for ostap/fitting/efficiency.py
# =============================================================================
""" Test module for ostap/fitting/efficiency.py
"""
# =============================================================================
__author__ = "Ostap developers"
__all__ = () ## nothing to import
# =============================================================================
import ROOT, random, math
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID, Ostap
from ostap.logger.utils import rooSilent
from ostap.fitting.efficiency import Efficiency1D
from ostap.utils.timing import timing
from ostap.plotting.canvas import use_canvas
from ostap.utils.utils import wait
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'test_fitting_efficiency' )
else :
logger = getLogger ( __name__ )
# =============================================================================
## make the test dataset
x = ROOT.RooRealVar ( 'x', 'test' , 0 , 10 )
xmin , xmax = x.minmax()
acc = ROOT.RooCategory( 'cut','cut')
acc.defineType('accept',1)
acc.defineType('reject',0)
varset = ROOT.RooArgSet ( x , acc )
ds = ROOT.RooDataSet ( dsID() , 'test data' , varset )
eff0 = Models.Monotonic_pdf ( 'E0' , xvar = x , power = 3 , increasing = True )
eff0.phis = 3.1415/1 , 3.1415/2 , 3.1415/3
margin = 1.25
emax = margin * eff0 ( x.getMax() )
N = 20000
for i in range ( N ) :
xv = random.uniform ( xmin , xmax )
x.setVal ( xv )
ev = random.uniform ( 0 , emax )
if eff0( xv ) > ev : acc.setIndex(1)
else : acc.setIndex(0)
ds.add ( varset )
np = 20
dx = (xmax-xmin)/np
points = [ dx * i for i in range ( np + 1 ) ]
# =================================================================================
## make comparison table
def make_table ( func , title , prefix = "# ") :
rows = [ ( 'x' , 'fitted eff [%]' , 'true eff [%]' , 'delta [%]' ) ]
for p in points :
e1 = 100 * func ( p , error = True )
e2 = 100 * eff0 ( p ) / emax
d = e1 - e2
row = "%4.2f" % p , \
"%s" % e1.toString ( '(%5.2f+-%4.2f)' ) ,\
"%.2f" % e2 ,\
"%s" % d .toString ( '(%5.2f+-%4.2f)' )
rows.append ( row )
from ostap.logger.table import table
return table ( rows , title = title , prefix = prefix )
# =============================================================================
# use some PDF to parameterize efficiency
def test_pdf () :
logger = getLogger ( 'test_pdf' )
effPdf = Models.Monotonic_pdf ( 'P6' , xvar = x , power = 4 , increasing = True )
maxe = margin * effPdf ( xmax )
s0 = min ( 1.0 / emax , 1.0 / maxe )
scale = ROOT.RooRealVar ( 'scaleX' , 'scaleX' , s0 , 0.2 * s0 , 5.0 * s0 )
eff2 = Efficiency1D ( 'E2' , effPdf , cut = acc , scale = scale )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Monotonic_pdf \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Monotonic_pdf)\n%s" % make_table (
eff2 , title = 'using Monotonic_pdf') )
with wait ( 2 ) , use_canvas ( 'test_pdf' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize efficiency
def test_vars1 () :
from ostap.fitting.roofuncs import BernsteinPoly as BP
logger = getLogger ( 'test_vars1' )
f = BP ( 'G' , xvar = x , power = 4 )
f.pars = 0.2 , 0.2 , 0.2 , 0.2
eff2 = Efficiency1D ( 'E3' , f.fun , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-BernsteinPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using BernsteinPoly)\n%s" % make_table (
eff2 , title = 'using BernsteinPoly') )
    with wait ( 2 ) , use_canvas ( 'test_vars1' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize efficiency
def test_vars2 () :
logger = getLogger ( 'test_vars2' )
from ostap.fitting.roofuncs import MonotonicPoly as MP
f = MP ( 'G' , xvar = x , increasing = True , power = 4 )
f.pars = 0.6 , 0.8 , -0.1 , -0.6
f.a = 0.06
f.b = 2.72
f.a.release ()
f.b.release ()
eff2 = Efficiency1D ( 'E4' , f , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-MonotonicPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using MonotonicPoly)\n%s" % make_table (
eff2 , title = 'using MonotonicPoly') )
    with wait ( 2 ) , use_canvas ( 'test_vars2' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
# use some functions to parameterize efficiency
def test_vars3 () :
logger = getLogger ( 'test_vars3' )
a = ROOT.RooRealVar ( 'A', 'a' , 0.05 , 0 , 1 )
b = ROOT.RooRealVar ( 'B', 'b' , 0.02 , -0.05 , 0.1 )
c = ROOT.RooRealVar ( 'C', 'c' , 0.005 , 0 , 0.1 )
import ostap.fitting.roofuncs as R
from ostap.fitting.funbasic import Fun1D
X = Fun1D ( x , xvar = x , name = 'X' )
##F = (X**2) * c + X * b + a
F = a + b * X + c * X**2
eff2 = Efficiency1D ( 'E5' , F , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Fun1D \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Fun1D)\n%s" % make_table (
        eff2 , title = 'using Fun1D') )
with wait ( 2 ) , use_canvas ( 'test_vars3' ) :
f2 = eff2.draw ( ds , nbins = 25 )
# =============================================================================
if '__main__' == __name__ :
with timing ("PDF" , logger ) :
test_pdf ()
with timing ("Vars1" , logger ) :
test_vars1 ()
with timing ("Vars2" , logger ) :
test_vars2 ()
with timing ("Vars3" , logger ) :
test_vars3 ()
# =============================================================================
## The END
# =============================================================================
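# A short plain-Python sketch (no ROOT; names are illustrative) of the
# accept/reject step used above to build the dataset: an event at x is
# accepted with probability eff(x)/emax, so the accepted fraction traces
# the efficiency curve, provided eff(x) <= emax on [xmin, xmax].
def sample_acceptance(eff, emax, xmin, xmax, n):
    import random
    data = []
    for _ in range(n):
        xv = random.uniform(xmin, xmax)
        data.append((xv, random.uniform(0.0, emax) < eff(xv)))
    return data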
| 34.657143
| 89
| 0.436796
|
__author__ = "Ostap developers"
__all__ = ()
import ROOT, random, math
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import cpp, VE, dsID, Ostap
from ostap.logger.utils import rooSilent
from ostap.fitting.efficiency import Efficiency1D
from ostap.utils.timing import timing
from ostap.plotting.canvas import use_canvas
from ostap.utils.utils import wait
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__ :
logger = getLogger ( 'test_fitting_efficiency' )
else :
logger = getLogger ( __name__ )
x = ROOT.RooRealVar ( 'x', 'test' , 0 , 10 )
xmin , xmax = x.minmax()
acc = ROOT.RooCategory( 'cut','cut')
acc.defineType('accept',1)
acc.defineType('reject',0)
varset = ROOT.RooArgSet ( x , acc )
ds = ROOT.RooDataSet ( dsID() , 'test data' , varset )
eff0 = Models.Monotonic_pdf ( 'E0' , xvar = x , power = 3 , increasing = True )
eff0.phis = 3.1415/1 , 3.1415/2 , 3.1415/3
margin = 1.25
emax = margin * eff0 ( x.getMax() )
N = 20000
for i in range ( N ) :
xv = random.uniform ( xmin , xmax )
x.setVal ( xv )
ev = random.uniform ( 0 , emax )
if eff0( xv ) > ev : acc.setIndex(1)
else : acc.setIndex(0)
ds.add ( varset )
np = 20
dx = (xmax-xmin)/np
points = [ dx * i for i in range ( np + 1 ) ]
def make_table ( func , title , prefix = "# ") :
rows = [ ( 'x' , 'fitted eff [%]' , 'true eff [%]' , 'delta [%]' ) ]
for p in points :
e1 = 100 * func ( p , error = True )
e2 = 100 * eff0 ( p ) / emax
d = e1 - e2
row = "%4.2f" % p , \
"%s" % e1.toString ( '(%5.2f+-%4.2f)' ) ,\
"%.2f" % e2 ,\
"%s" % d .toString ( '(%5.2f+-%4.2f)' )
rows.append ( row )
from ostap.logger.table import table
return table ( rows , title = title , prefix = prefix )
def test_pdf () :
logger = getLogger ( 'test_pdf' )
effPdf = Models.Monotonic_pdf ( 'P6' , xvar = x , power = 4 , increasing = True )
maxe = margin * effPdf ( xmax )
s0 = min ( 1.0 / emax , 1.0 / maxe )
scale = ROOT.RooRealVar ( 'scaleX' , 'scaleX' , s0 , 0.2 * s0 , 5.0 * s0 )
eff2 = Efficiency1D ( 'E2' , effPdf , cut = acc , scale = scale )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Monotonic_pdf \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Monotonic_pdf)\n%s" % make_table (
eff2 , title = 'using Monotonic_pdf') )
with wait ( 2 ) , use_canvas ( 'test_pdf' ) :
f2 = eff2.draw ( ds , nbins = 25 )
def test_vars1 () :
from ostap.fitting.roofuncs import BernsteinPoly as BP
logger = getLogger ( 'test_vars1' )
f = BP ( 'G' , xvar = x , power = 4 )
f.pars = 0.2 , 0.2 , 0.2 , 0.2
eff2 = Efficiency1D ( 'E3' , f.fun , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-BernsteinPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using BernsteinPoly)\n%s" % make_table (
eff2 , title = 'using BernsteinPoly') )
    with wait ( 2 ) , use_canvas ( 'test_vars1' ) :
f2 = eff2.draw ( ds , nbins = 25 )
def test_vars2 () :
logger = getLogger ( 'test_vars2' )
from ostap.fitting.roofuncs import MonotonicPoly as MP
f = MP ( 'G' , xvar = x , increasing = True , power = 4 )
f.pars = 0.6 , 0.8 , -0.1 , -0.6
f.a = 0.06
f.b = 2.72
f.a.release ()
f.b.release ()
eff2 = Efficiency1D ( 'E4' , f , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-MonotonicPoly \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using MonotonicPoly)\n%s" % make_table (
eff2 , title = 'using MonotonicPoly') )
    with wait ( 2 ) , use_canvas ( 'test_vars2' ) :
f2 = eff2.draw ( ds , nbins = 25 )
def test_vars3 () :
logger = getLogger ( 'test_vars3' )
a = ROOT.RooRealVar ( 'A', 'a' , 0.05 , 0 , 1 )
b = ROOT.RooRealVar ( 'B', 'b' , 0.02 , -0.05 , 0.1 )
c = ROOT.RooRealVar ( 'C', 'c' , 0.005 , 0 , 0.1 )
import ostap.fitting.roofuncs as R
from ostap.fitting.funbasic import Fun1D
X = Fun1D ( x , xvar = x , name = 'X' )
    F = a + b * X + c * X**2
eff2 = Efficiency1D ( 'E5' , F , cut = acc , xvar = x )
r2 = eff2.fitTo ( ds )
logger.info ( "Fit result using-Fun1D \n%s" % r2.table ( prefix = "# ") )
logger.info ( "Compare with true efficiency (using Fun1D)\n%s" % make_table (
        eff2 , title = 'using Fun1D') )
with wait ( 2 ) , use_canvas ( 'test_vars3' ) :
f2 = eff2.draw ( ds , nbins = 25 )
if '__main__' == __name__ :
with timing ("PDF" , logger ) :
test_pdf ()
with timing ("Vars1" , logger ) :
test_vars1 ()
with timing ("Vars2" , logger ) :
test_vars2 ()
with timing ("Vars3" , logger ) :
test_vars3 ()
| true
| true
|
79027a7d23c5a15bd7a28fff8bfec6564da9122d
| 13,464
|
py
|
Python
|
demos/output_usage.py
|
songshanyuwu/PyWebIO
|
419d10747ad90a76386411fb0d285f049337b093
|
[
"MIT"
] | 1
|
2021-02-25T23:41:09.000Z
|
2021-02-25T23:41:09.000Z
|
demos/output_usage.py
|
songshanyuwu/PyWebIO
|
419d10747ad90a76386411fb0d285f049337b093
|
[
"MIT"
] | null | null | null |
demos/output_usage.py
|
songshanyuwu/PyWebIO
|
419d10747ad90a76386411fb0d285f049337b093
|
[
"MIT"
] | null | null | null |
"""
Output demo
^^^^^^^^^^^^^^
Demonstrate various output usage supported by PyWebIO
:demo_host:`Demo </?pywebio_api=output_usage>` `Source code <https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py>`_
"""
from pywebio import start_server
from pywebio.output import *
from pywebio.session import hold, get_info
from functools import partial
def t(eng, chinese):
"""return English or Chinese text according to the user's browser language"""
return chinese if 'zh' in get_info().user_language else eng
def code_block(code, strip_indent=4):
if strip_indent:
lines = (
i[strip_indent:] if (i[:strip_indent] == ' ' * strip_indent) else i
for i in code.splitlines()
)
code = '\n'.join(lines)
code = code.strip('\n')
def run_code(code, scope):
with use_scope(scope):
exec(code, globals())
with use_scope() as scope:
put_code(code, 'python')
put_buttons([{'label': t('Run', '运行'), 'value': '', 'color': 'success'}],
onclick=[partial(run_code, code=code, scope=scope)], small=True)
async def main():
"""PyWebIO Output demo
Demonstrate various output usage supported by PyWebIO.
演示PyWebIO输出模块的使用
"""
put_markdown(t("""# PyWebIO Output demo
    You can get the source code of this demo [here](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)
This demo only introduces part of the functions of the PyWebIO output module. For the complete features, please refer to the [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html).
The output functions are all defined in the `pywebio.output` module and can be imported using `from pywebio.output import *`.
""", """# PyWebIO 输出演示
在[这里](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)可以获取本Demo的源码。
本Demo仅提供了PyWebIO输出模块的部分功能的演示,完整特性请参阅[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)。
PyWebIO的输出函数都定义在 `pywebio.output` 模块中,可以使用 `from pywebio.output import *` 引入。
### 基本输出
PyWebIO提供了一些便捷函数来输出表格、链接等格式:
"""), strip_indent=4)
code_block(t(r"""
# Text Output
put_text("Hello world!")
# Table Output
put_table([
['Commodity', 'Price'],
['Apple', '5.5'],
['Banana', '7'],
])
# Markdown Output
put_markdown('~~Strikethrough~~')
# File Output
    put_file('hello_world.txt', b'hello world!')
""", r"""
# 文本输出
put_text("Hello world!")
# 表格输出
put_table([
['商品', '价格'],
['苹果', '5.5'],
['香蕉', '7'],
])
# Markdown输出
put_markdown('~~删除线~~')
# 文件输出
    put_file('hello_world.txt', b'hello world!')
"""))
put_markdown(t(r"""For all output functions provided by PyWebIO, please refer to the document.
### Combined Output
The output functions whose name starts with put_ can be combined with some output functions as part of the final output:
You can pass `put_xxx()` calls to `put_table()` as cell content:
""", r"""PyWebIO提供的全部输出函数请参考PyWebIO文档
### 组合输出
函数名以 `put_` 开始的输出函数,可以与一些输出函数组合使用,作为最终输出的一部分。
比如`put_table()`支持以`put_xxx()`调用作为单元格内容:
"""), strip_indent=4)
code_block(r"""
put_table([
['Type', 'Content'],
['html', put_html('X<sup>2</sup>')],
['text', '<hr/>'], # equal to ['text', put_text('<hr/>')]
['buttons', put_buttons(['A', 'B'], onclick=toast)],
['markdown', put_markdown('`Awesome PyWebIO!`')],
['file', put_file('hello.text', b'hello world')],
['table', put_table([['A', 'B'], ['C', 'D']])]
])
""")
put_markdown(t(r"Similarly, you can pass `put_xxx()` calls to `popup()` as the popup content:",
r"类似地,`popup()`也可以将`put_xxx()`调用作为弹窗内容:"), strip_indent=4)
code_block(r"""
popup('Popup title', [
put_html('<h3>Popup Content</h3>'),
'plain html: <br/>', # equal to put_text('plain html: <br/>')
put_table([['A', 'B'], ['C', 'D']]),
put_buttons(['close_popup()'], onclick=lambda _: close_popup())
])
""")
put_markdown(t(r"For more output functions that accept `put_xxx()` calls as parameters, please refer to corresponding function documentation.",
r"更多接受`put_xxx()`作为参数的输出函数请参考函数文档。"))
put_markdown(t(r"""### Callback
PyWebIO allows you to output some buttons, and the provided callback function will be executed when the button is clicked.
This is an example:%s
    The call to `put_table()` will not block. When the user clicks a button, the corresponding callback function will be invoked:
""", r"""### 事件回调
PyWebIO允许你输出一些控件,当控件被点击时执行提供的回调函数,就像编写GUI程序一样。
下面是一个例子:%s
`put_table()`的调用不会阻塞。当用户点击了某行中的按钮时,PyWebIO会自动调用相应的回调函数:
""") % """
```python
from functools import partial
def edit_row(choice, row):
put_markdown("> You click`%s` button ar row `%s`" % (choice, row))
put_table([
['Idx', 'Actions'],
[1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
[2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
[3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
])
```
""", strip_indent=4)
from functools import partial
@use_scope('table-callback')
def edit_row(choice, row):
put_markdown("> You click `%s` button ar row `%s`" % (choice, row))
put_table([
['Idx', 'Actions'],
[1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
[2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
[3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
])
set_scope('table-callback')
put_markdown(t("Of course, PyWebIO also supports outputting individual button:", "当然,PyWebIO还支持单独的按钮控件:")+r"""
```python
def btn_click(btn_val):
put_markdown("> You click `%s` button" % btn_val)
put_buttons(['A', 'B', 'C'], onclick=btn_click)
```
""", strip_indent=4)
@use_scope('button-callback')
def btn_click(btn_val):
put_markdown("> You click `%s` button" % btn_val)
put_buttons(['A', 'B', 'C'], onclick=btn_click)
set_scope('button-callback')
put_markdown(t(r"""### Output Scope
PyWebIO uses the scope model to give more control to the location of content output. The output area of PyWebIO can be divided into different output domains. The output domain is called Scope in PyWebIO.
The output domain is a container of output content, and each output domain is arranged vertically, and the output domains can also be nested.
Each output function (function name like `put_xxx()`) will output its content to a scope, the default is "current scope". "current scope" is determined by the runtime context. The output function can also manually specify the scope to output. The scope name is unique within the session.
You can use `use_scope()` to open and enter a new output scope, or enter an existing output scope: %s
The above code will generate the following Scope layout:
""", r"""### 输出域Scope
PyWebIO使用Scope模型来对内容输出的位置进行灵活地控制,PyWebIO的内容输出区可以划分出不同的输出域,PyWebIO将输出域称作`Scope`。
输出域为输出内容的容器,各个输出域之间上下排列,输出域也可以进行嵌套。
每个输出函数(函数名形如 `put_xxx()` )都会将内容输出到一个Scope,默认为”当前Scope”,”当前Scope”由运行时上下文确定,输出函数也可以手动指定输出到的Scope。Scope名在会话内唯一。
可以使用 `use_scope()` 开启并进入一个新的输出域,或进入一个已经存在的输出域: %s
以上代码将会产生如下Scope布局:
""") % """
```python
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
```
""", strip_indent=4)
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
put_html("""<style>
#pywebio-scope-A {border: 1px solid red;}
#pywebio-scope-B {border: 1px solid blue;margin:2px}
#pywebio-scope-C {border: 1px solid green;margin-top:2px}
</style><br/>""")
put_markdown(t(r"""The output function (function name like `put_xxx()`) will output the content to the "current scope" by default, and the "current scope" of the runtime context can be set by `use_scope()`.
In addition, you can use the `scope` parameter of the output function to specify the destination scope to output:
""", r"""
输出函数(函数名形如 `put_xxx()` )在默认情况下,会将内容输出到”当前Scope”,可以通过 `use_scope()` 设置运行时上下文的”当前Scope”。
此外,也可以通过输出函数的 scope 参数指定输出的目的Scope:
"""), strip_indent=4)
put_grid([
[put_code("put_text('A', scope='A')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('A', scope='A')])],
[put_code("put_text('B', scope='B')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('B', scope='B')])],
[put_code("put_text('C', scope='C')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('C', scope='C')])],
], cell_widths='1fr 10px auto')
put_markdown(t("The output content can be inserted into any positions of the target scope by using the `position` parameter of the output function.", "输出函数可以使用`position`参数指定内容在Scope中输出的位置") + """
```python
put_text(now(), scope='A', position=...)
```
""", strip_indent=4)
import datetime
put_buttons([('position=%s' % i, i) for i in [1, 2, 3, -1, -2, -3]],
lambda i: put_text(datetime.datetime.now(), position=i, scope='A'), small=True)
put_markdown(t(r"In addition to `use_scope()`, PyWebIO also provides the following scope control functions:",
r"除了 `use_scope()` , PyWebIO同样提供了以下scope控制函数: "))
put_grid([
[put_code("clear('B') # Clear content of Scope B", 'python'), None, put_buttons(['运行'], [lambda: clear('B')])],
[put_code("remove('C') # Remove Scope C", 'python'), None, put_buttons(['运行'], [lambda: remove('C')])],
[put_code("scroll_to('A') # Scroll the page to position of Scope A", 'python'), None, put_buttons(['运行'], [lambda: scroll_to('A')])],
], cell_widths='1fr 10px auto')
put_markdown(t(r"""### Layout
In general, using the various output functions introduced above is enough to output what you want, but these outputs are arranged vertically. If you want to make a more complex layout (such as displaying a code block on the left side of the page and an image on the right), you need to use layout functions.
The `pywebio.output` module provides 3 layout functions, and you can create complex layouts by combining them:
- `put_row()` : Use row layout to output content. The content is arranged horizontally
- `put_column()` : Use column layout to output content. The content is arranged vertically
- `put_grid()` : Output content using grid layout
Here is an example by combining `put_row()` and `put_column()`:
""", r"""### 布局
一般情况下,使用上文介绍的各种输出函数足以完成各种内容的展示,但直接调用输出函数产生的输出之间都是竖直排列的,如果想实现更复杂的布局(比如在页 面左侧显示一个代码块,在右侧显示一个图像),就需要借助布局函数。
`pywebio.output` 模块提供了3个布局函数,通过对他们进行组合可以完成各种复杂的布局:
- `put_row()` : 使用行布局输出内容. 内容在水平方向上排列
- `put_column()` : 使用列布局输出内容. 内容在竖直方向上排列
- `put_grid()` : 使用网格布局输出内容
比如,通过通过组合 `put_row()` 和 `put_column()` 实现的布局:
"""), strip_indent=4)
code_block(r"""
put_row([
put_column([
put_code('A'),
put_row([
put_code('B1'), None, # %s
put_code('B2'), None,
put_code('B3'),
]),
put_code('C'),
]), None,
put_code('D'), None,
put_code('E')
])
""" % t('None represents the space between the output', 'None 表示输出之间的空白'))
put_markdown(t(r"""### Style
If you are familiar with CSS styles, you can use the `style()` function to set a custom style for the output.
You can set the CSS style for a single `put_xxx()` output:
""", r"""### 样式
如果你熟悉 CSS样式 ,你还可以使用 `style()` 函数给输出设定自定义样式。
可以给单个的 `put_xxx()` 输出设定CSS样式,也可以配合组合输出使用:
"""), strip_indent=4)
code_block(r"""
style(put_text('Red'), 'color: red')
put_table([
['A', 'B'],
['C', style(put_text('Red'), 'color: red')],
])
""", strip_indent=4)
put_markdown(t(r"`style()` also accepts a list of output calls:", r"`style()` 也接受列表作为输入:"))
code_block(r"""
style([
put_text('Red'),
put_markdown('~~del~~')
], 'color: red')
put_collapse('title', style([
put_text('text'),
put_markdown('~~del~~'),
], 'margin-left: 20px'))
""", strip_indent=4)
put_markdown(t("""----
For more information about output of PyWebIO, please visit PyWebIO [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html) and [output module documentation](https://pywebio.readthedocs.io/zh_CN/latest/output.html).
""","""----
PyWebIO的输出演示到这里就结束了,更多内容请访问PyWebIO[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)和[output模块文档](https://pywebio.readthedocs.io/zh_CN/latest/output.html)。
"""), lstrip=True)
await hold()
if __name__ == '__main__':
start_server(main, debug=True, port=8080, cdn=False)
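# A minimal standalone sketch of the scope + callback pattern this demo
# walks through; it uses only pywebio.output functions shown above, and
# the scope name and port are arbitrary choices.
def mini_app():
    with use_scope('log'):
        put_text('Click a button:')
    def on_click(val):
        clear('log')
        put_text('You clicked %s' % val, scope='log')
    put_buttons(['A', 'B'], onclick=on_click)
# start_server(mini_app, port=8081) would serve it the same way as main().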
| 37.19337
| 311
| 0.621064
|
from pywebio import start_server
from pywebio.output import *
from pywebio.session import hold, get_info
from functools import partial
def t(eng, chinese):
return chinese if 'zh' in get_info().user_language else eng
def code_block(code, strip_indent=4):
if strip_indent:
lines = (
i[strip_indent:] if (i[:strip_indent] == ' ' * strip_indent) else i
for i in code.splitlines()
)
code = '\n'.join(lines)
code = code.strip('\n')
def run_code(code, scope):
with use_scope(scope):
exec(code, globals())
with use_scope() as scope:
put_code(code, 'python')
put_buttons([{'label': t('Run', '运行'), 'value': '', 'color': 'success'}],
onclick=[partial(run_code, code=code, scope=scope)], small=True)
async def main():
put_markdown(t("""# PyWebIO Output demo
    You can get the source code of this demo [here](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)
This demo only introduces part of the functions of the PyWebIO output module. For the complete features, please refer to the [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html).
The output functions are all defined in the `pywebio.output` module and can be imported using `from pywebio.output import *`.
""", """# PyWebIO 输出演示
在[这里](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)可以获取本Demo的源码。
本Demo仅提供了PyWebIO输出模块的部分功能的演示,完整特性请参阅[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)。
PyWebIO的输出函数都定义在 `pywebio.output` 模块中,可以使用 `from pywebio.output import *` 引入。
### 基本输出
PyWebIO提供了一些便捷函数来输出表格、链接等格式:
"""), strip_indent=4)
code_block(t(r"""
# Text Output
put_text("Hello world!")
# Table Output
put_table([
['Commodity', 'Price'],
['Apple', '5.5'],
['Banana', '7'],
])
# Markdown Output
put_markdown('~~Strikethrough~~')
# File Output
    put_file('hello_world.txt', b'hello world!')
""", r"""
# 文本输出
put_text("Hello world!")
# 表格输出
put_table([
['商品', '价格'],
['苹果', '5.5'],
['香蕉', '7'],
])
# Markdown输出
put_markdown('~~删除线~~')
# 文件输出
    put_file('hello_world.txt', b'hello world!')
"""))
put_markdown(t(r"""For all output functions provided by PyWebIO, please refer to the document.
### Combined Output
The output functions whose name starts with put_ can be combined with some output functions as part of the final output:
You can pass `put_xxx()` calls to `put_table()` as cell content:
""", r"""PyWebIO提供的全部输出函数请参考PyWebIO文档
### 组合输出
函数名以 `put_` 开始的输出函数,可以与一些输出函数组合使用,作为最终输出的一部分。
比如`put_table()`支持以`put_xxx()`调用作为单元格内容:
"""), strip_indent=4)
code_block(r"""
put_table([
['Type', 'Content'],
['html', put_html('X<sup>2</sup>')],
['text', '<hr/>'], # equal to ['text', put_text('<hr/>')]
['buttons', put_buttons(['A', 'B'], onclick=toast)],
['markdown', put_markdown('`Awesome PyWebIO!`')],
['file', put_file('hello.text', b'hello world')],
['table', put_table([['A', 'B'], ['C', 'D']])]
])
""")
put_markdown(t(r"Similarly, you can pass `put_xxx()` calls to `popup()` as the popup content:",
r"类似地,`popup()`也可以将`put_xxx()`调用作为弹窗内容:"), strip_indent=4)
code_block(r"""
popup('Popup title', [
put_html('<h3>Popup Content</h3>'),
'plain html: <br/>', # equal to put_text('plain html: <br/>')
put_table([['A', 'B'], ['C', 'D']]),
put_buttons(['close_popup()'], onclick=lambda _: close_popup())
])
""")
put_markdown(t(r"For more output functions that accept `put_xxx()` calls as parameters, please refer to corresponding function documentation.",
r"更多接受`put_xxx()`作为参数的输出函数请参考函数文档。"))
put_markdown(t(r"""### Callback
PyWebIO allows you to output some buttons, and the provided callback function will be executed when the button is clicked.
This is an example:%s
    The call to `put_table()` will not block. When the user clicks a button, the corresponding callback function will be invoked:
""", r"""### 事件回调
PyWebIO允许你输出一些控件,当控件被点击时执行提供的回调函数,就像编写GUI程序一样。
下面是一个例子:%s
`put_table()`的调用不会阻塞。当用户点击了某行中的按钮时,PyWebIO会自动调用相应的回调函数:
""") % """
```python
from functools import partial
def edit_row(choice, row):
put_markdown("> You click`%s` button ar row `%s`" % (choice, row))
put_table([
['Idx', 'Actions'],
[1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
[2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
[3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
])
```
""", strip_indent=4)
from functools import partial
@use_scope('table-callback')
def edit_row(choice, row):
put_markdown("> You click `%s` button ar row `%s`" % (choice, row))
put_table([
['Idx', 'Actions'],
[1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
[2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
[3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
])
set_scope('table-callback')
put_markdown(t("Of course, PyWebIO also supports outputting individual button:", "当然,PyWebIO还支持单独的按钮控件:")+r"""
```python
def btn_click(btn_val):
put_markdown("> You click `%s` button" % btn_val)
put_buttons(['A', 'B', 'C'], onclick=btn_click)
```
""", strip_indent=4)
@use_scope('button-callback')
def btn_click(btn_val):
put_markdown("> You click `%s` button" % btn_val)
put_buttons(['A', 'B', 'C'], onclick=btn_click)
set_scope('button-callback')
put_markdown(t(r"""### Output Scope
PyWebIO uses the scope model to give more control to the location of content output. The output area of PyWebIO can be divided into different output domains. The output domain is called Scope in PyWebIO.
The output domain is a container of output content, and each output domain is arranged vertically, and the output domains can also be nested.
Each output function (function name like `put_xxx()`) will output its content to a scope, the default is "current scope". "current scope" is determined by the runtime context. The output function can also manually specify the scope to output. The scope name is unique within the session.
You can use `use_scope()` to open and enter a new output scope, or enter an existing output scope: %s
The above code will generate the following Scope layout:
""", r"""### 输出域Scope
PyWebIO使用Scope模型来对内容输出的位置进行灵活地控制,PyWebIO的内容输出区可以划分出不同的输出域,PyWebIO将输出域称作`Scope`。
输出域为输出内容的容器,各个输出域之间上下排列,输出域也可以进行嵌套。
每个输出函数(函数名形如 `put_xxx()` )都会将内容输出到一个Scope,默认为”当前Scope”,”当前Scope”由运行时上下文确定,输出函数也可以手动指定输出到的Scope。Scope名在会话内唯一。
可以使用 `use_scope()` 开启并进入一个新的输出域,或进入一个已经存在的输出域: %s
以上代码将会产生如下Scope布局:
""") % """
```python
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
```
""", strip_indent=4)
with use_scope('A'):
put_text('Text in scope A')
with use_scope('B'):
put_text('Text in scope B')
with use_scope('C'):
put_text('Text in scope C')
put_html("""<style>
#pywebio-scope-A {border: 1px solid red;}
#pywebio-scope-B {border: 1px solid blue;margin:2px}
#pywebio-scope-C {border: 1px solid green;margin-top:2px}
</style><br/>""")
put_markdown(t(r"""The output function (function name like `put_xxx()`) will output the content to the "current scope" by default, and the "current scope" of the runtime context can be set by `use_scope()`.
In addition, you can use the `scope` parameter of the output function to specify the destination scope to output:
""", r"""
输出函数(函数名形如 `put_xxx()` )在默认情况下,会将内容输出到”当前Scope”,可以通过 `use_scope()` 设置运行时上下文的”当前Scope”。
此外,也可以通过输出函数的 scope 参数指定输出的目的Scope:
"""), strip_indent=4)
put_grid([
[put_code("put_text('A', scope='A')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('A', scope='A')])],
[put_code("put_text('B', scope='B')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('B', scope='B')])],
[put_code("put_text('C', scope='C')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('C', scope='C')])],
], cell_widths='1fr 10px auto')
put_markdown(t("The output content can be inserted into any positions of the target scope by using the `position` parameter of the output function.", "输出函数可以使用`position`参数指定内容在Scope中输出的位置") + """
```python
put_text(now(), scope='A', position=...)
```
""", strip_indent=4)
import datetime
put_buttons([('position=%s' % i, i) for i in [1, 2, 3, -1, -2, -3]],
lambda i: put_text(datetime.datetime.now(), position=i, scope='A'), small=True)
put_markdown(t(r"In addition to `use_scope()`, PyWebIO also provides the following scope control functions:",
r"除了 `use_scope()` , PyWebIO同样提供了以下scope控制函数: "))
put_grid([
[put_code("clear('B') # Clear content of Scope B", 'python'), None, put_buttons(['运行'], [lambda: clear('B')])],
[put_code("remove('C') # Remove Scope C", 'python'), None, put_buttons(['运行'], [lambda: remove('C')])],
[put_code("scroll_to('A') # Scroll the page to position of Scope A", 'python'), None, put_buttons(['运行'], [lambda: scroll_to('A')])],
], cell_widths='1fr 10px auto')
put_markdown(t(r"""### Layout
In general, using the various output functions introduced above is enough to output what you want, but these outputs are arranged vertically. If you want to make a more complex layout (such as displaying a code block on the left side of the page and an image on the right), you need to use layout functions.
The `pywebio.output` module provides 3 layout functions, and you can create complex layouts by combining them:
- `put_row()` : Use row layout to output content. The content is arranged horizontally
- `put_column()` : Use column layout to output content. The content is arranged vertically
- `put_grid()` : Output content using grid layout
Here is an example by combining `put_row()` and `put_column()`:
""", r"""### 布局
一般情况下,使用上文介绍的各种输出函数足以完成各种内容的展示,但直接调用输出函数产生的输出之间都是竖直排列的,如果想实现更复杂的布局(比如在页 面左侧显示一个代码块,在右侧显示一个图像),就需要借助布局函数。
`pywebio.output` 模块提供了3个布局函数,通过对他们进行组合可以完成各种复杂的布局:
- `put_row()` : 使用行布局输出内容. 内容在水平方向上排列
- `put_column()` : 使用列布局输出内容. 内容在竖直方向上排列
- `put_grid()` : 使用网格布局输出内容
比如,通过通过组合 `put_row()` 和 `put_column()` 实现的布局:
"""), strip_indent=4)
code_block(r"""
put_row([
put_column([
put_code('A'),
put_row([
put_code('B1'), None, # %s
put_code('B2'), None,
put_code('B3'),
]),
put_code('C'),
]), None,
put_code('D'), None,
put_code('E')
])
""" % t('None represents the space between the output', 'None 表示输出之间的空白'))
put_markdown(t(r"""### Style
If you are familiar with CSS styles, you can use the `style()` function to set a custom style for the output.
You can set the CSS style for a single `put_xxx()` output:
""", r"""### 样式
如果你熟悉 CSS样式 ,你还可以使用 `style()` 函数给输出设定自定义样式。
可以给单个的 `put_xxx()` 输出设定CSS样式,也可以配合组合输出使用:
"""), strip_indent=4)
code_block(r"""
style(put_text('Red'), 'color: red')
put_table([
['A', 'B'],
['C', style(put_text('Red'), 'color: red')],
])
""", strip_indent=4)
put_markdown(t(r"`style()` also accepts a list of output calls:", r"`style()` 也接受列表作为输入:"))
code_block(r"""
style([
put_text('Red'),
put_markdown('~~del~~')
], 'color: red')
put_collapse('title', style([
put_text('text'),
put_markdown('~~del~~'),
], 'margin-left: 20px'))
""", strip_indent=4)
put_markdown(t("""----
For more information about output of PyWebIO, please visit PyWebIO [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html) and [output module documentation](https://pywebio.readthedocs.io/zh_CN/latest/output.html).
""","""----
PyWebIO的输出演示到这里就结束了,更多内容请访问PyWebIO[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)和[output模块文档](https://pywebio.readthedocs.io/zh_CN/latest/output.html)。
"""), lstrip=True)
await hold()
if __name__ == '__main__':
start_server(main, debug=True, port=8080, cdn=False)
| true
| true
|
79027ae2d057de7e199cb76a3e86526a58da79fa
| 501
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/histogram2dcontour/colorbar/_ticks.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/histogram2dcontour/colorbar/_ticks.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/histogram2dcontour/colorbar/_ticks.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="ticks", parent_name="histogram2dcontour.colorbar", **kwargs
):
super(TicksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["outside", "inside", ""]),
**kwargs
)
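# A hedged usage sketch: validate_coerce() is assumed here from plotly's
# base EnumeratedValidator; values outside the allowed set raise ValueError.
v = TicksValidator()
v.validate_coerce("outside")  # accepted, returns "outside"
v.validate_coerce("")         # accepted, returns ""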
| 33.4
| 86
| 0.644711
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="ticks", parent_name="histogram2dcontour.colorbar", **kwargs
):
super(TicksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["outside", "inside", ""]),
**kwargs
)
| true
| true
|
79027aea4b332b8dfb878d883be7550b71ba06c0
| 297
|
py
|
Python
|
weather.py
|
Geimers228/PyDev
|
bf2997e7a4dd3b780caeafbb5a53e87173e6891b
|
[
"MIT"
] | null | null | null |
weather.py
|
Geimers228/PyDev
|
bf2997e7a4dd3b780caeafbb5a53e87173e6891b
|
[
"MIT"
] | 4
|
2021-01-21T11:41:28.000Z
|
2021-01-21T11:42:30.000Z
|
weather.py
|
Geimers228/PyDev
|
bf2997e7a4dd3b780caeafbb5a53e87173e6891b
|
[
"MIT"
] | null | null | null |
from pyowm import OWM
owm = OWM('21ff51d901692fd3e2f5ecc04d3617f1')  # OpenWeatherMap API key
place = input('Input Place: ')
mgr = owm.weather_manager()  # query current weather for the requested place
observation = mgr.weather_at_place(place)
w = observation.weather
wind = w.detailed_status  # textual status, e.g. 'clear sky' (despite the name)
t = w.temperature('celsius')  # dict of temperatures in Celsius
print(wind)
print(t)
exit_ = input('')  # keep the console window open until Enter is pressed
| 24.75
| 46
| 0.727273
|
from pyowm import OWM
owm = OWM('21ff51d901692fd3e2f5ecc04d3617f1')
place = input('Input Place: ')
mgr = owm.weather_manager()
observation = mgr.weather_at_place(place)
w = observation.weather
wind = w.detailed_status
t = w.temperature('celsius')
print(wind)
print(t)
exit_ = input('')
| true
| true
|
79027b4fbb5e97ffdaf75900b2f3121dc7947225
| 7,447
|
py
|
Python
|
mantime/normalisers/clinical_doc_analyser.py
|
filannim/ManTIME
|
5b47476907477c0b2dbbee3ba3a2d47903c4d12b
|
[
"BSD-2-Clause-FreeBSD"
] | 18
|
2015-01-08T13:54:43.000Z
|
2020-11-30T14:17:56.000Z
|
mantime/normalisers/clinical_doc_analyser.py
|
filannim/ManTIME
|
5b47476907477c0b2dbbee3ba3a2d47903c4d12b
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2018-02-14T11:31:57.000Z
|
2021-02-03T07:59:11.000Z
|
mantime/normalisers/clinical_doc_analyser.py
|
filannim/ManTIME
|
5b47476907477c0b2dbbee3ba3a2d47903c4d12b
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2016-08-29T07:28:39.000Z
|
2017-06-05T12:42:43.000Z
|
#!/usr/bin/python
#
# Copyright 2012 Michele Filannino
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# authors: Michele Filannino
# email: filannim@cs.man.ac.uk
#
# This work is part of 2012 i2b2 challenge.
# For details, see www.cs.man.ac.uk/~filannim/
""" It analyses the structure of the clinical document (i2b2 format) and
provides an object which exposes the key dates extracted in the doc.
"""
from __future__ import division
import codecs
from datetime import date as datex
import re
import os
import sys
from timex_clinical import normalise
class DocumentAnalyser(object):
"""Analyses clinical documents
    This module reads a txt file and extracts, in the form of key-value pairs,
    all the temporally relevant clinical information: admission date, discharge
date, operation date and so on.
"""
def __init__(self):
self.admission_signals = ('admission')
self.discharge_signals = ('discharge')
self.operation_signals = ('operating room')
self.transfer_signals = ('transfer')
        self.date_syntaxes = ['[0-9][0-9]*[/-][0-9][0-9]*(?:[/-][0-9][0-9]*)?|[0-9]{8}|[0-9]{6}|[0-9]{4}']
def analyse(self, path, filename, normalisation=False):
clinical_note = ClinicalDocument()
text = ''
with codecs.open(os.path.join(path, filename)) as file_content:
for line in file_content:
if not re.match("^(?:\]\]>)?<(?:\?|/)?[A-Za-z]+", line):
text += line.lower()
date_refs = [(match.start(), match.end()) for match in re.finditer(self.date_syntaxes[0], text)]
clinical_note.file_name = filename
clinical_note.file_path = path
if normalisation:
clinical_note.admission_date = normalise(self.search_closest(self.admission_signals, date_refs, text, 'forward', 6))[2]
clinical_note.discharge_date = normalise(self.search_closest(self.discharge_signals, date_refs, text, 'forward', 6))[2]
if clinical_note.discharge_date == 'NONE':
clinical_note.discharge_date = normalise(self.search_closest(self.discharge_signals, date_refs, text, 'forward', 50))[2]
clinical_note.operation_date = normalise(self.search_closest(self.operation_signals, date_refs, text, 'both'), clinical_note.admission_date.replace('-', ''))[2]
clinical_note.transfer_date = normalise(self.search_closest(self.transfer_signals, date_refs, text, 'both'), clinical_note.admission_date.replace('-', ''))[2]
clinical_note.course_length = self.get_difference_from_normalised_dates(clinical_note.admission_date, clinical_note.discharge_date)
else:
clinical_note.admission_date = self.search_closest(self.admission_signals, date_refs, text, 'forward', 6)
clinical_note.discharge_date = self.search_closest(self.discharge_signals, date_refs, text, 'forward', 6)
if not clinical_note.discharge_date:
clinical_note.discharge_date = self.search_closest(self.discharge_signals, date_refs, text, 'forward', 50)
clinical_note.discharge_date_not_in_header = True
clinical_note.operation_date = self.search_closest(self.operation_signals, date_refs, text, 'both')
clinical_note.transfer_date = self.search_closest(self.transfer_signals, date_refs, text, 'both')
clinical_note.course_length = self.get_difference_from_normalised_dates(normalise(clinical_note.admission_date)[2], normalise(clinical_note.discharge_date)[2])
return clinical_note
def search_closest(self, object, date_refs, text, direction='both', threshold=10*10):
min_value = 10**10
object_refs = [match.start() for match in re.finditer(re.escape(object), text)]
result = (-1, -1, -1)
for target_word in object_refs:
if direction =='forward': dates = [date for date in date_refs if date[0] > target_word and self.get_number_of_newlines_inside(0, date[0], text) <= threshold]
elif direction =='backward': dates = [date for date in date_refs if date[0] < target_word and self.get_number_of_newlines_inside(0, date[0], text) <= threshold]
else: dates = [date for date in date_refs if self.get_number_of_newlines_inside(0, date[0], text) <= threshold]
for date in dates:
n_of_returns = self.get_number_of_newlines_inside(date[0], target_word, text)
distance = abs(date[0]-target_word)*(n_of_returns+1)
# print n_of_returns, self.get_number_of_newlines_inside(date[0],target_word,text)
if distance<min_value:
result = (target_word,date[0],date[1])
min_value = distance
# print text[date[0]:date[1]]
return text[result[1]:result[2]]
def get_number_of_newlines_inside(self, start, end, text):
return_refs = [match.start() for match in re.finditer(re.escape('\n'),
text)]
result = len([return_pointer for return_pointer in return_refs if
return_pointer < max(start, end) and
return_pointer > min(start, end)])
return result
def get_difference_from_normalised_dates(self, date1, date2):
try:
date1 = date1.split('-')
date2 = date2.split('-')
date1 = datex(int(date1[0]), int(date1[1]), int(date1[2]))
date2 = datex(int(date2[0]), int(date2[1]), int(date2[2]))
return abs((date1-date2).days)
except Exception:
return -1
class ClinicalDocument(object):
"""Document representation
    This class compactly represents a document and all the information
    required to complete the temporal expression normalisation phase.
It contains information about: name, path, admission date, discharge date,
date of the surgical operation, date of the transfer.
"""
def __init__(self, file_name=None, file_path=None, admission=None,
operation=None, transfer=None, discharge=None):
self.file_name = file_name
self.file_path = file_path
self.admission_date = admission
self.discharge_date = discharge
self.operation_date = operation
self.transfer_date = transfer
self.course_length = -1
self.discharge_date_not_in_header = False
self.text = ''
def __str__(self):
output = str(self.file_name.split('/')[-1]) + '\t'
output += 'admission: ' + str(self.admission_date) + '\t'
output += 'operation: ' + str(self.operation_date) + '\t'
output += 'transfer: ' + str(self.transfer_date) + '\t'
output += 'discharge: ' + str(self.discharge_date) + '\t'
output += 'course_length: ' + str(self.course_length) + '\t'
output += 'adm:p\t'
if self.discharge_date_not_in_header:
output += 'dis:a'
else:
output += 'dis:p'
# output += 'Admission date: ' + ''
return output
def main():
path, filename = os.path.split(os.path.abspath(sys.argv[1]))
analyser = DocumentAnalyser()
    print(analyser.analyse(path, filename, False))
if __name__ == '__main__':
main()
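# A hypothetical usage sketch; the path and filename below are placeholders.
analyser = DocumentAnalyser()
doc = analyser.analyse('/data/i2b2', 'record-042.txt', normalisation=True)
print(doc)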
| 48.357143
| 172
| 0.655297
|
""" It analyses the structure of the clinical document (i2b2 format) and
provides an object which exposes the key dates extracted in the doc.
"""
from __future__ import division
import codecs
from datetime import date as datex
import re
import os
import sys
from timex_clinical import normalise
class DocumentAnalyser(object):
"""Analyses clinical documents
    This module reads a txt file and extracts, in the form of key-value pairs,
    all the temporally relevant clinical information: admission date, discharge
date, operation date and so on.
"""
def __init__(self):
self.admission_signals = ('admission')
self.discharge_signals = ('discharge')
self.operation_signals = ('operating room')
self.transfer_signals = ('transfer')
        self.date_syntaxes = ['[0-9][0-9]*[/-][0-9][0-9]*(?:[/-][0-9][0-9]*)?|[0-9]{8}|[0-9]{6}|[0-9]{4}']
def analyse(self, path, filename, normalisation=False):
clinical_note = ClinicalDocument()
text = ''
with codecs.open(os.path.join(path, filename)) as file_content:
for line in file_content:
if not re.match("^(?:\]\]>)?<(?:\?|/)?[A-Za-z]+", line):
text += line.lower()
date_refs = [(match.start(), match.end()) for match in re.finditer(self.date_syntaxes[0], text)]
clinical_note.file_name = filename
clinical_note.file_path = path
if normalisation:
clinical_note.admission_date = normalise(self.search_closest(self.admission_signals, date_refs, text, 'forward', 6))[2]
clinical_note.discharge_date = normalise(self.search_closest(self.discharge_signals, date_refs, text, 'forward', 6))[2]
if clinical_note.discharge_date == 'NONE':
clinical_note.discharge_date = normalise(self.search_closest(self.discharge_signals, date_refs, text, 'forward', 50))[2]
clinical_note.operation_date = normalise(self.search_closest(self.operation_signals, date_refs, text, 'both'), clinical_note.admission_date.replace('-', ''))[2]
clinical_note.transfer_date = normalise(self.search_closest(self.transfer_signals, date_refs, text, 'both'), clinical_note.admission_date.replace('-', ''))[2]
clinical_note.course_length = self.get_difference_from_normalised_dates(clinical_note.admission_date, clinical_note.discharge_date)
else:
clinical_note.admission_date = self.search_closest(self.admission_signals, date_refs, text, 'forward', 6)
clinical_note.discharge_date = self.search_closest(self.discharge_signals, date_refs, text, 'forward', 6)
if not clinical_note.discharge_date:
clinical_note.discharge_date = self.search_closest(self.discharge_signals, date_refs, text, 'forward', 50)
clinical_note.discharge_date_not_in_header = True
clinical_note.operation_date = self.search_closest(self.operation_signals, date_refs, text, 'both')
clinical_note.transfer_date = self.search_closest(self.transfer_signals, date_refs, text, 'both')
clinical_note.course_length = self.get_difference_from_normalised_dates(normalise(clinical_note.admission_date)[2], normalise(clinical_note.discharge_date)[2])
return clinical_note
def search_closest(self, signal, date_refs, text, direction='both', threshold=10**10):
# Return the date mention closest to the given signal phrase; `threshold`
# caps how many newlines from the top of the document a candidate may sit.
min_value = 10**10
signal_refs = [match.start() for match in re.finditer(re.escape(signal), text)]
result = (-1, -1, -1)
for target_word in signal_refs:
if direction == 'forward': dates = [date for date in date_refs if date[0] > target_word and self.get_number_of_newlines_inside(0, date[0], text) <= threshold]
elif direction == 'backward': dates = [date for date in date_refs if date[0] < target_word and self.get_number_of_newlines_inside(0, date[0], text) <= threshold]
else: dates = [date for date in date_refs if self.get_number_of_newlines_inside(0, date[0], text) <= threshold]
for date in dates:
n_of_returns = self.get_number_of_newlines_inside(date[0], target_word, text)
# Weight the raw character distance by the number of intervening newlines.
distance = abs(date[0] - target_word) * (n_of_returns + 1)
if distance < min_value:
result = (target_word, date[0], date[1])
min_value = distance
return text[result[1]:result[2]]
def get_number_of_newlines_inside(self, start, end, text):
return_refs = [match.start() for match in re.finditer(re.escape('\n'),
text)]
result = len([return_pointer for return_pointer in return_refs if
return_pointer < max(start, end) and
return_pointer > min(start, end)])
return result
def get_difference_from_normalised_dates(self, date1, date2):
try:
date1 = date1.split('-')
date2 = date2.split('-')
date1 = datex(int(date1[0]), int(date1[1]), int(date1[2]))
date2 = datex(int(date2[0]), int(date2[1]), int(date2[2]))
return abs((date1-date2).days)
except Exception:
return -1
class ClinicalDocument(object):
"""Document representation
This class synthetically represents a document and all the information
required in order to accomplish the temporal expression normalisation phase.
It contains information about: name, path, admission date, discharge date,
date of the surgical operation, date of the transfer.
"""
def __init__(self, file_name=None, file_path=None, admission=None,
operation=None, transfer=None, discharge=None):
self.file_name = file_name
self.file_path = file_path
self.admission_date = admission
self.discharge_date = discharge
self.operation_date = operation
self.transfer_date = transfer
self.course_length = -1
self.discharge_date_not_in_header = False
self.text = ''
def __str__(self):
output = str(self.file_name.split('/')[-1]) + '\t'
output += 'admission: ' + str(self.admission_date) + '\t'
output += 'operation: ' + str(self.operation_date) + '\t'
output += 'transfer: ' + str(self.transfer_date) + '\t'
output += 'discharge: ' + str(self.discharge_date) + '\t'
output += 'course_length: ' + str(self.course_length) + '\t'
output += 'adm:p\t'
if self.discharge_date_not_in_header:
output += 'dis:a'
else:
output += 'dis:p'
return output
def main():
path, filename = os.path.split(os.path.abspath(sys.argv[1]))
analyser = DocumentAnalyser()
print(analyser.analyse(path, filename, False))
if __name__ == '__main__':
main()
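# A minimal usage sketch (hypothetical path and filename; the module's
# timex_clinical dependency must be importable):
#
# analyser = DocumentAnalyser()
# note = analyser.analyse('/data/notes', 'record-001.txt', normalisation=False)
# print(note.admission_date, note.discharge_date, note.course_length)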
| false
| true
|
79027b6b7a1cfb2ee61bdfd3ff61c84d91452fc6
| 2,739
|
py
|
Python
|
src/secondaires/peche/commandes/banc/creer.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/secondaires/peche/commandes/banc/creer.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/secondaires/peche/commandes/banc/creer.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant le paramètre 'créer' de la commande 'banc'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.interpreteur.editeur.presentation import Presentation
class PrmCreer(Parametre):
"""Commande 'banc créer'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "creer", "create")
self.schema = "<cle>"
self.aide_courte = "crée un banc de poisson"
self.aide_longue = \
"Cette commande permet de créer un nouveau banc de " \
"poisson. Vous devez préciser en argument la clé identifiant " \
"le banc."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
cle = dic_masques["cle"].cle
if cle in importeur.peche.bancs:
personnage << "|err|Ce banc existe déjà.|ff|"
return
banc = importeur.peche.creer_banc(cle)
editeur = importeur.interpreteur.construire_editeur(
"schooledit", personnage, banc)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| 44.177419
| 79
| 0.71632
|
from primaires.interpreteur.masque.parametre import Parametre
from primaires.interpreteur.editeur.presentation import Presentation
class PrmCreer(Parametre):
def __init__(self):
Parametre.__init__(self, "creer", "create")
self.schema = "<cle>"
self.aide_courte = "crée un banc de poisson"
self.aide_longue = \
"Cette commande permet de créer un nouveau banc de " \
"poisson. Vous devez préciser en argument la clé identifiant " \
"le banc."
def interpreter(self, personnage, dic_masques):
cle = dic_masques["cle"].cle
if cle in importeur.peche.bancs:
personnage << "|err|Ce banc existe déjà.|ff|"
return
banc = importeur.peche.creer_banc(cle)
editeur = importeur.interpreteur.construire_editeur(
"schooledit", personnage, banc)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| true
| true
|
79027bb9c0b17cff34796310291ecb92d89af1cd
| 3,089
|
py
|
Python
|
chir.py/config.py
|
acidvegas/chir.py
|
8e8a04353f9f2f5a5aff244e2908359c9d58a449
|
[
"0BSD"
] | 19
|
2016-09-07T21:06:11.000Z
|
2022-02-22T22:21:27.000Z
|
chir.py/config.py
|
acidvegas/chir.py
|
8e8a04353f9f2f5a5aff244e2908359c9d58a449
|
[
"0BSD"
] | 1
|
2017-06-05T12:47:29.000Z
|
2020-04-10T02:03:09.000Z
|
chir.py/config.py
|
acidvegas/chir.py
|
8e8a04353f9f2f5a5aff244e2908359c9d58a449
|
[
"0BSD"
] | 9
|
2017-03-24T23:17:46.000Z
|
2022-01-09T14:23:39.000Z
|
#!/usr/bin/env python
# Chir.py Twitter Bot - Developed by acidvegas in Python (https://acid.vegas/chir.py)
# config.py
# API Settings
coinurl_uuid = 'CHANGEME'
twitter_consumer_key = 'CHANGEME'
twitter_consumer_secret = 'CHANGEME'
twitter_access_token = 'CHANGEME'
twitter_access_token_secret = 'CHANGEME'
# Keywords & News Sources (DO NOT EDIT)
boost_keywords = ('500aday','autofollow','autofollowback','f4f','follow','follow4follow','followback','followtrain','teamfollowback','wefollowback')
news_feeds = {
'baseball' : 'https://sports.yahoo.com/mlb/rss.xml',
'basketball' : 'https://sports.yahoo.com/nba/rss.xml',
'boxing' : 'https://sports.yahoo.com/box/rss.xml',
'football' : 'https://sports.yahoo.com/nfl/rss.xml',
'golf' : 'https://sports.yahoo.com/golf/rss.xml',
'hockey' : 'https://sports.yahoo.com/nhl/rss.xml',
'mma' : 'https://sports.yahoo.com/mma/rss.xml',
'nascar' : 'https://sports.yahoo.com/nascar/rss.xml',
'soccer' : 'https://sports.yahoo.com/soccer/rss.xml',
'tennis' : 'https://sports.yahoo.com/tennis/rss.xml'
}
news_keywords = {
'baseball' : ('baseball','mlb','homerun','worldseries','springtraining','angels','astros','athletics','bluejays','braves','brewers','cardinals','cubs','diamondbacks','dodgers','giants','indians','mariners','marlins','mets','nationals','orioles','padres','phillies','pirates','rangers','rays','redsox','reds','rockies','royals','tigers','twins','whitesox','yankees'),
'basketball' : ('basketball','finals','nba','76ers','blazers','bucks','bulls','cavaliers','celtics','clippers','grizzlies','hawks','heat','hornets','jazz','kings','knicks','lakers','magic','mavericks','nets','nuggets','pacers','pistons','raptors','rockets','spurs','suns','thunder','timberwolves','warriors','wizards'),
'boxing' : ('boxing','fightnight'),
'football' : ('football','madden','nfl','superbowl','touchdown','49ers','bears','bengals','bills','broncos','browns','buccaneers','cardinals','chargers','chiefs','colts','cowboys','dolphins','eagles','falcons','giants','jaguars','jets','lions','packers','panthers','patriots','raiders','rams','ravens','redskins','saints','seahawks','steelers','texans','titans','vikings'),
'golf' : ('fedexcup','owgr','pga','pgachampionship','pgatour'),
'hockey' : ('hockey','nhl','worldcup','avalanche','blackhawks','bluejackets','blues','bruins','canadiens','canucks','capitals','coyotes','devils','ducks','flames','flyers','hurricanes','islanders','jets','kings','lightning','mapleleafs','oilers','panthers','penguins','predators','rangers','redwings','sabres','senators','sharks','stars','wild'),
'mma' : ('bellator','martialarts','mixedmartialarts','mma','ufc','wsof'),
'nascar' : ('buschseries','campingworldtruckseries','daytona500','iracing','nascar','sprintcup','sprintseries','winstoncup','winstoncupseries','xfinityseries'),
'soccer' : ('fifa','soccer','worldcup'),
'tennis' : ('atp','atpworldtour','masters1000','tennis','usopen')
}
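# A sketch of how these tables might be consumed (this is an assumption, not
# chir.py's actual loop), using the third-party 'feedparser' package. Kept as
# a comment so importing config stays side-effect free.
#
# import feedparser
#
# def latest_matching_headlines(category):
#     feed = feedparser.parse(news_feeds[category])
#     keywords = news_keywords[category]
#     return [entry.title for entry in feed.entries
#             if any(word in entry.title.lower() for word in keywords)]
#
# print(latest_matching_headlines('hockey'))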
| 77.225
| 378
| 0.655228
|
coinurl_uuid = 'CHANGEME'
twitter_consumer_key = 'CHANGEME'
twitter_consumer_secret = 'CHANGEME'
twitter_access_token = 'CHANGEME'
twitter_access_token_secret = 'CHANGEME'
boost_keywords = ('500aday','autofollow','autofollowback','f4f','follow','follow4follow','followback','followtrain','teamfollowback','wefollowback')
news_feeds = {
'baseball' : 'https://sports.yahoo.com/mlb/rss.xml',
'basketball' : 'https://sports.yahoo.com/nba/rss.xml',
'boxing' : 'https://sports.yahoo.com/box/rss.xml',
'football' : 'https://sports.yahoo.com/nfl/rss.xml',
'golf' : 'https://sports.yahoo.com/golf/rss.xml',
'hockey' : 'https://sports.yahoo.com/nhl/rss.xml',
'mma' : 'https://sports.yahoo.com/mma/rss.xml',
'nascar' : 'https://sports.yahoo.com/nascar/rss.xml',
'soccer' : 'https://sports.yahoo.com/soccer/rss.xml',
'tennis' : 'https://sports.yahoo.com/tennis/rss.xml'
}
news_keywords = {
'baseball' : ('baseball','mlb','homerun','worldseries','springtraining','angels','astros','athletics','bluejays','braves','brewers','cardinals','cubs','diamondbacks','dodgers','giants','indians','mariners','marlins','mets','nationals','orioles','padres','phillies','pirates','rangers','rays','redsox','reds','rockies','royals','tigers','twins','whitesox','yankees'),
'basketball' : ('basketball','finals','nba','76ers','blazers','bucks','bulls','cavaliers','celtics','clippers','grizzlies','hawks','heat','hornets','jazz','kings','knicks','lakers','magic','mavericks','nets','nuggets','pacers','pistons','raptors','rockets','spurs','suns','thunder','timberwolves','warriors','wizards'),
'boxing' : ('boxing','fightnight'),
'football' : ('football','madden','nfl','superbowl','touchdown','49ers','bears','bengals','bills','broncos','browns','buccaneers','cardinals','chargers','chiefs','colts','cowboys','dolphins','eagles','falcons','giants','jaguars','jets','lions','packers','panthers','patriots','raiders','rams','ravens','redskins','saints','seahawks','steelers','texans','titans','vikings'),
'golf' : ('fedexcup','owgr','pga','pgachampionship','pgatour'),
'hockey' : ('hockey','nhl','worldcup','avalanche','blackhawks','bluejackets','blues','bruins','canadiens','canucks','capitals','coyotes','devils','ducks','flames','flyers','hurricanes','islanders','jets','kings','lightning','mapleleafs','oilers','panthers','penguins','predators','rangers','redwings','sabres','senators','sharks','stars','wild'),
'mma' : ('bellator','martialarts','mixedmartialarts','mma','ufc','wsof'),
'nascar' : ('buschseries','campingworldtruckseries','daytona500','iracing','nascar','sprintcup','sprintseries','winstoncup','winstoncupseries','xfinityseries'),
'soccer' : ('fifa','soccer','worldcup'),
'tennis' : ('atp','atpworldtour','masters1000','tennis','usopen')
}
| true
| true
|
79027d5656a0e4c7fcfda4f18cf23cff06e1a50e
| 11,852
|
py
|
Python
|
src/oscar/apps/offer/benefits.py
|
guidoaaroni/arandu
|
e1553b21516f38fd2fb10cf65204541efd3c8b54
|
[
"BSD-3-Clause"
] | 3
|
2020-03-30T13:11:57.000Z
|
2020-04-22T13:55:31.000Z
|
src/oscar/apps/offer/benefits.py
|
guidoaaroni/arandu
|
e1553b21516f38fd2fb10cf65204541efd3c8b54
|
[
"BSD-3-Clause"
] | 9
|
2020-10-29T08:03:28.000Z
|
2021-09-08T01:21:10.000Z
|
src/oscar/apps/offer/benefits.py
|
guidoaaroni/arandu
|
e1553b21516f38fd2fb10cf65204541efd3c8b54
|
[
"BSD-3-Clause"
] | 2
|
2021-01-06T19:25:07.000Z
|
2021-05-14T02:00:19.000Z
|
from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_class, get_classes, get_model
from oscar.templatetags.currency_filters import currency
Benefit = get_model('offer', 'Benefit')
BasketDiscount, SHIPPING_DISCOUNT, ZERO_DISCOUNT = get_classes('offer.results', [
'BasketDiscount', 'SHIPPING_DISCOUNT', 'ZERO_DISCOUNT'])
CoverageCondition, ValueCondition = get_classes('offer.conditions', ['CoverageCondition', 'ValueCondition'])
range_anchor = get_class('offer.utils', 'range_anchor')
__all__ = [
'PercentageDiscountBenefit', 'AbsoluteDiscountBenefit', 'FixedPriceBenefit',
'ShippingBenefit', 'MultibuyDiscountBenefit',
'ShippingAbsoluteDiscountBenefit', 'ShippingFixedPriceBenefit',
'ShippingPercentageDiscountBenefit',
]
def apply_discount(line, discount, quantity, offer=None):
"""
Apply a given discount to the passed basket line
"""
line.discount(discount, quantity, incl_tax=False, offer=offer)
class PercentageDiscountBenefit(Benefit):
"""
An offer benefit that gives a percentage discount
"""
_description = _("%(value)s%% discount on %(range)s")
@property
def name(self):
return self._description % {
'value': self.value,
'range': self.range.name}
@property
def description(self):
return self._description % {
'value': self.value,
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Percentage discount benefit")
verbose_name_plural = _("Percentage discount benefits")
def apply(self, basket, condition, offer, discount_percent=None,
max_total_discount=None):
if discount_percent is None:
discount_percent = self.value
discount_amount_available = max_total_discount
line_tuples = self.get_applicable_lines(offer, basket)
discount_percent = min(discount_percent, D('100.0'))
discount = D('0.00')
affected_items = 0
max_affected_items = self._effective_max_affected_items()
affected_lines = []
for price, line in line_tuples:
if affected_items >= max_affected_items:
break
if discount_amount_available == 0:
break
quantity_affected = min(
line.quantity_without_offer_discount(offer),
max_affected_items - affected_items)
line_discount = self.round(discount_percent / D('100.0') * price
* int(quantity_affected))
if discount_amount_available is not None:
line_discount = min(line_discount, discount_amount_available)
discount_amount_available -= line_discount
apply_discount(line, line_discount, quantity_affected, offer)
affected_lines.append((line, line_discount, quantity_affected))
affected_items += quantity_affected
discount += line_discount
if discount > 0:
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
class AbsoluteDiscountBenefit(Benefit):
"""
An offer benefit that gives an absolute discount
"""
_description = _("%(value)s discount on %(range)s")
@property
def name(self):
return self._description % {
'value': currency(self.value),
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'value': currency(self.value),
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Absolute discount benefit")
verbose_name_plural = _("Absolute discount benefits")
def apply(self, basket, condition, offer, discount_amount=None,
max_total_discount=None):
if discount_amount is None:
discount_amount = self.value
# Fetch basket lines that are in the range and available to be used in
# an offer.
line_tuples = self.get_applicable_lines(offer, basket)
# Determine which lines can have the discount applied to them
max_affected_items = self._effective_max_affected_items()
num_affected_items = 0
affected_items_total = D('0.00')
lines_to_discount = []
for price, line in line_tuples:
if num_affected_items >= max_affected_items:
break
qty = min(
line.quantity_without_offer_discount(offer),
max_affected_items - num_affected_items)
lines_to_discount.append((line, price, qty))
num_affected_items += qty
affected_items_total += qty * price
# Ensure we don't try to apply a discount larger than the total of the
# matching items.
discount = min(discount_amount, affected_items_total)
if max_total_discount is not None:
discount = min(discount, max_total_discount)
if discount == 0:
return ZERO_DISCOUNT
# Apply discount equally amongst them
affected_lines = []
applied_discount = D('0.00')
for i, (line, price, qty) in enumerate(lines_to_discount):
if i == len(lines_to_discount) - 1:
# If last line, then take the delta as the discount to ensure
# the total discount is correct and doesn't mismatch due to
# rounding.
line_discount = discount - applied_discount
else:
# Calculate a weighted discount for the line
line_discount = self.round(
((price * qty) / affected_items_total) * discount)
apply_discount(line, line_discount, qty, offer)
affected_lines.append((line, line_discount, qty))
applied_discount += line_discount
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
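# A standalone sketch of the apportionment rule above, with hypothetical
# numbers and plain Decimal arithmetic in place of basket lines (Benefit.round
# may use a different rounding mode): each line receives a value-weighted
# share, and the last line takes the remainder so the parts always sum to the
# intended discount.
#
# from decimal import Decimal as D, ROUND_HALF_UP
#
# def split_discount(line_totals, discount):
#     grand_total = sum(line_totals)
#     parts, applied = [], D('0.00')
#     for i, line_total in enumerate(line_totals):
#         if i == len(line_totals) - 1:
#             part = discount - applied  # remainder absorbs any rounding drift
#         else:
#             part = (line_total / grand_total * discount).quantize(
#                 D('0.01'), rounding=ROUND_HALF_UP)
#         parts.append(part)
#         applied += part
#     return parts
#
# split_discount([D('10.00'), D('20.00'), D('3.33')], D('5.00'))
# # -> [Decimal('1.50'), Decimal('3.00'), Decimal('0.50')], summing to 5.00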
class FixedPriceBenefit(Benefit):
"""
An offer benefit that gives the items in the condition for a
fixed price. This is useful for "bundle" offers.
Note that we ignore the benefit range here and only give a fixed price
for the products in the condition range. The condition cannot be a value
condition.
We also ignore the max_affected_items setting.
"""
_description = _("The products that meet the condition are sold "
"for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price benefit")
verbose_name_plural = _("Fixed price benefits")
def apply(self, basket, condition, offer): # noqa (too complex (10))
if isinstance(condition, ValueCondition):
return ZERO_DISCOUNT
# Fetch basket lines that are in the range and available to be used in
# an offer.
line_tuples = self.get_applicable_lines(offer, basket,
range=condition.range)
if not line_tuples:
return ZERO_DISCOUNT
# Determine the lines to consume
num_permitted = int(condition.value)
num_affected = 0
value_affected = D('0.00')
covered_lines = []
for price, line in line_tuples:
if isinstance(condition, CoverageCondition):
quantity_affected = 1
else:
quantity_affected = min(
line.quantity_without_offer_discount(offer),
num_permitted - num_affected)
num_affected += quantity_affected
value_affected += quantity_affected * price
covered_lines.append((price, line, quantity_affected))
if num_affected >= num_permitted:
break
discount = max(value_affected - self.value, D('0.00'))
if not discount:
return ZERO_DISCOUNT
# Apply discount to the affected lines
discount_applied = D('0.00')
last_line = covered_lines[-1][1]
for price, line, quantity in covered_lines:
if line == last_line:
# If last line, we just take the difference to ensure that
# rounding doesn't lead to an off-by-one error
line_discount = discount - discount_applied
else:
line_discount = self.round(
discount * (price * quantity) / value_affected)
apply_discount(line, line_discount, quantity, offer)
discount_applied += line_discount
return BasketDiscount(discount)
class MultibuyDiscountBenefit(Benefit):
_description = _("Cheapest product from %(range)s is free")
@property
def name(self):
return self._description % {
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Multibuy discount benefit")
verbose_name_plural = _("Multibuy discount benefits")
def apply(self, basket, condition, offer):
line_tuples = self.get_applicable_lines(offer, basket)
if not line_tuples:
return ZERO_DISCOUNT
# Cheapest line gives free product
discount, line = line_tuples[0]
apply_discount(line, discount, 1, offer)
affected_lines = [(line, discount, 1)]
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
# =================
# Shipping benefits
# =================
class ShippingBenefit(Benefit):
def apply(self, basket, condition, offer):
condition.consume_items(offer, basket, affected_lines=())
return SHIPPING_DISCOUNT
class Meta:
app_label = 'offer'
proxy = True
class ShippingAbsoluteDiscountBenefit(ShippingBenefit):
_description = _("%(amount)s off shipping cost")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping absolute discount benefit")
verbose_name_plural = _("Shipping absolute discount benefits")
def shipping_discount(self, charge):
return min(charge, self.value)
class ShippingFixedPriceBenefit(ShippingBenefit):
_description = _("Get shipping for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price shipping benefit")
verbose_name_plural = _("Fixed price shipping benefits")
def shipping_discount(self, charge):
if charge < self.value:
return D('0.00')
return charge - self.value
class ShippingPercentageDiscountBenefit(ShippingBenefit):
_description = _("%(value)s%% off of shipping cost")
@property
def name(self):
return self._description % {
'value': self.value}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping percentage discount benefit")
verbose_name_plural = _("Shipping percentage discount benefits")
def shipping_discount(self, charge):
discount = charge * self.value / D('100.0')
return discount.quantize(D('0.01'))
| 33.862857
| 108
| 0.626645
|
from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_class, get_classes, get_model
from oscar.templatetags.currency_filters import currency
Benefit = get_model('offer', 'Benefit')
BasketDiscount, SHIPPING_DISCOUNT, ZERO_DISCOUNT = get_classes('offer.results', [
'BasketDiscount', 'SHIPPING_DISCOUNT', 'ZERO_DISCOUNT'])
CoverageCondition, ValueCondition = get_classes('offer.conditions', ['CoverageCondition', 'ValueCondition'])
range_anchor = get_class('offer.utils', 'range_anchor')
__all__ = [
'PercentageDiscountBenefit', 'AbsoluteDiscountBenefit', 'FixedPriceBenefit',
'ShippingBenefit', 'MultibuyDiscountBenefit',
'ShippingAbsoluteDiscountBenefit', 'ShippingFixedPriceBenefit',
'ShippingPercentageDiscountBenefit',
]
def apply_discount(line, discount, quantity, offer=None):
line.discount(discount, quantity, incl_tax=False, offer=offer)
class PercentageDiscountBenefit(Benefit):
_description = _("%(value)s%% discount on %(range)s")
@property
def name(self):
return self._description % {
'value': self.value,
'range': self.range.name}
@property
def description(self):
return self._description % {
'value': self.value,
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Percentage discount benefit")
verbose_name_plural = _("Percentage discount benefits")
def apply(self, basket, condition, offer, discount_percent=None,
max_total_discount=None):
if discount_percent is None:
discount_percent = self.value
discount_amount_available = max_total_discount
line_tuples = self.get_applicable_lines(offer, basket)
discount_percent = min(discount_percent, D('100.0'))
discount = D('0.00')
affected_items = 0
max_affected_items = self._effective_max_affected_items()
affected_lines = []
for price, line in line_tuples:
if affected_items >= max_affected_items:
break
if discount_amount_available == 0:
break
quantity_affected = min(
line.quantity_without_offer_discount(offer),
max_affected_items - affected_items)
line_discount = self.round(discount_percent / D('100.0') * price
* int(quantity_affected))
if discount_amount_available is not None:
line_discount = min(line_discount, discount_amount_available)
discount_amount_available -= line_discount
apply_discount(line, line_discount, quantity_affected, offer)
affected_lines.append((line, line_discount, quantity_affected))
affected_items += quantity_affected
discount += line_discount
if discount > 0:
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
class AbsoluteDiscountBenefit(Benefit):
_description = _("%(value)s discount on %(range)s")
@property
def name(self):
return self._description % {
'value': currency(self.value),
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'value': currency(self.value),
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Absolute discount benefit")
verbose_name_plural = _("Absolute discount benefits")
def apply(self, basket, condition, offer, discount_amount=None,
max_total_discount=None):
if discount_amount is None:
discount_amount = self.value
line_tuples = self.get_applicable_lines(offer, basket)
max_affected_items = self._effective_max_affected_items()
num_affected_items = 0
affected_items_total = D('0.00')
lines_to_discount = []
for price, line in line_tuples:
if num_affected_items >= max_affected_items:
break
qty = min(
line.quantity_without_offer_discount(offer),
max_affected_items - num_affected_items)
lines_to_discount.append((line, price, qty))
num_affected_items += qty
affected_items_total += qty * price
# Ensure we don't try to apply a discount larger than the total of the
# matching items.
discount = min(discount_amount, affected_items_total)
if max_total_discount is not None:
discount = min(discount, max_total_discount)
if discount == 0:
return ZERO_DISCOUNT
# Apply discount equally amongst them
affected_lines = []
applied_discount = D('0.00')
for i, (line, price, qty) in enumerate(lines_to_discount):
if i == len(lines_to_discount) - 1:
# If last line, then take the delta as the discount to ensure
# the total discount is correct and doesn't mismatch due to rounding.
line_discount = discount - applied_discount
else:
line_discount = self.round(
((price * qty) / affected_items_total) * discount)
apply_discount(line, line_discount, qty, offer)
affected_lines.append((line, line_discount, qty))
applied_discount += line_discount
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
class FixedPriceBenefit(Benefit):
_description = _("The products that meet the condition are sold "
"for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price benefit")
verbose_name_plural = _("Fixed price benefits")
def apply(self, basket, condition, offer):
if isinstance(condition, ValueCondition):
return ZERO_DISCOUNT
line_tuples = self.get_applicable_lines(offer, basket,
range=condition.range)
if not line_tuples:
return ZERO_DISCOUNT
num_permitted = int(condition.value)
num_affected = 0
value_affected = D('0.00')
covered_lines = []
for price, line in line_tuples:
if isinstance(condition, CoverageCondition):
quantity_affected = 1
else:
quantity_affected = min(
line.quantity_without_offer_discount(offer),
num_permitted - num_affected)
num_affected += quantity_affected
value_affected += quantity_affected * price
covered_lines.append((price, line, quantity_affected))
if num_affected >= num_permitted:
break
discount = max(value_affected - self.value, D('0.00'))
if not discount:
return ZERO_DISCOUNT
discount_applied = D('0.00')
last_line = covered_lines[-1][1]
for price, line, quantity in covered_lines:
if line == last_line:
line_discount = discount - discount_applied
else:
line_discount = self.round(
discount * (price * quantity) / value_affected)
apply_discount(line, line_discount, quantity, offer)
discount_applied += line_discount
return BasketDiscount(discount)
class MultibuyDiscountBenefit(Benefit):
_description = _("Cheapest product from %(range)s is free")
@property
def name(self):
return self._description % {
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'range': range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Multibuy discount benefit")
verbose_name_plural = _("Multibuy discount benefits")
def apply(self, basket, condition, offer):
line_tuples = self.get_applicable_lines(offer, basket)
if not line_tuples:
return ZERO_DISCOUNT
# Cheapest line gives free product
discount, line = line_tuples[0]
apply_discount(line, discount, 1, offer)
affected_lines = [(line, discount, 1)]
condition.consume_items(offer, basket, affected_lines)
return BasketDiscount(discount)
# =================
# Shipping benefits
# =================
class ShippingBenefit(Benefit):
def apply(self, basket, condition, offer):
condition.consume_items(offer, basket, affected_lines=())
return SHIPPING_DISCOUNT
class Meta:
app_label = 'offer'
proxy = True
class ShippingAbsoluteDiscountBenefit(ShippingBenefit):
_description = _("%(amount)s off shipping cost")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping absolute discount benefit")
verbose_name_plural = _("Shipping absolute discount benefits")
def shipping_discount(self, charge):
return min(charge, self.value)
class ShippingFixedPriceBenefit(ShippingBenefit):
_description = _("Get shipping for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price shipping benefit")
verbose_name_plural = _("Fixed price shipping benefits")
def shipping_discount(self, charge):
if charge < self.value:
return D('0.00')
return charge - self.value
class ShippingPercentageDiscountBenefit(ShippingBenefit):
_description = _("%(value)s%% off of shipping cost")
@property
def name(self):
return self._description % {
'value': self.value}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping percentage discount benefit")
verbose_name_plural = _("Shipping percentage discount benefits")
def shipping_discount(self, charge):
discount = charge * self.value / D('100.0')
return discount.quantize(D('0.01'))
| true
| true
|
7902803ea0a919e46993314e8f9f69c6c1b53b90
| 8,941
|
py
|
Python
|
TASSELpy/java/lang/Long.py
|
er432/TASSELpy
|
2273d2252786679e023d1279f0c717a29ddd6d35
|
[
"BSD-3-Clause"
] | 1
|
2015-11-30T21:54:19.000Z
|
2015-11-30T21:54:19.000Z
|
TASSELpy/java/lang/Long.py
|
er432/TASSELpy
|
2273d2252786679e023d1279f0c717a29ddd6d35
|
[
"BSD-3-Clause"
] | null | null | null |
TASSELpy/java/lang/Long.py
|
er432/TASSELpy
|
2273d2252786679e023d1279f0c717a29ddd6d35
|
[
"BSD-3-Clause"
] | null | null | null |
from TASSELpy.java.lang.Number import Number, metaNumber
from TASSELpy.java.lang.Comparable import Comparable
from TASSELpy.utils.DocInherit import DocInherit
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.javaObj import javaObj
from TASSELpy.utils.helper import make_sig
from abc import ABCMeta
import numpy as np
java_imports = {'Long':'java/lang/Long',
'String':'java/lang/String'}
class metaLong:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if C == np.int64:
return True
elif C == np.uint64:
return True
elif issubclass(C,Long):
return True
elif issubclass(C,long):
return True
else:
return False
## Wrapper class for java.lang.Long
class Long(Comparable, Number):
"""
Wrapper class for java.lang.Long
"""
_java_name = java_imports['Long']
@javaConstructorOverload(java_imports['Long'],
(make_sig(['long'],'void'),(metaLong,)),
(make_sig([java_imports['String']],'void'),(str,)))
def __init__(self, *args, **kwargs):
"""
Instantiates a new Long
Signatures:
Long(long value)
Long(String s)
Arguments:
Long(long value)
value -- The long to wrap in the object
Long (String s)
s -- The string representing the long
"""
super(Long, self).__init__(*args, generic=(Long,), **kwargs)
@DocInherit
@javaOverload("compareTo",
(make_sig([java_imports['Long']],'int'),(metaLong,),None))
def compareTo(self, *args):
pass
###################################
## Numeric magic methods
###################################
def __pos__(self):
return Long(+self.toPrimative())
def __neg__(self):
return Long(-self.toPrimative())
def __abs__(self):
return Long(abs(self.toPrimative()))
def __invert__(self):
return Long(~self.toPrimative())
def __floor__(self):
return Long(np.floor(self.toPrimative()))
def __ceil__(self):
return Long(np.ceil(self.toPrimative()))
###################################
## Arithmetic magic methods
###################################
def __add__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() + other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() + other))
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() - other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() - other))
def __rsub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative()-self.toPrimative()))
else:
return Long(np.int64(other-self.toPrimative()))
def __isub__(self, other):
return self.__sub__(other)
def __mul__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() * other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() * other))
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
return self.__mul__(other)
def __floordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() // other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() // other))
def __rfloordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() // self.toPrimative()))
else:
return Long(np.int64(other // self.toPrimative()))
def __ifloordiv__(self, other):
return self.__floordiv__(other)
def __div__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() / other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() / other))
def __rdiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() / self.toPrimative()))
else:
return Long(np.int64(other / self.toPrimative()))
def __idiv__(self, other):
return self.__div__(other)
def __mod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() % other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() % other))
def __rmod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() % self.toPrimative()))
else:
return Long(np.int64(other % self.toPrimative()))
def __imod__(self, other):
return self.__mod__(other)
def __pow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ** other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ** other))
def __rpow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() ** self.toPrimative()))
else:
return Long(np.int64(other ** self.toPrimative()))
def __ipow__(self, other):
return self.__pow__(other)
def __lshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() << other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() << other))
def __rlshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() << self.toPrimative()))
else:
return Long(np.int64(other << self.toPrimative()))
def __ilshift__(self, other):
return self.__lshift__(other)
def __rshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() >> other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() >> other))
def __rrshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() >> self.toPrimative()))
else:
return Long(np.int64(other >> self.toPrimative()))
def __irshift__(self, other):
return self.__rshift__(other)
def __and__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() & other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() & other))
def __rand__(self, other):
return self.__and__(other)
def __iand__(self, other):
return self.__and__(other)
def __or__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() | other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() | other))
def __ror__(self, other):
return self.__or__(other)
def __ior__(self, other):
return self.__or__(other)
def __xor__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ^ other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ^ other))
def __rxor__(self, other):
return self.__xor__(other)
def __ixor__(self, other):
return self.__xor__(other)
def __repr__(self):
return "Long(%d)" % self.longValue()
@DocInherit
def toPrimative(self):
return self.longValue()
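# All the arithmetic above funnels results through np.int64, so values wrap on
# overflow exactly like a Java long. A plain-NumPy illustration (no JVM
# required):
#
# >>> import numpy as np
# >>> int(np.int64(2**62) * np.int64(4))  # 2**64 wraps to 0 in 64-bit space
# 0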
| 38.705628
| 80
| 0.581926
|
from TASSELpy.java.lang.Number import Number, metaNumber
from TASSELpy.java.lang.Comparable import Comparable
from TASSELpy.utils.DocInherit import DocInherit
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.javaObj import javaObj
from TASSELpy.utils.helper import make_sig
from abc import ABCMeta
import numpy as np
java_imports = {'Long':'java/lang/Long',
'String':'java/lang/String'}
class metaLong:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if C == np.int64:
return True
elif C == np.uint64:
return True
elif issubclass(C,Long):
return True
elif issubclass(C,long):
return True
else:
return False
class Long(Comparable, Number):
_java_name = java_imports['Long']
@javaConstructorOverload(java_imports['Long'],
(make_sig(['long'],'void'),(metaLong,)),
(make_sig([java_imports['String']],'void'),(str,)))
def __init__(self, *args, **kwargs):
super(Long, self).__init__(*args, generic=(Long,), **kwargs)
@DocInherit
@javaOverload("compareTo",
(make_sig([java_imports['Long']],'int'),(metaLong,),None))
def compareTo(self, *args):
pass
def __pos__(self):
return Long(+self.toPrimative())
def __neg__(self):
return Long(-self.toPrimative())
def __abs__(self):
return Long(abs(self.toPrimative()))
def __invert__(self):
return Long(~self.toPrimative())
def __floor__(self):
return Long(np.floor(self.toPrimative()))
def __ceil__(self):
return Long(np.ceil(self.toPrimative()))
def __add__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() + other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() + other))
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() - other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() - other))
def __rsub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative()-self.toPrimative()))
else:
return Long(np.int64(other-self.toPrimative()))
def __isub__(self, other):
return self.__sub__(other)
def __mul__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() * other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() * other))
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
return self.__mul__(other)
def __floordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() // other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() // other))
def __rfloordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() // self.toPrimative()))
else:
return Long(np.int64(other // self.toPrimative()))
def __ifloordiv__(self, other):
return self.__floordiv__(other)
def __div__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() / other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() / other))
def __rdiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() / self.toPrimative()))
else:
return Long(np.int64(other / self.toPrimative()))
def __idiv__(self, other):
return self.__div__(other)
def __mod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() % other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() % other))
def __rmod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() % self.toPrimative()))
else:
return Long(np.int64(other % self.toPrimative()))
def __imod__(self, other):
return self.__mod__(other)
def __pow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ** other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ** other))
def __rpow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() ** self.toPrimative()))
else:
return Long(np.int64(other ** self.toPrimative()))
def __ipow__(self, other):
return self.__pow__(other)
def __lshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() << other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() << other))
def __rlshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() << self.toPrimative()))
else:
return Long(np.int64(other << self.toPrimative()))
def __ilshift__(self, other):
return self.__lshift__(other)
def __rshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() >> other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() >> other))
def __rrshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() >> self.toPrimative()))
else:
return Long(np.int64(other >> self.toPrimative()))
def __irshift__(self, other):
return self.__rshift__(other)
def __and__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() & other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() & other))
def __rand__(self, other):
return self.__and__(other)
def __iand__(self, other):
return self.__and__(other)
def __or__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() | other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() | other))
def __ror__(self, other):
return self.__or__(other)
def __ior__(self, other):
return self.__or__(other)
def __xor__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ^ other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ^ other))
def __rxor__(self, other):
return self.__xor__(other)
def __ixor__(self, other):
return self.__xor__(other)
def __repr__(self):
return "Long(%d)" % self.longValue()
@DocInherit
def toPrimative(self):
return self.longValue()
| true
| true
|
7902819fa6455e54dcf6b8bffe127fb94f73c00a
| 4,462
|
py
|
Python
|
slaid/renderers.py
|
mdrio/slaid
|
67c85f0d1702bced1c089bfb3c20ba1cfbc9c225
|
[
"MIT"
] | null | null | null |
slaid/renderers.py
|
mdrio/slaid
|
67c85f0d1702bced1c089bfb3c20ba1cfbc9c225
|
[
"MIT"
] | null | null | null |
slaid/renderers.py
|
mdrio/slaid
|
67c85f0d1702bced1c089bfb3c20ba1cfbc9c225
|
[
"MIT"
] | null | null | null |
import abc
import json
import logging
import os
from typing import Any, Iterator, Tuple, Union
import numpy as np
import tifffile
import tiledb
import zarr
from slaid.commons import Mask, BasicSlide
from slaid.commons.base import Polygon
from slaid.commons.ecvl import BasicSlide as EcvlSlide
logger = logging.getLogger(__file__)
class Renderer(abc.ABC):
@abc.abstractmethod
def render(
self,
array: np.ndarray,
filename: str,
):
pass
class TiffRenderer(Renderer):
def __init__(self,
tile_size: Tuple[int, int] = (256, 256),
rgb: bool = True,
bigtiff=True):
self.tile_size = tile_size
self.channels = 4 if rgb else 2
self.rgb = rgb
self.bigtiff = bigtiff
# Yields fixed-size tiles (red channel + alpha), zero-padded at the borders.
def _tiles(self, data: np.ndarray) -> Iterator[np.ndarray]:
for y in range(0, data.shape[0], self.tile_size[0]):
for x in range(0, data.shape[1], self.tile_size[1]):
tile = data[y:y + self.tile_size[0], x:x + self.tile_size[1]]
if tile.shape[:2] != self.tile_size:
pad = (
(0, self.tile_size[0] - tile.shape[0]),
(0, self.tile_size[1] - tile.shape[1]),
)
tile = np.pad(tile, pad, 'constant')
final_tile = np.zeros(
(tile.shape[0], tile.shape[1], self.channels),
dtype='uint8')
final_tile[:, :, 0] = tile * 255
final_tile[final_tile[:, :, 0] > 255 / 10,
self.channels - 1] = 255
yield final_tile
def render(self, array: np.ndarray, filename: str):
with tifffile.TiffWriter(filename, bigtiff=self.bigtiff) as tif:
tif.save(self._tiles(array),
dtype='uint8',
shape=(array.shape[0], array.shape[1], self.channels),
tile=self.tile_size,
photometric='rgb' if self.rgb else 'minisblack',
extrasamples=('ASSOCALPHA', ))
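# A minimal usage sketch (hypothetical mask and output path): the renderer
# expects a 2-D array of values in [0, 1], scales them into the red channel,
# and leaves alpha opaque only where a value clears the 10% cutoff above.
#
# import numpy as np
# mask = np.random.rand(1024, 1024).astype('float32')
# TiffRenderer(tile_size=(256, 256), rgb=True).render(mask, 'mask.tif')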
class BaseJSONEncoder(abc.ABC):
@abc.abstractproperty
def target(self):
pass
def encode(self, obj: Any):
pass
class NumpyArrayJSONEncoder(BaseJSONEncoder):
@property
def target(self):
return np.ndarray
def encode(self, array: np.ndarray):
return array.tolist()
class PolygonJSONEncoder(BaseJSONEncoder):
@property
def target(self):
return Polygon
def encode(self, obj: Polygon):
return obj.coords
class Int64JSONEncoder(BaseJSONEncoder):
@property
def target(self):
return np.int64
def encode(self, int_: np.int64):
return int(int_)
# from https://github.com/hmallen/numpyencoder
def convert_numpy_types(obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.complex_, np.complex64, np.complex128)):
return {'real': obj.real, 'imag': obj.imag}
elif isinstance(obj, (np.ndarray, )):
return obj.tolist()
elif isinstance(obj, (np.bool_)):
return bool(obj)
elif isinstance(obj, (np.void)):
return None
return obj
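# Typical use (a sketch; json and numpy only): hand this function to
# json.dumps as the fallback for objects the stock encoder rejects.
#
# import json
# import numpy as np
# json.dumps({'n': np.int32(3), 'xs': np.arange(2)}, default=convert_numpy_types)
# # -> '{"n": 3, "xs": [0, 1]}'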
class JSONEncoder(json.JSONEncoder):
encoders = [NumpyArrayJSONEncoder(), PolygonJSONEncoder()]
def default(self, obj):
encoded = None
for encoder in self.encoders:
if isinstance(obj, encoder.target):
encoded = encoder.encode(obj)
break
if encoded is None:
encoded = super().default(obj)
return encoded
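# The class-based route does the same job via cls (a sketch): arrays and
# polygons serialise through the registered BaseJSONEncoder instances.
#
# import json
# import numpy as np
# json.dumps({'xs': np.arange(3)}, cls=JSONEncoder)  # -> '{"xs": [0, 1, 2]}'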
class VectorialRenderer(Renderer):
def render(self,
slide: BasicSlide,
filename: str,
one_file_per_patch: bool = False):
if one_file_per_patch:
raise NotImplementedError()
with open(filename, 'w') as json_file:
json.dump(slide.patches, json_file, cls=JSONEncoder)
def to_json(obj: Any, filename: str = None) -> Union[str, None]:
if filename is not None:
with open(filename, 'w') as f:
json.dump(obj, f, cls=JSONEncoder)
else:
return json.dumps(obj, cls=JSONEncoder)
| 28.062893
| 79
| 0.575751
|
import abc
import json
import logging
import os
from typing import Any, Iterator, Tuple, Union
import numpy as np
import tifffile
import tiledb
import zarr
from slaid.commons import Mask, BasicSlide
from slaid.commons.base import Polygon
from slaid.commons.ecvl import BasicSlide as EcvlSlide
logger = logging.getLogger(__file__)
class Renderer(abc.ABC):
@abc.abstractmethod
def render(
self,
array: np.ndarray,
filename: str,
):
pass
class TiffRenderer(Renderer):
def __init__(self,
tile_size: Tuple[int, int] = (256, 256),
rgb: bool = True,
bigtiff=True):
self.tile_size = tile_size
self.channels = 4 if rgb else 2
self.rgb = rgb
self.bigtiff = bigtiff
def _tiles(self, data: np.ndarray) -> Iterator[np.ndarray]:
for y in range(0, data.shape[0], self.tile_size[0]):
for x in range(0, data.shape[1], self.tile_size[1]):
tile = data[y:y + self.tile_size[0], x:x + self.tile_size[1]]
if tile.shape[:2] != self.tile_size:
pad = (
(0, self.tile_size[0] - tile.shape[0]),
(0, self.tile_size[1] - tile.shape[1]),
)
tile = np.pad(tile, pad, 'constant')
final_tile = np.zeros(
(tile.shape[0], tile.shape[1], self.channels),
dtype='uint8')
final_tile[:, :, 0] = tile * 255
final_tile[final_tile[:, :, 0] > 255 / 10,
self.channels - 1] = 255
yield final_tile
def render(self, array: np.ndarray, filename: str):
with tifffile.TiffWriter(filename, bigtiff=self.bigtiff) as tif:
tif.save(self._tiles(array),
dtype='uint8',
shape=(array.shape[0], array.shape[1], self.channels),
tile=self.tile_size,
photometric='rgb' if self.rgb else 'minisblack',
extrasamples=('ASSOCALPHA', ))
class BaseJSONEncoder(abc.ABC):
@abc.abstractproperty
def target(self):
pass
def encode(self, obj: Any):
pass
class NumpyArrayJSONEncoder(BaseJSONEncoder):
@property
def target(self):
return np.ndarray
def encode(self, array: np.ndarray):
return array.tolist()
class PolygonJSONEncoder(BaseJSONEncoder):
@property
def target(self):
return Polygon
def encode(self, obj: Polygon):
return obj.coords
class Int64JSONEncoder(BaseJSONEncoder):
@property
def target(self):
return np.int64
def encode(self, int_: np.int64):
return int(int_)
def convert_numpy_types(obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.complex_, np.complex64, np.complex128)):
return {'real': obj.real, 'imag': obj.imag}
elif isinstance(obj, (np.ndarray, )):
return obj.tolist()
elif isinstance(obj, (np.bool_)):
return bool(obj)
elif isinstance(obj, (np.void)):
return None
return obj
class JSONEncoder(json.JSONEncoder):
encoders = [NumpyArrayJSONEncoder(), PolygonJSONEncoder()]
def default(self, obj):
encoded = None
for encoder in self.encoders:
if isinstance(obj, encoder.target):
encoded = encoder.encode(obj)
break
if encoded is None:
encoded = super().default(obj)
return encoded
class VectorialRenderer(Renderer):
def render(self,
slide: BasicSlide,
filename: str,
one_file_per_patch: bool = False):
if one_file_per_patch:
raise NotImplementedError()
with open(filename, 'w') as json_file:
json.dump(slide.patches, json_file, cls=JSONEncoder)
def to_json(obj: Any, filename: str = None) -> Union[str, None]:
if filename is not None:
with open(filename, 'w') as f:
json.dump(obj, f, cls=JSONEncoder)
else:
return json.dumps(obj, cls=JSONEncoder)
| true
| true
|
79028287689454901d5359c564211cf1812ce3df
| 1,260
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/__init___parts/FittingAngleUsage.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class FittingAngleUsage(Enum, IComparable, IFormattable, IConvertible):
"""
An enumerated type representing the options for how to limit the angle values applicable to fitting content.
enum FittingAngleUsage,values: UseAnAngleIncrement (1),UseAnyAngle (0),UseSpecificAngles (2)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
UseAnAngleIncrement = None
UseAnyAngle = None
UseSpecificAngles = None
value__ = None
| 26.808511
| 221
| 0.600794
|
class FittingAngleUsage(Enum, IComparable, IFormattable, IConvertible):
def __eq__(self, *args):
pass
def __format__(self, *args):
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
UseAnAngleIncrement = None
UseAnyAngle = None
UseSpecificAngles = None
value__ = None
| true
| true
|
7902838fffac61c0b81475eecdd93f380737dc38
| 154
|
py
|
Python
|
server/src/config.py
|
sz-piotr/fioletowe-pomarancze
|
14e748041b8022709999a39f1a70788981f5ef14
|
[
"MIT"
] | null | null | null |
server/src/config.py
|
sz-piotr/fioletowe-pomarancze
|
14e748041b8022709999a39f1a70788981f5ef14
|
[
"MIT"
] | null | null | null |
server/src/config.py
|
sz-piotr/fioletowe-pomarancze
|
14e748041b8022709999a39f1a70788981f5ef14
|
[
"MIT"
] | null | null | null |
class DefaultConfig(object):
DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
API_PREFIX = '/api'
| 25.666667
| 42
| 0.74026
|
class DefaultConfig(object):
DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
API_PREFIX = '/api'
| true
| true
|
790284c626d7c1ecc60f1be7c3427b8445e03415
| 3,389
|
py
|
Python
|
profiles_project/settings.py
|
Mukul-agrawal/profiles-rest-api
|
4fea09e742b055129af319ef5c7a5969641e93a9
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
Mukul-agrawal/profiles-rest-api
|
4fea09e742b055129af319ef5c7a5969641e93a9
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
Mukul-agrawal/profiles-rest-api
|
4fea09e742b055129af319ef5c7a5969641e93a9
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-t#@y8e6d21m2+#l#m00+pi&d0eyqa2a6v09hle&!6di(d4th*0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
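# AUTH_USER_MODEL swaps Django's auth user for profiles_api.UserProfile, which
# therefore has to exist before the first migration runs. A minimal sketch of
# what such a model typically needs (the real class lives in
# profiles_api/models.py and may differ):
#
# from django.contrib.auth.models import (
#     AbstractBaseUser, BaseUserManager, PermissionsMixin)
#
# class UserProfile(AbstractBaseUser, PermissionsMixin):
#     email = models.EmailField(max_length=255, unique=True)
#     name = models.CharField(max_length=255)
#     is_active = models.BooleanField(default=True)
#     is_staff = models.BooleanField(default=False)
#     objects = UserProfileManager()  # a BaseUserManager subclass
#     USERNAME_FIELD = 'email'
#     REQUIRED_FIELDS = ['name']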
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.870229
| 91
| 0.705223
|
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-t#@y8e6d21m2+#l#m00+pi&d0eyqa2a6v09hle&!6di(d4th*0'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true
| true
|
7902857cb0ae8a3366f0e139dd751dbc41260739
| 2,687
|
py
|
Python
|
server/djangoapp/models.py
|
manojarum/agfzb-CloudAppDevelopment_Capstone
|
52b1a52d897821b22514962cc41f6e859aef18b5
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/models.py
|
manojarum/agfzb-CloudAppDevelopment_Capstone
|
52b1a52d897821b22514962cc41f6e859aef18b5
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/models.py
|
manojarum/agfzb-CloudAppDevelopment_Capstone
|
52b1a52d897821b22514962cc41f6e859aef18b5
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.utils.timezone import now
# Create your models here.
# <HINT> Create a Car Make model `class CarMake(models.Model)`:
# - Name
# - Description
# - Any other fields you would like to include in car make model
# - __str__ method to print a car make object
class CarMake(models.Model):
name = models.CharField(null=False, max_length=30, default='')
description = models.CharField(max_length=1000)
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# <HINT> Create a Car Model model `class CarModel(models.Model):`:
# - Many-To-One relationship to Car Make model (One Car Make has many Car Models, using ForeignKey field)
# - Name
# - Dealer id, used to refer a dealer created in cloudant database
# - Type (CharField with a choices argument to provide limited choices such as Sedan, SUV, WAGON, etc.)
# - Year (DateField)
# - Any other fields you would like to include in car model
# - __str__ method to print a car make object
class CarModel(models.Model):
SEDAN = 'sedan'
    SUV = 'suv'
WAGON = 'wagon'
TYPE_CHOICES = [
(SEDAN, 'Sedan'),
(SUV, 'Suv'),
(WAGON, 'Wagon')
]
model = models.ForeignKey(CarMake, on_delete=models.CASCADE)
dealerId = models.IntegerField(default=0)
type = models.CharField(
null=False,
max_length=20,
choices=TYPE_CHOICES,
default=SEDAN
)
title = models.CharField(max_length=200, default="title")
date = models.DateField(null=True)
def __str__(self):
return "title: " + self.title
# <HINT> Create a plain Python class `CarDealer` to hold dealer data
class CarDealer:
def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):
# Dealer address
self.address = address
# Dealer city
self.city = city
# Dealer Full Name
self.full_name = full_name
# Dealer id
self.id = id
# Location lat
self.lat = lat
# Location long
self.long = long
# Dealer short name
self.short_name = short_name
# Dealer state
self.st = st
# Dealer zip
self.zip = zip
def __str__(self):
return "Dealer name: " + self.full_name
# <HINT> Create a plain Python class `DealerReview` to hold review data
class DealerReview:
def __init__(self, name, dealership, review, purchase, sentiment):
self.name = name
self.dealership = dealership
self.review = review
        self.purchase = purchase
        self.sentiment = sentiment
def __str__(self):
return "Review: " + self.review
| 31.611765
| 105
| 0.639375
|
from django.db import models
from django.utils.timezone import now
class CarMake(models.Model):
name = models.CharField(null=False, max_length=30, default='')
description = models.CharField(max_length=1000)
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
class CarModel(models.Model):
SEDAN = 'sedan'
    SUV = 'suv'
WAGON = 'wagon'
TYPE_CHOICES = [
(SEDAN, 'Sedan'),
(SUV, 'Suv'),
(WAGON, 'Wagon')
]
model = models.ForeignKey(CarMake, on_delete=models.CASCADE)
dealerId = models.IntegerField(default=0)
type = models.CharField(
null=False,
max_length=20,
choices=TYPE_CHOICES,
default=SEDAN
)
title = models.CharField(max_length=200, default="title")
date = models.DateField(null=True)
def __str__(self):
return "title: " + self.title
class CarDealer:
def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):
self.address = address
self.city = city
self.full_name = full_name
self.id = id
self.lat = lat
self.long = long
self.short_name = short_name
self.st = st
self.zip = zip
def __str__(self):
return "Dealer name: " + self.full_name
class DealerReview:
def __init__(self, name, dealership, review, purchase, sentiment):
self.name = name
self.dealership = dealership
self.review = review
        self.purchase = purchase
        self.sentiment = sentiment
def __str__(self):
return "Review: " + self.review
| true
| true
|
79028670a51d84b6fe3c552fa4eb1f636541edae
| 2,279
|
py
|
Python
|
lingvo/core/ops/random_ops_test.py
|
muntasir2000/lingvo
|
1555299b817288b5a6637ded416dbbdc9b00036d
|
[
"Apache-2.0"
] | 1
|
2019-07-11T10:14:30.000Z
|
2019-07-11T10:14:30.000Z
|
lingvo/core/ops/random_ops_test.py
|
CelineQiQi/lingvo
|
4c6405a3c8b29764918dbfb599212dd7620ccf9c
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/ops/random_ops_test.py
|
CelineQiQi/lingvo
|
4c6405a3c8b29764918dbfb599212dd7620ccf9c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import test_utils
from lingvo.core.ops import py_x_ops
from six.moves import range
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class RandomOpsTest(test_utils.TestCase):
def testRandomPermutationSequenceRepeat(self):
with self.session() as sess:
out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=True)
remaining = list(range(20))
for _ in range(10):
# Each epoch takes exactly 3 steps.
vals = sess.run(out).tolist() + sess.run(out).tolist() + sess.run(
out).tolist()
self.assertEqual(len(vals), 21)
# Contains all the remaining values from previous epoch.
for x in remaining:
vals.remove(x) # Raises exception if x is not in vals.
# Remaining items have no duplicates.
self.assertEqual(len(vals), len(set(vals)))
remaining = list(set(range(20)) - set(vals))
def testRandomPermutationSequenceNoRepeat(self):
with self.session() as sess:
out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=False)
# Each epoch takes exactly 3 steps.
vals = sess.run(out).tolist() + sess.run(out).tolist() + sess.run(
out).tolist()
self.assertEqual(list(range(20)), sorted(vals))
# repeat=False. We should see OutOfRange error.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(out)
if __name__ == '__main__':
tf.test.main()
| 34.530303
| 80
| 0.678806
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import test_utils
from lingvo.core.ops import py_x_ops
from six.moves import range
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class RandomOpsTest(test_utils.TestCase):
def testRandomPermutationSequenceRepeat(self):
with self.session() as sess:
out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=True)
remaining = list(range(20))
for _ in range(10):
vals = sess.run(out).tolist() + sess.run(out).tolist() + sess.run(
out).tolist()
self.assertEqual(len(vals), 21)
for x in remaining:
vals.remove(x)
self.assertEqual(len(vals), len(set(vals)))
remaining = list(set(range(20)) - set(vals))
def testRandomPermutationSequenceNoRepeat(self):
with self.session() as sess:
out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=False)
vals = sess.run(out).tolist() + sess.run(out).tolist() + sess.run(
out).tolist()
self.assertEqual(list(range(20)), sorted(vals))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(out)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
790288c520c0b2ee07dec24dc8623c84a067fba4
| 714
|
py
|
Python
|
perf_tests/cp_perf_test.py
|
ManuelaS/lifelines
|
e48983550254625ab7e8a3747dd02b646a1bf7ad
|
[
"MIT"
] | null | null | null |
perf_tests/cp_perf_test.py
|
ManuelaS/lifelines
|
e48983550254625ab7e8a3747dd02b646a1bf7ad
|
[
"MIT"
] | null | null | null |
perf_tests/cp_perf_test.py
|
ManuelaS/lifelines
|
e48983550254625ab7e8a3747dd02b646a1bf7ad
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# cox regression
if __name__ == "__main__":
import pandas as pd
import time
import numpy as np
from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi, load_regression_dataset
reps = 1
df = load_rossi()
df = pd.concat([df] * reps)
cp_breslow = CoxPHFitter(penalizer=0.01, l1_ratio=0.0, baseline_estimation_method="breslow")
start_time = time.time()
cp_breslow.fit(df, duration_col="week", event_col="arrest", show_progress=True)
print("--- %s seconds ---" % (time.time() - start_time))
cp_breslow.print_summary(2)
print(cp_breslow.score(df))
print(cp_breslow.score(df, scoring_method="concordance_index"))
| 31.043478
| 96
| 0.689076
|
if __name__ == "__main__":
import pandas as pd
import time
import numpy as np
from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi, load_regression_dataset
reps = 1
df = load_rossi()
df = pd.concat([df] * reps)
cp_breslow = CoxPHFitter(penalizer=0.01, l1_ratio=0.0, baseline_estimation_method="breslow")
start_time = time.time()
cp_breslow.fit(df, duration_col="week", event_col="arrest", show_progress=True)
print("--- %s seconds ---" % (time.time() - start_time))
cp_breslow.print_summary(2)
print(cp_breslow.score(df))
print(cp_breslow.score(df, scoring_method="concordance_index"))
| true
| true
|
790289a305ff3ef427083434bb77f37bb248c750
| 401
|
py
|
Python
|
avocadobites/avocadobites/asgi.py
|
sanjuop/PatrioticPictures
|
e14aaa4106cc310d4cd69968ca41b033a4f4884f
|
[
"MIT"
] | null | null | null |
avocadobites/avocadobites/asgi.py
|
sanjuop/PatrioticPictures
|
e14aaa4106cc310d4cd69968ca41b033a4f4884f
|
[
"MIT"
] | null | null | null |
avocadobites/avocadobites/asgi.py
|
sanjuop/PatrioticPictures
|
e14aaa4106cc310d4cd69968ca41b033a4f4884f
|
[
"MIT"
] | null | null | null |
"""
ASGI config for avocadobites project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avocadobites.settings')
application = get_asgi_application()
| 23.588235
| 78
| 0.790524
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avocadobites.settings')
application = get_asgi_application()
| true
| true
|
79028a174225260b671df8c8ac4560369e16c2c8
| 710
|
py
|
Python
|
tests/test_issues/test_member_example.py
|
hsolbrig/pyjsg
|
5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429
|
[
"CC0-1.0"
] | 3
|
2017-07-23T11:11:23.000Z
|
2020-11-30T15:36:51.000Z
|
tests/test_issues/test_member_example.py
|
hsolbrig/pyjsg
|
5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429
|
[
"CC0-1.0"
] | 15
|
2018-01-05T17:18:34.000Z
|
2021-12-13T17:40:25.000Z
|
tests/test_issues/test_member_example.py
|
hsolbrig/pyjsg
|
5ef46d9af6a94a0cd0e91ebf8b22f61c17e78429
|
[
"CC0-1.0"
] | null | null | null |
import unittest
from pyjsg.validate_json import JSGPython
class MemberExampleTestCase(unittest.TestCase):
def test1(self):
x = JSGPython('''doc {
last_name : @string, # exactly one last name of type string
           first_name : @string+    # array of one or more first names
age : @int?, # optional age of type int
weight : @number* # array of zero or more weights
}
''')
rslts = x.conforms('''
{ "last_name" : "snooter",
"first_name" : ["grunt", "peter"],
"weight" : []
}''')
self.assertTrue(rslts.success)
if __name__ == '__main__':
unittest.main()
| 28.4
| 77
| 0.533803
|
import unittest
from pyjsg.validate_json import JSGPython
class MemberExampleTestCase(unittest.TestCase):
def test1(self):
x = JSGPython('''doc {
last_name : @string, # exactly one last name of type string
           first_name : @string+    # array of one or more first names
age : @int?, # optional age of type int
weight : @number* # array of zero or more weights
}
''')
rslts = x.conforms('''
{ "last_name" : "snooter",
"first_name" : ["grunt", "peter"],
"weight" : []
}''')
self.assertTrue(rslts.success)
if __name__ == '__main__':
unittest.main()
| true
| true
|
79028a488f889c59f63439f73bebec11476bdc65
| 1,896
|
py
|
Python
|
mdp.py
|
GCrispino/vi-pddlgym
|
2401cdbb1590cd0ebab5a3d75549c63aa130ee24
|
[
"MIT"
] | 1
|
2021-11-04T02:07:04.000Z
|
2021-11-04T02:07:04.000Z
|
mdp.py
|
GCrispino/vi-pddlgym
|
2401cdbb1590cd0ebab5a3d75549c63aa130ee24
|
[
"MIT"
] | null | null | null |
mdp.py
|
GCrispino/vi-pddlgym
|
2401cdbb1590cd0ebab5a3d75549c63aa130ee24
|
[
"MIT"
] | 1
|
2021-03-07T20:53:38.000Z
|
2021-03-07T20:53:38.000Z
|
import numpy as np
from pddlgym.core import get_successor_states, InvalidAction
from pddlgym.inference import check_goal
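# Recursively enumerate every state reachable from s under the action set A,
# recording the successor probability distribution for each (state, action) pair.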
def get_all_reachable(s, A, env, reach=None):
reach = {} if not reach else reach
reach[s] = {}
for a in A:
try:
succ = get_successor_states(s,
a,
env.domain,
raise_error_on_invalid_action=True,
return_probs=True)
except InvalidAction:
succ = {s: 1.0}
reach[s][a] = {s_: prob for s_, prob in succ.items()}
for s_ in succ:
if s_ not in reach:
reach.update(get_all_reachable(s_, A, env, reach))
return reach
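# Value iteration over the reachable state space: V stores expected costs,
# P stores goal-reachability probabilities, and pi the greedy policy; iterate
# until the max-norm change in V falls below epsilon.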
def vi(S, succ_states, A, V_i, G_i, goal, env, gamma, epsilon):
V = np.zeros(len(V_i))
P = np.zeros(len(V_i))
pi = np.full(len(V_i), None)
print(len(S), len(V_i), len(G_i), len(P))
print(G_i)
P[G_i] = 1
i = 0
diff = np.inf
while True:
print('Iteration', i, diff)
V_ = np.copy(V)
P_ = np.copy(P)
for s in S:
if check_goal(s, goal):
continue
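            # Bellman backup over all actions: Q holds expected cost-to-go,
            # Q_p the probability of eventually reaching the goal.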
Q = np.zeros(len(A))
Q_p = np.zeros(len(A))
cost = 1
for i_a, a in enumerate(A):
succ = succ_states[s, a]
probs = np.fromiter(iter(succ.values()), dtype=float)
succ_i = [V_i[succ_s] for succ_s in succ_states[s, a]]
Q[i_a] = cost + np.dot(probs, gamma * V_[succ_i])
Q_p[i_a] = np.dot(probs, P_[succ_i])
V[V_i[s]] = np.min(Q)
P[V_i[s]] = np.max(Q_p)
pi[V_i[s]] = A[np.argmin(Q)]
diff = np.linalg.norm(V_ - V, np.inf)
if diff < epsilon:
break
i += 1
return V, pi
| 29.625
| 75
| 0.477848
|
import numpy as np
from pddlgym.core import get_successor_states, InvalidAction
from pddlgym.inference import check_goal
def get_all_reachable(s, A, env, reach=None):
reach = {} if not reach else reach
reach[s] = {}
for a in A:
try:
succ = get_successor_states(s,
a,
env.domain,
raise_error_on_invalid_action=True,
return_probs=True)
except InvalidAction:
succ = {s: 1.0}
reach[s][a] = {s_: prob for s_, prob in succ.items()}
for s_ in succ:
if s_ not in reach:
reach.update(get_all_reachable(s_, A, env, reach))
return reach
def vi(S, succ_states, A, V_i, G_i, goal, env, gamma, epsilon):
V = np.zeros(len(V_i))
P = np.zeros(len(V_i))
pi = np.full(len(V_i), None)
print(len(S), len(V_i), len(G_i), len(P))
print(G_i)
P[G_i] = 1
i = 0
diff = np.inf
while True:
print('Iteration', i, diff)
V_ = np.copy(V)
P_ = np.copy(P)
for s in S:
if check_goal(s, goal):
continue
Q = np.zeros(len(A))
Q_p = np.zeros(len(A))
cost = 1
for i_a, a in enumerate(A):
succ = succ_states[s, a]
probs = np.fromiter(iter(succ.values()), dtype=float)
succ_i = [V_i[succ_s] for succ_s in succ_states[s, a]]
Q[i_a] = cost + np.dot(probs, gamma * V_[succ_i])
Q_p[i_a] = np.dot(probs, P_[succ_i])
V[V_i[s]] = np.min(Q)
P[V_i[s]] = np.max(Q_p)
pi[V_i[s]] = A[np.argmin(Q)]
diff = np.linalg.norm(V_ - V, np.inf)
if diff < epsilon:
break
i += 1
return V, pi
| true
| true
|
79028a9d93d4df739fdc855f8449297f6c89c314
| 2,540
|
py
|
Python
|
tests/test_sklearn.py
|
data-science-lab-amsterdam/skippa
|
1349317c441f1e46e22f4c02a8aceae767aea5fe
|
[
"BSD-3-Clause"
] | 33
|
2021-12-15T22:56:12.000Z
|
2022-02-26T12:33:56.000Z
|
tests/test_sklearn.py
|
data-science-lab-amsterdam/skippa
|
1349317c441f1e46e22f4c02a8aceae767aea5fe
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_sklearn.py
|
data-science-lab-amsterdam/skippa
|
1349317c441f1e46e22f4c02a8aceae767aea5fe
|
[
"BSD-3-Clause"
] | 1
|
2022-01-20T15:41:35.000Z
|
2022-01-20T15:41:35.000Z
|
import pytest
import numpy as np
import pandas as pd
from skippa import columns
from skippa.transformers.sklearn import (
SkippaSimpleImputer,
SkippaStandardScaler,
SkippaMinMaxScaler,
SkippaOneHotEncoder,
SkippaLabelEncoder,
SkippaOrdinalEncoder,
SkippaPCA
)
from skippa.utils import get_dummy_data
def test_simpleimputer_float(test_data):
X, _ = test_data
col_spec = columns(dtype_include='float')
si = SkippaSimpleImputer(cols=col_spec, strategy='median')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(res)]
assert subset.isna().sum().sum() == 0
def test_simpleimputer_int(test_data):
X, _ = test_data
col_spec = columns(dtype_include='int')
si = SkippaSimpleImputer(cols=col_spec, strategy='median')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(res)]
assert subset.isna().sum().sum() == 0
def test_simpleimputer_char(test_data):
X, _ = test_data
col_spec = columns(dtype_include='object')
si = SkippaSimpleImputer(cols=col_spec, strategy='most_frequent')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(X)]
assert subset.isna().sum().sum() == 0
def test_standardscaler():
X, _ = get_dummy_data(nchar=0, ndate=0, nrows=10)
ss = SkippaStandardScaler(cols=columns())
res = ss.fit_transform(X)
threshold = 0.01
assert (np.abs(0 - res.mean()) < threshold).all()
def test_minmaxscaler():
X, _ = get_dummy_data(nchar=0, ndate=0, nrows=10)
mms = SkippaMinMaxScaler(cols=columns())
res = mms.fit_transform(X)
threshold = 0.01
assert (np.abs(res.min() - 0.) < threshold).all()
assert (np.abs(res.max() - 1.) < threshold).all()
def test_onehotencoder():
X, _ = get_dummy_data(nrows=10, nfloat=0, nint=0, nchar=1, ndate=0)
ohe = SkippaOneHotEncoder(cols=columns())
res = ohe.fit_transform(X)
n_distinct_values = X.iloc[:, 0].nunique(dropna=False)
assert res.shape[1] == n_distinct_values
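# PCA is fitted on the 10 float columns only; the single char column passes
# through untouched, hence n_components + 1 columns in the result.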
def test_pca():
n_components = 3
X, _ = get_dummy_data(nrows=100, nfloat=10, nint=0, nchar=1, ndate=0, missing=False)
pca = SkippaPCA(cols=columns(dtype_include='float'), n_components=n_components)
res = pca.fit_transform(X)
assert pca.n_components_ == n_components
assert res.shape[1] == n_components + 1
expected_columns = [f'c{i}' for i in range(n_components)]
assert all([c in res.columns for c in expected_columns])
| 30.60241
| 88
| 0.692913
|
import pytest
import numpy as np
import pandas as pd
from skippa import columns
from skippa.transformers.sklearn import (
SkippaSimpleImputer,
SkippaStandardScaler,
SkippaMinMaxScaler,
SkippaOneHotEncoder,
SkippaLabelEncoder,
SkippaOrdinalEncoder,
SkippaPCA
)
from skippa.utils import get_dummy_data
def test_simpleimputer_float(test_data):
X, _ = test_data
col_spec = columns(dtype_include='float')
si = SkippaSimpleImputer(cols=col_spec, strategy='median')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(res)]
assert subset.isna().sum().sum() == 0
def test_simpleimputer_int(test_data):
X, _ = test_data
col_spec = columns(dtype_include='int')
si = SkippaSimpleImputer(cols=col_spec, strategy='median')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(res)]
assert subset.isna().sum().sum() == 0
def test_simpleimputer_char(test_data):
X, _ = test_data
col_spec = columns(dtype_include='object')
si = SkippaSimpleImputer(cols=col_spec, strategy='most_frequent')
res = si.fit_transform(X)
assert isinstance(res, pd.DataFrame)
subset = res[col_spec(X)]
assert subset.isna().sum().sum() == 0
def test_standardscaler():
X, _ = get_dummy_data(nchar=0, ndate=0, nrows=10)
ss = SkippaStandardScaler(cols=columns())
res = ss.fit_transform(X)
threshold = 0.01
assert (np.abs(0 - res.mean()) < threshold).all()
def test_minmaxscaler():
X, _ = get_dummy_data(nchar=0, ndate=0, nrows=10)
mms = SkippaMinMaxScaler(cols=columns())
res = mms.fit_transform(X)
threshold = 0.01
assert (np.abs(res.min() - 0.) < threshold).all()
assert (np.abs(res.max() - 1.) < threshold).all()
def test_onehotencoder():
X, _ = get_dummy_data(nrows=10, nfloat=0, nint=0, nchar=1, ndate=0)
ohe = SkippaOneHotEncoder(cols=columns())
res = ohe.fit_transform(X)
n_distinct_values = X.iloc[:, 0].nunique(dropna=False)
assert res.shape[1] == n_distinct_values
def test_pca():
n_components = 3
X, _ = get_dummy_data(nrows=100, nfloat=10, nint=0, nchar=1, ndate=0, missing=False)
pca = SkippaPCA(cols=columns(dtype_include='float'), n_components=n_components)
res = pca.fit_transform(X)
assert pca.n_components_ == n_components
assert res.shape[1] == n_components + 1
expected_columns = [f'c{i}' for i in range(n_components)]
assert all([c in res.columns for c in expected_columns])
| true
| true
|
79028aa9533b2ca6cad9bca5c734f688fc1c8b6f
| 943
|
py
|
Python
|
core/migrations/0002_auto_20161229_2221.py
|
dishad/ADD
|
51455c493a4eb433eb1d8dde44771e917efcb500
|
[
"MIT"
] | null | null | null |
core/migrations/0002_auto_20161229_2221.py
|
dishad/ADD
|
51455c493a4eb433eb1d8dde44771e917efcb500
|
[
"MIT"
] | 4
|
2016-11-26T19:10:01.000Z
|
2016-12-24T10:42:16.000Z
|
core/migrations/0002_auto_20161229_2221.py
|
dishad/deanslist
|
51455c493a4eb433eb1d8dde44771e917efcb500
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-12-30 03:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Subcategory',
),
migrations.AddField(
model_name='category',
name='parent_category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Category'),
),
migrations.AlterField(
model_name='salepost',
name='poster',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
| 27.735294
| 124
| 0.61824
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Subcategory',
),
migrations.AddField(
model_name='category',
name='parent_category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Category'),
),
migrations.AlterField(
model_name='salepost',
name='poster',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
| true
| true
|
79028af7b6911898765bc30e4f34690e470d783c
| 5,137
|
py
|
Python
|
autotest/ogr/ogr_style.py
|
riseofthetigers/GDAL
|
918b6939f6be25ac9d36edca3f71c8bf5dd5975e
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_style.py
|
riseofthetigers/GDAL
|
918b6939f6be25ac9d36edca3f71c8bf5dd5975e
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_style.py
|
riseofthetigers/GDAL
|
918b6939f6be25ac9d36edca3f71c8bf5dd5975e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Style testing.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import ogr
from osgeo import gdal
###############################################################################
#
#
def ogr_style_styletable():
style_table = ogr.StyleTable()
style_table.AddStyle("style1_normal", 'SYMBOL(id:"http://style1_normal",c:#67452301)')
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = style_table.SaveStyleTable('/nonexistingdir/nonexistingfile')
gdal.PopErrorHandler()
if ret != 0:
gdaltest.post_reason('failure')
print(ret)
return 'fail'
if style_table.SaveStyleTable("/vsimem/out.txt") != 1:
gdaltest.post_reason('failure')
return 'fail'
style_table = None
style_table = ogr.StyleTable()
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = style_table.LoadStyleTable('/nonexisting')
gdal.PopErrorHandler()
if ret != 0:
gdaltest.post_reason('failure')
return 'fail'
if style_table.LoadStyleTable('/vsimem/out.txt') != 1:
gdaltest.post_reason('failure')
return 'fail'
gdal.Unlink('/vsimem/out.txt')
gdal.PushErrorHandler('CPLQuietErrorHandler')
ret = style_table.Find("non_existing_style")
gdal.PopErrorHandler()
if ret is not None:
gdaltest.post_reason('failure')
return 'fail'
if style_table.Find("style1_normal") != 'SYMBOL(id:"http://style1_normal",c:#67452301)':
gdaltest.post_reason('failure')
return 'fail'
style = style_table.GetNextStyle()
if style != 'SYMBOL(id:"http://style1_normal",c:#67452301)':
gdaltest.post_reason('failure')
return 'fail'
style_name = style_table.GetLastStyleName()
if style_name != 'style1_normal':
gdaltest.post_reason('failure')
return 'fail'
style = style_table.GetNextStyle()
if style is not None:
gdaltest.post_reason('failure')
return 'fail'
style_table.ResetStyleStringReading()
style = style_table.GetNextStyle()
if style is None:
gdaltest.post_reason('failure')
return 'fail'
# GetStyleTable()/SetStyleTable() on data source
ds = ogr.GetDriverByName('Memory').CreateDataSource('')
if ds.GetStyleTable() is not None:
gdaltest.post_reason('failure')
return 'fail'
ds.SetStyleTable(None)
if ds.GetStyleTable() is not None:
gdaltest.post_reason('failure')
return 'fail'
ds.SetStyleTable(style_table)
style_table2 = ds.GetStyleTable()
style = style_table2.GetNextStyle()
if style != 'SYMBOL(id:"http://style1_normal",c:#67452301)':
gdaltest.post_reason('failure')
return 'fail'
# GetStyleTable()/SetStyleTable() on layer
lyr = ds.CreateLayer('foo')
if lyr.GetStyleTable() is not None:
gdaltest.post_reason('failure')
return 'fail'
lyr.SetStyleTable(None)
if lyr.GetStyleTable() is not None:
gdaltest.post_reason('failure')
return 'fail'
lyr.SetStyleTable(style_table)
style_table2 = lyr.GetStyleTable()
style = style_table2.GetNextStyle()
if style != 'SYMBOL(id:"http://style1_normal",c:#67452301)':
gdaltest.post_reason('failure')
return 'fail'
ds = None
return 'success'
###############################################################################
# Build tests runner
gdaltest_list = [
ogr_style_styletable ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_style' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 33.575163
| 92
| 0.633444
| true
| true
|
|
79028c9bce485b230ba23f8ce87e16781f267032
| 77
|
py
|
Python
|
.history/classes/Player_20171106170937.py
|
reecebenson/DADSA-Tennis-PartA
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
[
"MIT"
] | null | null | null |
.history/classes/Player_20171106170937.py
|
reecebenson/DADSA-Tennis-PartA
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
[
"MIT"
] | null | null | null |
.history/classes/Player_20171106170937.py
|
reecebenson/DADSA-Tennis-PartA
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
[
"MIT"
] | null | null | null |
class Player():
def __init__(self):
print("PLYR FAK SUM BODIE")
| 15.4
| 35
| 0.597403
|
class Player():
def __init__(self):
print("PLYR FAK SUM BODIE")
| true
| true
|
79028da567c1f41664f026797da292dc35b3b8a0
| 1,214
|
py
|
Python
|
app/core/models.py
|
StoikovOleh/recipe-app-api
|
bea18993a4563e3d8d5dd1a3bc44b782f5b4517e
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
StoikovOleh/recipe-app-api
|
bea18993a4563e3d8d5dd1a3bc44b782f5b4517e
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
StoikovOleh/recipe-app-api
|
bea18993a4563e3d8d5dd1a3bc44b782f5b4517e
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| 31.128205
| 76
| 0.667216
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| true
| true
|
79028f363aec5299347603f6c4801f13319c7e79
| 1,165
|
py
|
Python
|
Codeforces_problems/Reverse Binary Strings/solution.py
|
KAHund/CompetitiveCode
|
6ed211a2f795569f5c2f18c2f660520d99d41ca0
|
[
"MIT"
] | 165
|
2020-10-03T08:01:11.000Z
|
2022-03-31T02:42:08.000Z
|
Codeforces_problems/Reverse Binary Strings/solution.py
|
KAHund/CompetitiveCode
|
6ed211a2f795569f5c2f18c2f660520d99d41ca0
|
[
"MIT"
] | 383
|
2020-10-03T07:39:11.000Z
|
2021-11-20T07:06:35.000Z
|
Codeforces_problems/Reverse Binary Strings/solution.py
|
KAHund/CompetitiveCode
|
6ed211a2f795569f5c2f18c2f660520d99d41ca0
|
[
"MIT"
] | 380
|
2020-10-03T08:05:04.000Z
|
2022-03-19T06:56:59.000Z
|
# We need to make the string alternating, i.e. s[i] != s[i+1]. Reversing a substring
# s[l..r] changes no more than two adjacent pairs: (s[l-1], s[l]) and (s[r], s[r+1]).
# Moreover, one of them should be a consecutive "00" and the other a "11". So the
# maximum of the number of "00" pairs and the number of "11" pairs is a lower bound
# on the answer, and we can always reach it by pairing a "00" with a "11" or with
# the left/right border of s.
for _ in range(int(input())):
n = int(input())
s = input()
z, o = 0, 0 # will store total number of pairs
zeros, ones = 0, 0 # will store no of pairs in one streak
for el in s:
if el == '1':
ones += 1
# streak of zeros are broken by one so no of pairs of zeros are added to z
z += max(zeros-1, 0)
zeros = 0
if el == '0':
zeros += 1
# streak of ones are broken by one so no of pairs of ones are added to o
o += max(ones-1, 0)
ones = 0
    # we count pairs only when a streak is broken, so count the final unbroken streak here
o += max(ones-1, 0)
z += max(zeros-1, 0)
print(max(o, z))
| 36.40625
| 93
| 0.593133
|
for _ in range(int(input())):
n = int(input())
s = input()
z, o = 0, 0
zeros, ones = 0, 0
for el in s:
if el == '1':
ones += 1
z += max(zeros-1, 0)
zeros = 0
if el == '0':
zeros += 1
o += max(ones-1, 0)
ones = 0
o += max(ones-1, 0)
z += max(zeros-1, 0)
print(max(o, z))
| true
| true
|
79028f78d8a408e97a58606dd5ac6bc08df170f3
| 5,531
|
py
|
Python
|
igemm_codegen.py
|
aska-0096/iGEMMgen
|
cff8507355d86e47f5b099cd9b8a81d94fab93d7
|
[
"MIT"
] | 20
|
2020-04-14T14:39:24.000Z
|
2022-02-23T19:37:04.000Z
|
igemm_codegen.py
|
aska-0096/iGEMMgen
|
cff8507355d86e47f5b099cd9b8a81d94fab93d7
|
[
"MIT"
] | 38
|
2020-04-21T12:23:07.000Z
|
2021-12-31T02:26:21.000Z
|
igemm_codegen.py
|
aska-0096/iGEMMgen
|
cff8507355d86e47f5b099cd9b8a81d94fab93d7
|
[
"MIT"
] | 9
|
2020-04-20T06:34:16.000Z
|
2022-02-23T19:37:06.000Z
|
################################################################################
#
# MIT License
#
# Copyright (c) 2020 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
from __future__ import print_function
import argparse
import sys, os, shutil
from python import *
OUT_DIR='out'
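# Generate one assembly file for the whole config: build an amdgpu assembly
# printer for the target arch and run the codegen driver over every
# [igemm_*] section of the config file.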
def igemm_flatten(args, config_content):
asm_target = os.path.join(args.dir, os.path.splitext(os.path.basename(args.config_file))[0] + '.s')
emitter = mc_emit_to_file_t(asm_target)
sec_root = config_content.get_section('codegen')[0]
arch = amdgpu_arch_config_t({
'arch' : amdgpu_string_to_arch( sec_root['arch'] ),
'data_type' : AMDGPU_PRECISION_FP32,
'code_object' : amdgpu_string_to_codeobj( sec_root['code_object']) })
# create mc
mc = mc_asm_printer_t(emitter, arch)
mc_set_current(mc)
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
td['arch'] = sec_root['arch'] # append arch to each section
codegen_driver_t(mc, tunable_dicts)(split_kernel = args.split_kernel)
# os.chmod(asm_target, 0x777)
def igemm_out_tunable_param(output_file, config_content):
sec_root = config_content.get_section('codegen')[0]
list_emitter = mc_emit_to_file_t(output_file)
list_emitter.open()
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
td['arch'] = sec_root['arch'] # append arch to each section
td_item = igemm_gtc_tunable_parameter_t(td)
list_emitter.emit(td_item.output())
list_emitter.close()
def igemm_check_fp16_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "fp16" in td['precision']:
return True
return False
def igemm_check_int8_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "int8" in td['precision']:
return True
return False
def igemm_check_bf16_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "bf16" in td['precision']:
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("config_file", help="config file as input")
parser.add_argument("-d", "--dir", help="directory of output files", default = OUT_DIR)
parser.add_argument("-output", nargs='?', const='tunable_parameter_list.txt', help="output tunable parameter list")
parser.add_argument("-s", "--split_kernel", action="store_true")
args = parser.parse_args()
config_parser = config_parser_t(args.config_file)
#print(os.getcwd())
config_content = config_parser()
#config_content.dump()
#print(args.output)
if args.output:
igemm_out_tunable_param(args.output, config_content)
arch = config_content.get_section('codegen')[0]['arch']
code_object = config_content.get_section('codegen')[0]['code_object']
has_fp16_config = igemm_check_fp16_configs(config_content)
has_int8_config = igemm_check_int8_configs(config_content)
has_bf16_config = igemm_check_bf16_configs(config_content)
if config_content.get_section('codegen')[0]['mode'] in ('flat', 'flatten'):
if os.path.exists(args.dir):
shutil.rmtree(args.dir)
os.mkdir(args.dir)
cxxflags = []
if args.split_kernel:
cxxflags += ["-DIGEMM_SPLIT_KERNEL"]
host_driver(cxxflags=cxxflags, arch=arch, config_file=args.config_file, out_dir=args.dir, has_fp16_config=has_fp16_config, has_int8_config=has_int8_config, has_bf16_config=has_bf16_config)
igemm_flatten(args, config_content)
if config_content.get_section('codegen')[0]['mode'] in ('seq', 'sequencer'):
# config_content.dump()
# igemm_sequence(args, config_content)
if os.path.exists(args.dir):
shutil.rmtree(args.dir)
os.mkdir(args.dir)
sequence_driver(arch=arch, code_object=code_object,
config_content=config_content, out_dir=args.dir )
| 42.875969
| 196
| 0.685952
| true
| true
|
|
79028faaa57ab6b8d5b1f1de6b8b8fe297b3c725
| 5,044
|
py
|
Python
|
tests/riscv/vector/vector_simple_add_force.py
|
Wlgen/force-riscv
|
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
|
[
"Apache-2.0"
] | null | null | null |
tests/riscv/vector/vector_simple_add_force.py
|
Wlgen/force-riscv
|
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
|
[
"Apache-2.0"
] | null | null | null |
tests/riscv/vector/vector_simple_add_force.py
|
Wlgen/force-riscv
|
9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# This test verifies that a basic add vector instruction can be generated and
# executed. It verifies that the initial values are correctly communicated to
# the simulator and that the resulting values are successfully returned. The
# test assumes the use of 512-bit vector registers and 32-bit vector register
# elements.
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mInstrList = ("VADD.VV##RISCV",)
self._mRegIndex1 = None
self._mRegIndex2 = None
self._mElemVals1 = None
self._mElemVals2 = None
# Set up the environment prior to generating the test instructions.
def _setUpTest(self):
# Ensure vector element size is set to 32 bits and vector register
# group size is set to 1
choices_mod = ChoicesModifier(self.genThread)
vsew_choice_weights = {
"0x0": 0,
"0x1": 0,
"0x2": 10,
"0x3": 0,
"0x4": 0,
"0x5": 0,
"0x6": 0,
"0x7": 0,
}
choices_mod.modifyRegisterFieldValueChoices("vtype.VSEW", vsew_choice_weights)
vlmul_choice_weights = {
"0x0": 10,
"0x1": 0,
"0x2": 0,
"0x3": 0,
"0x4": 0,
"0x5": 0,
"0x6": 0,
"0x7": 0,
}
choices_mod.modifyRegisterFieldValueChoices("vtype.VLMUL", vlmul_choice_weights)
choices_mod.commitSet()
(self._mRegIndex1, self._mRegIndex2) = self.getRandomRegisters(2, "VECREG", exclude="0")
self._mElemVals1 = self._initializeVectorRegister("v%d" % self._mRegIndex1)
self._mElemVals2 = self._initializeVectorRegister("v%d" % self._mRegIndex2)
# Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
# Return parameters to be passed to Sequence.genInstruction().
def _getInstructionParameters(self):
return {
"vd": self._mRegIndex1,
"vs1": self._mRegIndex1,
"vs2": self._mRegIndex2,
"vm": 1,
}
# Verify additional aspects of the instruction generation and execution.
#
# @param aInstr The name of the instruction.
# @param aInstrRecord A record of the generated instruction.
def _performAdditionalVerification(self, aInstr, aInstrRecord):
for (elem_index, val) in enumerate(self._mElemVals2):
self._mElemVals1[elem_index] += val
reg_name_1 = "v%d" % self._mRegIndex1
for sub_index in range(8):
field_name = "%s_%d" % (reg_name_1, sub_index)
(field_val, valid) = self.readRegister(reg_name_1, field=field_name)
self.assertValidRegisterValue(reg_name_1, valid)
expected_field_val = self._getFieldValue(sub_index, self._mElemVals1)
if field_val != expected_field_val:
self.error(
"Register field %s has unexpected value; "
"Expected=0x%x, Actual=0x%x" % (field_name, expected_field_val, field_val)
)
# Initialize the specified vector register and return a list of 32-bit
# element values.
def _initializeVectorRegister(self, aRegName):
elem_vals = []
for elem_index in range(16):
elem_val = RandomUtils.random32(0, 0xFFFF)
elem_vals.append(elem_val)
for sub_index in range(8):
field_name = "%s_%d" % (aRegName, sub_index)
field_val = self._getFieldValue(sub_index, elem_vals)
self.initializeRegisterFields(aRegName, {field_name: field_val})
return elem_vals
# Get the value of a 64-bit field for a vector register.
#
# @param aSubIndex A 64-bit vector register field index.
# @param aElemVals A list of 32-bit element values.
def _getFieldValue(self, aSubIndex, aElemVals):
field_value = aElemVals[2 * aSubIndex]
field_value |= aElemVals[2 * aSubIndex + 1] << 32
return field_value
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 37.641791
| 96
| 0.652458
|
import RandomUtils
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mInstrList = ("VADD.VV##RISCV",)
self._mRegIndex1 = None
self._mRegIndex2 = None
self._mElemVals1 = None
self._mElemVals2 = None
def _setUpTest(self):
choices_mod = ChoicesModifier(self.genThread)
vsew_choice_weights = {
"0x0": 0,
"0x1": 0,
"0x2": 10,
"0x3": 0,
"0x4": 0,
"0x5": 0,
"0x6": 0,
"0x7": 0,
}
choices_mod.modifyRegisterFieldValueChoices("vtype.VSEW", vsew_choice_weights)
vlmul_choice_weights = {
"0x0": 10,
"0x1": 0,
"0x2": 0,
"0x3": 0,
"0x4": 0,
"0x5": 0,
"0x6": 0,
"0x7": 0,
}
choices_mod.modifyRegisterFieldValueChoices("vtype.VLMUL", vlmul_choice_weights)
choices_mod.commitSet()
(self._mRegIndex1, self._mRegIndex2) = self.getRandomRegisters(2, "VECREG", exclude="0")
self._mElemVals1 = self._initializeVectorRegister("v%d" % self._mRegIndex1)
self._mElemVals2 = self._initializeVectorRegister("v%d" % self._mRegIndex2)
def _getInstructionList(self):
return self._mInstrList
def _getInstructionParameters(self):
return {
"vd": self._mRegIndex1,
"vs1": self._mRegIndex1,
"vs2": self._mRegIndex2,
"vm": 1,
}
def _performAdditionalVerification(self, aInstr, aInstrRecord):
for (elem_index, val) in enumerate(self._mElemVals2):
self._mElemVals1[elem_index] += val
reg_name_1 = "v%d" % self._mRegIndex1
for sub_index in range(8):
field_name = "%s_%d" % (reg_name_1, sub_index)
(field_val, valid) = self.readRegister(reg_name_1, field=field_name)
self.assertValidRegisterValue(reg_name_1, valid)
expected_field_val = self._getFieldValue(sub_index, self._mElemVals1)
if field_val != expected_field_val:
self.error(
"Register field %s has unexpected value; "
"Expected=0x%x, Actual=0x%x" % (field_name, expected_field_val, field_val)
)
def _initializeVectorRegister(self, aRegName):
elem_vals = []
for elem_index in range(16):
elem_val = RandomUtils.random32(0, 0xFFFF)
elem_vals.append(elem_val)
for sub_index in range(8):
field_name = "%s_%d" % (aRegName, sub_index)
field_val = self._getFieldValue(sub_index, elem_vals)
self.initializeRegisterFields(aRegName, {field_name: field_val})
return elem_vals
def _getFieldValue(self, aSubIndex, aElemVals):
field_value = aElemVals[2 * aSubIndex]
field_value |= aElemVals[2 * aSubIndex + 1] << 32
return field_value
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| true
| true
|
790290c91658ac433724591c40bbc1492630611b
| 1,526
|
py
|
Python
|
GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR06_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR06_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR06_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR06_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitR06_IsolatedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitR06_IsolatedLHS, self).__init__(name='HUnitR06_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR06_IsolatedLHS')
self["equations"] = []
# Set the node attributes
# match class PhysicalNode(6.0.m.0PhysicalNode) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.0PhysicalNode')
# match class Partition(6.0.m.1Partition) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__Partition"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.1Partition')
# define evaluation methods for each apply class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
def constraint(self, PreNode, graph):
return True
| 29.921569
| 96
| 0.700524
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR06_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
self.is_compiled = True
super(HUnitR06_IsolatedLHS, self).__init__(name='HUnitR06_IsolatedLHS', num_nodes=0, edges=[])
self.add_edges([])
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR06_IsolatedLHS')
self["equations"] = []
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.0PhysicalNode')
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__Partition"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'6.0.m.1Partition')
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
def constraint(self, PreNode, graph):
return True
| true
| true
|
79029123b44893cb7ec7bfa422f207de71196e2f
| 1,198
|
py
|
Python
|
tests/test_profiler.py
|
MrPainter/aiomisc
|
97187987292c3f8c642a2932438061bb9ba1add6
|
[
"MIT"
] | null | null | null |
tests/test_profiler.py
|
MrPainter/aiomisc
|
97187987292c3f8c642a2932438061bb9ba1add6
|
[
"MIT"
] | null | null | null |
tests/test_profiler.py
|
MrPainter/aiomisc
|
97187987292c3f8c642a2932438061bb9ba1add6
|
[
"MIT"
] | null | null | null |
import asyncio
import os
from pstats import Stats
from tempfile import NamedTemporaryFile
from aiomisc.service.profiler import Profiler
async def test_profiler_start_stop():
profiler = Profiler(interval=0.1, top_results=10)
try:
await profiler.start()
await asyncio.sleep(0.5)
finally:
await profiler.stop()
async def test_profiler_dump():
profiler = None
fl = NamedTemporaryFile(delete=False)
path = NamedTemporaryFile(delete=False).name
fl.close()
try:
profiler = Profiler(
interval=0.1, top_results=10,
path=path
)
await profiler.start()
# Get first update
await asyncio.sleep(0.01)
stats1 = Stats(path)
# Not enough sleep till next update
await asyncio.sleep(0.01)
stats2 = Stats(path)
# Getting the same dump
assert stats1.stats == stats2.stats
# Enough sleep till next update
await asyncio.sleep(0.2)
stats3 = Stats(path)
# Getting updated dump
assert stats2.stats != stats3.stats
finally:
if profiler:
await profiler.stop()
os.remove(path)
| 23.038462
| 53
| 0.6202
|
import asyncio
import os
from pstats import Stats
from tempfile import NamedTemporaryFile
from aiomisc.service.profiler import Profiler
async def test_profiler_start_stop():
profiler = Profiler(interval=0.1, top_results=10)
try:
await profiler.start()
await asyncio.sleep(0.5)
finally:
await profiler.stop()
async def test_profiler_dump():
profiler = None
fl = NamedTemporaryFile(delete=False)
path = NamedTemporaryFile(delete=False).name
fl.close()
try:
profiler = Profiler(
interval=0.1, top_results=10,
path=path
)
await profiler.start()
await asyncio.sleep(0.01)
stats1 = Stats(path)
await asyncio.sleep(0.01)
stats2 = Stats(path)
assert stats1.stats == stats2.stats
await asyncio.sleep(0.2)
stats3 = Stats(path)
assert stats2.stats != stats3.stats
finally:
if profiler:
await profiler.stop()
os.remove(path)
| true
| true
|