code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import stepwise, appcli, autoprop
from inform import warn
from appcli import Key, Method, DocoptConfig
from stepwise import StepwiseConfig, PresetConfig, Quantity, oxford_comma
from stepwise_mol_bio import Cleanup, format_sec
from freezerbox import MakerConfig, group_by_identity, parse_volume_uL, unanimous
from more_itertools import always_iterable
def ng_uL(x):
return Quantity(x, 'ng/µL')
@autoprop
class SpinCleanup(Cleanup):
"""\
Purify a PCR reaction using a silica spin column.
Usage:
spin_cleanup [<preset>] [-s <µL>] [-d <buffer>] [-v <µL>]
<%! from stepwise_mol_bio import hanging_indent %>\
Arguments:
<preset> [default: ${app.preset}]
The default parameters to use. Typically these correspond to
commercial kits:
${hanging_indent(app.preset_briefs, 8*' ')}
Options:
-s --sample-volume <µL>
The volume of the sample, in µL.
-d --elute-buffer <name>
The buffer to elute in.
-v --elute-volume <µL>
The volume of purified DNA/RNA to elute, in µL. The default value
depends on the preset, but can usually be lowered to get more
concentrated product. A warning will be displayed if the requested
volume is lower than the minimum recommended by the kit manufacturer.
Configuration:
Default values for this protocol can be specified in any of the following
stepwise configuration files:
${hanging_indent(app.config_paths, 8)}
molbio.spin_cleanup.default_preset:
The default value for the `--preset` option.
molbio.spin_cleanup.presets:
Named groups of default reaction parameters. Typically each preset
corresponds to a particular kit or protocol. See below for the various
settings that can be specified in each preset.
molbio.spin_cleanup.presets.<name>.protocol_name
How to refer to the whole protocol. Commonly this is the name of the
spin column kit.
molbio.spin_cleanup.presets.<name>.protocol_link
A link (typically minified) to the complete protocol, e.g. as published
by the manufacturer of the columns. This is not required, but if
specified, will be included in the protocol as a footnote.
molbio.spin_cleanup.presets.<name>.column_name
How to refer to the specific spin column used in the protocol.
molbio.spin_cleanup.presets.<name>.spin_speed_g
How fast to spin the column in each centrifugation step, in units of
g-force.
molbio.spin_cleanup.presets.<name>.column_capacity_ug
The maximum binding capacity of the column, in µg. This information is
added to the protocol as a footnote.
molbio.spin_cleanup.presets.<name>.sample_type
How to generically refer to the sample in the protocol, e.g. "DNA".
molbio.spin_cleanup.presets.<name>.sample_volume_uL
The volume of sample to load on the column, in µL. Alternatively, this
can be a dictionary with keys 'min' and/or 'max' specifying the minimum
and maximum allowed sample volumes, respectively.
molbio.spin_cleanup.presets.<name>.bind_buffer
The name(s) of the buffer(s) to use to bind the sample to column. This
can be either a string or a list of strings. Use a list to specify
that multiple buffers (e.g. binding buffer and ethanol) should be mixed
with the sample before it is loaded on the column. If this option is a
list, the `bind_volume_uL` and `bind_volume_x` options must also be
lists of the same length (or left unspecified).
molbio.spin_cleanup.presets.<name>.bind_volume_uL
How much `bind_buffer` to use, in µL. This can be either a number or a
list of numbers; see `bind_buffer` for more details. This takes
precedence over the `bind_volume_x` setting.
molbio.spin_cleanup.presets.<name>.bind_volume_x
How much `bind_buffer` to use, as a multiple of the sample volume.
This can be a number or a list of numbers; see `bind_buffer` for more
details. This is superseded by the `bind_volume_uL` setting.
molbio.spin_cleanup.presets.<name>.bind_spin_sec
How long to centrifuge the column during the bind step.
molbio.spin_cleanup.presets.<name>.bind_vacuum
Whether or not to use a vacuum manifold for the bind step. The default
is False. If True, the `bind_spin_sec` option is ignored.
molbio.spin_cleanup.presets.<name>.pH_buffer
The name of the buffer to use when adjusting the pH of the sample.
molbio.spin_cleanup.presets.<name>.pH_volume_uL
How much `pH_buffer` to use, in µL. This takes precedence over the
`pH_volume_x` setting.
molbio.spin_cleanup.presets.<name>.pH_volume_x
How much `pH_buffer` to use, as a multiple of the sample volume.
This is superseded by the `pH_volume_uL` setting.
molbio.spin_cleanup.presets.<name>.pH_color
The color the sample/binding buffer should be after reaching the
correct pH.
molbio.spin_cleanup.presets.<name>.wash_buffer
The name of the buffer to use when washing the column. This can either
be a string or a list of strings. Use a list to specify that there
should be multiple wash steps. If this option is a list, the
`wash_volume_uL`, `wash_spin_sec`, and `wash_vacuum` options must also
be lists of the same length (or left unspecified).
molbio.spin_cleanup.presets.<name>.wash_volume_uL
The volume of `wash_buffer` to use, in µL. This can either be a number
or a list of numbers; see `wash_buffer` for more details.
molbio.spin_cleanup.presets.<name>.wash_spin_sec
How long to centrifuge the column during the wash step. This can
either be a number or a list of numbers; see `wash_buffer` for more
details.
molbio.spin_cleanup.presets.<name>.wash_vacuum
Whether or not to use a vacuum manifold for the wash step. This can
either be a boolean or a list of booleans; see `wash_buffer` for more
details. The default is False. If True, the `wash_spin_sec` option is
ignored.
molbio.spin_cleanup.presets.<name>.dry_spin_sec
How long to centrifuge the column after the wash step(s), e.g. to
remove any residual ethanol. If left unspecified, this step will not
be included in the protocol.
molbio.spin_cleanup.presets.<name>.elute_buffer
The default value for the `--elute-buffer` flag.
molbio.spin_cleanup.presets.<name>.elute_volume_uL
The default value for the `--elute-volume` flag.
molbio.spin_cleanup.presets.<name>.elute_min_volume_uL
The minimum recommended volume to elute in. Smaller volumes can still
be specified, but will be accompanied by a warning.
molbio.spin_cleanup.presets.<name>.elute_wait_sec
How long to incubate the column with elution buffer before eluting, in
seconds.
molbio.spin_cleanup.presets.<name>.elute_spin_sec
How long to centrifuge the column when eluting.
Database:
Spin-column cleanup protocols can appear in the "Cleanups" column of a
FreezerBox database:
spin-cleanup [<preset>] [volume=<µL>] [buffer=<name>]
<preset>
See `<preset>`.
volume=<µL>
See `--elute-volume`. Must specify a unit.
buffer=<µL>
See `--elute-buffer`.
"""
__config__ = [
DocoptConfig,
MakerConfig,
PresetConfig,
StepwiseConfig.setup('molbio.spin_cleanup'),
]
preset_briefs = appcli.config_attr()
config_paths = appcli.config_attr()
preset_brief_template = '{protocol_name}'
presets = appcli.param(
Key(StepwiseConfig, 'presets'),
pick=list,
)
preset = appcli.param(
Key(DocoptConfig, '<preset>'),
Key(MakerConfig, 1),
Key(StepwiseConfig, 'default_preset'),
)
protocol_name = appcli.param(
Key(PresetConfig, 'protocol_name'),
)
protocol_link = appcli.param(
Key(PresetConfig, 'protocol_link'),
default=None,
)
column_name = appcli.param(
Key(PresetConfig, 'column_name'),
default='silica spin column',
)
spin_speed_g = appcli.param(
Key(PresetConfig, 'spin_speed_g'),
default=None,
)
column_capacity_ug = appcli.param(
Key(PresetConfig, 'column_capacity_ug'),
default=None,
)
sample_type = appcli.param(
Key(PresetConfig, 'sample_type'),
default='DNA',
)
sample_volume_uL = appcli.param(
Key(DocoptConfig, '--sample-volume', cast=float),
default=None,
)
target_sample_volume_uL = appcli.param(
Key(PresetConfig, 'sample_volume_uL'),
default=None,
)
bind_buffer = appcli.param(
Key(PresetConfig, 'bind_buffer'),
)
bind_volume_uL = appcli.param(
Key(PresetConfig, 'bind_volume_uL'),
default=None
)
bind_volume_x = appcli.param(
Key(PresetConfig, 'bind_volume_x'),
default=None
)
bind_spin_sec = appcli.param(
Key(PresetConfig, 'bind_spin_sec'),
default=None
)
bind_vacuum = appcli.param(
Key(PresetConfig, 'bind_vacuum'),
default=False,
)
ph_buffer = appcli.param(
Key(PresetConfig, 'pH_buffer'),
default=None,
)
ph_volume_uL = appcli.param(
Key(PresetConfig, 'pH_volume_uL'),
default=None
)
ph_volume_x = appcli.param(
Key(PresetConfig, 'pH_volume_x'),
default=None
)
ph_color = appcli.param(
Key(PresetConfig, 'pH_color'),
)
wash_buffer = appcli.param(
Key(PresetConfig, 'wash_buffer'),
)
wash_volume_uL = appcli.param(
Key(PresetConfig, 'wash_volume_uL'),
)
wash_spin_sec = appcli.param(
Key(PresetConfig, 'wash_spin_sec'),
default=None,
)
wash_vacuum = appcli.param(
Key(PresetConfig, 'wash_vacuum'),
default=False,
)
dry_spin_sec = appcli.param(
Key(PresetConfig, 'dry_spin_sec'),
default=None,
)
elute_buffer = appcli.param(
Key(DocoptConfig, '--elute-buffer'),
Key(MakerConfig, 'buffer'),
Key(PresetConfig, 'elute_buffer'),
)
elute_volume_uL = appcli.param(
Key(DocoptConfig, '--elute-volume', cast=float),
Key(MakerConfig, 'volume', cast=parse_volume_uL),
Key(PresetConfig, 'elute_volume_uL'),
)
elute_min_volume_uL = appcli.param(
Key(PresetConfig, 'elute_min_volume_uL'),
default=None,
)
elute_wait_sec = appcli.param(
Key(PresetConfig, 'elute_wait_sec'),
default=None,
)
elute_spin_sec = appcli.param(
Key(PresetConfig, 'elute_spin_sec'),
)
group_by = {
'preset': group_by_identity,
'elute_buffer': group_by_identity,
'elute_volume_uL': group_by_identity,
}
def __init__(self, preset=None):
if preset is not None:
self.preset = preset
def get_protocol(self):
p = stepwise.Protocol()
pl = stepwise.paragraph_list()
ul = stepwise.unordered_list()
def break_if_too_long(pl, ul, n=4):
if len(ul) > n:
ul = stepwise.unordered_list()
pl += ul
return ul
footnotes = []
if self.protocol_link:
footnotes.append(self.protocol_link)
if self.column_capacity_ug:
footnotes.append(f"Column capacity: {self.column_capacity_ug} µg")
if self.product_tags and self.show_product_tags:
product_tags = oxford_comma(self.product_tags) + ' '
else:
product_tags = ''
p += pl
pl += f"Purify {product_tags}using {self.protocol_name}{p.add_footnotes(*footnotes)}:"
pl += ul
if self.spin_speed_g:
ul += f"Perform all spin steps at {self.spin_speed_g}g."
## Dilute
if x := self.target_sample_volume_uL:
v = self.sample_volume_uL
if not isinstance(x, dict):
target = f'{x} µL'
skip = v and v == x
self.sample_volume_uL = x
elif 'min' in x and 'max' in x:
target = f"between {x['min']}–{x['max']} µL"
skip = v and x['min'] <= v <= x['max']
elif 'min' in x:
target = f"at least {x['min']} µL"
skip = v and x['min'] <= v
elif 'max' in x:
target = f"at most {x['max']} µL"
skip = v and v <= x['max']
if not skip:
ul += f"Ensure that the sample is {target}."
## Bind
bind_params = zip_params(
self.bind_buffer,
self.bind_volume_x,
self.bind_volume_uL,
)
for bind_buffer, bind_volume_x, bind_volume_uL in bind_params:
bind_volume = resolve_volume(bind_volume_uL, bind_volume_x, self.sample_volume_uL)
ul += f"Add {bind_volume} {bind_buffer} to the crude {self.sample_type}."
if self.ph_buffer:
ph_volume = resolve_volume(self.ph_volume_uL, self.ph_volume_x, self.sample_volume_uL)
ul += f"If not {self.ph_color}: Add {ph_volume} {self.ph_buffer}."
ul += f"Load on a {self.column_name}."
ul += flush_column(self.bind_spin_sec, self.bind_vacuum)
ul = break_if_too_long(pl, ul)
## Wash
wash_params = zip_params(
self.wash_buffer,
self.wash_volume_uL,
self.wash_spin_sec,
self.wash_vacuum,
)
for wash_buffer, wash_volume_uL, wash_spin_sec, wash_vacuum in wash_params:
ul += f"Add {wash_volume_uL} µL {wash_buffer}."
ul += flush_column(wash_spin_sec, wash_vacuum)
## Dry
if self.dry_spin_sec:
ul += flush_column(self.dry_spin_sec)
ul = break_if_too_long(pl, ul)
## Elute
if self.elute_volume_uL < self.elute_min_volume_uL:
warn(f"Elution volume ({self.elute_volume_uL} µL) is below the recommended minimum ({self.elute_min_volume_uL} µL).")
ul += f"Add {self.elute_volume_uL} µL {self.elute_buffer}."
if self.elute_wait_sec:
ul += f"Wait at least {format_sec(self.elute_wait_sec)}."
ul += flush_column(self.elute_spin_sec, keep_flowthrough=True)
return p
def get_product_conc(self):
v0 = unanimous(x.precursor.volume for x in self.products)
c0 = unanimous(x.precursor.conc for x in self.products)
return c0 * (v0 / self.product_volume)
def get_product_volume(self):
return Quantity(self.elute_volume_uL, 'µL')
def zip_params(*params):
from itertools import repeat
from more_itertools import always_iterable
yield from zip(*(
always_iterable(p or repeat(p))
for p in params
))
def resolve_volume(volume_uL, volume_x, sample_volume_uL):
if volume_uL:
return f'{volume_uL} µL'
elif sample_volume_uL:
return f'{volume_x * sample_volume_uL} µL'
else:
return f'{volume_x} volumes'
def flush_column(spin_time_sec, use_vacuum=False, keep_flowthrough=False):
if use_vacuum:
return "Apply vacuum."
else:
if not spin_time_sec:
raise ValueError("no spin time specified")
return f"Spin {format_sec(spin_time_sec)}; {'keep' if keep_flowthrough else 'discard'} flow-through."
if __name__ == '__main__':
SpinCleanup.main() | stepwise_mol_bio/spin_cleanup.py |
import stepwise, appcli, autoprop
from inform import warn
from appcli import Key, Method, DocoptConfig
from stepwise import StepwiseConfig, PresetConfig, Quantity, oxford_comma
from stepwise_mol_bio import Cleanup, format_sec
from freezerbox import MakerConfig, group_by_identity, parse_volume_uL, unanimous
from more_itertools import always_iterable
def ng_uL(x):
return Quantity(x, 'ng/µL')
@autoprop
class SpinCleanup(Cleanup):
"""\
Purify a PCR reaction using a silica spin column.
Usage:
spin_cleanup [<preset>] [-s <µL>] [-d <buffer>] [-v <µL>]
<%! from stepwise_mol_bio import hanging_indent %>\
Arguments:
<preset> [default: ${app.preset}]
The default parameters to use. Typically these correspond to
commercial kits:
${hanging_indent(app.preset_briefs, 8*' ')}
Options:
-s --sample-volume <µL>
The volume of the sample, in µL.
-d --elute-buffer <name>
The buffer to elute in.
-v --elute-volume <µL>
The volume of purified DNA/RNA to elute, in µL. The default value
depends on the preset, but can usually be lowered to get more
concentrated product. A warning will be displayed if the requested
volume is lower than the minimum recommended by the kit manufacturer.
Configuration:
Default values for this protocol can be specified in any of the following
stepwise configuration files:
${hanging_indent(app.config_paths, 8)}
molbio.spin_cleanup.default_preset:
The default value for the `--preset` option.
molbio.spin_cleanup.presets:
Named groups of default reaction parameters. Typically each preset
corresponds to a particular kit or protocol. See below for the various
settings that can be specified in each preset.
molbio.spin_cleanup.presets.<name>.protocol_name
How to refer to the whole protocol. Commonly this is the name of the
spin column kit.
molbio.spin_cleanup.presets.<name>.protocol_link
A link (typically minified) to the complete protocol, e.g. as published
by the manufacturer of the columns. This is not required, but if
specified, will be included in the protocol as a footnote.
molbio.spin_cleanup.presets.<name>.column_name
How to refer to the specific spin column used in the protocol.
molbio.spin_cleanup.presets.<name>.spin_speed_g
How fast to spin the column in each centrifugation step, in units of
g-force.
molbio.spin_cleanup.presets.<name>.column_capacity_ug
The maximum binding capacity of the column, in µg. This information is
added to the protocol as a footnote.
molbio.spin_cleanup.presets.<name>.sample_type
How to generically refer to the sample in the protocol, e.g. "DNA".
molbio.spin_cleanup.presets.<name>.sample_volume_uL
The volume of sample to load on the column, in µL. Alternatively, this
can be a dictionary with keys 'min' and/or 'max' specifying the minimum
and maximum allowed sample volumes, respectively.
molbio.spin_cleanup.presets.<name>.bind_buffer
The name(s) of the buffer(s) to use to bind the sample to column. This
can be either a string or a list of strings. Use a list to specify
that multiple buffers (e.g. binding buffer and ethanol) should be mixed
with the sample before it is loaded on the column. If this option is a
list, the `bind_volume_uL` and `bind_volume_x` options must also be
lists of the same length (or left unspecified).
molbio.spin_cleanup.presets.<name>.bind_volume_uL
How much `bind_buffer` to use, in µL. This can be either a number or a
list of numbers; see `bind_buffer` for more details. This takes
precedence over the `bind_volume_x` setting.
molbio.spin_cleanup.presets.<name>.bind_volume_x
How much `bind_buffer` to use, as a multiple of the sample volume.
This can be a number or a list of numbers; see `bind_buffer` for more
details. This is superseded by the `bind_volume_uL` setting.
molbio.spin_cleanup.presets.<name>.bind_spin_sec
How long to centrifuge the column during the bind step.
molbio.spin_cleanup.presets.<name>.bind_vacuum
Whether or not to use a vacuum manifold for the bind step. The default
is False. If True, the `bind_spin_sec` option is ignored.
molbio.spin_cleanup.presets.<name>.pH_buffer
The name of the buffer to use when adjusting the pH of the sample.
molbio.spin_cleanup.presets.<name>.pH_volume_uL
How much `pH_buffer` to use, in µL. This takes precedence over the
`pH_volume_x` setting.
molbio.spin_cleanup.presets.<name>.pH_volume_x
How much `pH_buffer` to use, as a multiple of the sample volume.
This is superseded by the `pH_volume_uL` setting.
molbio.spin_cleanup.presets.<name>.pH_color
The color the sample/binding buffer should be after reaching the
correct pH.
molbio.spin_cleanup.presets.<name>.wash_buffer
The name of the buffer to use when washing the column. This can either
be a string or a list of strings. Use a list to specify that there
should be multiple wash steps. If this option is a list, the
`wash_volume_uL`, `wash_spin_sec`, and `wash_vacuum` options must also
be lists of the same length (or left unspecified).
molbio.spin_cleanup.presets.<name>.wash_volume_uL
The volume of `wash_buffer` to use, in µL. This can either be a number
or a list of numbers; see `wash_buffer` for more details.
molbio.spin_cleanup.presets.<name>.wash_spin_sec
How long to centrifuge the column during the wash step. This can
either be a number or a list of numbers; see `wash_buffer` for more
details.
molbio.spin_cleanup.presets.<name>.wash_vacuum
Whether or not to use a vacuum manifold for the wash step. This can
either be a boolean or a list of booleans; see `wash_buffer` for more
details. The default is False. If True, the `wash_spin_sec` option is
ignored.
molbio.spin_cleanup.presets.<name>.dry_spin_sec
How long to centrifuge the column after the wash step(s), e.g. to
remove any residual ethanol. If left unspecified, this step will not
be included in the protocol.
molbio.spin_cleanup.presets.<name>.elute_buffer
The default value for the `--elute-buffer` flag.
molbio.spin_cleanup.presets.<name>.elute_volume_uL
The default value for the `--elute-volume` flag.
molbio.spin_cleanup.presets.<name>.elute_min_volume_uL
The minimum recommended volume to elute in. Smaller volumes can still
be specified, but will be accompanied by a warning.
molbio.spin_cleanup.presets.<name>.elute_wait_sec
How long to incubate the column with elution buffer before eluting, in
seconds.
molbio.spin_cleanup.presets.<name>.elute_spin_sec
How long to centrifuge the column when eluting.
Database:
Spin-column cleanup protocols can appear in the "Cleanups" column of a
FreezerBox database:
spin-cleanup [<preset>] [volume=<µL>] [buffer=<name>]
<preset>
See `<preset>`.
volume=<µL>
See `--elute-volume`. Must specify a unit.
buffer=<µL>
See `--elute-buffer`.
"""
__config__ = [
DocoptConfig,
MakerConfig,
PresetConfig,
StepwiseConfig.setup('molbio.spin_cleanup'),
]
preset_briefs = appcli.config_attr()
config_paths = appcli.config_attr()
preset_brief_template = '{protocol_name}'
presets = appcli.param(
Key(StepwiseConfig, 'presets'),
pick=list,
)
preset = appcli.param(
Key(DocoptConfig, '<preset>'),
Key(MakerConfig, 1),
Key(StepwiseConfig, 'default_preset'),
)
protocol_name = appcli.param(
Key(PresetConfig, 'protocol_name'),
)
protocol_link = appcli.param(
Key(PresetConfig, 'protocol_link'),
default=None,
)
column_name = appcli.param(
Key(PresetConfig, 'column_name'),
default='silica spin column',
)
spin_speed_g = appcli.param(
Key(PresetConfig, 'spin_speed_g'),
default=None,
)
column_capacity_ug = appcli.param(
Key(PresetConfig, 'column_capacity_ug'),
default=None,
)
sample_type = appcli.param(
Key(PresetConfig, 'sample_type'),
default='DNA',
)
sample_volume_uL = appcli.param(
Key(DocoptConfig, '--sample-volume', cast=float),
default=None,
)
target_sample_volume_uL = appcli.param(
Key(PresetConfig, 'sample_volume_uL'),
default=None,
)
bind_buffer = appcli.param(
Key(PresetConfig, 'bind_buffer'),
)
bind_volume_uL = appcli.param(
Key(PresetConfig, 'bind_volume_uL'),
default=None
)
bind_volume_x = appcli.param(
Key(PresetConfig, 'bind_volume_x'),
default=None
)
bind_spin_sec = appcli.param(
Key(PresetConfig, 'bind_spin_sec'),
default=None
)
bind_vacuum = appcli.param(
Key(PresetConfig, 'bind_vacuum'),
default=False,
)
ph_buffer = appcli.param(
Key(PresetConfig, 'pH_buffer'),
default=None,
)
ph_volume_uL = appcli.param(
Key(PresetConfig, 'pH_volume_uL'),
default=None
)
ph_volume_x = appcli.param(
Key(PresetConfig, 'pH_volume_x'),
default=None
)
ph_color = appcli.param(
Key(PresetConfig, 'pH_color'),
)
wash_buffer = appcli.param(
Key(PresetConfig, 'wash_buffer'),
)
wash_volume_uL = appcli.param(
Key(PresetConfig, 'wash_volume_uL'),
)
wash_spin_sec = appcli.param(
Key(PresetConfig, 'wash_spin_sec'),
default=None,
)
wash_vacuum = appcli.param(
Key(PresetConfig, 'wash_vacuum'),
default=False,
)
dry_spin_sec = appcli.param(
Key(PresetConfig, 'dry_spin_sec'),
default=None,
)
elute_buffer = appcli.param(
Key(DocoptConfig, '--elute-buffer'),
Key(MakerConfig, 'buffer'),
Key(PresetConfig, 'elute_buffer'),
)
elute_volume_uL = appcli.param(
Key(DocoptConfig, '--elute-volume', cast=float),
Key(MakerConfig, 'volume', cast=parse_volume_uL),
Key(PresetConfig, 'elute_volume_uL'),
)
elute_min_volume_uL = appcli.param(
Key(PresetConfig, 'elute_min_volume_uL'),
default=None,
)
elute_wait_sec = appcli.param(
Key(PresetConfig, 'elute_wait_sec'),
default=None,
)
elute_spin_sec = appcli.param(
Key(PresetConfig, 'elute_spin_sec'),
)
group_by = {
'preset': group_by_identity,
'elute_buffer': group_by_identity,
'elute_volume_uL': group_by_identity,
}
def __init__(self, preset=None):
if preset is not None:
self.preset = preset
def get_protocol(self):
p = stepwise.Protocol()
pl = stepwise.paragraph_list()
ul = stepwise.unordered_list()
def break_if_too_long(pl, ul, n=4):
if len(ul) > n:
ul = stepwise.unordered_list()
pl += ul
return ul
footnotes = []
if self.protocol_link:
footnotes.append(self.protocol_link)
if self.column_capacity_ug:
footnotes.append(f"Column capacity: {self.column_capacity_ug} µg")
if self.product_tags and self.show_product_tags:
product_tags = oxford_comma(self.product_tags) + ' '
else:
product_tags = ''
p += pl
pl += f"Purify {product_tags}using {self.protocol_name}{p.add_footnotes(*footnotes)}:"
pl += ul
if self.spin_speed_g:
ul += f"Perform all spin steps at {self.spin_speed_g}g."
## Dilute
if x := self.target_sample_volume_uL:
v = self.sample_volume_uL
if not isinstance(x, dict):
target = f'{x} µL'
skip = v and v == x
self.sample_volume_uL = x
elif 'min' in x and 'max' in x:
target = f"between {x['min']}–{x['max']} µL"
skip = v and x['min'] <= v <= x['max']
elif 'min' in x:
target = f"at least {x['min']} µL"
skip = v and x['min'] <= v
elif 'max' in x:
target = f"at most {x['max']} µL"
skip = v and v <= x['max']
if not skip:
ul += f"Ensure that the sample is {target}."
## Bind
bind_params = zip_params(
self.bind_buffer,
self.bind_volume_x,
self.bind_volume_uL,
)
for bind_buffer, bind_volume_x, bind_volume_uL in bind_params:
bind_volume = resolve_volume(bind_volume_uL, bind_volume_x, self.sample_volume_uL)
ul += f"Add {bind_volume} {bind_buffer} to the crude {self.sample_type}."
if self.ph_buffer:
ph_volume = resolve_volume(self.ph_volume_uL, self.ph_volume_x, self.sample_volume_uL)
ul += f"If not {self.ph_color}: Add {ph_volume} {self.ph_buffer}."
ul += f"Load on a {self.column_name}."
ul += flush_column(self.bind_spin_sec, self.bind_vacuum)
ul = break_if_too_long(pl, ul)
## Wash
wash_params = zip_params(
self.wash_buffer,
self.wash_volume_uL,
self.wash_spin_sec,
self.wash_vacuum,
)
for wash_buffer, wash_volume_uL, wash_spin_sec, wash_vacuum in wash_params:
ul += f"Add {wash_volume_uL} µL {wash_buffer}."
ul += flush_column(wash_spin_sec, wash_vacuum)
## Dry
if self.dry_spin_sec:
ul += flush_column(self.dry_spin_sec)
ul = break_if_too_long(pl, ul)
## Elute
if self.elute_volume_uL < self.elute_min_volume_uL:
warn(f"Elution volume ({self.elute_volume_uL} µL) is below the recommended minimum ({self.elute_min_volume_uL} µL).")
ul += f"Add {self.elute_volume_uL} µL {self.elute_buffer}."
if self.elute_wait_sec:
ul += f"Wait at least {format_sec(self.elute_wait_sec)}."
ul += flush_column(self.elute_spin_sec, keep_flowthrough=True)
return p
def get_product_conc(self):
v0 = unanimous(x.precursor.volume for x in self.products)
c0 = unanimous(x.precursor.conc for x in self.products)
return c0 * (v0 / self.product_volume)
def get_product_volume(self):
return Quantity(self.elute_volume_uL, 'µL')
def zip_params(*params):
from itertools import repeat
from more_itertools import always_iterable
yield from zip(*(
always_iterable(p or repeat(p))
for p in params
))
def resolve_volume(volume_uL, volume_x, sample_volume_uL):
if volume_uL:
return f'{volume_uL} µL'
elif sample_volume_uL:
return f'{volume_x * sample_volume_uL} µL'
else:
return f'{volume_x} volumes'
def flush_column(spin_time_sec, use_vacuum=False, keep_flowthrough=False):
if use_vacuum:
return "Apply vacuum."
else:
if not spin_time_sec:
raise ValueError("no spin time specified")
return f"Spin {format_sec(spin_time_sec)}; {'keep' if keep_flowthrough else 'discard'} flow-through."
if __name__ == '__main__':
SpinCleanup.main() | 0.757256 | 0.427755 |
from invenio_indexer.api import RecordIndexer
from invenio_records_rest.utils import allow_all
from .api import Loan
from .links import loan_links_factory
from .search import LoansSearch
from .transitions.transitions import CreatedToItemOnLoan, CreatedToPending, \
ItemAtDeskToItemOnLoan, ItemInTransitHouseToItemReturned, \
ItemOnLoanToItemInTransitHouse, ItemOnLoanToItemOnLoan, \
ItemOnLoanToItemReturned, PendingToItemAtDesk, \
PendingToItemInTransitPickup
from .utils import get_default_extension_duration, \
get_default_extension_max_count, get_default_loan_duration, \
is_item_available, is_loan_duration_valid, item_exists, \
item_location_retriever, patron_exists
_CIRCULATION_LOAN_PID_TYPE = 'loanid'
"""."""
_CIRCULATION_LOAN_MINTER = 'loanid'
"""."""
_CIRCULATION_LOAN_FETCHER = 'loanid'
"""."""
_Loan_PID = 'pid(loanid,record_class="invenio_circulation.api:Loan")'
"""."""
_CIRCULATION_LOAN_LINKS_FACTORY = loan_links_factory
"""."""
CIRCULATION_ITEMS_RETRIEVER_FROM_DOCUMENT = None
"""Function that returns a list of item pids given a document pid."""
CIRCULATION_DOCUMENT_RETRIEVER_FROM_ITEM = None
"""Function that returns the document pid of a given item pid."""
CIRCULATION_PERMISSION_FACTORY = allow_all
"""."""
CIRCULATION_STATES_ITEM_AVAILABLE = ['ITEM_RETURNED']
"""."""
CIRCULATION_LOAN_TRANSITIONS = {
'CREATED': [
dict(dest='PENDING', trigger='request', transition=CreatedToPending),
dict(dest='ITEM_ON_LOAN', trigger='checkout',
transition=CreatedToItemOnLoan)
],
'PENDING': [
dict(dest='ITEM_AT_DESK', transition=PendingToItemAtDesk),
dict(dest='ITEM_IN_TRANSIT_FOR_PICKUP',
transition=PendingToItemInTransitPickup),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_AT_DESK': [
dict(dest='ITEM_ON_LOAN', transition=ItemAtDeskToItemOnLoan),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_IN_TRANSIT_FOR_PICKUP': [
dict(dest='ITEM_AT_DESK'),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_ON_LOAN': [
dict(dest='ITEM_RETURNED', transition=ItemOnLoanToItemReturned),
dict(dest='ITEM_IN_TRANSIT_TO_HOUSE',
transition=ItemOnLoanToItemInTransitHouse),
dict(dest='ITEM_ON_LOAN', transition=ItemOnLoanToItemOnLoan,
trigger='extend'),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_IN_TRANSIT_TO_HOUSE': [
dict(dest='ITEM_RETURNED',
transition=ItemInTransitHouseToItemReturned),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_RETURNED': [],
'CANCELLED': [],
}
"""."""
CIRCULATION_LOAN_INITIAL_STATE = 'CREATED'
"""."""
CIRCULATION_PATRON_EXISTS = patron_exists
"""."""
CIRCULATION_ITEM_EXISTS = item_exists
"""."""
CIRCULATION_ITEM_LOCATION_RETRIEVER = item_location_retriever
"""."""
CIRCULATION_POLICIES = dict(
checkout=dict(
duration_default=get_default_loan_duration,
duration_validate=is_loan_duration_valid,
item_available=is_item_available
),
extension=dict(
from_end_date=True,
duration_default=get_default_extension_duration,
max_count=get_default_extension_max_count
),
)
"""."""
CIRCULATION_REST_ENDPOINTS = dict(
loanid=dict(
pid_type=_CIRCULATION_LOAN_PID_TYPE,
pid_minter=_CIRCULATION_LOAN_MINTER,
pid_fetcher=_CIRCULATION_LOAN_FETCHER,
search_class=LoansSearch,
search_type=None,
record_class=Loan,
record_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_response'),
},
search_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_search'),
},
list_route='/circulation/loan/',
item_route='/circulation/loan/<{0}:pid_value>'.format(_Loan_PID),
default_media_type='application/json',
links_factory_imp=_CIRCULATION_LOAN_LINKS_FACTORY,
max_result_window=10000,
error_handlers=dict(),
),
)
"""."""
CIRCULATION_REST_PERMISSION_FACTORIES = {}
""".""" | invenio_circulation/config.py | from invenio_indexer.api import RecordIndexer
from invenio_records_rest.utils import allow_all
from .api import Loan
from .links import loan_links_factory
from .search import LoansSearch
from .transitions.transitions import CreatedToItemOnLoan, CreatedToPending, \
ItemAtDeskToItemOnLoan, ItemInTransitHouseToItemReturned, \
ItemOnLoanToItemInTransitHouse, ItemOnLoanToItemOnLoan, \
ItemOnLoanToItemReturned, PendingToItemAtDesk, \
PendingToItemInTransitPickup
from .utils import get_default_extension_duration, \
get_default_extension_max_count, get_default_loan_duration, \
is_item_available, is_loan_duration_valid, item_exists, \
item_location_retriever, patron_exists
_CIRCULATION_LOAN_PID_TYPE = 'loanid'
"""."""
_CIRCULATION_LOAN_MINTER = 'loanid'
"""."""
_CIRCULATION_LOAN_FETCHER = 'loanid'
"""."""
_Loan_PID = 'pid(loanid,record_class="invenio_circulation.api:Loan")'
"""."""
_CIRCULATION_LOAN_LINKS_FACTORY = loan_links_factory
"""."""
CIRCULATION_ITEMS_RETRIEVER_FROM_DOCUMENT = None
"""Function that returns a list of item pids given a document pid."""
CIRCULATION_DOCUMENT_RETRIEVER_FROM_ITEM = None
"""Function that returns the document pid of a given item pid."""
CIRCULATION_PERMISSION_FACTORY = allow_all
"""."""
CIRCULATION_STATES_ITEM_AVAILABLE = ['ITEM_RETURNED']
"""."""
CIRCULATION_LOAN_TRANSITIONS = {
'CREATED': [
dict(dest='PENDING', trigger='request', transition=CreatedToPending),
dict(dest='ITEM_ON_LOAN', trigger='checkout',
transition=CreatedToItemOnLoan)
],
'PENDING': [
dict(dest='ITEM_AT_DESK', transition=PendingToItemAtDesk),
dict(dest='ITEM_IN_TRANSIT_FOR_PICKUP',
transition=PendingToItemInTransitPickup),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_AT_DESK': [
dict(dest='ITEM_ON_LOAN', transition=ItemAtDeskToItemOnLoan),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_IN_TRANSIT_FOR_PICKUP': [
dict(dest='ITEM_AT_DESK'),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_ON_LOAN': [
dict(dest='ITEM_RETURNED', transition=ItemOnLoanToItemReturned),
dict(dest='ITEM_IN_TRANSIT_TO_HOUSE',
transition=ItemOnLoanToItemInTransitHouse),
dict(dest='ITEM_ON_LOAN', transition=ItemOnLoanToItemOnLoan,
trigger='extend'),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_IN_TRANSIT_TO_HOUSE': [
dict(dest='ITEM_RETURNED',
transition=ItemInTransitHouseToItemReturned),
dict(dest='CANCELLED', trigger='cancel')
],
'ITEM_RETURNED': [],
'CANCELLED': [],
}
"""."""
CIRCULATION_LOAN_INITIAL_STATE = 'CREATED'
"""."""
CIRCULATION_PATRON_EXISTS = patron_exists
"""."""
CIRCULATION_ITEM_EXISTS = item_exists
"""."""
CIRCULATION_ITEM_LOCATION_RETRIEVER = item_location_retriever
"""."""
CIRCULATION_POLICIES = dict(
checkout=dict(
duration_default=get_default_loan_duration,
duration_validate=is_loan_duration_valid,
item_available=is_item_available
),
extension=dict(
from_end_date=True,
duration_default=get_default_extension_duration,
max_count=get_default_extension_max_count
),
)
"""."""
CIRCULATION_REST_ENDPOINTS = dict(
loanid=dict(
pid_type=_CIRCULATION_LOAN_PID_TYPE,
pid_minter=_CIRCULATION_LOAN_MINTER,
pid_fetcher=_CIRCULATION_LOAN_FETCHER,
search_class=LoansSearch,
search_type=None,
record_class=Loan,
record_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_response'),
},
search_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_search'),
},
list_route='/circulation/loan/',
item_route='/circulation/loan/<{0}:pid_value>'.format(_Loan_PID),
default_media_type='application/json',
links_factory_imp=_CIRCULATION_LOAN_LINKS_FACTORY,
max_result_window=10000,
error_handlers=dict(),
),
)
"""."""
CIRCULATION_REST_PERMISSION_FACTORIES = {}
""".""" | 0.642432 | 0.086825 |
import socket
import os
import sys
import threading
from select import select
from pasync._compat import (
Empty, Full, iteritems, BytesIO, recv, b, byte_to_chr,
nativerstr
)
from pasync._compat import LifoQueue, Queue
from pasync.hooks import task_callback_hook
from pasync.exceptions import (
PAsyncError,
TimeoutError,
ConnectionError,
SocketQueueError,
SocketRecvQueueFullError,
SocketRecvQueueEmptyError,
InvalidResponse,
ResponseError,
ExecAbortError,
BusyLoadingError,
NoScriptError,
ReadOnlyError
)
from pasync.utils import json_encode, json_decode
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': {
'max number of clients reached': ConnectionError
},
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
'READONLY': ReadOnlyError
}
def parser_error(self, response):
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
excepttion_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(excepttion_class, dict):
excepttion_class = excepttion_class.get(response, ResponseError)
return excepttion_class(response)
return ResponseError(response)
class SocketBuffer(object):
def __init__(self, socket, socket_read_size):
self._sock = socket
self.socket_read_size = socket_read_size
self._buffer = BytesIO()
self.bytes_written = 0
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def _read_from_socket(self, length=None):
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
try:
while True:
data = recv(self._sock, socket_read_size)
if isinstance(data, bytes) and len(data) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
break
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
def read(self, length):
length += 2
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
try:
self.purge()
self._buffer.close()
except:
pass
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
encoding = None
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock, self.socket_read_size)
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
if self._sock is not None:
self._sock.close()
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoding = None
def can_read(self):
return self._buffer and bool(self._buffer.length)
def read_response(self):
response = self._buffer.readline()
if not response:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = byte_to_chr(response[0], response[1:])
if byte not in ('-', '+', ':', '$', '*'):
raise InvalidResponse("Protocol Error: %s, %s" %
(str(byte), str(response)))
if byte == '-':
response = nativerstr(response)
error = self.parser_error(response)
if isinstance(error, ConnectionError):
raise error
return error
elif byte == '+':
pass
elif byte == ':':
response = long(response)
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
elif byte == '*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for _ in xrange(length)]
if isinstance(response, bytes) and self.encoding:
response = response.decode(self.encoding)
return response
class Connection(object):
"""Manages TCP communication to and from QServer"""
description_format = "Connection<host={}, port={}>"
def __init__(self, host="localhost", port=1234, socket_timeout=None,
socket_connect_timeout=None, socket_keepalive=False,
socket_keepalive_options=None, retry_on_time=False,
encoding='utf-8', encoding_errors='strict', queue_class=Queue,
queue_timeout=5, queue_max_size=100, decode_responses=False,
parser_class=PythonParser, socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_time = retry_on_time
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self.queue_class = queue_class
self.queue_max_size = queue_max_size
self.queue_timeout = queue_timeout
self.socket_read_size = socket_read_size
self._sock = None
self._parser = parser_class(socket_read_size)
self._connect_callback = []
# register callback
task_callback_hook.register(self._set_result)
self.task_id = 0
self._init_queue()
def _init_queue(self):
self.queue = self.queue_class(maxsize=self.queue_max_size)
def __repr__(self):
return self.description_format.format(self.host, self.port)
def register_connect_callback(self, callback):
self._connect_callback.append(callback)
def clear_connect_callback(self):
self._connect_callback = []
def connect(self):
if self._sock:
return
try:
sock = self._connect()
except socket.timeout:
raise TimeoutError("Timeout connecting to server")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except PAsyncError:
self.disconnect()
raise
for callback in self._connect_callback:
if callable(callback):
callback(self)
def _connect(self):
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.SOL_TCP, k, v)
sock.settimeout(self.socket_connect_timeout)
sock.connect(socket_address)
sock.settimeout(self.socket_timeout)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
if len(exception.args) == 1:
return "Error connection to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
self._parser.on_connect(self)
def disconnect(self):
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send(self, data, ack=True, **kwargs):
task = {
'task_id': self.task_id,
'task_content': data,
'task_params': kwargs
}
if self._sock is None:
raise ConnectionError("Socket has not created!!")
try:
self._sock.sendall(json_encode(task))
received = json_decode(self._sock.recv(self.socket_read_size))
# After received ack
self.task_id += 1
if ack:
if received.get('task_ack') is True:
pass
else:
print received.get('msg')
except Exception:
self.disconnect()
raise
def can_read(self, timeout=0):
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
if isinstance(value, Token):
return b(value.value)
elif isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
return b(str(value))
elif isinstance(value, float):
return b(repr(value))
elif not isinstance(value, basestring):
value = unicode(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def _set_result(self, ret):
if not hasattr(self, "queue"):
raise SocketQueueError("Socket queue has not Initialized")
if self.queue.qsize() < self.queue_max_size:
self.queue.put_nowait(ret)
else:
try:
self.queue.put(ret, timeout=self.queue_timeout)
except Full:
raise SocketRecvQueueFullError(
"Socket result has too many results hasn't been consume."
"use **conn.get(result)** to consume."
)
def get_result(self, timeout=5):
if self.queue.qsize() > 0:
return self.queue.get_nowait()
try:
return self.queue.get(timeout=self.queue_timeout)
except Empty:
raise SocketRecvQueueEmptyError("No reslut.")
class ConnectionPool(object):
def __init__(self, connection_class=Connection, max_connections=50,
timeout=20, queue_class=LifoQueue, **connection_kwargs):
self.connection_class = connection_class
self.queue_class = queue_class
self.timeout = timeout
self.max_connections = max_connections
self.connection_kwargs = connection_kwargs
self.reset()
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
self._connections = []
def _check_pid(self):
"Check if has changed process."
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
return
self.disconnect()
self.reset()
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self):
self._check_pid()
connection = None
try:
connection = self.pool.get(timeout=self.timeout)
except Empty:
raise ConnectionError("No connection available.")
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Release the connection back to the pool."
self._check_pid()
if connection.pid != self.pid:
return
# Put the connetion back to the pool.
try:
self.pool.put_nowait(connection)
except Full:
pass
def disconnect(self):
for connection in self._connections:
connection.disconnect() | pasync/connection.py |
import socket
import os
import sys
import threading
from select import select
from pasync._compat import (
Empty, Full, iteritems, BytesIO, recv, b, byte_to_chr,
nativerstr
)
from pasync._compat import LifoQueue, Queue
from pasync.hooks import task_callback_hook
from pasync.exceptions import (
PAsyncError,
TimeoutError,
ConnectionError,
SocketQueueError,
SocketRecvQueueFullError,
SocketRecvQueueEmptyError,
InvalidResponse,
ResponseError,
ExecAbortError,
BusyLoadingError,
NoScriptError,
ReadOnlyError
)
from pasync.utils import json_encode, json_decode
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': {
'max number of clients reached': ConnectionError
},
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
'READONLY': ReadOnlyError
}
def parser_error(self, response):
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
excepttion_class = self.EXCEPTION_CLASSES[error_code]
if isinstance(excepttion_class, dict):
excepttion_class = excepttion_class.get(response, ResponseError)
return excepttion_class(response)
return ResponseError(response)
class SocketBuffer(object):
def __init__(self, socket, socket_read_size):
self._sock = socket
self.socket_read_size = socket_read_size
self._buffer = BytesIO()
self.bytes_written = 0
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def _read_from_socket(self, length=None):
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
try:
while True:
data = recv(self._sock, socket_read_size)
if isinstance(data, bytes) and len(data) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
break
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
def read(self, length):
length += 2
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_CRLF):
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-2]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
try:
self.purge()
self._buffer.close()
except:
pass
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
encoding = None
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock, self.socket_read_size)
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
if self._sock is not None:
self._sock.close()
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoding = None
def can_read(self):
return self._buffer and bool(self._buffer.length)
def read_response(self):
response = self._buffer.readline()
if not response:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = byte_to_chr(response[0], response[1:])
if byte not in ('-', '+', ':', '$', '*'):
raise InvalidResponse("Protocol Error: %s, %s" %
(str(byte), str(response)))
if byte == '-':
response = nativerstr(response)
error = self.parser_error(response)
if isinstance(error, ConnectionError):
raise error
return error
elif byte == '+':
pass
elif byte == ':':
response = long(response)
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self._buffer.read(length)
elif byte == '*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for _ in xrange(length)]
if isinstance(response, bytes) and self.encoding:
response = response.decode(self.encoding)
return response
class Connection(object):
"""Manages TCP communication to and from QServer"""
description_format = "Connection<host={}, port={}>"
def __init__(self, host="localhost", port=1234, socket_timeout=None,
socket_connect_timeout=None, socket_keepalive=False,
socket_keepalive_options=None, retry_on_time=False,
encoding='utf-8', encoding_errors='strict', queue_class=Queue,
queue_timeout=5, queue_max_size=100, decode_responses=False,
parser_class=PythonParser, socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = port
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_time = retry_on_time
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self.queue_class = queue_class
self.queue_max_size = queue_max_size
self.queue_timeout = queue_timeout
self.socket_read_size = socket_read_size
self._sock = None
self._parser = parser_class(socket_read_size)
self._connect_callback = []
# register callback
task_callback_hook.register(self._set_result)
self.task_id = 0
self._init_queue()
def _init_queue(self):
self.queue = self.queue_class(maxsize=self.queue_max_size)
def __repr__(self):
return self.description_format.format(self.host, self.port)
def register_connect_callback(self, callback):
self._connect_callback.append(callback)
def clear_connect_callback(self):
self._connect_callback = []
def connect(self):
if self._sock:
return
try:
sock = self._connect()
except socket.timeout:
raise TimeoutError("Timeout connecting to server")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except PAsyncError:
self.disconnect()
raise
for callback in self._connect_callback:
if callable(callback):
callback(self)
def _connect(self):
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.SOL_TCP, k, v)
sock.settimeout(self.socket_connect_timeout)
sock.connect(socket_address)
sock.settimeout(self.socket_timeout)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
if len(exception.args) == 1:
return "Error connection to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
self._parser.on_connect(self)
def disconnect(self):
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send(self, data, ack=True, **kwargs):
task = {
'task_id': self.task_id,
'task_content': data,
'task_params': kwargs
}
if self._sock is None:
raise ConnectionError("Socket has not created!!")
try:
self._sock.sendall(json_encode(task))
received = json_decode(self._sock.recv(self.socket_read_size))
# After received ack
self.task_id += 1
if ack:
if received.get('task_ack') is True:
pass
else:
print received.get('msg')
except Exception:
self.disconnect()
raise
def can_read(self, timeout=0):
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
if isinstance(value, Token):
return b(value.value)
elif isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
return b(str(value))
elif isinstance(value, float):
return b(repr(value))
elif not isinstance(value, basestring):
value = unicode(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def _set_result(self, ret):
if not hasattr(self, "queue"):
raise SocketQueueError("Socket queue has not Initialized")
if self.queue.qsize() < self.queue_max_size:
self.queue.put_nowait(ret)
else:
try:
self.queue.put(ret, timeout=self.queue_timeout)
except Full:
raise SocketRecvQueueFullError(
"Socket result has too many results hasn't been consume."
"use **conn.get(result)** to consume."
)
def get_result(self, timeout=5):
if self.queue.qsize() > 0:
return self.queue.get_nowait()
try:
return self.queue.get(timeout=self.queue_timeout)
except Empty:
raise SocketRecvQueueEmptyError("No reslut.")
class ConnectionPool(object):
def __init__(self, connection_class=Connection, max_connections=50,
timeout=20, queue_class=LifoQueue, **connection_kwargs):
self.connection_class = connection_class
self.queue_class = queue_class
self.timeout = timeout
self.max_connections = max_connections
self.connection_kwargs = connection_kwargs
self.reset()
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
self._connections = []
def _check_pid(self):
"Check if has changed process."
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
return
self.disconnect()
self.reset()
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self):
self._check_pid()
connection = None
try:
connection = self.pool.get(timeout=self.timeout)
except Empty:
raise ConnectionError("No connection available.")
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Release the connection back to the pool."
self._check_pid()
if connection.pid != self.pid:
return
# Put the connetion back to the pool.
try:
self.pool.put_nowait(connection)
except Full:
pass
def disconnect(self):
for connection in self._connections:
connection.disconnect() | 0.45423 | 0.079639 |
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import array_to_img
from tensorflow import keras
from tensorflow.keras.preprocessing import image_dataset_from_directory
from keras.callbacks import TensorBoard
from time import time
import tensorflow as tf
import os
import json
import base64
from io import BytesIO
from datetime import datetime
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import models as models
import numpy as np
import locale
def train_target_score_model(imagesPath, modelPath, tensorboard_gcs_logs, epochs_count, batch_size):
train_images_dir = imagesPath
scores_file = os.path.join(imagesPath, "scores.json")
IMAGE_SIZE = 128
with open(scores_file) as f:
train_data = json.load(f)
labels = []
img_data = []
img_index = 0
#tensorboard_gcs_logs = 'gs://dpa23/tarsanlogs'
img_to_show = []
def add_image(img, rotation, score):
labels.append(score)
if rotation != 0:
r_image = img.rotate(rotation)
else:
r_image = img
img_array = img_to_array(r_image) / 255.0
img_data.append(img_array)
for file_name, score in train_data.items():
full_file_name = os.path.join(train_images_dir, file_name)
print("Loading {} with score {}".format(full_file_name, score))
img = load_img(full_file_name, color_mode="grayscale", target_size=(IMAGE_SIZE,IMAGE_SIZE), interpolation='bilinear')
if img_index < 10:
buffered = BytesIO()
img.save(buffered, format="PNG")
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
img_to_show.append((img_base64, score, 0.0))
add_image(img, 0, score)
add_image(img, 45, score)
add_image(img, 90, score)
add_image(img, 135, score)
img_index = img_index + 1
np_labels = np.asfarray(labels)
np_image_data = np.asfarray(img_data)
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
split = train_test_split(np_labels, np_image_data, test_size=0.25, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split
# find the largest score in the training set and use it to
# scale the scores to the range [0, 1]
maxScore = trainAttrX.max()
trainY = trainAttrX / maxScore
testY = testAttrX / maxScore
# create our Convolutional Neural Network and then compile the model
# using mean absolute percentage error as our loss, implying that we
# seek to minimize the absolute percentage difference between our
# score *predictions* and the *actual score*
model = models.create_cnn(IMAGE_SIZE, IMAGE_SIZE, 1, regress=True)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
# Define Tensorboard as a Keras callback
tensorboard = TensorBoard(
log_dir=tensorboard_gcs_logs +'/' + datetime.now().strftime("%Y%m%d-%H%M%S"),
histogram_freq=5,
write_images=True,
update_freq='epoch'
)
keras_callbacks = []
if len(tensorboard_gcs_logs) > 2:
keras_callbacks = [
tensorboard
]
# train the model
print("[INFO] training model...")
model.fit(trainImagesX, trainY, validation_data=(testImagesX, testY),
epochs=epochs_count, batch_size=batch_size, callbacks=keras_callbacks)
print("[INFO] saving model to {} ...".format(modelPath))
model.save(modelPath)
# make predictions on the testing data
print("[INFO] predicting scores prices...")
preds = model.predict(testImagesX)
# compute the difference between the *predicted* scores and the
# *actual* scores, then compute the percentage difference and
# the absolute percentage difference
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
# finally, show some statistics on our model
print("[INFO] avg. score: {}, std score: {}".format(
np_labels.mean(),
np_labels.std()))
metrics = {
'metrics': [{
'name': 'diff-mean',
'numberValue': mean,
'format': "PERCENTAGE",
}]
}
with open('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
img_html = '<table><tr><th>Target</th><th>Actual Score</th><th>Predicted Score</th></tr>'
for (img_b64, s1, s2) in img_to_show:
html_line = '<tr><td><img src="data:image/png;base64, {}" alt="Target Example"></td><td>{}</td><td>{}</td></tr>'.format(img_b64, s1, s2)
img_html = img_html + html_line
img_html = img_html + '</table>'
metadata = {
'outputs' : [
{
'storage':'inline',
'source': 'Markdown text',
'type': 'markdown',
},
{
'type': 'web-app',
'storage': 'inline',
'source': img_html,
},
{
'type': 'tensorboard',
'source': tensorboard_gcs_logs,
}]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std)) | train_model/train_score_model.py | from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import array_to_img
from tensorflow import keras
from tensorflow.keras.preprocessing import image_dataset_from_directory
from keras.callbacks import TensorBoard
from time import time
import tensorflow as tf
import os
import json
import base64
from io import BytesIO
from datetime import datetime
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import models as models
import numpy as np
import locale
def train_target_score_model(imagesPath, modelPath, tensorboard_gcs_logs, epochs_count, batch_size):
    """Train a CNN that regresses a score for each target image.

    Loads grayscale images listed in <imagesPath>/scores.json, augments each
    with three rotations, trains a regression CNN on the scores normalized by
    the training-set maximum, saves the model, and writes Kubeflow Pipelines
    metrics and UI-metadata JSON files to the container root.

    Args:
        imagesPath: directory holding the images and scores.json (a mapping
            of file name -> numeric score).
        modelPath: path where the trained Keras model is saved.
        tensorboard_gcs_logs: GCS path for TensorBoard logs; logging is
            skipped when this string has length <= 2.
        epochs_count: number of training epochs.
        batch_size: training batch size.
    """
    train_images_dir = imagesPath
    scores_file = os.path.join(imagesPath, "scores.json")
    # All images are resized to IMAGE_SIZE x IMAGE_SIZE, single channel.
    IMAGE_SIZE = 128
    with open(scores_file) as f:
        train_data = json.load(f)
    labels = []
    img_data = []
    img_index = 0
    #tensorboard_gcs_logs = 'gs://dpa23/tarsanlogs'
    # (base64 PNG, actual score, predicted score) triples for the KFP UI table.
    img_to_show = []
    def add_image(img, rotation, score):
        # Append one (optionally rotated) image and its score to the dataset.
        labels.append(score)
        if rotation != 0:
            r_image = img.rotate(rotation)
        else:
            r_image = img
        # Scale pixel values to [0, 1].
        img_array = img_to_array(r_image) / 255.0
        img_data.append(img_array)
    for file_name, score in train_data.items():
        full_file_name = os.path.join(train_images_dir, file_name)
        print("Loading {} with score {}".format(full_file_name, score))
        img = load_img(full_file_name, color_mode="grayscale", target_size=(IMAGE_SIZE,IMAGE_SIZE), interpolation='bilinear')
        # Keep the first 10 images (pre-rotation) for the HTML preview table.
        if img_index < 10:
            buffered = BytesIO()
            img.save(buffered, format="PNG")
            img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
            img_to_show.append((img_base64, score, 0.0))
        # 4x augmentation: the original plus three rotations, same label.
        add_image(img, 0, score)
        add_image(img, 45, score)
        add_image(img, 90, score)
        add_image(img, 135, score)
        img_index = img_index + 1
    np_labels = np.asfarray(labels)
    np_image_data = np.asfarray(img_data)
    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    split = train_test_split(np_labels, np_image_data, test_size=0.25, random_state=42)
    (trainAttrX, testAttrX, trainImagesX, testImagesX) = split
    # find the largest score in the training set and use it to
    # scale the scores to the range [0, 1]
    maxScore = trainAttrX.max()
    trainY = trainAttrX / maxScore
    testY = testAttrX / maxScore
    # create our Convolutional Neural Network and then compile the model
    # using mean absolute percentage error as our loss, implying that we
    # seek to minimize the absolute percentage difference between our
    # score *predictions* and the *actual score*
    model = models.create_cnn(IMAGE_SIZE, IMAGE_SIZE, 1, regress=True)
    opt = Adam(lr=1e-3, decay=1e-3 / 200)
    model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
    # Define Tensorboard as a Keras callback
    tensorboard = TensorBoard(
        log_dir=tensorboard_gcs_logs +'/' + datetime.now().strftime("%Y%m%d-%H%M%S"),
        histogram_freq=5,
        write_images=True,
        update_freq='epoch'
    )
    keras_callbacks = []
    # Only attach TensorBoard when a plausible log path was supplied.
    if len(tensorboard_gcs_logs) > 2:
        keras_callbacks = [
            tensorboard
        ]
    # train the model
    print("[INFO] training model...")
    model.fit(trainImagesX, trainY, validation_data=(testImagesX, testY),
        epochs=epochs_count, batch_size=batch_size, callbacks=keras_callbacks)
    print("[INFO] saving model to {} ...".format(modelPath))
    model.save(modelPath)
    # make predictions on the testing data
    print("[INFO] predicting scores prices...")
    preds = model.predict(testImagesX)
    # compute the difference between the *predicted* scores and the
    # *actual* scores, then compute the percentage difference and
    # the absolute percentage difference
    # NOTE(review): divides by testY — assumes no test score is 0; confirm.
    diff = preds.flatten() - testY
    percentDiff = (diff / testY) * 100
    absPercentDiff = np.abs(percentDiff)
    # compute the mean and standard deviation of the absolute percentage
    # difference
    mean = np.mean(absPercentDiff)
    std = np.std(absPercentDiff)
    # finally, show some statistics on our model
    print("[INFO] avg. score: {}, std score: {}".format(
        np_labels.mean(),
        np_labels.std()))
    # Kubeflow Pipelines metrics file (read from the container root).
    metrics = {
        'metrics': [{
            'name': 'diff-mean',
            'numberValue': mean,
            'format': "PERCENTAGE",
        }]
    }
    with open('/mlpipeline-metrics.json', 'w') as f:
        json.dump(metrics, f)
    # Build an HTML table previewing the first images with their scores.
    img_html = '<table><tr><th>Target</th><th>Actual Score</th><th>Predicted Score</th></tr>'
    for (img_b64, s1, s2) in img_to_show:
        html_line = '<tr><td><img src="data:image/png;base64, {}" alt="Target Example"></td><td>{}</td><td>{}</td></tr>'.format(img_b64, s1, s2)
        img_html = img_html + html_line
    img_html = img_html + '</table>'
    # KFP UI metadata: a markdown note, the preview table, and TensorBoard.
    metadata = {
        'outputs' : [
            {
                'storage':'inline',
                'source': 'Markdown text',
                'type': 'markdown',
            },
            {
                'type': 'web-app',
                'storage': 'inline',
                'source': img_html,
            },
            {
                'type': 'tensorboard',
                'source': tensorboard_gcs_logs,
            }]
    }
    with open('/mlpipeline-ui-metadata.json', 'w') as f:
        json.dump(metadata, f)
    print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std))
from django.contrib.admin import RelatedFieldListFilter, ChoicesFieldListFilter
from django.contrib.admin.util import get_model_from_relation
from django.utils.encoding import smart_text
from client_admin.utils import get_admin_change_url
class LookupFilter(RelatedFieldListFilter):
    """Related-field admin filter that avoids loading every related row.

    Unlike RelatedFieldListFilter, it never queries the related table for
    all possible choices; it only fetches the one object that is currently
    selected (so its label can be displayed) and renders a popup-lookup
    widget via the custom template.
    """
    template = "admin/lookup_filter.html"
    def __init__(self, field, request, params, model, model_admin, field_path):
        """cut/pasted from RelatedFieldListFilter parent constructor, with db hit removed"""
        other_model = get_model_from_relation(field)
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull, None)
        # this is the one change from the parent constructor.
        # instead of getting all choices from the table, only pick one if theres already one set so we can display it
        self.lookup_choices = [(-1,""),(-2,"")] # needs at least TWO or admin wont show it if empty.
        if self.lookup_val:
            try:
                obj = field.rel.to.objects.get(pk=self.lookup_val)
                val = obj.__unicode__()
            except field.rel.to.DoesNotExist:
                # Stale filter value in the URL: show an empty label
                # instead of erroring out.
                val = ""
                pass
            self.lookup_choices.append((self.lookup_val,val))
        # note we are deliberately calling our parent's parent constructor
        super(RelatedFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
    def choices(self, cl):
        # First entry carries what the template needs to build the popup
        # lookup link; "PLACEHOLDER" is substituted on the client side.
        yield {
            'lookup_url': "%s?t=id" % get_admin_change_url(self.field.rel.to),
            'query_string': cl.get_query_string({self.lookup_kwarg: "PLACEHOLDER"}),
            'filter_name': self.field.rel.to._meta.model_name,
        }
        # Only the currently selected choice is yielded — the two dummy
        # placeholder entries (-1, -2) never match lookup_val.
        for pk_val, val in self.lookup_choices:
            if self.lookup_val == smart_text(pk_val):
                yield {
                    'selected': self.lookup_val == smart_text(pk_val),
                    'query_string': cl.get_query_string({
                        self.lookup_kwarg: pk_val,
                    }, [self.lookup_kwarg_isnull]),
                    'display': val,
                }
        # Offer a way to clear the filter when one is active.
        if self.lookup_val:
            yield {
                'selected': not self.lookup_val,
                'query_string': '?',
                'display': 'Remove filter',
            }
class SelectFilter(ChoicesFieldListFilter):
    """Choices-field admin filter rendered with a custom <select> template."""
    template = "admin/select_filter.html"
from django.contrib.admin import RelatedFieldListFilter, ChoicesFieldListFilter
from django.contrib.admin.util import get_model_from_relation
from django.utils.encoding import smart_text
from client_admin.utils import get_admin_change_url
class LookupFilter(RelatedFieldListFilter):
    """Related-field admin filter that avoids loading every related row.

    Unlike RelatedFieldListFilter, it never queries the related table for
    all possible choices; it only fetches the one object that is currently
    selected (so its label can be displayed) and renders a popup-lookup
    widget via the custom template.
    """
    template = "admin/lookup_filter.html"
    def __init__(self, field, request, params, model, model_admin, field_path):
        """cut/pasted from RelatedFieldListFilter parent constructor, with db hit removed"""
        other_model = get_model_from_relation(field)
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull, None)
        # this is the one change from the parent constructor.
        # instead of getting all choices from the table, only pick one if theres already one set so we can display it
        self.lookup_choices = [(-1,""),(-2,"")] # needs at least TWO or admin wont show it if empty.
        if self.lookup_val:
            try:
                obj = field.rel.to.objects.get(pk=self.lookup_val)
                val = obj.__unicode__()
            except field.rel.to.DoesNotExist:
                # Stale filter value in the URL: show an empty label
                # instead of erroring out.
                val = ""
                pass
            self.lookup_choices.append((self.lookup_val,val))
        # note we are deliberately calling our parent's parent constructor
        super(RelatedFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
    def choices(self, cl):
        # First entry carries what the template needs to build the popup
        # lookup link; "PLACEHOLDER" is substituted on the client side.
        yield {
            'lookup_url': "%s?t=id" % get_admin_change_url(self.field.rel.to),
            'query_string': cl.get_query_string({self.lookup_kwarg: "PLACEHOLDER"}),
            'filter_name': self.field.rel.to._meta.model_name,
        }
        # Only the currently selected choice is yielded — the two dummy
        # placeholder entries (-1, -2) never match lookup_val.
        for pk_val, val in self.lookup_choices:
            if self.lookup_val == smart_text(pk_val):
                yield {
                    'selected': self.lookup_val == smart_text(pk_val),
                    'query_string': cl.get_query_string({
                        self.lookup_kwarg: pk_val,
                    }, [self.lookup_kwarg_isnull]),
                    'display': val,
                }
        # Offer a way to clear the filter when one is active.
        if self.lookup_val:
            yield {
                'selected': not self.lookup_val,
                'query_string': '?',
                'display': 'Remove filter',
            }
class SelectFilter(ChoicesFieldListFilter):
    """Choices-field admin filter rendered with a custom <select> template."""
    template = "admin/select_filter.html"
import bs4
import csv
import requests
import re
import time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
class Steam:
    """Scrapes the games list from a (hard-coded) Steam community profile."""
    def __init__(self, driver):
        """Declare values needed for the class.
        Args:
            driver (class): Firefox webdriver
        """
        self.driver = driver
        # Replace with your own profile link
        user_url = "https://steamcommunity.com/id/FractalNoise/games/?tab=all"
        self.driver.get(user_url)
        print("Acquiring url data...")
        # Parse the rendered page once; all accessors read this soup.
        self.soup = bs4.BeautifulSoup(self.driver.page_source, "lxml")
    def game_count(self):
        """Counts how many games are in your library.
        Returns:
            int: Number of games in your library
        """
        return len(self.soup.find_all("div", class_="gameListRowItemName ellipsis"))
    def page_number(self):
        """Finds the specific set of numbers used to find the store page of the game.
        Returns:
            generator of str: Game page ids, one per library row
        """
        # Row ids look like "<prefix>_<appid>"; keep only the appid part.
        return (
            page_id["id"].split("_")[1]
            for page_id in self.soup.find_all("div", class_="gameListRow")
        )
    def hours_played(self):
        """Generator object that returns the number of hours played.
        Returns:
            str: Number of hours played
        """
        return (
            hours.text
            for hours in self.soup.find_all("h5", class_="ellipsis hours_played")
        )
class Games:
    """Scrapes details for one game from its Steam store page and writes CSV."""
    def __init__(self, next_page, game_hours, write_header_count):
        """Declare values needed for the class.
        Args:
            next_page (str): Game page id
            game_hours (str): Number of hours played
            write_header_count (int): A count of how many times the header has been written
        """
        self.game_dict = {}
        self.write_header_count = write_header_count
        self.next_page = next_page
        self.game_dict["Hours Played"] = game_hours
        # Acquire the game page html
        req = requests.get(f"https://store.steampowered.com/app/{self.next_page}")
        self.page_soup = bs4.BeautifulSoup(req.text, "lxml")
    def game_name(self):
        """Grabs the title of the game."""
        title = self.page_soup.find("div", class_="apphub_AppName")
        # Checks if the game title exists on the page
        if title is not None:
            self.game_dict["Game Title"] = title.text
    def description(self):
        """Grabs the description of the game."""
        desc = self.page_soup.find("div", class_="game_description_snippet")
        # Checks if the game description exists on the page
        if desc is not None:
            self.game_dict["Description"] = desc.text.strip()
    def release_date(self):
        """Grabs the release date of the game."""
        date = self.page_soup.find("div", class_="date")
        # Checks if the release date exists on the page
        if date is not None:
            self.game_dict["Release Date"] = date.text
    def recent_reviews(self):
        """Grabs the recent reviews section of the game."""
        recent = self.page_soup.find("div", class_="summary column")
        # Checks if recent reviews exists on the page
        if recent is not None:
            self.game_dict["Recent Reviews"] = " ".join(recent.text.split())
    def all_reviews(self):
        """Grabs the all reviews section of the game."""
        # The second "summary column" div holds the all-time reviews;
        # the first is the recent-reviews block scraped above.
        all_time = self.page_soup.find("div", class_="summary column")
        # Checks if all reviews exists on the page
        if all_time is not None:
            all_time = all_time.find_next("div", class_="summary column")
        if all_time is not None:
            self.game_dict["All Reviews"] = " ".join(all_time.text.split())
    def tags(self):
        """Grabs the game tags."""
        # Only the first five tags are kept.
        tags_list = self.page_soup.find_all("a", class_="app_tag", limit=5)
        # Checks if game tags exist on the page
        if tags_list is not None:
            self.game_dict["Tags"] = ", ".join([tag.text.strip() for tag in tags_list])
    def write(self):
        """Opens and writes the data to the csv file."""
        with open("steam_backlog.csv", "a", newline="", encoding="utf-8") as steam_file:
            fieldnames = [
                "Game Title",
                "Description",
                "Hours Played",
                "Release Date",
                "Recent Reviews",
                "All Reviews",
                "Tags",
            ]
            csv_writer = csv.DictWriter(steam_file, fieldnames=fieldnames)
            # Checks if the header row has already been written
            if self.write_header_count == 0:
                csv_writer.writeheader()
            csv_writer.writerow(self.game_dict)
        # Checks if there is a game being written to the file
        if "Game Title" in self.game_dict:
            print(f"Writing {self.game_dict['Game Title']} to file")
def main():
    """Scrape every game in the Steam library and append each to the CSV."""
    # 0 means the CSV header row still needs to be written.
    write_header_count = 0
    print("Initializing webdriver...")
    # Make the webdriver run in headless mode
    options = Options()
    options.headless = True
    # Replace the executable path with the path to your webdriver download
    driver = webdriver.Firefox(
        options=options, executable_path=r"G:\Downloads v2\geckodriver.exe"
    )
    # Make a new profile object
    profile = Steam(driver)
    # Grab info from the initial page
    number_of_games = profile.game_count()
    page_num_gen = profile.page_number()
    hours_gen = profile.hours_played()
    # Keep looping for every game in the users library
    for _ in range(number_of_games):
        # Advance the generator
        next_page = next(page_num_gen)
        game_hours = next(hours_gen)
        # Make a new game object
        game = Games(next_page, game_hours, write_header_count)
        # Grab all the info from the game page
        game.game_name()
        game.description()
        game.release_date()
        game.recent_reviews()
        game.all_reviews()
        game.tags()
        # Write to the file
        game.write()
        write_header_count += 1
        # Wait two seconds so the server doesn't get bombarded with requests
        time.sleep(2)
    print("Process complete")
    # Exit the firefox webdriver
    driver.quit()
if __name__ == "__main__":
main() | main.py | import bs4
import csv
import requests
import re
import time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
class Steam:
    """Scrapes the games list from a (hard-coded) Steam community profile."""
    def __init__(self, driver):
        """Declare values needed for the class.
        Args:
            driver (class): Firefox webdriver
        """
        self.driver = driver
        # Replace with your own profile link
        user_url = "https://steamcommunity.com/id/FractalNoise/games/?tab=all"
        self.driver.get(user_url)
        print("Acquiring url data...")
        # Parse the rendered page once; all accessors read this soup.
        self.soup = bs4.BeautifulSoup(self.driver.page_source, "lxml")
    def game_count(self):
        """Counts how many games are in your library.
        Returns:
            int: Number of games in your library
        """
        return len(self.soup.find_all("div", class_="gameListRowItemName ellipsis"))
    def page_number(self):
        """Finds the specific set of numbers used to find the store page of the game.
        Returns:
            generator of str: Game page ids, one per library row
        """
        # Row ids look like "<prefix>_<appid>"; keep only the appid part.
        return (
            page_id["id"].split("_")[1]
            for page_id in self.soup.find_all("div", class_="gameListRow")
        )
    def hours_played(self):
        """Generator object that returns the number of hours played.
        Returns:
            str: Number of hours played
        """
        return (
            hours.text
            for hours in self.soup.find_all("h5", class_="ellipsis hours_played")
        )
class Games:
    """Scrapes details for one game from its Steam store page and writes CSV."""
    def __init__(self, next_page, game_hours, write_header_count):
        """Declare values needed for the class.
        Args:
            next_page (str): Game page id
            game_hours (str): Number of hours played
            write_header_count (int): A count of how many times the header has been written
        """
        self.game_dict = {}
        self.write_header_count = write_header_count
        self.next_page = next_page
        self.game_dict["Hours Played"] = game_hours
        # Acquire the game page html
        req = requests.get(f"https://store.steampowered.com/app/{self.next_page}")
        self.page_soup = bs4.BeautifulSoup(req.text, "lxml")
    def game_name(self):
        """Grabs the title of the game."""
        title = self.page_soup.find("div", class_="apphub_AppName")
        # Checks if the game title exists on the page
        if title is not None:
            self.game_dict["Game Title"] = title.text
    def description(self):
        """Grabs the description of the game."""
        desc = self.page_soup.find("div", class_="game_description_snippet")
        # Checks if the game description exists on the page
        if desc is not None:
            self.game_dict["Description"] = desc.text.strip()
    def release_date(self):
        """Grabs the release date of the game."""
        date = self.page_soup.find("div", class_="date")
        # Checks if the release date exists on the page
        if date is not None:
            self.game_dict["Release Date"] = date.text
    def recent_reviews(self):
        """Grabs the recent reviews section of the game."""
        recent = self.page_soup.find("div", class_="summary column")
        # Checks if recent reviews exists on the page
        if recent is not None:
            self.game_dict["Recent Reviews"] = " ".join(recent.text.split())
    def all_reviews(self):
        """Grabs the all reviews section of the game."""
        # The second "summary column" div holds the all-time reviews;
        # the first is the recent-reviews block scraped above.
        all_time = self.page_soup.find("div", class_="summary column")
        # Checks if all reviews exists on the page
        if all_time is not None:
            all_time = all_time.find_next("div", class_="summary column")
        if all_time is not None:
            self.game_dict["All Reviews"] = " ".join(all_time.text.split())
    def tags(self):
        """Grabs the game tags."""
        # Only the first five tags are kept.
        tags_list = self.page_soup.find_all("a", class_="app_tag", limit=5)
        # Checks if game tags exist on the page
        if tags_list is not None:
            self.game_dict["Tags"] = ", ".join([tag.text.strip() for tag in tags_list])
    def write(self):
        """Opens and writes the data to the csv file."""
        with open("steam_backlog.csv", "a", newline="", encoding="utf-8") as steam_file:
            fieldnames = [
                "Game Title",
                "Description",
                "Hours Played",
                "Release Date",
                "Recent Reviews",
                "All Reviews",
                "Tags",
            ]
            csv_writer = csv.DictWriter(steam_file, fieldnames=fieldnames)
            # Checks if the header row has already been written
            if self.write_header_count == 0:
                csv_writer.writeheader()
            csv_writer.writerow(self.game_dict)
        # Checks if there is a game being written to the file
        if "Game Title" in self.game_dict:
            print(f"Writing {self.game_dict['Game Title']} to file")
def main():
    """Scrape every game in the Steam library and append each to the CSV."""
    # 0 means the CSV header row still needs to be written.
    write_header_count = 0
    print("Initializing webdriver...")
    # Make the webdriver run in headless mode
    options = Options()
    options.headless = True
    # Replace the executable path with the path to your webdriver download
    driver = webdriver.Firefox(
        options=options, executable_path=r"G:\Downloads v2\geckodriver.exe"
    )
    # Make a new profile object
    profile = Steam(driver)
    # Grab info from the initial page
    number_of_games = profile.game_count()
    page_num_gen = profile.page_number()
    hours_gen = profile.hours_played()
    # Keep looping for every game in the users library
    for _ in range(number_of_games):
        # Advance the generator
        next_page = next(page_num_gen)
        game_hours = next(hours_gen)
        # Make a new game object
        game = Games(next_page, game_hours, write_header_count)
        # Grab all the info from the game page
        game.game_name()
        game.description()
        game.release_date()
        game.recent_reviews()
        game.all_reviews()
        game.tags()
        # Write to the file
        game.write()
        write_header_count += 1
        # Wait two seconds so the server doesn't get bombarded with requests
        time.sleep(2)
    print("Process complete")
    # Exit the firefox webdriver
    driver.quit()
if __name__ == "__main__":
main() | 0.666605 | 0.159283 |
import socket
import threading
import socketserver
import pytest
from src import nuke_tools
LOCALHOST = '127.0.0.1'
with socketserver.TCPServer((LOCALHOST, 0), None) as s:
FREE_PORT = s.server_address[1]
def socket_server():
    """Spin up a one-shot TCP echo server for the tests.

    Accepts exactly one client, echoes its payload back (ignoring any
    error while doing so), then closes both sockets.
    """
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_sock.bind((LOCALHOST, FREE_PORT))
    server_sock.listen(1)
    conn, _ = server_sock.accept()
    try:
        conn.sendall(conn.recv(2048))
    except Exception:  # skipcq: PYL-W0703
        pass
    server_sock.close()
    conn.close()
@pytest.fixture()
def tcp_server():
    """Start the tcp server in a thread to allow async operation."""
    # Daemon thread so a hung server cannot keep pytest alive.
    server = threading.Thread(target=socket_server)
    server.daemon = True
    server.start()
    yield server
    # Wait for the echo server to finish its single connection.
    server.join()
def test_send_data(tcp_server):
    """Test that send data method returns expected value."""
    # The fixture's echo server is listening, so the payload round-trips.
    data = nuke_tools.send_data(LOCALHOST, FREE_PORT, 'hello')
    assert isinstance(data, str)
    assert '[NukeTools] hello' in data
def test_connection_refused():
    """Test sending data when server is not listening."""
    expected = 'ConnectionRefusedError. {}:{}'.format(LOCALHOST, FREE_PORT)
    reply = nuke_tools.send_data(LOCALHOST, FREE_PORT, 'hello')
    assert expected in reply
def test_connection_timeout():
    """Test connection timeout."""
    # Unroutable RFC1918 address with a short timeout forces the error.
    hostname = '192.168.1.99'
    reply = nuke_tools.send_data(hostname, FREE_PORT, 'hello', 0.1)
    assert 'ConnectionTimeoutError. {}:{}'.format(hostname, FREE_PORT) in reply
def test_connection_socket_error():
    """Test connection base exception.
    Wrong hostname and port to force socket error.
    """
    data = nuke_tools.send_data('172.16.17.32', 0, 'hello')
    assert 'UnknownError:' in data
def test_connection_generic_exception():
    """Test connection base exception.
    Convert port to string to force exception.
    """
    # A non-int port raises before any socket call is made.
    data = nuke_tools.send_data(LOCALHOST, str(FREE_PORT), 'hello')
assert 'UnknownException:' in data | tests/test_tcp.py | import socket
import threading
import socketserver
import pytest
from src import nuke_tools
LOCALHOST = '127.0.0.1'
with socketserver.TCPServer((LOCALHOST, 0), None) as s:
FREE_PORT = s.server_address[1]
def socket_server():
    """Spin up a one-shot TCP echo server for the tests.

    Accepts exactly one client, echoes its payload back (ignoring any
    error while doing so), then closes both sockets.
    """
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_sock.bind((LOCALHOST, FREE_PORT))
    server_sock.listen(1)
    conn, _ = server_sock.accept()
    try:
        conn.sendall(conn.recv(2048))
    except Exception:  # skipcq: PYL-W0703
        pass
    server_sock.close()
    conn.close()
@pytest.fixture()
def tcp_server():
    """Start the tcp server in a thread to allow async operation."""
    # Daemon thread so a hung server cannot keep pytest alive.
    server = threading.Thread(target=socket_server)
    server.daemon = True
    server.start()
    yield server
    # Wait for the echo server to finish its single connection.
    server.join()
def test_send_data(tcp_server):
    """Test that send data method returns expected value."""
    # The fixture's echo server is listening, so the payload round-trips.
    data = nuke_tools.send_data(LOCALHOST, FREE_PORT, 'hello')
    assert isinstance(data, str)
    assert '[NukeTools] hello' in data
def test_connection_refused():
    """Test sending data when server is not listening."""
    expected = 'ConnectionRefusedError. {}:{}'.format(LOCALHOST, FREE_PORT)
    reply = nuke_tools.send_data(LOCALHOST, FREE_PORT, 'hello')
    assert expected in reply
def test_connection_timeout():
    """Test connection timeout."""
    # Unroutable RFC1918 address with a short timeout forces the error.
    hostname = '192.168.1.99'
    reply = nuke_tools.send_data(hostname, FREE_PORT, 'hello', 0.1)
    assert 'ConnectionTimeoutError. {}:{}'.format(hostname, FREE_PORT) in reply
def test_connection_socket_error():
    """Test connection base exception.
    Wrong hostname and port to force socket error.
    """
    data = nuke_tools.send_data('172.16.17.32', 0, 'hello')
    assert 'UnknownError:' in data
def test_connection_generic_exception():
    """Test connection base exception.
    Convert port to string to force exception.
    """
    # A non-int port raises before any socket call is made.
    data = nuke_tools.send_data(LOCALHOST, str(FREE_PORT), 'hello')
    assert 'UnknownException:' in data
import pathlib
import csv
# Budget analysis: totals, average month-over-month change, and the single
# largest gain/loss, printed and mirrored to an output file.
budget_csv = pathlib.Path('Resources/budget_data.csv')
pathout = pathlib.Path("Resources/Bank Analysis")
# Running totals and extrema trackers ([date, amount] pairs).
total_months = 0
total_revenue = 0
prev_revenue = 0
revenue_change = 0
# FIX: the increase date slot was initialised to "," (stray comma typo).
greatest_increase = ["", 0]
greatest_decrease = ["", 99999999999999999]
revenue_changes = []
# Read CSV File
with open(budget_csv) as revenue_data:
    reader = csv.DictReader(revenue_data)
    # Consume the first row separately so deltas start from the second row.
    jan_start = next(reader)
    total_months = total_months + 1
    prev_revenue = int(jan_start["Profit/Losses"])
    total_revenue = total_revenue + int(jan_start["Profit/Losses"])
    for row in reader:
        total_months = total_months + 1
        total_revenue = total_revenue + int(row["Profit/Losses"])
        revenue_change = int(row["Profit/Losses"]) - prev_revenue
        prev_revenue = int(row["Profit/Losses"])
        # Track the largest single-month gain and loss with their dates.
        if (revenue_change > greatest_increase[1]):
            greatest_increase[1] = revenue_change
            greatest_increase[0] = row["Date"]
        if (revenue_change < greatest_decrease[1]):
            greatest_decrease[1] = revenue_change
            greatest_decrease[0] = row["Date"]
        revenue_changes.append(revenue_change)
revenue_average = round(sum(revenue_changes) / len(revenue_changes), 2)
print()
print()
print("Financial Analysis")
print("-------------------------------")
print(f"Total Months: {total_months}")
print(f"Total Revenue: ${total_revenue}")
print(f"Average Change: ${revenue_average}")
print(f"Greatest Increase: {greatest_increase[0]} (${greatest_increase[1]})")
print(f"Greatest Decrease: {greatest_decrease[0]} (${greatest_decrease[1]})")
# Mirror the console report into the output file.
with open(pathout, "w") as txt_file:
    txt_file.write("Financial Analysis")
    txt_file.write("\n")
    txt_file.write(f"Total Months: {total_months}")
    txt_file.write("\n")
    txt_file.write(f"Total Revenue: ${total_revenue}")
    txt_file.write("\n")
    txt_file.write(f"Average Change: ${revenue_average}")
    txt_file.write("\n")
    txt_file.write(f"Greatest Increase: {greatest_increase[0]} (${greatest_increase[1]})")
    txt_file.write("\n")
    txt_file.write(f"Greatest Decrease: {greatest_decrease[0]} (${greatest_decrease[1]})")
import pathlib
import pathlib
import csv
# Budget analysis: totals, average month-over-month change, and the single
# largest gain/loss, printed and mirrored to an output file.
budget_csv = pathlib.Path('Resources/budget_data.csv')
pathout = pathlib.Path("Resources/Bank Analysis")
# Running totals and extrema trackers ([date, amount] pairs).
total_months = 0
total_revenue = 0
prev_revenue = 0
revenue_change = 0
# FIX: the increase date slot was initialised to "," (stray comma typo).
greatest_increase = ["", 0]
greatest_decrease = ["", 99999999999999999]
revenue_changes = []
# Read CSV File
with open(budget_csv) as revenue_data:
    reader = csv.DictReader(revenue_data)
    # Consume the first row separately so deltas start from the second row.
    jan_start = next(reader)
    total_months = total_months + 1
    prev_revenue = int(jan_start["Profit/Losses"])
    total_revenue = total_revenue + int(jan_start["Profit/Losses"])
    for row in reader:
        total_months = total_months + 1
        total_revenue = total_revenue + int(row["Profit/Losses"])
        revenue_change = int(row["Profit/Losses"]) - prev_revenue
        prev_revenue = int(row["Profit/Losses"])
        # Track the largest single-month gain and loss with their dates.
        if (revenue_change > greatest_increase[1]):
            greatest_increase[1] = revenue_change
            greatest_increase[0] = row["Date"]
        if (revenue_change < greatest_decrease[1]):
            greatest_decrease[1] = revenue_change
            greatest_decrease[0] = row["Date"]
        revenue_changes.append(revenue_change)
revenue_average = round(sum(revenue_changes) / len(revenue_changes), 2)
print()
print()
print("Financial Analysis")
print("-------------------------------")
print(f"Total Months: {total_months}")
print(f"Total Revenue: ${total_revenue}")
print(f"Average Change: ${revenue_average}")
print(f"Greatest Increase: {greatest_increase[0]} (${greatest_increase[1]})")
print(f"Greatest Decrease: {greatest_decrease[0]} (${greatest_decrease[1]})")
# Mirror the console report into the output file.
with open(pathout, "w") as txt_file:
    txt_file.write("Financial Analysis")
    txt_file.write("\n")
    txt_file.write(f"Total Months: {total_months}")
    txt_file.write("\n")
    txt_file.write(f"Total Revenue: ${total_revenue}")
    txt_file.write("\n")
    txt_file.write(f"Average Change: ${revenue_average}")
    txt_file.write("\n")
    txt_file.write(f"Greatest Increase: {greatest_increase[0]} (${greatest_increase[1]})")
    txt_file.write("\n")
    txt_file.write(f"Greatest Decrease: {greatest_decrease[0]} (${greatest_decrease[1]})")
import os
import sys
import numpy as np
import subprocess
import pickle
import argparse
#updating
def setup():
"""Get cmds and setup directories."""
cmdparser = argparse.ArgumentParser(description='convert kaldi PCA transform and mean into pickle format',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdparser.add_argument('--kaldi_feats_path', help='path of folder where transform.mat and mean.vec stored', type=str,required=True)
cmdparser.add_argument('--dataset', help='dataset name', type=str, default="callhome1",required=True)
cmdparser.add_argument('--output_dir', help='path of SSC main folder to store pickle file', type=str, default="None",required=True)
cmdargs = cmdparser.parse_args()
return cmdargs
def kaldiPlda2numpydict(pldaFile):
    """Parse a Kaldi PLDA model into a dict of numpy arrays.

    Runs `ivector-copy-plda --binary=false` (must be on PATH) and parses
    its text output.

    Returns:
        dict with keys 'plda_mean', 'diagonalizing_transform' and
        'Psi_across_covar_diag'.
    """
    #logging.debug('kaldi text file to numpy array: {}'.format(textfile))
    fin = subprocess.check_output(["ivector-copy-plda", "--binary=false", pldaFile ,"-"])
    res = {}
    fin = fin.decode("utf-8").split('\n')
    # Drop empty lines left over from the split.
    while '' in fin:
        fin.remove('')
    # First line holds the mean vector; slicing [2:-1] skips the leading
    # tag tokens and the closing bracket — assumes Kaldi's text layout.
    splitted = fin[0].strip().split()
    res['plda_mean'] = np.asarray(splitted[2:-1]).astype(float)
    tmparr=[]
    # Subsequent lines are the transform matrix rows; a trailing ']' on a
    # row marks the end of the matrix.
    for i,line in enumerate(fin[2:]):
        splitted = line.strip().split()
        if splitted[-1] == ']':
            splitted = splitted[:-1]
            tmparr.append(np.asarray(splitted).astype(float))
            break
        else:
            tmparr.append(np.asarray(splitted).astype(float))
    res['diagonalizing_transform'] = np.asarray(tmparr)
    # Second-to-last line carries the across-class covariance diagonal.
    res['Psi_across_covar_diag'] = np.asarray(fin[-2].strip().split()[1:-1]).astype(float)
    return res
def load_kaldi_matrices(args):
    """Convert Kaldi PLDA/PCA artifacts into a single pickle file.

    Reads `plda`, `transform.mat` and `mean.vec` from args.kaldi_feats_path
    (via the Kaldi `ivector-copy-plda`, `copy-matrix` and `copy-vector`
    binaries, which must be on PATH) and pickles them as one dict at
    <output_dir>/lists/<dataset>/plda_<dataset>.pkl. Does nothing if the
    pickle already exists.
    """
    fold_local = args.kaldi_feats_path
    dataset = args.dataset
    out_fold = args.output_dir
    # Portable replacement for `os.system('mkdir -p ...')`.
    os.makedirs('{}/lists/{}'.format(out_fold, dataset), exist_ok=True)
    outpicklefile = '{}/lists/{}/plda_{}.pkl'.format(out_fold,dataset,dataset)
    if os.path.isfile(outpicklefile):
        print("file exits!")
        return
    plda_file = '{}/plda'.format(fold_local)
    if os.path.isfile(plda_file):
        plda = kaldiPlda2numpydict(plda_file)
    else:
        # Still emit the transform/mean even without a PLDA model.
        print('plda model does not exist!')
        plda = {}
    transform_mat_file = '{}/transform.mat'.format(fold_local)
    mean_vec_file = '{}/mean.vec'.format(fold_local)
    # `copy-matrix --binary=false` prints "[ row\n ... ]"; strip the
    # brackets and parse each row into a float array.
    transform_mat = np.asarray([w.split() for w in np.asarray(subprocess.check_output(["copy-matrix","--binary=false", transform_mat_file, "-"]).decode('utf-8').strip()[2:-2].split('\n'))]).astype(float)
    mean_vec = np.asarray(subprocess.check_output(["copy-vector", "--binary=false",mean_vec_file, "-"]).decode('utf-8').strip()[1:-2].split()).astype(float)
    plda['transform_mat'] = transform_mat
    plda['mean_vec'] = mean_vec
    with open(outpicklefile,'wb') as f:
        pickle.dump(plda,f)
if __name__=='__main__':
args = setup()
load_kaldi_matrices(args) | services/convert_kaldi_to_pkl.py | import os
import sys
import numpy as np
import subprocess
import pickle
import argparse
#updating
def setup():
"""Get cmds and setup directories."""
cmdparser = argparse.ArgumentParser(description='convert kaldi PCA transform and mean into pickle format',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdparser.add_argument('--kaldi_feats_path', help='path of folder where transform.mat and mean.vec stored', type=str,required=True)
cmdparser.add_argument('--dataset', help='dataset name', type=str, default="callhome1",required=True)
cmdparser.add_argument('--output_dir', help='path of SSC main folder to store pickle file', type=str, default="None",required=True)
cmdargs = cmdparser.parse_args()
return cmdargs
def kaldiPlda2numpydict(pldaFile):
    """Parse a Kaldi PLDA model into a dict of numpy arrays.

    Runs `ivector-copy-plda --binary=false` (must be on PATH) and parses
    its text output.

    Returns:
        dict with keys 'plda_mean', 'diagonalizing_transform' and
        'Psi_across_covar_diag'.
    """
    #logging.debug('kaldi text file to numpy array: {}'.format(textfile))
    fin = subprocess.check_output(["ivector-copy-plda", "--binary=false", pldaFile ,"-"])
    res = {}
    fin = fin.decode("utf-8").split('\n')
    # Drop empty lines left over from the split.
    while '' in fin:
        fin.remove('')
    # First line holds the mean vector; slicing [2:-1] skips the leading
    # tag tokens and the closing bracket — assumes Kaldi's text layout.
    splitted = fin[0].strip().split()
    res['plda_mean'] = np.asarray(splitted[2:-1]).astype(float)
    tmparr=[]
    # Subsequent lines are the transform matrix rows; a trailing ']' on a
    # row marks the end of the matrix.
    for i,line in enumerate(fin[2:]):
        splitted = line.strip().split()
        if splitted[-1] == ']':
            splitted = splitted[:-1]
            tmparr.append(np.asarray(splitted).astype(float))
            break
        else:
            tmparr.append(np.asarray(splitted).astype(float))
    res['diagonalizing_transform'] = np.asarray(tmparr)
    # Second-to-last line carries the across-class covariance diagonal.
    res['Psi_across_covar_diag'] = np.asarray(fin[-2].strip().split()[1:-1]).astype(float)
    return res
def load_kaldi_matrices(args):
    """Convert Kaldi PLDA/PCA artifacts into a single pickle file.

    Reads `plda`, `transform.mat` and `mean.vec` from args.kaldi_feats_path
    (via the Kaldi `ivector-copy-plda`, `copy-matrix` and `copy-vector`
    binaries, which must be on PATH) and pickles them as one dict at
    <output_dir>/lists/<dataset>/plda_<dataset>.pkl. Does nothing if the
    pickle already exists.
    """
    fold_local = args.kaldi_feats_path
    dataset = args.dataset
    out_fold = args.output_dir
    # Portable replacement for `os.system('mkdir -p ...')`.
    os.makedirs('{}/lists/{}'.format(out_fold, dataset), exist_ok=True)
    outpicklefile = '{}/lists/{}/plda_{}.pkl'.format(out_fold,dataset,dataset)
    if os.path.isfile(outpicklefile):
        print("file exits!")
        return
    plda_file = '{}/plda'.format(fold_local)
    if os.path.isfile(plda_file):
        plda = kaldiPlda2numpydict(plda_file)
    else:
        # Still emit the transform/mean even without a PLDA model.
        print('plda model does not exist!')
        plda = {}
    transform_mat_file = '{}/transform.mat'.format(fold_local)
    mean_vec_file = '{}/mean.vec'.format(fold_local)
    # `copy-matrix --binary=false` prints "[ row\n ... ]"; strip the
    # brackets and parse each row into a float array.
    transform_mat = np.asarray([w.split() for w in np.asarray(subprocess.check_output(["copy-matrix","--binary=false", transform_mat_file, "-"]).decode('utf-8').strip()[2:-2].split('\n'))]).astype(float)
    mean_vec = np.asarray(subprocess.check_output(["copy-vector", "--binary=false",mean_vec_file, "-"]).decode('utf-8').strip()[1:-2].split()).astype(float)
    plda['transform_mat'] = transform_mat
    plda['mean_vec'] = mean_vec
    with open(outpicklefile,'wb') as f:
        pickle.dump(plda,f)
if __name__=='__main__':
args = setup()
load_kaldi_matrices(args) | 0.264168 | 0.135261 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a measurement-unit field to Attribute and extend input types.

    Adds the optional ``Attribute.unit`` choice field (distance, area,
    volume and weight units) and extends ``Attribute.input_type`` with
    the ``numeric`` and ``rich-text`` choices.
    """
    # NOTE: the trailing dataset-dump junk that was fused onto the final
    # line of this class has been removed.

    dependencies = [
        ("attribute", "0009_auto_20210421_0552"),
    ]

    operations = [
        migrations.AddField(
            model_name="attribute",
            name="unit",
            field=models.CharField(
                blank=True,
                choices=[
                    ("cm", "cm"),
                    ("m", "m"),
                    ("km", "km"),
                    ("ft", "ft"),
                    ("yd", "yd"),
                    ("inch", "inch"),
                    ("sq_cm", "sq_cm"),
                    ("sq_m", "sq_m"),
                    ("sq_km", "sq_km"),
                    ("sq_ft", "sq_ft"),
                    ("sq_yd", "sq_yd"),
                    ("sq_inch", "sq_inch"),
                    ("cubic_millimeter", "cubic_millimeter"),
                    ("cubic_centimeter", "cubic_centimeter"),
                    ("cubic_decimeter", "cubic_decimeter"),
                    ("cubic_meter", "cubic_meter"),
                    ("liter", "liter"),
                    ("cubic_foot", "cubic_foot"),
                    ("cubic_inch", "cubic_inch"),
                    ("cubic_yard", "cubic_yard"),
                    ("qt", "qt"),
                    ("pint", "pint"),
                    ("fl_oz", "fl_oz"),
                    ("acre_in", "acre_in"),
                    ("acre_ft", "acre_ft"),
                    ("g", "g"),
                    ("lb", "lb"),
                    ("oz", "oz"),
                    ("kg", "kg"),
                    ("tonne", "tonne"),
                ],
                max_length=100,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="attribute",
            name="input_type",
            field=models.CharField(
                choices=[
                    ("dropdown", "Dropdown"),
                    ("multiselect", "Multi Select"),
                    ("file", "File"),
                    ("reference", "Reference"),
                    ("numeric", "Numeric"),
                    ("rich-text", "Rich Text"),
                ],
                default="dropdown",
                max_length=50,
            ),
        ),
    ]
class Migration(migrations.Migration):
    """Add a measurement-unit field to Attribute and extend input types.

    Adds the optional ``Attribute.unit`` choice field (distance, area,
    volume and weight units) and extends ``Attribute.input_type`` with
    the ``numeric`` and ``rich-text`` choices.
    """
    # NOTE: the trailing dataset-dump junk that was fused onto the final
    # line of this class has been removed.

    dependencies = [
        ("attribute", "0009_auto_20210421_0552"),
    ]

    operations = [
        migrations.AddField(
            model_name="attribute",
            name="unit",
            field=models.CharField(
                blank=True,
                choices=[
                    ("cm", "cm"),
                    ("m", "m"),
                    ("km", "km"),
                    ("ft", "ft"),
                    ("yd", "yd"),
                    ("inch", "inch"),
                    ("sq_cm", "sq_cm"),
                    ("sq_m", "sq_m"),
                    ("sq_km", "sq_km"),
                    ("sq_ft", "sq_ft"),
                    ("sq_yd", "sq_yd"),
                    ("sq_inch", "sq_inch"),
                    ("cubic_millimeter", "cubic_millimeter"),
                    ("cubic_centimeter", "cubic_centimeter"),
                    ("cubic_decimeter", "cubic_decimeter"),
                    ("cubic_meter", "cubic_meter"),
                    ("liter", "liter"),
                    ("cubic_foot", "cubic_foot"),
                    ("cubic_inch", "cubic_inch"),
                    ("cubic_yard", "cubic_yard"),
                    ("qt", "qt"),
                    ("pint", "pint"),
                    ("fl_oz", "fl_oz"),
                    ("acre_in", "acre_in"),
                    ("acre_ft", "acre_ft"),
                    ("g", "g"),
                    ("lb", "lb"),
                    ("oz", "oz"),
                    ("kg", "kg"),
                    ("tonne", "tonne"),
                ],
                max_length=100,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="attribute",
            name="input_type",
            field=models.CharField(
                choices=[
                    ("dropdown", "Dropdown"),
                    ("multiselect", "Multi Select"),
                    ("file", "File"),
                    ("reference", "Reference"),
                    ("numeric", "Numeric"),
                    ("rich-text", "Rich Text"),
                ],
                default="dropdown",
                max_length=50,
            ),
        ),
    ]
from __future__ import print_function
import subprocess
import platform
import sys
import os
from docopt import docopt
# Platform detection; `is_windows` is passed as `shell=` to every
# subprocess call in this script.
active_platform = platform.system()
is_windows = active_platform.lower() == 'windows'
# Packages built when the user asks for 'all'.
# NOTE(review): list order presumably reflects build dependencies -- confirm.
all_packages_list = ['pathlib', 'assimp', 'cyassimp', 'glew', 'glfw3', 'cyrasterize',
                     'menpo-pyvrml97', 'menpo', 'aniso8601', 'flask-restful',
                     'flask-compress', 'landmarkerio-server']
def upload_package(package, force):
    """Upload a freshly built conda package to the 'menpo' binstar channel.

    :param package: recipe directory of the package that was just built.
    :param force: if True, pass ``--force`` to binstar and treat every
        non-zero exit code as fatal; without it, exit code 1 is tolerated.
    :raises EnvironmentError: if the binstar upload fails.
    """
    from conda_build.metadata import MetaData
    from conda_build.build import bldpkg_path

    built_package_path = bldpkg_path(MetaData(package))
    print('Uploading {} from {}'.format(package, built_package_path))
    # Assemble the binstar invocation.
    cmd = ['binstar', 'upload', '-u', 'menpo']
    if force:
        cmd.append('--force')
    cmd.append(built_package_path)
    print('Running command "{}"'.format(' '.join(cmd)))
    # shell=is_windows mirrors the other subprocess calls in this script.
    exit_status = subprocess.call(cmd, shell=is_windows)
    if exit_status == 0:
        print('Successfully uploaded {}'.format(package))
    elif force or exit_status != 1:
        raise EnvironmentError('Executing binstar upload failed with a '
                               'return code of {}'.format(exit_status))
def run(packages, upload, force):
    """Build each recipe in `packages` with conda-build, in order.

    :param packages: iterable of recipe directories to build.
    :param upload: if True, upload each package right after a successful build.
    :param force: forwarded to :func:`upload_package`.
    :raises EnvironmentError: if any conda build exits non-zero.
    """
    for pkg in packages:
        print('Beginning to build {}'.format(pkg))
        build_cmd = ['conda', 'build', pkg]
        print('Running command "{}"'.format(' '.join(build_cmd)))
        status = subprocess.call(build_cmd, shell=is_windows)
        # Guard clause: a failed build aborts the whole run.
        if status != 0:
            raise EnvironmentError('Executing conda build failed with a '
                                   'return code of {}'.format(status))
        print('Successfully built {}'.format(pkg))
        if upload:
            upload_package(pkg, force)
if __name__ == '__main__':
    # NOTE: dataset-dump junk fused onto the final line has been removed.
    args = docopt(__doc__, version='0.0.1')
    # Sanity check: make sure a working `conda` is on PATH before building.
    try:
        error_code = subprocess.call(['conda', '-V'], shell=is_windows)
        if error_code != 0:
            raise EnvironmentError(
                '"conda" does not appear to be installed')
    except Exception as e:
        raise EnvironmentError('Failed to execute "conda"', e)
    # A single package name of 'all' expands to the full package list.
    if (len(args['<package_names>']) == 1 and
            args['<package_names>'][0] == 'all'):
        packages = all_packages_list
    else:
        packages = args['<package_names>']
    run(packages, args['--upload'], args['--force'])
import subprocess
import platform
import sys
import os
from docopt import docopt
# Platform detection; `is_windows` is passed as `shell=` to every
# subprocess call in this script.
active_platform = platform.system()
is_windows = active_platform.lower() == 'windows'
# Packages built when the user asks for 'all'.
# NOTE(review): list order presumably reflects build dependencies -- confirm.
all_packages_list = ['pathlib', 'assimp', 'cyassimp', 'glew', 'glfw3', 'cyrasterize',
                     'menpo-pyvrml97', 'menpo', 'aniso8601', 'flask-restful',
                     'flask-compress', 'landmarkerio-server']
def upload_package(package, force):
    """Upload a freshly built conda package to the 'menpo' binstar channel.

    :param package: recipe directory of the package that was just built.
    :param force: if True, pass ``--force`` to binstar and treat every
        non-zero exit code as fatal; without it, exit code 1 is tolerated.
    :raises EnvironmentError: if the binstar upload fails.
    """
    from conda_build.metadata import MetaData
    from conda_build.build import bldpkg_path
    built_package_path = bldpkg_path(MetaData(package))
    print('Uploading {} from {}'.format(package, built_package_path))
    # Set up the binstar upload command
    command = ['binstar', 'upload', '-u', 'menpo']
    if force:
        command += ['--force']
    command += [built_package_path]
    print('Running command "{}"'.format(' '.join(command)))
    # Call binstar
    error_code = subprocess.call(command, shell=is_windows)
    if error_code != 0:
        # Without --force an exit code of 1 is tolerated; anything else
        # (or any failure under --force) aborts the run.
        if force or error_code != 1:
            raise EnvironmentError('Executing binstar upload failed with a '
                                   'return code of {}'.format(error_code))
    else:
        print('Successfully uploaded {}'.format(package))
def run(packages, upload, force):
    """Build each recipe in `packages` with conda-build, in order.

    :param packages: iterable of recipe directories to build.
    :param upload: if True, upload each package right after a successful build.
    :param force: forwarded to :func:`upload_package`.
    :raises EnvironmentError: if any conda build exits non-zero.
    """
    for package in packages:
        print('Beginning to build {}'.format(package))
        command = ['conda', 'build', package]
        print('Running command "{}"'.format(' '.join(command)))
        error_code = subprocess.call(command, shell=is_windows)
        if error_code != 0:
            raise EnvironmentError('Executing conda build failed with a '
                                   'return code of {}'.format(error_code))
        else:
            print('Successfully built {}'.format(package))
            if upload:
                upload_package(package, force)
if __name__ == '__main__':
    # NOTE: dataset-dump junk fused onto the final line has been removed.
    args = docopt(__doc__, version='0.0.1')
    # Sanity check: make sure a working `conda` is on PATH before building.
    try:
        error_code = subprocess.call(['conda', '-V'], shell=is_windows)
        if error_code != 0:
            raise EnvironmentError(
                '"conda" does not appear to be installed')
    except Exception as e:
        raise EnvironmentError('Failed to execute "conda"', e)
    # A single package name of 'all' expands to the full package list.
    if (len(args['<package_names>']) == 1 and
            args['<package_names>'][0] == 'all'):
        packages = all_packages_list
    else:
        packages = args['<package_names>']
    run(packages, args['--upload'], args['--force'])
import asyncio
from lbry.testcase import CommandTestCase
from binascii import unhexlify
class WalletSynchronization(CommandTestCase):
    """Integration test: two daemons syncing one encrypted wallet.

    Covers preference merging across daemons, sync while the wallet is
    locked/unlocked, and propagation of channel signing keys.
    """
    # NOTE: dataset-dump junk fused onto the final line has been removed.

    SEED = "carbon smart garage balance margin twelve chest sword toast envelope bottom stomach absent"

    async def test_sync(self):
        daemon = self.daemon
        # Second daemon with a different seed, funded so it can transact.
        daemon2 = await self.add_daemon(
            seed="chest sword toast envelope bottom stomach absent "
                 "carbon smart garage balance margin twelve"
        )
        address = (await daemon2.wallet_manager.default_account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 1)
        await self.confirm_tx(sendtxid, daemon2.ledger)

        # Preferences start empty on both daemons.
        self.assertFalse(daemon.jsonrpc_preference_get())
        self.assertFalse(daemon2.jsonrpc_preference_get())

        daemon.jsonrpc_preference_set("one", "1")
        daemon.jsonrpc_preference_set("conflict", "1")
        daemon.jsonrpc_preference_set("fruit", '["peach", "apricot"]')
        # Sleep between the two daemons' writes -- presumably so daemon2's
        # entries are timestamped later; the merge below keeps daemon2's
        # value for "conflict" (TODO confirm merge semantics).
        await asyncio.sleep(1)
        daemon2.jsonrpc_preference_set("two", "2")
        daemon2.jsonrpc_preference_set("conflict", "2")

        self.assertDictEqual(daemon.jsonrpc_preference_get(), {
            "one": "1", "conflict": "1", "fruit": ["peach", "apricot"]
        })
        self.assertDictEqual(daemon2.jsonrpc_preference_get(), {"two": "2", "conflict": "2"})

        self.assertEqual(len((await daemon.jsonrpc_account_list())['lbc_regtest']), 1)

        # Applying sync data on a locked wallet must fail; unlock, retry.
        daemon2.jsonrpc_wallet_encrypt('password')
        daemon2.jsonrpc_wallet_lock()
        with self.assertRaises(AssertionError):
            await daemon2.jsonrpc_sync_apply('password')
        daemon2.jsonrpc_wallet_unlock('password')

        data = await daemon2.jsonrpc_sync_apply('password')
        await daemon.jsonrpc_sync_apply('password', data=data['data'], blocking=True)
        self.assertEqual(len((await daemon.jsonrpc_account_list())['lbc_regtest']), 2)
        self.assertDictEqual(
            # "two" key added and "conflict" value changed to "2"
            daemon.jsonrpc_preference_get(),
            {"one": "1", "two": "2", "conflict": "2", "fruit": ["peach", "apricot"]}
        )

        # Channel Certificate
        channel = await daemon2.jsonrpc_channel_create('@foo', '0.1')
        await daemon2.ledger.wait(channel)
        await self.generate(1)
        await daemon2.ledger.wait(channel)

        # both daemons will have the channel but only one has the cert so far
        self.assertEqual(len(await daemon.jsonrpc_channel_list()), 1)
        self.assertEqual(len(daemon.wallet_manager.default_wallet.accounts[1].channel_keys), 0)
        self.assertEqual(len(await daemon2.jsonrpc_channel_list()), 1)
        self.assertEqual(len(daemon2.wallet_manager.default_account.channel_keys), 1)

        data = await daemon2.jsonrpc_sync_apply('password')
        await daemon.jsonrpc_sync_apply('password', data=data['data'], blocking=True)
        # both daemons have the cert after sync'ing
        self.assertEqual(
            daemon2.wallet_manager.default_account.channel_keys,
            daemon.wallet_manager.default_wallet.accounts[1].channel_keys
        )
from lbry.testcase import CommandTestCase
from binascii import unhexlify
class WalletSynchronization(CommandTestCase):
    """Integration test: two daemons syncing one encrypted wallet.

    Covers preference merging across daemons, sync while the wallet is
    locked/unlocked, and propagation of channel signing keys.
    """
    # NOTE: dataset-dump junk fused onto the final line has been removed.

    SEED = "carbon smart garage balance margin twelve chest sword toast envelope bottom stomach absent"

    async def test_sync(self):
        daemon = self.daemon
        # Second daemon with a different seed, funded so it can transact.
        daemon2 = await self.add_daemon(
            seed="chest sword toast envelope bottom stomach absent "
                 "carbon smart garage balance margin twelve"
        )
        address = (await daemon2.wallet_manager.default_account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 1)
        await self.confirm_tx(sendtxid, daemon2.ledger)

        # Preferences start empty on both daemons.
        self.assertFalse(daemon.jsonrpc_preference_get())
        self.assertFalse(daemon2.jsonrpc_preference_get())

        daemon.jsonrpc_preference_set("one", "1")
        daemon.jsonrpc_preference_set("conflict", "1")
        daemon.jsonrpc_preference_set("fruit", '["peach", "apricot"]')
        # Sleep between the two daemons' writes -- presumably so daemon2's
        # entries are timestamped later; the merge below keeps daemon2's
        # value for "conflict" (TODO confirm merge semantics).
        await asyncio.sleep(1)
        daemon2.jsonrpc_preference_set("two", "2")
        daemon2.jsonrpc_preference_set("conflict", "2")

        self.assertDictEqual(daemon.jsonrpc_preference_get(), {
            "one": "1", "conflict": "1", "fruit": ["peach", "apricot"]
        })
        self.assertDictEqual(daemon2.jsonrpc_preference_get(), {"two": "2", "conflict": "2"})

        self.assertEqual(len((await daemon.jsonrpc_account_list())['lbc_regtest']), 1)

        # Applying sync data on a locked wallet must fail; unlock, retry.
        daemon2.jsonrpc_wallet_encrypt('password')
        daemon2.jsonrpc_wallet_lock()
        with self.assertRaises(AssertionError):
            await daemon2.jsonrpc_sync_apply('password')
        daemon2.jsonrpc_wallet_unlock('password')

        data = await daemon2.jsonrpc_sync_apply('password')
        await daemon.jsonrpc_sync_apply('password', data=data['data'], blocking=True)
        self.assertEqual(len((await daemon.jsonrpc_account_list())['lbc_regtest']), 2)
        self.assertDictEqual(
            # "two" key added and "conflict" value changed to "2"
            daemon.jsonrpc_preference_get(),
            {"one": "1", "two": "2", "conflict": "2", "fruit": ["peach", "apricot"]}
        )

        # Channel Certificate
        channel = await daemon2.jsonrpc_channel_create('@foo', '0.1')
        await daemon2.ledger.wait(channel)
        await self.generate(1)
        await daemon2.ledger.wait(channel)

        # both daemons will have the channel but only one has the cert so far
        self.assertEqual(len(await daemon.jsonrpc_channel_list()), 1)
        self.assertEqual(len(daemon.wallet_manager.default_wallet.accounts[1].channel_keys), 0)
        self.assertEqual(len(await daemon2.jsonrpc_channel_list()), 1)
        self.assertEqual(len(daemon2.wallet_manager.default_account.channel_keys), 1)

        data = await daemon2.jsonrpc_sync_apply('password')
        await daemon.jsonrpc_sync_apply('password', data=data['data'], blocking=True)
        # both daemons have the cert after sync'ing
        self.assertEqual(
            daemon2.wallet_manager.default_account.channel_keys,
            daemon.wallet_manager.default_wallet.accounts[1].channel_keys
        )
import numpy as np
from PIL import Image
def shuffle_weights(model, weights=None):
    """Randomly permute the weights of `model` in place.

    A fast stand-in for re-initialising a model: each weight tensor is
    replaced by a random permutation of its own entries, so the marginal
    value distribution is preserved while any learned structure is lost.

    :param model: object exposing Keras-style ``get_weights``/``set_weights``.
    :param weights: optional list of ndarrays to permute instead of the
        model's current weights.
    """
    if weights is None:
        weights = model.get_weights()
    shuffled = []
    for w in weights:
        # Permute across *all* elements, then restore the original shape.
        shuffled.append(np.random.permutation(w.flat).reshape(w.shape))
    # A faster but less random variant would permute only along the first
    # dimension: np.random.permutation(w).
    model.set_weights(shuffled)
def resize_and_crop(img, size=(100, 100), crop_type='middle'):
    """Resize `img` to exactly `size`, cropping the overflowing dimension.

    The image is first scaled so one dimension matches the target, then
    the other dimension is cropped according to `crop_type`.

    :param img: PIL Image to transform.
    :param size: (width, height) of the result.
    :param crop_type: 'top', 'middle' or 'bottom' -- which part of the
        image to keep when cropping.
    :return: a new PIL Image of exactly `size`.
    :raises ValueError: for an invalid `crop_type`.
    """
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    # getattr keeps this working on both old and new Pillow versions.
    resample = getattr(Image, "Resampling", Image).LANCZOS
    # Compare aspect ratios to decide which axis to scale to.
    img_ratio = img.size[0] / float(img.size[1])
    ratio = size[0] / float(size[1])
    if ratio > img_ratio:
        # Target is relatively wider: match widths, crop vertically.
        img = img.resize(
            (size[0], int(round(size[0] * img.size[1] / img.size[0]))),
            resample)
        if crop_type == 'top':
            box = (0, 0, img.size[0], size[1])
        elif crop_type == 'middle':
            box = (0,
                   int(round((img.size[1] - size[1]) / 2)),
                   img.size[0],
                   int(round((img.size[1] + size[1]) / 2)))
        elif crop_type == 'bottom':
            box = (0, img.size[1] - size[1], img.size[0], img.size[1])
        else:
            raise ValueError('ERROR: invalid value for crop_type')
        img = img.crop(box)
    elif ratio < img_ratio:
        # Target is relatively taller: match heights, crop horizontally.
        img = img.resize(
            (int(round(size[1] * img.size[0] / img.size[1])), size[1]),
            resample)
        if crop_type == 'top':
            box = (0, 0, size[0], img.size[1])
        elif crop_type == 'middle':
            box = (int(round((img.size[0] - size[0]) / 2)),
                   0,
                   int(round((img.size[0] + size[0]) / 2)),
                   img.size[1])
        elif crop_type == 'bottom':
            box = (img.size[0] - size[0], 0, img.size[0], img.size[1])
        else:
            raise ValueError('ERROR: invalid value for crop_type')
        img = img.crop(box)
    else:
        # Aspect ratios match: plain resize, no crop needed.
        img = img.resize((size[0], size[1]), resample)
    return img
from PIL import Image
def shuffle_weights(model, weights=None):
    """Randomly permute the weights in `model`, or the given `weights`.
    This is a fast approximation of re-initializing the weights of a model.
    Assumes weights are distributed independently of the dimensions of the weight tensors
    (i.e., the weights have the same distribution along each dimension).
    :param Model model: Modify the weights of the given model.
    :param list(ndarray) weights: The model's weights will be replaced by a random permutation of these weights.
        If `None`, permute the model's current weights.
    """
    if weights is None:
        weights = model.get_weights()
    # Permute across all elements of each tensor, then restore its shape.
    weights = [np.random.permutation(w.flat).reshape(w.shape) for w in weights]
    # Faster, but less random: only permutes along the first dimension
    # weights = [np.random.permutation(w) for w in weights]
    model.set_weights(weights)
def resize_and_crop(img, size=(100, 100), crop_type='middle'):
    """Resize `img` to exactly `size`, cropping the overflowing dimension.

    The image is first scaled so one dimension matches the target, then
    the other dimension is cropped according to `crop_type`.

    :param img: PIL Image to transform.
    :param size: (width, height) of the result.
    :param crop_type: 'top', 'middle' or 'bottom' -- which part of the
        image to keep when cropping.
    :return: a new PIL Image of exactly `size`.
    :raises ValueError: for an invalid `crop_type`.
    """
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    # getattr keeps this working on both old and new Pillow versions.
    resample = getattr(Image, "Resampling", Image).LANCZOS
    # Compare aspect ratios to decide which axis to scale to.
    img_ratio = img.size[0] / float(img.size[1])
    ratio = size[0] / float(size[1])
    if ratio > img_ratio:
        # Target is relatively wider: match widths, crop vertically.
        img = img.resize(
            (size[0], int(round(size[0] * img.size[1] / img.size[0]))),
            resample)
        if crop_type == 'top':
            box = (0, 0, img.size[0], size[1])
        elif crop_type == 'middle':
            box = (0,
                   int(round((img.size[1] - size[1]) / 2)),
                   img.size[0],
                   int(round((img.size[1] + size[1]) / 2)))
        elif crop_type == 'bottom':
            box = (0, img.size[1] - size[1], img.size[0], img.size[1])
        else:
            raise ValueError('ERROR: invalid value for crop_type')
        img = img.crop(box)
    elif ratio < img_ratio:
        # Target is relatively taller: match heights, crop horizontally.
        img = img.resize(
            (int(round(size[1] * img.size[0] / img.size[1])), size[1]),
            resample)
        if crop_type == 'top':
            box = (0, 0, size[0], img.size[1])
        elif crop_type == 'middle':
            box = (int(round((img.size[0] - size[0]) / 2)),
                   0,
                   int(round((img.size[0] + size[0]) / 2)),
                   img.size[1])
        elif crop_type == 'bottom':
            box = (img.size[0] - size[0], 0, img.size[0], img.size[1])
        else:
            raise ValueError('ERROR: invalid value for crop_type')
        img = img.crop(box)
    else:
        # Aspect ratios match: plain resize, no crop needed.
        img = img.resize((size[0], size[1]), resample)
    return img
from collections import OrderedDict
from conans.model.conanfile_interface import ConanFileInterface
from conans.model.ref import ConanFileReference
class Requirement(object):
    """A single dependency edge, characterised by its traits.

    Two requirements compare (and hash) equal when they name the same
    package in the same context (regular vs build), so they can be used
    as dict keys and de-duplicated while walking a graph.
    """

    def __init__(self, ref, build=False, direct=True, test=False, visible=True):
        # By default this is a generic library requirement.
        self.ref = ref
        # True when the dependent node is a build tool used at build time only.
        self.build = build
        self.direct = direct
        self.test = test
        self.visible = visible

    def __repr__(self):
        return repr(self.__dict__)

    def __hash__(self):
        # Identity is (package name, host/build context) -- see __eq__.
        return hash((self.ref.name, self.build))

    def __eq__(self, other):
        same_name = self.ref.name == other.ref.name
        return same_name and self.build == other.build

    def __ne__(self, other):
        return not self.__eq__(other)

    def aggregate(self, other):
        """Merge traits of an equal requirement found through another path.

        When closing a loop in the graph the same dependency can be reached
        twice; visibility is OR-ed so the most visible path wins.
        """
        assert self.build == other.build
        self.visible |= other.visible
class UserRequirementsDict(object):
    """User-facing mapping that lets recipes look up dependencies by name."""

    def __init__(self, data, require_filter=None):
        self._data = data  # dict-like {Requirement: conanfile}
        # {trait: value} pairs applied to every requirement built by lookups.
        self._require_filter = require_filter

    def filter(self, require_filter):
        """Return a new dict keeping only requirements whose traits match
        every (trait, value) pair in `require_filter`."""
        def matches(req):
            return all(getattr(req, trait) == value
                       for trait, value in require_filter.items())
        kept = OrderedDict((req, conanfile)
                           for req, conanfile in self._data.items()
                           if matches(req))
        return UserRequirementsDict(kept, require_filter)

    def __bool__(self):
        return bool(self._data)

    __nonzero__ = __bool__

    def _get_require(self, ref, **kwargs):
        """Build the Requirement key used to index ``self._data``."""
        assert isinstance(ref, str)
        if "/" in ref:
            ref = ConanFileReference.loads(ref)
        else:
            ref = ConanFileReference(ref, "unknown", "unknown", "unknown",
                                     validate=False)
        if self._require_filter:
            kwargs.update(self._require_filter)
        return Requirement(ref, **kwargs)

    def get(self, ref, **kwargs):
        return self._data.get(self._get_require(ref, **kwargs))

    def __getitem__(self, name):
        return self._data[self._get_require(name)]

    def __delitem__(self, name):
        del self._data[self._get_require(name)]

    def items(self):
        return self._data.items()

    def values(self):
        return self._data.values()
class ConanFileDependencies(UserRequirementsDict):
    """Dependency view of a graph node, keyed by Requirement.

    NOTE: dataset-dump junk fused onto the final line has been removed.
    """

    @staticmethod
    def from_node(node):
        """Build the dependencies mapping for `node` from the dep graph.

        Direct edges are classified into build/test/private/host groups,
        then each group is expanded transitively; requirements reached via
        several paths are aggregated into a single entry.
        """
        # TODO: This construction will be easier in 2.0
        build, test, host, private = [], [], [], []
        for edge in node.dependencies:
            if edge.build_require:
                if not edge.require.force_host_context:
                    build.append(edge.dst)
                else:
                    test.append(edge.dst)
            elif edge.private:
                private.append(edge.dst)
            else:
                host.append(edge.dst)

        d = OrderedDict()

        def update_existing(req, conanfile):
            # Merge traits when the same requirement arrives via two paths.
            existing = d.get(req)
            if existing is not None:
                _, existing_req = existing
                existing_req.aggregate(req)
                req = existing_req
            d[req] = conanfile, req

        def expand(nodes, is_build, is_test, is_visible):
            # Record the direct nodes, then walk their transitive
            # non-build, non-private closure breadth-first.
            all_nodes = set(nodes)
            for n in nodes:
                conanfile = ConanFileInterface(n.conanfile)
                req = Requirement(n.ref, build=is_build, test=is_test, visible=is_visible)
                update_existing(req, conanfile)

            next_nodes = nodes
            while next_nodes:
                new_nodes = []
                for next_node in next_nodes:
                    for e in next_node.dependencies:
                        if not e.build_require and not e.private and e.dst not in all_nodes:
                            new_nodes.append(e.dst)
                            all_nodes.add(e.dst)
                next_nodes = new_nodes
                for n in next_nodes:
                    conanfile = ConanFileInterface(n.conanfile)
                    req = Requirement(n.ref, build=is_build, test=is_test, direct=False,
                                      visible=is_visible)
                    update_existing(req, conanfile)

        expand(host, is_build=False, is_test=False, is_visible=True)
        expand(private, is_build=False, is_test=False, is_visible=False)
        expand(build, is_build=True, is_test=False, is_visible=False)
        expand(test, is_build=False, is_test=True, is_visible=False)

        d = OrderedDict([(k, v[0]) for k, v in d.items()])
        return ConanFileDependencies(d)

    def filter(self, require_filter):
        # FIXME: Copy of the above, to return ConanFileDependencies class object
        def filter_fn(require):
            for k, v in require_filter.items():
                if getattr(require, k) != v:
                    return False
            return True
        data = OrderedDict((k, v) for k, v in self._data.items() if filter_fn(k))
        return ConanFileDependencies(data, require_filter)

    @property
    def topological_sort(self):
        """Dependencies ordered leaves-first (most direct deps last)."""
        # Return first independent nodes, final ones are the more direct deps
        result = OrderedDict()
        opened = self._data.copy()
        while opened:
            opened_values = set(opened.values())
            new_opened = OrderedDict()
            for req, conanfile in opened.items():
                deps_in_opened = any(d in opened_values for d in conanfile.dependencies.values())
                if deps_in_opened:
                    new_opened[req] = conanfile  # keep it for next iteration
                else:
                    result[req] = conanfile  # No dependencies in open set!
            opened = new_opened
        return ConanFileDependencies(result)

    @property
    def direct_host(self):
        return self.filter({"build": False, "direct": True, "test": False})

    @property
    def direct_build(self):
        return self.filter({"build": True, "direct": True})

    @property
    def host(self):
        return self.filter({"build": False, "test": False})

    @property
    def test(self):
        return self.filter({"build": False, "test": True})

    @property
    def build(self):
        return self.filter({"build": True})
from conans.model.conanfile_interface import ConanFileInterface
from conans.model.ref import ConanFileReference
class Requirement(object):
    """One dependency edge; hash/eq identity is (ref.name, build context)."""
    def __init__(self, ref, build=False, direct=True, test=False, visible=True):
        # By default this is a generic library requirement
        self.ref = ref
        self.build = build  # This dependent node is a build tool that is executed at build time only
        self.direct = direct
        self.test = test
        self.visible = visible
    def __repr__(self):
        return repr(self.__dict__)
    def __hash__(self):
        # Only package name and build context matter for identity.
        return hash((self.ref.name, self.build))
    def __eq__(self, other):
        return self.ref.name == other.ref.name and self.build == other.build
    def __ne__(self, other):
        return not self.__eq__(other)
    def aggregate(self, other):
        """ when closing loop and finding the same dependency on a node, the information needs
        to be aggregated
        """
        # Visibility is OR-ed: visible through any path => visible.
        assert self.build == other.build
        self.visible |= other.visible
class UserRequirementsDict(object):
    """ user facing dict to allow access of dependencies by name
    """
    def __init__(self, data, require_filter=None):
        self._data = data  # dict-like {Requirement: conanfile}
        self._require_filter = require_filter  # dict {trait: value} for requirements
    def filter(self, require_filter):
        # Keep only entries whose Requirement traits match every filter pair.
        def filter_fn(require):
            for k, v in require_filter.items():
                if getattr(require, k) != v:
                    return False
            return True
        data = OrderedDict((k, v) for k, v in self._data.items() if filter_fn(k))
        return UserRequirementsDict(data, require_filter)
    def __bool__(self):
        return bool(self._data)
    __nonzero__ = __bool__
    def _get_require(self, ref, **kwargs):
        # Build the Requirement key used to index _data, from a plain name
        # or a full "name/version@user/channel" reference string.
        assert isinstance(ref, str)
        if "/" in ref:
            ref = ConanFileReference.loads(ref)
        else:
            ref = ConanFileReference(ref, "unknown", "unknown", "unknown", validate=False)
        if self._require_filter:
            kwargs.update(self._require_filter)
        r = Requirement(ref, **kwargs)
        return r
    def get(self, ref, **kwargs):
        r = self._get_require(ref, **kwargs)
        return self._data.get(r)
    def __getitem__(self, name):
        r = self._get_require(name)
        return self._data[r]
    def __delitem__(self, name):
        r = self._get_require(name)
        del self._data[r]
    def items(self):
        return self._data.items()
    def values(self):
        return self._data.values()
class ConanFileDependencies(UserRequirementsDict):
    """Dependency view of a graph node, keyed by Requirement.

    NOTE: dataset-dump junk fused onto the final line has been removed.
    """

    @staticmethod
    def from_node(node):
        """Build the dependencies mapping for `node` from the dep graph.

        Direct edges are classified into build/test/private/host groups,
        then each group is expanded transitively; requirements reached via
        several paths are aggregated into a single entry.
        """
        # TODO: This construction will be easier in 2.0
        build, test, host, private = [], [], [], []
        for edge in node.dependencies:
            if edge.build_require:
                if not edge.require.force_host_context:
                    build.append(edge.dst)
                else:
                    test.append(edge.dst)
            elif edge.private:
                private.append(edge.dst)
            else:
                host.append(edge.dst)

        d = OrderedDict()

        def update_existing(req, conanfile):
            # Merge traits when the same requirement arrives via two paths.
            existing = d.get(req)
            if existing is not None:
                _, existing_req = existing
                existing_req.aggregate(req)
                req = existing_req
            d[req] = conanfile, req

        def expand(nodes, is_build, is_test, is_visible):
            # Record the direct nodes, then walk their transitive
            # non-build, non-private closure breadth-first.
            all_nodes = set(nodes)
            for n in nodes:
                conanfile = ConanFileInterface(n.conanfile)
                req = Requirement(n.ref, build=is_build, test=is_test, visible=is_visible)
                update_existing(req, conanfile)

            next_nodes = nodes
            while next_nodes:
                new_nodes = []
                for next_node in next_nodes:
                    for e in next_node.dependencies:
                        if not e.build_require and not e.private and e.dst not in all_nodes:
                            new_nodes.append(e.dst)
                            all_nodes.add(e.dst)
                next_nodes = new_nodes
                for n in next_nodes:
                    conanfile = ConanFileInterface(n.conanfile)
                    req = Requirement(n.ref, build=is_build, test=is_test, direct=False,
                                      visible=is_visible)
                    update_existing(req, conanfile)

        expand(host, is_build=False, is_test=False, is_visible=True)
        expand(private, is_build=False, is_test=False, is_visible=False)
        expand(build, is_build=True, is_test=False, is_visible=False)
        expand(test, is_build=False, is_test=True, is_visible=False)

        d = OrderedDict([(k, v[0]) for k, v in d.items()])
        return ConanFileDependencies(d)

    def filter(self, require_filter):
        # FIXME: Copy of the above, to return ConanFileDependencies class object
        def filter_fn(require):
            for k, v in require_filter.items():
                if getattr(require, k) != v:
                    return False
            return True
        data = OrderedDict((k, v) for k, v in self._data.items() if filter_fn(k))
        return ConanFileDependencies(data, require_filter)

    @property
    def topological_sort(self):
        """Dependencies ordered leaves-first (most direct deps last)."""
        # Return first independent nodes, final ones are the more direct deps
        result = OrderedDict()
        opened = self._data.copy()
        while opened:
            opened_values = set(opened.values())
            new_opened = OrderedDict()
            for req, conanfile in opened.items():
                deps_in_opened = any(d in opened_values for d in conanfile.dependencies.values())
                if deps_in_opened:
                    new_opened[req] = conanfile  # keep it for next iteration
                else:
                    result[req] = conanfile  # No dependencies in open set!
            opened = new_opened
        return ConanFileDependencies(result)

    @property
    def direct_host(self):
        return self.filter({"build": False, "direct": True, "test": False})

    @property
    def direct_build(self):
        return self.filter({"build": True, "direct": True})

    @property
    def host(self):
        return self.filter({"build": False, "test": False})

    @property
    def test(self):
        return self.filter({"build": False, "test": True})

    @property
    def build(self):
        return self.filter({"build": True})
# Alembic revision identifiers.
revision = '441441c32014'
# NOTE(review): '<KEY>' looks like a redacted/placeholder revision id --
# restore the real down_revision before running this migration.
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
# Categories to be removed in upgrade
LISTED_BUILDING_CPC = 'Listed building conditional planning consent'
# Categories to be reordered in upgrade
LISTED_BUILDING = 'Listed building'
ENFORCEMENT_NOTICE = 'Enforcement notice'
SMOKE_CO = 'Smoke control order'
SITE_OF_SSI = 'Site of special scientific interest (SSSI)'
LICENSES = 'Licences'
LOCAL_ACTS = 'Local acts'
WATER_DRAINAGE = 'Water / drainage'
UNCOMMON_CHARGES = 'Uncommon charges'
CONSERVATION_AREA = 'Conservation area'
CONDITIONAL_PC = 'Conditional planning consent'
# Categories renamed by this migration (old name / new name pairs).
ARTICLE_FOUR_OLD = 'Article 4 / no permitted development'
ARTICLE_FOUR_NEW = 'No permitted development / article 4'
PLANNING_NOTICES_OLD = 'Planning notices'
PLANNING_NOTICES_NEW = 'Planning notice'
PLANNING_AGREEMENT = 'Planning agreement'
TREE_PO = 'Tree preservation order (TPO)'
MODIFICATION = 'Modification / rectification orders'
def upgrade():
    """Remove, rename and re-order planning charge categories."""
    conn = op.get_bind()
    planning_id = get_charge_category_id_by_name('Planning', True, conn)
    listed_building_id = get_charge_category_id_by_name('Listed building', True, conn)
    other_id = get_charge_category_id_by_name('Other', True, conn)

    delete_category(LISTED_BUILDING_CPC, conn)

    update_category_name(ARTICLE_FOUR_OLD, ARTICLE_FOUR_NEW)
    update_category_name(PLANNING_NOTICES_OLD, PLANNING_NOTICES_NEW)

    # (category, parent id, new display position) -- applied in order.
    reorder = [
        (ENFORCEMENT_NOTICE, listed_building_id, 1),
        (LISTED_BUILDING, listed_building_id, 2),
        (LICENSES, other_id, 5),
        (LOCAL_ACTS, other_id, 6),
        (SITE_OF_SSI, other_id, 7),
        (SMOKE_CO, other_id, 8),
        (UNCOMMON_CHARGES, other_id, 9),
        (WATER_DRAINAGE, other_id, 10),
        (CONDITIONAL_PC, planning_id, 1),
        (CONSERVATION_AREA, planning_id, 2),
        (MODIFICATION, planning_id, 4),
        (ARTICLE_FOUR_NEW, planning_id, 5),
        (PLANNING_NOTICES_NEW, planning_id, 7),
        (TREE_PO, planning_id, 9),
    ]
    for category, parent_id, position in reorder:
        update_category_display_order(category, parent_id, position, conn)
def downgrade():
    """Reverse :func:`upgrade`: restore names, display order and the
    removed 'Listed building conditional planning consent' category."""
    conn = op.get_bind()
    planning_id = get_charge_category_id_by_name('Planning', True, conn)
    listed_building_id = get_charge_category_id_by_name('Listed building', True, conn)
    other_id = get_charge_category_id_by_name('Other', True, conn)

    insert_category(LISTED_BUILDING_CPC, LISTED_BUILDING_CPC, planning_id, 8)

    update_category_name(ARTICLE_FOUR_NEW, ARTICLE_FOUR_OLD)
    update_category_name(PLANNING_NOTICES_NEW, PLANNING_NOTICES_OLD)

    # (category, parent id, restored display position) -- applied in order.
    reorder = [
        (LISTED_BUILDING, listed_building_id, 1),
        (ENFORCEMENT_NOTICE, listed_building_id, 2),
        (SMOKE_CO, other_id, 5),
        (SITE_OF_SSI, other_id, 6),
        (LICENSES, other_id, 7),
        (LOCAL_ACTS, other_id, 8),
        (WATER_DRAINAGE, other_id, 9),
        (UNCOMMON_CHARGES, other_id, 10),
        (CONSERVATION_AREA, planning_id, 1),
        (CONDITIONAL_PC, planning_id, 2),
        (ARTICLE_FOUR_OLD, planning_id, 4),
        (PLANNING_NOTICES_OLD, planning_id, 5),
        (TREE_PO, planning_id, 7),
        (MODIFICATION, planning_id, 10),
    ]
    for category, parent_id, position in reorder:
        update_category_display_order(category, parent_id, position, conn)
def get_charge_category_id_by_name(category_name, is_parent, conn):
    """Return the id of the charge category called `category_name`.

    :param category_name: exact category name (trusted, migration-internal).
    :param is_parent: restrict the match to top-level categories
        (``parent_id IS NULL``).
    :param conn: live DB connection.
    :raises Exception: if no matching category exists.
    """
    # NOTE: names are interpolated directly into SQL; acceptable only
    # because every caller passes the module-level constants above.
    if is_parent:
        query = ("SELECT id FROM charge_categories "
                 "WHERE name = '{}' AND parent_id IS NULL").format(category_name)
    else:
        query = "SELECT id FROM charge_categories WHERE name = '{}'".format(category_name)
    rows = conn.execute(query).fetchall()
    if not rows:
        raise Exception("Unable to retrieve charge category with name '{}'".format(category_name))
    return rows[0][0]
def update_category_name(current_name, new_name):
    """Rename a charge category, keeping `display_name` in step with `name`."""
    sql = ("UPDATE charge_categories "
           "SET name = '{0}', display_name = '{0}' "
           "WHERE name = '{1}'").format(new_name, current_name)
    op.execute(sql)
def insert_category(name, display_name, parent_id, display_order):
    """Insert a new charge category under *parent_id* at *display_order*,
    shifting existing siblings at or after that slot down by one.

    Idempotent: if a category with the same name and parent already exists
    nothing is changed.  The previous version ran the display_order shift
    unconditionally (only the INSERT was guarded by IF NOT EXISTS), so
    re-running the migration corrupted sibling ordering; the shift now
    lives inside the guarded DO block.

    NOTE(review): values are interpolated straight into SQL, matching the
    rest of this migration; all call sites pass module-level constants.
    """
    query = (
        "DO $$ BEGIN IF NOT EXISTS "
        "(SELECT FROM charge_categories "
        "WHERE name = '{name}' AND parent_id = {parent}) THEN "
        # Make room for the new row before inserting it.
        "UPDATE charge_categories "
        "SET display_order = display_order + 1 "
        "WHERE display_order >= {order} AND parent_id = {parent}; "
        "INSERT INTO charge_categories (name, display_name, parent_id, display_order) "
        "VALUES ('{name}', '{display_name}', {parent}, {order})"
        "; END IF; END $$"
    ).format(name=name, display_name=display_name, parent=parent_id, order=display_order)
    op.execute(query)
def delete_category(category, conn):
    """Delete the charge category named *category* and close the gap it
    leaves in its siblings' display_order.

    Raises ``Exception`` when the category does not exist.  Rows in the
    two association tables are removed before the category itself
    (presumably to satisfy foreign-key constraints — confirm).
    """
    res = conn.execute("SELECT id, display_order, parent_id "
                       "FROM charge_categories "
                       "WHERE name = '{}'".format(category))
    results = res.fetchall()
    if results is None or len(results) == 0:
        raise Exception("Unable to delete charge category with name '{}', category not found".format(category))
    category_id = results[0][0]
    display_order = results[0][1]
    parent_id = results[0][2]
    # Shift later siblings up by one to fill the hole left by the delete.
    op.execute("UPDATE charge_categories "
               "SET display_order = display_order - 1 "
               "WHERE display_order >= {} AND parent_id = {}".format(display_order, parent_id))
    op.execute("DELETE FROM charge_categories_stat_provisions WHERE category_id = {}".format(category_id))
    op.execute("DELETE FROM charge_categories_instruments WHERE category_id = {}".format(category_id))
    op.execute("DELETE FROM charge_categories WHERE id = {}".format(category_id))
def update_category_display_order(category, parent_id, new_display_order, conn):
    """Move *category* (a child of *parent_id*) to *new_display_order* by
    swapping places with whichever sibling currently occupies that slot.

    Raises ``Exception`` when the category does not exist under that parent.
    """
    res = conn.execute("SELECT id, display_order "
                       "FROM charge_categories "
                       "WHERE name = '{}' AND parent_id = {}".format(category, parent_id))
    results = res.fetchall()
    if results is None or len(results) == 0:
        raise Exception("Unable to update charge category with name '{}', category not found".format(category))
    category_id = results[0][0]
    old_display_order = results[0][1]
    # Swap display order, put whatever sub-category is currently in the new display order, into the old display order
    op.execute("UPDATE charge_categories "
               "SET display_order = {} "
               "WHERE display_order = {} AND parent_id = {}".format(old_display_order, new_display_order, parent_id))
    # Set old display order to new display order
    op.execute("UPDATE charge_categories "
               "SET display_order = {} "
               "WHERE id = {}".format(new_display_order, category_id)) | migrations/versions/441441c32014_modify_charge_categories.py | revision = '441441c32014'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
# Categories to be removed in upgrade
LISTED_BUILDING_CPC = 'Listed building conditional planning consent'
# Categories to be reordered in upgrade
LISTED_BUILDING = 'Listed building'
ENFORCEMENT_NOTICE = 'Enforcement notice'
SMOKE_CO = 'Smoke control order'
SITE_OF_SSI = 'Site of special scientific interest (SSSI)'
LICENSES = 'Licences'
LOCAL_ACTS = 'Local acts'
WATER_DRAINAGE = 'Water / drainage'
UNCOMMON_CHARGES = 'Uncommon charges'
CONSERVATION_AREA = 'Conservation area'
CONDITIONAL_PC = 'Conditional planning consent'
ARTICLE_FOUR_OLD = 'Article 4 / no permitted development'
ARTICLE_FOUR_NEW = 'No permitted development / article 4'
PLANNING_NOTICES_OLD = 'Planning notices'
PLANNING_NOTICES_NEW = 'Planning notice'
PLANNING_AGREEMENT = 'Planning agreement'
TREE_PO = 'Tree preservation order (TPO)'
MODIFICATION = 'Modification / rectification orders'
def upgrade():
    """Reshape the charge-category tree: remove the 'Listed building
    conditional planning consent' category, rename two planning
    categories, and re-order the children of the Planning, Listed
    building and Other parents."""
    conn = op.get_bind()
    # Ids of the three top-level parents whose children are re-ordered.
    planning_id = get_charge_category_id_by_name('Planning', True, conn)
    listed_building_id = get_charge_category_id_by_name('Listed building', True, conn)
    other_id = get_charge_category_id_by_name('Other', True, conn)
    delete_category(LISTED_BUILDING_CPC, conn)
    update_category_name(ARTICLE_FOUR_OLD, ARTICLE_FOUR_NEW)
    update_category_name(PLANNING_NOTICES_OLD, PLANNING_NOTICES_NEW)
    # New ordering; each call swaps the category into the given slot.
    update_category_display_order(ENFORCEMENT_NOTICE, listed_building_id, 1, conn)
    update_category_display_order(LISTED_BUILDING, listed_building_id, 2, conn)
    update_category_display_order(LICENSES, other_id, 5, conn)
    update_category_display_order(LOCAL_ACTS, other_id, 6, conn)
    update_category_display_order(SITE_OF_SSI, other_id, 7, conn)
    update_category_display_order(SMOKE_CO, other_id, 8, conn)
    update_category_display_order(UNCOMMON_CHARGES, other_id, 9, conn)
    update_category_display_order(WATER_DRAINAGE, other_id, 10, conn)
    update_category_display_order(CONDITIONAL_PC, planning_id, 1, conn)
    update_category_display_order(CONSERVATION_AREA, planning_id, 2, conn)
    update_category_display_order(MODIFICATION, planning_id, 4, conn)
    update_category_display_order(ARTICLE_FOUR_NEW, planning_id, 5, conn)
    update_category_display_order(PLANNING_NOTICES_NEW, planning_id, 7, conn)
    update_category_display_order(TREE_PO, planning_id, 9, conn)
def downgrade():
    """Reverse upgrade(): re-insert the deleted category, restore the old
    category names and the previous display ordering."""
    conn = op.get_bind()
    planning_id = get_charge_category_id_by_name('Planning', True, conn)
    listed_building_id = get_charge_category_id_by_name('Listed building', True, conn)
    other_id = get_charge_category_id_by_name('Other', True, conn)
    insert_category(LISTED_BUILDING_CPC, LISTED_BUILDING_CPC, planning_id, 8)
    update_category_name(ARTICLE_FOUR_NEW, ARTICLE_FOUR_OLD)
    update_category_name(PLANNING_NOTICES_NEW, PLANNING_NOTICES_OLD)
    # Restore the pre-upgrade ordering.
    update_category_display_order(LISTED_BUILDING, listed_building_id, 1, conn)
    update_category_display_order(ENFORCEMENT_NOTICE, listed_building_id, 2, conn)
    update_category_display_order(SMOKE_CO, other_id, 5, conn)
    update_category_display_order(SITE_OF_SSI, other_id, 6, conn)
    update_category_display_order(LICENSES, other_id, 7, conn)
    update_category_display_order(LOCAL_ACTS, other_id, 8, conn)
    update_category_display_order(WATER_DRAINAGE, other_id, 9, conn)
    update_category_display_order(UNCOMMON_CHARGES, other_id, 10, conn)
    update_category_display_order(CONSERVATION_AREA, planning_id, 1, conn)
    update_category_display_order(CONDITIONAL_PC, planning_id, 2, conn)
    update_category_display_order(ARTICLE_FOUR_OLD, planning_id, 4, conn)
    update_category_display_order(PLANNING_NOTICES_OLD, planning_id, 5, conn)
    update_category_display_order(TREE_PO, planning_id, 7, conn)
    update_category_display_order(MODIFICATION, planning_id, 10, conn)
def get_charge_category_id_by_name(category_name, is_parent, conn):
    """Return the id of the charge category named *category_name*.

    When *is_parent* is true only top-level categories (``parent_id IS
    NULL``) are considered.  Raises ``Exception`` when no row matches.
    """
    query = "SELECT id FROM charge_categories WHERE name = '{}'".format(category_name)
    if is_parent:
        query += " AND parent_id IS NULL"
    rows = conn.execute(query).fetchall()
    if not rows:
        raise Exception("Unable to retrieve charge category with name '{}'".format(category_name))
    # First matching row, first (only) selected column.
    return rows[0][0]
def update_category_name(current_name, new_name):
    """Rename the charge category *current_name* to *new_name*.

    Both ``name`` and ``display_name`` are set to the new value.
    """
    op.execute("UPDATE charge_categories "
               "SET name = '{0}', display_name = '{0}' "
               "WHERE name = '{1}'".format(new_name, current_name))
def insert_category(name, display_name, parent_id, display_order):
    """Insert a new charge category under *parent_id* at *display_order*,
    shifting existing siblings at or after that slot down by one.

    Idempotent: if a category with the same name and parent already exists
    nothing is changed.  The previous version ran the display_order shift
    unconditionally (only the INSERT was guarded by IF NOT EXISTS), so
    re-running the migration corrupted sibling ordering; the shift now
    lives inside the guarded DO block.

    NOTE(review): values are interpolated straight into SQL, matching the
    rest of this migration; all call sites pass module-level constants.
    """
    query = (
        "DO $$ BEGIN IF NOT EXISTS "
        "(SELECT FROM charge_categories "
        "WHERE name = '{name}' AND parent_id = {parent}) THEN "
        # Make room for the new row before inserting it.
        "UPDATE charge_categories "
        "SET display_order = display_order + 1 "
        "WHERE display_order >= {order} AND parent_id = {parent}; "
        "INSERT INTO charge_categories (name, display_name, parent_id, display_order) "
        "VALUES ('{name}', '{display_name}', {parent}, {order})"
        "; END IF; END $$"
    ).format(name=name, display_name=display_name, parent=parent_id, order=display_order)
    op.execute(query)
def delete_category(category, conn):
    """Delete the charge category named *category* and close the gap it
    leaves in its siblings' display_order.

    Raises ``Exception`` when the category does not exist.  Rows in the
    two association tables are removed before the category itself
    (presumably to satisfy foreign-key constraints — confirm).
    """
    res = conn.execute("SELECT id, display_order, parent_id "
                       "FROM charge_categories "
                       "WHERE name = '{}'".format(category))
    results = res.fetchall()
    if results is None or len(results) == 0:
        raise Exception("Unable to delete charge category with name '{}', category not found".format(category))
    category_id = results[0][0]
    display_order = results[0][1]
    parent_id = results[0][2]
    # Shift later siblings up by one to fill the hole left by the delete.
    op.execute("UPDATE charge_categories "
               "SET display_order = display_order - 1 "
               "WHERE display_order >= {} AND parent_id = {}".format(display_order, parent_id))
    op.execute("DELETE FROM charge_categories_stat_provisions WHERE category_id = {}".format(category_id))
    op.execute("DELETE FROM charge_categories_instruments WHERE category_id = {}".format(category_id))
    op.execute("DELETE FROM charge_categories WHERE id = {}".format(category_id))
def update_category_display_order(category, parent_id, new_display_order, conn):
    """Move *category* (a child of *parent_id*) to *new_display_order* by
    swapping places with whichever sibling currently occupies that slot.

    Raises ``Exception`` when the category does not exist under that parent.
    """
    res = conn.execute("SELECT id, display_order "
                       "FROM charge_categories "
                       "WHERE name = '{}' AND parent_id = {}".format(category, parent_id))
    results = res.fetchall()
    if results is None or len(results) == 0:
        raise Exception("Unable to update charge category with name '{}', category not found".format(category))
    category_id = results[0][0]
    old_display_order = results[0][1]
    # Swap display order, put whatever sub-category is currently in the new display order, into the old display order
    op.execute("UPDATE charge_categories "
               "SET display_order = {} "
               "WHERE display_order = {} AND parent_id = {}".format(old_display_order, new_display_order, parent_id))
    # Set old display order to new display order
    op.execute("UPDATE charge_categories "
               "SET display_order = {} "
               "WHERE id = {}".format(new_display_order, category_id)) | 0.178347 | 0.085901 |
from __future__ import unicode_literals
from . import six
if six.PY3:
    # Re-create the removed Python 2 builtin names on Python 3 so the rest
    # of the module can use them unconditionally.
    basestring = str
    long = int
    xrange = range
    unicode = str
#Undefined
class PyJsUndefined(object):
    # Singleton sentinel standing in for the JS `undefined` value;
    # compared by identity via the module-level instance below.
    TYPE = 'Undefined'
    Class = 'Undefined'
undefined = PyJsUndefined()
#Null
class PyJsNull(object):
    # Singleton sentinel standing in for the JS `null` value.
    TYPE = 'Null'
    Class = 'Null'
null = PyJsNull()
Infinity = float('inf')
NaN = float('nan')
# Python types used to represent each primitive JS type.
UNDEFINED_TYPE = PyJsUndefined
NULL_TYPE = PyJsNull
STRING_TYPE = unicode if six.PY2 else str
NUMBER_TYPE = float
BOOLEAN_TYPE = bool
# exactly 5 simplexes!
PRIMITIVES = frozenset(
    [UNDEFINED_TYPE, NULL_TYPE, STRING_TYPE, NUMBER_TYPE, BOOLEAN_TYPE])
# Representation type -> JS type name, as returned by Type().
TYPE_NAMES = {
    UNDEFINED_TYPE: 'Undefined',
    NULL_TYPE: 'Null',
    STRING_TYPE: 'String',
    NUMBER_TYPE: 'Number',
    BOOLEAN_TYPE: 'Boolean',
}
def Type(x):
    # Any -> Str
    # JS type name of x; anything not represented by one of the five
    # primitive types is reported as 'Object'.
    return TYPE_NAMES.get(type(x), 'Object')
def GetClass(x):
    # Any -> Str
    # Like Type(), but non-primitives report their own Class attribute.
    cand = TYPE_NAMES.get(type(x))
    if cand is None:
        return x.Class
    return cand
def is_undefined(self):
    # Identity check against the undefined singleton.
    return self is undefined
def is_null(self):
    # Identity check against the null singleton.
    return self is null
def is_primitive(self):
    # True for the five primitive representation types.
    return type(self) in PRIMITIVES
def is_object(self):
    return not is_primitive(self)
def is_callable(self):
    # Anything exposing a `call` attribute counts as callable.
    return hasattr(self, 'call')
def is_infinity(self):
    return self == Infinity or self == -Infinity
def is_nan(self):
    # NaN is the only value that does not compare equal to itself.
    return not (self == self)
def is_finite(self):
    # Finite means a real number: neither NaN nor +/-Infinity.
    return not (is_nan(self) or is_infinity(self))
class JsException(Exception):
    """Python-level carrier for a thrown JS value.

    Two formats exist: the legacy translator-based format (all three
    constructor arguments None) and the new format carrying either a raw
    thrown value (*throw*) or an error type + message pair.
    """
    def __init__(self, typ=None, message=None, throw=None):
        if typ is None and message is None and throw is None:
            # it means its the translator based error (old format), do nothing
            self._translator_based = True
        else:
            # `throw` is mutually exclusive with (typ, message).
            assert throw is None or (typ is None
                                     and message is None), (throw, typ,
                                                            message)
            self._translator_based = False
            self.typ = typ
            self.message = message
            self.throw = throw
    def get_thrown_value(self, space):
        # JS value this exception represents; typed errors are
        # materialised lazily in the given space.
        if self.throw is not None:
            return self.throw
        else:
            return space.NewError(self.typ, self.message)
    def __str__(self):
        if self._translator_based:
            # NOTE(review): self.mes is never assigned in this class —
            # presumably the translator sets it on the instance; confirm.
            if self.mes.Class == 'Error':
                return self.mes.callprop('toString').value
            else:
                return self.mes.to_string().value
        else:
            if self.throw is not None:
                from .conversions import to_string
                return to_string(self.throw)
            else:
                return self.typ + ': ' + self.message
def MakeError(typ, message=u'no info', throw=None):
    # Convenience constructor for a new-format JsException; the message is
    # coerced to unicode unless it is None.
    return JsException(typ,
                       unicode(message) if message is not None else message,
                       throw)
def value_from_js_exception(js_exception, space):
    """Return the JS value carried by *js_exception*: the thrown value if
    present, otherwise a new error object built in *space*."""
    thrown = js_exception.throw
    if thrown is None:
        return space.NewError(js_exception.typ, js_exception.message)
    return thrown
def js_dtoa(number):
    """Convert a float to its JavaScript string representation,
    approximating the ECMAScript number-to-string rules on top of
    Python's repr()."""
    if is_nan(number):
        return u'NaN'
    elif is_infinity(number):
        if number > 0:
            return u'Infinity'
        return u'-Infinity'
    elif number == 0.:
        return u'0'
    elif abs(number) < 1e-6 or abs(number) >= 1e21:
        # Exponential notation outside [1e-6, 1e21).
        frac, exponent = unicode(repr(float(number))).split('e')
        # Remove leading zeros from the exponent.
        exponent = int(exponent)
        return frac + ('e' if exponent < 0 else 'e+') + unicode(exponent)
    elif abs(number) < 1e-4: # python starts to return exp notation while we still want the prec
        # Rebuild the fixed-point form 0.000...digits by hand.
        frac, exponent = unicode(repr(float(number))).split('e-')
        base = u'0.' + u'0' * (int(exponent) - 1) + frac.lstrip('-').replace('.', '')
        return base if number > 0. else u'-' + base
    elif isinstance(number, long) or number.is_integer(): # dont print .0
        return unicode(int(number))
    return unicode(repr(number)) # python representation should be equivalent. | resources/lib/js2py/internals/simplex.py | from __future__ import unicode_literals
from . import six
if six.PY3:
    # Re-create the removed Python 2 builtin names on Python 3 so the rest
    # of the module can use them unconditionally.
    basestring = str
    long = int
    xrange = range
    unicode = str
#Undefined
class PyJsUndefined(object):
    # Singleton sentinel standing in for the JS `undefined` value;
    # compared by identity via the module-level instance below.
    TYPE = 'Undefined'
    Class = 'Undefined'
undefined = PyJsUndefined()
#Null
class PyJsNull(object):
    # Singleton sentinel standing in for the JS `null` value.
    TYPE = 'Null'
    Class = 'Null'
null = PyJsNull()
Infinity = float('inf')
NaN = float('nan')
# Python types used to represent each primitive JS type.
UNDEFINED_TYPE = PyJsUndefined
NULL_TYPE = PyJsNull
STRING_TYPE = unicode if six.PY2 else str
NUMBER_TYPE = float
BOOLEAN_TYPE = bool
# exactly 5 simplexes!
PRIMITIVES = frozenset(
    [UNDEFINED_TYPE, NULL_TYPE, STRING_TYPE, NUMBER_TYPE, BOOLEAN_TYPE])
# Representation type -> JS type name, as returned by Type().
TYPE_NAMES = {
    UNDEFINED_TYPE: 'Undefined',
    NULL_TYPE: 'Null',
    STRING_TYPE: 'String',
    NUMBER_TYPE: 'Number',
    BOOLEAN_TYPE: 'Boolean',
}
def Type(x):
    # Any -> Str
    # JS type name of x; anything not represented by one of the five
    # primitive types is reported as 'Object'.
    return TYPE_NAMES.get(type(x), 'Object')
def GetClass(x):
    # Any -> Str
    # Like Type(), but non-primitives report their own Class attribute.
    cand = TYPE_NAMES.get(type(x))
    if cand is None:
        return x.Class
    return cand
def is_undefined(self):
    # Identity check against the undefined singleton.
    return self is undefined
def is_null(self):
    # Identity check against the null singleton.
    return self is null
def is_primitive(self):
    # True for the five primitive representation types.
    return type(self) in PRIMITIVES
def is_object(self):
    return not is_primitive(self)
def is_callable(self):
    # Anything exposing a `call` attribute counts as callable.
    return hasattr(self, 'call')
def is_infinity(self):
    return self == Infinity or self == -Infinity
def is_nan(self):
    # NaN is the only value that does not compare equal to itself.
    return not (self == self)
def is_finite(self):
    # Finite means a real number: neither NaN nor +/-Infinity.
    return not (is_nan(self) or is_infinity(self))
class JsException(Exception):
    """Python-level carrier for a thrown JS value.

    Two formats exist: the legacy translator-based format (all three
    constructor arguments None) and the new format carrying either a raw
    thrown value (*throw*) or an error type + message pair.
    """
    def __init__(self, typ=None, message=None, throw=None):
        if typ is None and message is None and throw is None:
            # it means its the translator based error (old format), do nothing
            self._translator_based = True
        else:
            # `throw` is mutually exclusive with (typ, message).
            assert throw is None or (typ is None
                                     and message is None), (throw, typ,
                                                            message)
            self._translator_based = False
            self.typ = typ
            self.message = message
            self.throw = throw
    def get_thrown_value(self, space):
        # JS value this exception represents; typed errors are
        # materialised lazily in the given space.
        if self.throw is not None:
            return self.throw
        else:
            return space.NewError(self.typ, self.message)
    def __str__(self):
        if self._translator_based:
            # NOTE(review): self.mes is never assigned in this class —
            # presumably the translator sets it on the instance; confirm.
            if self.mes.Class == 'Error':
                return self.mes.callprop('toString').value
            else:
                return self.mes.to_string().value
        else:
            if self.throw is not None:
                from .conversions import to_string
                return to_string(self.throw)
            else:
                return self.typ + ': ' + self.message
def MakeError(typ, message=u'no info', throw=None):
    # Convenience constructor for a new-format JsException; the message is
    # coerced to unicode unless it is None.
    return JsException(typ,
                       unicode(message) if message is not None else message,
                       throw)
def value_from_js_exception(js_exception, space):
    """Return the JS value carried by *js_exception*: the thrown value if
    present, otherwise a new error object built in *space*."""
    thrown = js_exception.throw
    if thrown is None:
        return space.NewError(js_exception.typ, js_exception.message)
    return thrown
def js_dtoa(number):
    """Convert a float to its JavaScript string representation,
    approximating the ECMAScript number-to-string rules on top of
    Python's repr()."""
    if is_nan(number):
        return u'NaN'
    elif is_infinity(number):
        if number > 0:
            return u'Infinity'
        return u'-Infinity'
    elif number == 0.:
        return u'0'
    elif abs(number) < 1e-6 or abs(number) >= 1e21:
        # Exponential notation outside [1e-6, 1e21).
        frac, exponent = unicode(repr(float(number))).split('e')
        # Remove leading zeros from the exponent.
        exponent = int(exponent)
        return frac + ('e' if exponent < 0 else 'e+') + unicode(exponent)
    elif abs(number) < 1e-4: # python starts to return exp notation while we still want the prec
        # Rebuild the fixed-point form 0.000...digits by hand.
        frac, exponent = unicode(repr(float(number))).split('e-')
        base = u'0.' + u'0' * (int(exponent) - 1) + frac.lstrip('-').replace('.', '')
        return base if number > 0. else u'-' + base
    elif isinstance(number, long) or number.is_integer(): # dont print .0
        return unicode(int(number))
    return unicode(repr(number)) # python representation should be equivalent. | 0.508788 | 0.206754 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from graph.types import (MatMulOpParameters, MatrixAddParameters,
MatrixDivParameters, MatrixMulParameters,
MatrixSubParameters)
from graph.types.expression_fusion import ExpressionFusionParameters
from graph.types.fusions import MatScaleFusionParameters
from graph.types.tensor_arithmetic import Broadcastable, MatMulTransposedParameters
from execution.kernels.kernel_base import (KernelBase, params_type,
qrec_type)
from quantization.qtype import QType
from quantization.new_qrec import QRec
from utils.at_norm import at_norm
# Elementwise kernel dispatch table: maps a graph parameter class to its
# numpy implementation plus a flag telling whether the op is multiplicative
# (mul/div) or additive (add/sub) — the two families are rescaled
# differently by the kernels below.
PIECEWISE_OPS = {
    MatrixAddParameters: {'op': lambda x, y, dtype: x + y, 'is_mult': False},
    MatrixMulParameters: {'op': lambda x, y, dtype: np.multiply(x, y, dtype=dtype), 'is_mult': True},
    MatrixSubParameters: {'op': lambda x, y, dtype: x - y, 'is_mult': False},
    MatrixDivParameters: {'op': lambda x, y, dtype: x / y, 'is_mult': True},
}
LOG = logging.getLogger("nntool." + __name__)
@params_type(MatrixAddParameters, MatrixDivParameters,
             MatrixMulParameters, MatrixSubParameters)
@qrec_type('scaled')
class PieceWiseSymmetricMult(KernelBase):
    """Elementwise add/sub/mul/div kernel for the 'scaled' quantization scheme."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        """Run the elementwise op on quantized inputs and return clipped outputs.

        Mul/div: both operands accumulate in int32 and the product is
        rescaled with the cached 'scale_mul_biases_q'.  Add/sub: one input
        (chosen by qrec.cache['scaled_idx']) is first rescaled so both
        operands share a scale, then an offset is added after rescaling.
        """
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        if isinstance(params, Broadcastable) and params.is_broadcasted:
            in_tensors = params.broadcast_inputs(in_tensors)
        func = PIECEWISE_OPS[params.__class__]
        op = func['op']
        if func['is_mult']:
            # compute_in_out_scale(qrec, in_idx=(0, 1), out_idx=0)
            scale_mul_biases_q = qrec.cache['scale_mul_biases_q']
            i1 = in_tensors[0].astype(np.int32)
            i2 = in_tensors[1].astype(np.int32)
            out_tensor = scale_mul_biases_q.apply_scales(op(i1, i2, np.int32))
        else:
            # larger scale should be scaled
            # set_add_in_scale(qrec)
            scale_mul_biases_q = qrec.cache['scale_mul_biases_q']
            if qrec.cache['scaled_idx']:
                i1 = in_tensors[0].astype(np.int32)
                i2 = qrec.cache['scale_in_mul_biases_q'].apply_scales(in_tensors[1])
            else:
                i1 = qrec.cache['scale_in_mul_biases_q'].apply_scales(in_tensors[0])
                i2 = in_tensors[1].astype(np.int32)
            out_tensor = scale_mul_biases_q.apply_scales(op(i1, i2, None)) + qrec.cache['add_bias_offset']
        return qrec.get_outputs(params, [qrec.out_qs[0].clip(out_tensor)], ktype="symmetric")
@params_type(MatrixAddParameters, MatrixDivParameters,
             MatrixMulParameters, MatrixSubParameters)
@qrec_type('symmetric')
class PieceWiseSymmetric(KernelBase):
    """Elementwise add/sub/mul/div kernel for POW2 (symmetric) quantization."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        """Compute the op in int32 and reduce to the output Q format.

        Mul/div accumulate at q = q_in0 + q_in1.  Add/sub first shift the
        operand with the larger q down so both share q = min(q_in0, q_in1).
        """
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        func = PIECEWISE_OPS[params.__class__]
        op = func['op']
        if func['is_mult']:
            i1 = in_tensors[0].astype(np.int32)
            i2 = in_tensors[1].astype(np.int32)
            res = op(i1, i2, np.int32)
            q_calc = QType.Pow2(
                bits=32, q=qrec.in_qs[0].q+qrec.in_qs[1].q, signed=True)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        else:
            off_in = abs(qrec.in_qs[0].q - qrec.in_qs[1].q)
            if qrec.in_qs[0].q > qrec.in_qs[1].q:
                i1 = at_norm(in_tensors[0].astype(np.int32), off_in)
                i2 = in_tensors[1].astype(np.int32)
            else:
                i1 = in_tensors[0].astype(np.int32)
                i2 = at_norm(in_tensors[1].astype(np.int32), off_in)
            res = op(i1, i2, None)
            q_calc = QType.Pow2(bits=32, q=min(qrec.in_qs[0].q, qrec.in_qs[1].q), signed=True)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        return qrec.get_outputs(params, [res], ktype="symmetric")
@params_type(MatScaleFusionParameters)
@qrec_type('symmetric', 'scaled')
class MatScaleSymmetric(KernelBase):
    """Elementwise 2- or 3-operand multiply (mat-scale fusion) in POW2
    arithmetic."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        # Dispatch on the number of graph inputs (2 or 3 operands).
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        LOG.debug("matscale input %s", ",".join([t.dtype.name for t in in_tensors]))
        if len(params.in_dims) == 3:
            output_tensor = cls.matscale3(in_tensors, qrec)
        else:
            output_tensor = cls.matscale2(in_tensors, qrec)
        return qrec.get_outputs(params, [output_tensor], ktype="symmetric")
    @classmethod
    def matscale3(cls, in_tensors, qrec):
        """Three-operand multiply.

        8-bit: a single int32 product at q0+q1+q2, then one reduction.
        16-bit: two chained products, reducing to the output format after
        each step to keep the accumulator within 32 bits.
        """
        assert qrec.in_qs[0].bits == qrec.in_qs[1].bits
        assert qrec.in_qs[1].bits == qrec.in_qs[2].bits
        if qrec.in_qs[0].bits == 8:
            q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q +
                                qrec.in_qs[1].q + qrec.in_qs[2].q, signed=True)
            res = np.multiply(np.multiply(in_tensors[0], in_tensors[1],
                                          dtype=np.int32),
                              in_tensors[2],
                              dtype=np.int32)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        elif qrec.in_qs[0].bits == 16:
            q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
            res = np.multiply(in_tensors[0], in_tensors[1], dtype=np.int32)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
            q_calc = QType.Pow2(bits=32, q=qrec.in_qs[2].q + qrec.out_qs[0].q, signed=True)
            res = np.multiply(res, in_tensors[2], dtype=np.int32)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        else:
            raise ValueError("only 8 and 16 bits supported")
        return res
    @classmethod
    def matscale2(cls, in_tensors, qrec=None):
        """Two-operand multiply: int32 product at q0+q1, one reduction."""
        assert qrec.in_qs[0].bits == qrec.in_qs[1].bits
        q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
        res = np.multiply(in_tensors[0], in_tensors[1], dtype=np.int32)
        res = qrec.out_qs[0].reduce_from(res, q_calc)
        return res
@params_type(ExpressionFusionParameters)
@qrec_type('symmetric', 'scaled')
class ExpressionSymmetric(KernelBase):
    """Kernel evaluating a fused elementwise expression via the quantized
    function collection cached on the quantization record."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        details = kwargs.get('details')
        if details is not None:
            # Caller asked for per-symbol intermediate results.
            results = {}
        else:
            results = None
        # Bind each input symbol to a copy of its tensor so evaluation
        # cannot mutate the caller's arrays.
        in_vars = {params.input_symbols[i]: in_tensor.copy()
                   for i, in_tensor in enumerate(in_tensors)}
        out_vars = qrec.cache['qfunc_col'](**in_vars, track_results=results)
        out_tensors = [out_vars[out_sym_name]
                       for out_sym_name in params.output_symbols]
        if details is not None:
            details['results'] = results
        return qrec.get_outputs(params, out_tensors, ktype="symmetric")
@params_type(MatMulOpParameters, MatMulTransposedParameters)
@qrec_type('scaled')
class MatMulScaled(KernelBase):
    """Quantized matrix multiply (optionally with transposed second input)
    for the 'scaled' scheme."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        in_tensors = [in_tensor.astype(np.int32) for in_tensor in qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")]
        if isinstance(params, MatMulTransposedParameters):
            # Transposed variant: swap the last two axes of the second input.
            mat1, mat2 = in_tensors[0], np.swapaxes(in_tensors[1], -2, -1)
        else:
            mat1, mat2 = in_tensors[0], in_tensors[1]
        # Remove the second input's zero point before accumulating.
        mat2 = mat2.astype(np.int32) - qrec.in_qs[1].zero_point.astype(np.int32)
        if len(in_tensors) > 2:
            biases = in_tensors[2]
            if len(biases.shape) == 1:
                if biases.shape[0] == mat1.shape[0]:
                    # 1-D per-row bias: broadcast it down the columns.
                    biases = np.expand_dims(biases, -1)
        else:
            biases = 0
        out_tensor = np.matmul(mat1, mat2) + biases
        out_rank = len(out_tensor.shape)
        mul_biases_q = qrec.cache['mul_biases_q']
        # Per-channel scales apply along an axis whose position depends on
        # whether the second input was transposed; a single scale applies
        # globally (axis None).
        scale_axis = None if len(mul_biases_q.scale) == 1 else \
            (out_rank-1 if isinstance(params, MatMulTransposedParameters) else out_rank-2)
        out_tensor = mul_biases_q.apply_scales(out_tensor, scale_axis)
        return qrec.get_outputs(params, [out_tensor], ktype="symmetric")
@params_type(MatMulOpParameters, MatMulTransposedParameters)
@qrec_type('symmetric')
class MatMulSymmetric(KernelBase):
    """Quantized matrix multiply for POW2 (symmetric) quantization."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        in_tensors = [in_tensor.astype(np.int32) for in_tensor in qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")]
        if isinstance(params, MatMulTransposedParameters):
            # Transposed variant: swap the last two axes of the second input.
            mat1, mat2 = in_tensors[0], np.swapaxes(in_tensors[1], -2, -1)
        else:
            mat1, mat2 = in_tensors[0], in_tensors[1]
        if len(in_tensors) > 2:
            biases = in_tensors[2]
            if len(biases.shape) == 1:
                if biases.shape[0] == mat1.shape[0]:
                    # 1-D per-row bias: broadcast it down the columns.
                    biases = np.expand_dims(biases, -1)
        else:
            biases = 0
        # expect biases in in_q1 + in_q2
        q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
        out_tensor = np.matmul(mat1, mat2) + biases
        out_tensor = qrec.out_qs[0].reduce_from(out_tensor, q_calc)
        return qrec.get_outputs(params, [out_tensor], ktype="symmetric") | tools/nntool/execution/kernels/quant/matrix_operations.py |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from graph.types import (MatMulOpParameters, MatrixAddParameters,
MatrixDivParameters, MatrixMulParameters,
MatrixSubParameters)
from graph.types.expression_fusion import ExpressionFusionParameters
from graph.types.fusions import MatScaleFusionParameters
from graph.types.tensor_arithmetic import Broadcastable, MatMulTransposedParameters
from execution.kernels.kernel_base import (KernelBase, params_type,
qrec_type)
from quantization.qtype import QType
from quantization.new_qrec import QRec
from utils.at_norm import at_norm
# Elementwise kernel dispatch table: maps a graph parameter class to its
# numpy implementation plus a flag telling whether the op is multiplicative
# (mul/div) or additive (add/sub) — the two families are rescaled
# differently by the kernels below.
PIECEWISE_OPS = {
    MatrixAddParameters: {'op': lambda x, y, dtype: x + y, 'is_mult': False},
    MatrixMulParameters: {'op': lambda x, y, dtype: np.multiply(x, y, dtype=dtype), 'is_mult': True},
    MatrixSubParameters: {'op': lambda x, y, dtype: x - y, 'is_mult': False},
    MatrixDivParameters: {'op': lambda x, y, dtype: x / y, 'is_mult': True},
}
LOG = logging.getLogger("nntool." + __name__)
@params_type(MatrixAddParameters, MatrixDivParameters,
             MatrixMulParameters, MatrixSubParameters)
@qrec_type('scaled')
class PieceWiseSymmetricMult(KernelBase):
    """Elementwise add/sub/mul/div kernel for the 'scaled' quantization scheme."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        """Run the elementwise op on quantized inputs and return clipped outputs.

        Mul/div: both operands accumulate in int32 and the product is
        rescaled with the cached 'scale_mul_biases_q'.  Add/sub: one input
        (chosen by qrec.cache['scaled_idx']) is first rescaled so both
        operands share a scale, then an offset is added after rescaling.
        """
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        if isinstance(params, Broadcastable) and params.is_broadcasted:
            in_tensors = params.broadcast_inputs(in_tensors)
        func = PIECEWISE_OPS[params.__class__]
        op = func['op']
        if func['is_mult']:
            # compute_in_out_scale(qrec, in_idx=(0, 1), out_idx=0)
            scale_mul_biases_q = qrec.cache['scale_mul_biases_q']
            i1 = in_tensors[0].astype(np.int32)
            i2 = in_tensors[1].astype(np.int32)
            out_tensor = scale_mul_biases_q.apply_scales(op(i1, i2, np.int32))
        else:
            # larger scale should be scaled
            # set_add_in_scale(qrec)
            scale_mul_biases_q = qrec.cache['scale_mul_biases_q']
            if qrec.cache['scaled_idx']:
                i1 = in_tensors[0].astype(np.int32)
                i2 = qrec.cache['scale_in_mul_biases_q'].apply_scales(in_tensors[1])
            else:
                i1 = qrec.cache['scale_in_mul_biases_q'].apply_scales(in_tensors[0])
                i2 = in_tensors[1].astype(np.int32)
            out_tensor = scale_mul_biases_q.apply_scales(op(i1, i2, None)) + qrec.cache['add_bias_offset']
        return qrec.get_outputs(params, [qrec.out_qs[0].clip(out_tensor)], ktype="symmetric")
@params_type(MatrixAddParameters, MatrixDivParameters,
             MatrixMulParameters, MatrixSubParameters)
@qrec_type('symmetric')
class PieceWiseSymmetric(KernelBase):
    """Elementwise add/sub/mul/div kernel for POW2 (symmetric) quantization."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        """Compute the op in int32 and reduce to the output Q format.

        Mul/div accumulate at q = q_in0 + q_in1.  Add/sub first shift the
        operand with the larger q down so both share q = min(q_in0, q_in1).
        """
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        func = PIECEWISE_OPS[params.__class__]
        op = func['op']
        if func['is_mult']:
            i1 = in_tensors[0].astype(np.int32)
            i2 = in_tensors[1].astype(np.int32)
            res = op(i1, i2, np.int32)
            q_calc = QType.Pow2(
                bits=32, q=qrec.in_qs[0].q+qrec.in_qs[1].q, signed=True)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        else:
            off_in = abs(qrec.in_qs[0].q - qrec.in_qs[1].q)
            if qrec.in_qs[0].q > qrec.in_qs[1].q:
                i1 = at_norm(in_tensors[0].astype(np.int32), off_in)
                i2 = in_tensors[1].astype(np.int32)
            else:
                i1 = in_tensors[0].astype(np.int32)
                i2 = at_norm(in_tensors[1].astype(np.int32), off_in)
            res = op(i1, i2, None)
            q_calc = QType.Pow2(bits=32, q=min(qrec.in_qs[0].q, qrec.in_qs[1].q), signed=True)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        return qrec.get_outputs(params, [res], ktype="symmetric")
@params_type(MatScaleFusionParameters)
@qrec_type('symmetric', 'scaled')
class MatScaleSymmetric(KernelBase):
    """Elementwise 2- or 3-operand multiply (mat-scale fusion) in POW2
    arithmetic."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        # Dispatch on the number of graph inputs (2 or 3 operands).
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        LOG.debug("matscale input %s", ",".join([t.dtype.name for t in in_tensors]))
        if len(params.in_dims) == 3:
            output_tensor = cls.matscale3(in_tensors, qrec)
        else:
            output_tensor = cls.matscale2(in_tensors, qrec)
        return qrec.get_outputs(params, [output_tensor], ktype="symmetric")
    @classmethod
    def matscale3(cls, in_tensors, qrec):
        """Three-operand multiply.

        8-bit: a single int32 product at q0+q1+q2, then one reduction.
        16-bit: two chained products, reducing to the output format after
        each step to keep the accumulator within 32 bits.
        """
        assert qrec.in_qs[0].bits == qrec.in_qs[1].bits
        assert qrec.in_qs[1].bits == qrec.in_qs[2].bits
        if qrec.in_qs[0].bits == 8:
            q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q +
                                qrec.in_qs[1].q + qrec.in_qs[2].q, signed=True)
            res = np.multiply(np.multiply(in_tensors[0], in_tensors[1],
                                          dtype=np.int32),
                              in_tensors[2],
                              dtype=np.int32)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        elif qrec.in_qs[0].bits == 16:
            q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
            res = np.multiply(in_tensors[0], in_tensors[1], dtype=np.int32)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
            q_calc = QType.Pow2(bits=32, q=qrec.in_qs[2].q + qrec.out_qs[0].q, signed=True)
            res = np.multiply(res, in_tensors[2], dtype=np.int32)
            res = qrec.out_qs[0].reduce_from(res, q_calc)
        else:
            raise ValueError("only 8 and 16 bits supported")
        return res
    @classmethod
    def matscale2(cls, in_tensors, qrec=None):
        """Two-operand multiply: int32 product at q0+q1, one reduction."""
        assert qrec.in_qs[0].bits == qrec.in_qs[1].bits
        q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
        res = np.multiply(in_tensors[0], in_tensors[1], dtype=np.int32)
        res = qrec.out_qs[0].reduce_from(res, q_calc)
        return res
@params_type(ExpressionFusionParameters)
@qrec_type('symmetric', 'scaled')
class ExpressionSymmetric(KernelBase):
    """Kernel evaluating a fused elementwise expression via the quantized
    function collection cached on the quantization record."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        in_tensors = qrec.prepare_inputs(params, in_tensors, ktype="symmetric")
        details = kwargs.get('details')
        if details is not None:
            # Caller asked for per-symbol intermediate results.
            results = {}
        else:
            results = None
        # Bind each input symbol to a copy of its tensor so evaluation
        # cannot mutate the caller's arrays.
        in_vars = {params.input_symbols[i]: in_tensor.copy()
                   for i, in_tensor in enumerate(in_tensors)}
        out_vars = qrec.cache['qfunc_col'](**in_vars, track_results=results)
        out_tensors = [out_vars[out_sym_name]
                       for out_sym_name in params.output_symbols]
        if details is not None:
            details['results'] = results
        return qrec.get_outputs(params, out_tensors, ktype="symmetric")
@params_type(MatMulOpParameters, MatMulTransposedParameters)
@qrec_type('scaled')
class MatMulScaled(KernelBase):
    """Quantized matrix multiply (optionally with transposed second input)
    for the 'scaled' scheme."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        in_tensors = [in_tensor.astype(np.int32) for in_tensor in qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")]
        if isinstance(params, MatMulTransposedParameters):
            # Transposed variant: swap the last two axes of the second input.
            mat1, mat2 = in_tensors[0], np.swapaxes(in_tensors[1], -2, -1)
        else:
            mat1, mat2 = in_tensors[0], in_tensors[1]
        # Remove the second input's zero point before accumulating.
        mat2 = mat2.astype(np.int32) - qrec.in_qs[1].zero_point.astype(np.int32)
        if len(in_tensors) > 2:
            biases = in_tensors[2]
            if len(biases.shape) == 1:
                if biases.shape[0] == mat1.shape[0]:
                    # 1-D per-row bias: broadcast it down the columns.
                    biases = np.expand_dims(biases, -1)
        else:
            biases = 0
        out_tensor = np.matmul(mat1, mat2) + biases
        out_rank = len(out_tensor.shape)
        mul_biases_q = qrec.cache['mul_biases_q']
        # Per-channel scales apply along an axis whose position depends on
        # whether the second input was transposed; a single scale applies
        # globally (axis None).
        scale_axis = None if len(mul_biases_q.scale) == 1 else \
            (out_rank-1 if isinstance(params, MatMulTransposedParameters) else out_rank-2)
        out_tensor = mul_biases_q.apply_scales(out_tensor, scale_axis)
        return qrec.get_outputs(params, [out_tensor], ktype="symmetric")
@params_type(MatMulOpParameters, MatMulTransposedParameters)
@qrec_type('symmetric')
class MatMulSymmetric(KernelBase):
    """Quantized matrix multiply for POW2 (symmetric) quantization."""
    @classmethod
    def execute(cls, params,
                in_tensors,
                qrec: QRec,
                **kwargs):
        in_tensors = [in_tensor.astype(np.int32) for in_tensor in qrec.prepare_inputs(
            params, in_tensors, ktype="symmetric")]
        if isinstance(params, MatMulTransposedParameters):
            # Transposed variant: swap the last two axes of the second input.
            mat1, mat2 = in_tensors[0], np.swapaxes(in_tensors[1], -2, -1)
        else:
            mat1, mat2 = in_tensors[0], in_tensors[1]
        if len(in_tensors) > 2:
            biases = in_tensors[2]
            if len(biases.shape) == 1:
                if biases.shape[0] == mat1.shape[0]:
                    # 1-D per-row bias: broadcast it down the columns.
                    biases = np.expand_dims(biases, -1)
        else:
            biases = 0
        # expect biases in in_q1 + in_q2
        q_calc = QType.Pow2(bits=32, q=qrec.in_qs[0].q + qrec.in_qs[1].q, signed=True)
        out_tensor = np.matmul(mat1, mat2) + biases
        out_tensor = qrec.out_qs[0].reduce_from(out_tensor, q_calc)
        return qrec.get_outputs(params, [out_tensor], ktype="symmetric") | 0.71889 | 0.266399 |
import sys
import argparse
from ruamel.yaml import YAML
from pathlib import Path
from jinja2 import Template
# Defaults for the CLI options defined in parse_args() below.
TEMPLATE_DIR = "templates"  # root of the input workflow/docker Jinja templates
DOCKER_DIR = "docker"       # output directory for generated Dockerfiles
CONFIG_FILE = "cfg.yaml"    # YAML config describing the CI job matrix
def parse_args(*args) -> argparse.Namespace:
    """Parse command-line arguments for the CI generator.

    Returns a namespace with ``config_file``, ``docker_dir``,
    ``template_dir`` and ``workflow_name``. Exits via argparse (SystemExit)
    when ``--workflow`` is missing or not a known workflow name.
    """
    parser = argparse.ArgumentParser(
        description="Update the workflow and dockerfiles for CI"
    )
    parser.add_argument(
        "-c",
        "--config-file",
        dest="config_file",
        type=str,
        help="Input YAML file with configuration for generating workflow and docker files.",
        default=CONFIG_FILE,
    )
    parser.add_argument(
        "-d",
        "--docker-dir",
        dest="docker_dir",
        type=str,
        help="Directory to output supporting docker files.",
        default=DOCKER_DIR,
    )
    parser.add_argument(
        "-t",
        "--template-dir",
        dest="template_dir",
        type=str,
        help="Directory containing input workflow YAML templates.",
        default=TEMPLATE_DIR,
    )
    parser.add_argument(
        "-w",
        "--workflow",
        dest="workflow_name",
        type=str,
        choices=["draft", "prerelease"],
        # Fix: previously optional with no default, so omitting -w left
        # workflow_name as None and produced bogus "<dir>/None/..." template
        # paths in update_workflow/update_dockerfiles.
        required=True,
        help="Which workflow to regenerate.",
    )
    return parser.parse_args(*args)
def update_workflow(config_file: str, template_dir: str, workflow_name: str) -> None:
    """Regenerate the GitHub Actions workflow for *workflow_name*.

    Renders the job/step Jinja templates for every OS/version combination in
    the YAML config. For "draft" the generated jobs are appended to the
    existing workflow after the "## @@@" marker; for "prerelease" the whole
    workflow file is rewritten.
    """
    with Path(config_file).open() as fp:
        yaml = YAML()
        config = yaml.load(fp.read())
    # Load all templates for this workflow up front.
    with Path(f"{template_dir}/{workflow_name}/workflow-job.tmpl").open() as fp:
        job = fp.read()
    with Path(f"{template_dir}/{workflow_name}/workflow-step-direct-ps.tmpl").open() as fp:
        step_direct_ps = fp.read()
    with Path(f"{template_dir}/{workflow_name}/workflow-step-direct-sh.tmpl").open() as fp:
        step_direct_sh = fp.read()
    with Path(f"{template_dir}/{workflow_name}/workflow-step-docker.tmpl").open() as fp:
        step_docker = fp.read()
    with Path(f"{template_dir}/{workflow_name}/workflow-header.yaml").open() as fp:
        new_workflow = fp.read()
    for os, data in config["jobs"].items():
        jt = Template(job)
        if data["docker"]:
            # Docker-based OS: one job on ubuntu-latest with a step per version.
            new_workflow += jt.render(os=os, runs_on="ubuntu-latest")
            for version in data["versions"]:
                new_workflow += Template(step_docker).render(os=os, version=version)
        else:
            # Hosted-runner OS: one job per version on the matching runner.
            for version in data["versions"]:
                new_workflow += jt.render(os=os, runs_on=f"{os}-{version}")
                step_tmpl = step_direct_ps if data.get("powershell") else step_direct_sh
                new_workflow += Template(step_tmpl).render(os=os, version=version)
    if workflow_name == "draft":
        # Merge: keep everything up to and including the "## @@@" marker,
        # then append the regenerated jobs.
        draft_path = Path("../.github/workflows/create-draft-release.yml")
        with draft_path.open() as fp:
            existing_lines = fp.readlines()
        # Fix: previously `existing_workflow` was only bound inside the loop,
        # so a missing marker raised NameError instead of the intended assert.
        existing_workflow = ""
        for index, line in enumerate(existing_lines):
            if "## @@@" in line:
                existing_workflow = "".join(existing_lines[:index + 1])
                break
        assert existing_workflow, "marker not found in create-draft-release?"
        with draft_path.open("w") as fp:
            fp.write(existing_workflow + new_workflow)
    elif workflow_name == "prerelease":
        # The pre-release workflow is fully generated; just overwrite it.
        with Path("../.github/workflows/check-pre-release.yml").open("w") as fp:
            fp.write(new_workflow)
def update_dockerfiles(config_file: str, template_dir: str, workflow_name: str, docker_dir: str) -> None:
    """Render one Dockerfile per docker-based OS/version pair in the config.

    Output files are written to ``<docker_dir>/<workflow_name>/Dockerfile.<os>-<version>``.
    """
    with Path(config_file).open() as fp:
        yaml = YAML()
        config = yaml.load(fp.read())
    with Path(f"{template_dir}/{workflow_name}/Dockerfile.tmpl").open() as fp:
        dockerfile = fp.read()
    for os, data in config["jobs"].items():
        # Only docker-based jobs get a Dockerfile.
        if not data["docker"]:
            continue
        for version in data["versions"]:
            out_path = Path(f"{docker_dir}/{workflow_name}/Dockerfile.{os}-{version}")
            # Fix: create the output directory so a fresh checkout doesn't
            # fail with FileNotFoundError on the first write.
            out_path.parent.mkdir(parents=True, exist_ok=True)
            with out_path.open("w") as fp:
                fp.write(Template(dockerfile).render(os=os, version=version))
    # (Dropped the dead `files` accumulator — it was built but never used.)
    print(f"Updated files in {docker_dir}")
def main(*sys_args):
    """CLI entry point: regenerate the Dockerfiles, then the workflow files.

    Always returns None, so ``sys.exit(main(...))`` exits with status 0.
    """
    args = parse_args(*sys_args)
    update_dockerfiles(args.config_file, args.template_dir, args.workflow_name, args.docker_dir)
    update_workflow(args.config_file, args.template_dir, args.workflow_name)
if __name__ == "__main__":
    # Fixed: this line was corrupted with dataset-row residue
    # ("| tests/gen.py | import sys") appended to it.
    sys.exit(main(sys.argv[1:]))
import argparse
from ruamel.yaml import YAML
from pathlib import Path
from jinja2 import Template
TEMPLATE_DIR = "templates"
DOCKER_DIR = "docker"
CONFIG_FILE = "cfg.yaml"
def parse_args(*args) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Update the workflow and dockerfiles for CI"
)
parser.add_argument(
"-c",
"--config-file",
dest="config_file",
type=str,
help="Input YAML file with configuration for generating workflow and docker files.",
default=CONFIG_FILE,
)
parser.add_argument(
"-d",
"--docker-dir",
dest="docker_dir",
type=str,
help="Directory to output supporting docker files.",
default=DOCKER_DIR,
)
parser.add_argument(
"-t",
"--template-dir",
dest="template_dir",
type=str,
help="Directory containing input workflow YAML templates.",
default=TEMPLATE_DIR,
)
parser.add_argument(
"-w",
"--workflow",
dest="workflow_name",
type=str,
choices=["draft", "prerelease"],
)
return parser.parse_args(*args)
def update_workflow(config_file: str, template_dir: str, workflow_name: str) -> None:
with Path(f"{config_file}").open() as fp:
yaml = YAML()
config = yaml.load(fp.read())
with Path(f"{template_dir}/{workflow_name}/workflow-job.tmpl").open() as fp:
job = fp.read()
with Path(f"{template_dir}/{workflow_name}/workflow-step-direct-ps.tmpl").open() as fp:
step_direct_ps = fp.read()
with Path(f"{template_dir}/{workflow_name}/workflow-step-direct-sh.tmpl").open() as fp:
step_direct_sh = fp.read()
with Path(f"{template_dir}/{workflow_name}/workflow-step-docker.tmpl").open() as fp:
step_docker = fp.read()
with Path(f"{template_dir}/{workflow_name}/workflow-header.yaml").open() as fp:
new_workflow = fp.read()
for os, data in config["jobs"].items():
jt = Template(job)
if data["docker"]:
# multiple steps per job
new_workflow = new_workflow + jt.render(os=os, runs_on="ubuntu-latest")
for version in data["versions"]:
st = Template(step_docker)
new_workflow += st.render(os=os, version=version)
else:
for version in data["versions"]:
runs_on = f"{os}-{version}"
new_workflow = new_workflow + jt.render(os=os, runs_on=runs_on)
st = Template(step_direct_ps if data.get("powershell") else step_direct_sh)
new_workflow += st.render(os=os, version=version)
if workflow_name == "draft":
# we have to merge it in
with Path("../.github/workflows/create-draft-release.yml").open() as fp:
existing_workflow_lines = fp.readlines()
for line in range(len(existing_workflow_lines)):
if "## @@@" in existing_workflow_lines[line]:
existing_workflow = "".join(existing_workflow_lines[:(line + 1)])
break
assert existing_workflow, "marker not found in create-draft-release?"
existing_workflow += new_workflow
with Path("../.github/workflows/create-draft-release.yml").open("w") as fp:
fp.write(existing_workflow)
elif workflow_name == "prerelease":
# we can replace it
with Path("../.github/workflows/check-pre-release.yml").open("w") as fp:
fp.write(new_workflow)
def update_dockerfiles(config_file: str, template_dir: str, workflow_name: str, docker_dir: str) -> None:
with Path(f"{config_file}").open() as fp:
yaml = YAML()
config = yaml.load(fp.read())
with Path(f"{template_dir}/{workflow_name}/Dockerfile.tmpl").open() as fp:
dockerfile = fp.read()
files = []
for os, data in config["jobs"].items():
if not data["docker"]:
continue
for version in data["versions"]:
dt = Template(dockerfile)
filename = f"{docker_dir}/{workflow_name}/Dockerfile.{os}-{version}"
with Path(filename).open("w") as fp:
fp.write(dt.render(os=os, version=version))
files += [filename]
print(f"Updated files in {docker_dir}")
def main(*sys_args):
args = parse_args(*sys_args)
update_dockerfiles(args.config_file, args.template_dir, args.workflow_name, args.docker_dir)
update_workflow(args.config_file, args.template_dir, args.workflow_name)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:])) | 0.260013 | 0.162712 |
import sys
from utilities import *
from ops import *
# Glob suffix for the per-crawl verbose re-identification result files.
VERBOSE = "*reidentification_verbose.json"
def dispatcher(indir):
    """Parse every crawl file matching ``VERBOSE`` under *indir* and
    generate all measurement plots.

    Three parallel sets of accumulators are kept: overall, Alexa-only and
    Twitter-only. A file feeds the Alexa/Twitter buckets when its name
    contains the respective substring, and always feeds the overall bucket.
    The ``get_*``/``plot_*`` helpers come from ``ops``/``utilities``; the
    #B1/#B2/#D6/#G10 tags are measurement IDs — presumably referring to
    the accompanying study's numbering; confirm against it.
    """
    token_locations = {}
    tpPerBrowser = {}; tpPerBrowser_perCrawl = {}
    alexa_tpPerBrowser = {}; alexa_tpPerBrowser_perCrawl = {}
    tw_tpPerBrowser = {}; tw_tpPerBrowser_perCrawl = {}
    tw_token_locations = {}
    alexa_token_locations = {}
    fpPerTp = {}
    tw_fpPerTp = {}
    alexa_fpPerTp = {}
    alexa_allTps = {}
    tw_allTps = {}
    allTps = {}
    # parse verbose data
    for file in glob.glob(indir + VERBOSE):
        data = readJson(file)
        date = getDateOfCrawl(file)
        browser = getBrowser(file)
        # NOTE(review): bucket selection is by substring of the path, so a
        # path containing both "alexa" and "twitter" would be counted in
        # both — confirm crawl file names are mutually exclusive.
        if "alexa" in file:
            alexa_tpPerBrowser = get_tpPerBrowser(data, browser, alexa_tpPerBrowser) #B1
            alexa_token_locations = get_tokenLocations(data, alexa_token_locations, browser) #G10
            alexa_fpPerTp = get_numOffirstpPerThirdp(data, browser, alexa_fpPerTp) #B2
            if date not in alexa_allTps: alexa_allTps[date] = []
            alexa_allTps[date].append(list(data.keys()))
            alexa_tpPerBrowser_perCrawl = get_tpPerBrowser_perc(data, browser, date, alexa_tpPerBrowser_perCrawl)
        if "twitter" in file:
            tw_tpPerBrowser = get_tpPerBrowser(data, browser, tw_tpPerBrowser) #B1
            tw_token_locations = get_tokenLocations(data, tw_token_locations, browser) #G10
            tw_fpPerTp = get_numOffirstpPerThirdp(data, browser, tw_fpPerTp) #B2
            if date not in tw_allTps: tw_allTps[date] = []
            tw_allTps[date].append(list(data.keys()))
            tw_tpPerBrowser_perCrawl = get_tpPerBrowser_perc(data, browser, date, tw_tpPerBrowser_perCrawl)
        # every file also feeds the overall accumulators
        if date not in allTps: allTps[date] = []
        allTps[date].append(list(data.keys()))
        token_locations = get_tokenLocations(data, token_locations, browser) #G10
        tpPerBrowser = get_tpPerBrowser(data, browser, tpPerBrowser) #B1
        fpPerTp = get_numOffirstpPerThirdp(data, browser, fpPerTp) #B2
        tpPerBrowser_perCrawl = get_tpPerBrowser_perc(data, browser, date, tpPerBrowser_perCrawl)
    # start plotting results
    plot_tpPerBrowser(tpPerBrowser, alexa_tpPerBrowser, tw_tpPerBrowser)
    plot_tpPerBrowser_perc(tpPerBrowser_perCrawl, alexa_tpPerBrowser_perCrawl, tw_tpPerBrowser_perCrawl,
                           allTps, alexa_allTps, tw_allTps) #B1
    plot_perCategories(indir + "content_categories/") #D6
    plot_tokenLocations(token_locations,"all") #G10
    plot_tokenLocations(tw_token_locations,"twitter") #G10
    plot_tokenLocations(alexa_token_locations,"alexa") #G10
    plot_numOffirstpPerThirdp(fpPerTp, "all") #B2
    plot_numOffirstpPerThirdp(tw_fpPerTp, "twitter") #B2
    plot_numOffirstpPerThirdp(alexa_fpPerTp, "alexa") #B2
# Script entry: require the crawl-results directory as the first argument.
if len(sys.argv) < 2:
    raise Exception("Error No input dir given")
indir = sys.argv[1]
creat_folderTree()  # NOTE: helper name has a typo ("creat") — defined in ops/utilities
# Fixed: this call was corrupted with dataset-row residue
# ("| measurements-scripts/measurements.py |") appended to it.
dispatcher(indir)
import sys
from utilities import *
from ops import *
VERBOSE = "*reidentification_verbose.json"
def dispatcher(indir):
token_locations = {}
tpPerBrowser = {}; tpPerBrowser_perCrawl = {}
alexa_tpPerBrowser = {}; alexa_tpPerBrowser_perCrawl = {}
tw_tpPerBrowser = {}; tw_tpPerBrowser_perCrawl = {}
tw_token_locations = {}
alexa_token_locations = {}
fpPerTp = {}
tw_fpPerTp = {}
alexa_fpPerTp = {}
alexa_allTps = {}
tw_allTps = {}
allTps = {}
# parse verbose data
for file in glob.glob(indir + VERBOSE):
data = readJson(file)
date = getDateOfCrawl(file)
browser = getBrowser(file)
if "alexa" in file:
alexa_tpPerBrowser = get_tpPerBrowser(data, browser, alexa_tpPerBrowser) #B1
alexa_token_locations = get_tokenLocations(data, alexa_token_locations, browser) #G10
alexa_fpPerTp = get_numOffirstpPerThirdp(data, browser, alexa_fpPerTp) #B2
if date not in alexa_allTps: alexa_allTps[date] = []
alexa_allTps[date].append(list(data.keys()))
alexa_tpPerBrowser_perCrawl = get_tpPerBrowser_perc(data, browser, date, alexa_tpPerBrowser_perCrawl)
if "twitter" in file:
tw_tpPerBrowser = get_tpPerBrowser(data, browser, tw_tpPerBrowser) #B1
tw_token_locations = get_tokenLocations(data, tw_token_locations, browser) #G10
tw_fpPerTp = get_numOffirstpPerThirdp(data, browser, tw_fpPerTp) #B2
if date not in tw_allTps: tw_allTps[date] = []
tw_allTps[date].append(list(data.keys()))
tw_tpPerBrowser_perCrawl = get_tpPerBrowser_perc(data, browser, date, tw_tpPerBrowser_perCrawl)
if date not in allTps: allTps[date] = []
allTps[date].append(list(data.keys()))
token_locations = get_tokenLocations(data, token_locations, browser) #G10
tpPerBrowser = get_tpPerBrowser(data, browser, tpPerBrowser) #B1
fpPerTp = get_numOffirstpPerThirdp(data, browser, fpPerTp) #B2
tpPerBrowser_perCrawl = get_tpPerBrowser_perc(data, browser, date, tpPerBrowser_perCrawl)
#start plotting results
plot_tpPerBrowser(tpPerBrowser, alexa_tpPerBrowser, tw_tpPerBrowser)
plot_tpPerBrowser_perc(tpPerBrowser_perCrawl, alexa_tpPerBrowser_perCrawl, tw_tpPerBrowser_perCrawl,
allTps, alexa_allTps, tw_allTps) #B1
plot_perCategories(indir + "content_categories/") #D6
plot_tokenLocations(token_locations,"all") #G10
plot_tokenLocations(tw_token_locations,"twitter") #G10
plot_tokenLocations(alexa_token_locations,"alexa") #G10
plot_numOffirstpPerThirdp(fpPerTp, "all") #B2
plot_numOffirstpPerThirdp(tw_fpPerTp, "twitter") #B2
plot_numOffirstpPerThirdp(alexa_fpPerTp, "alexa") #B2
if len(sys.argv) < 2: raise Exception("Error No input dir given")
indir = sys.argv[1]
creat_folderTree()
dispatcher(indir) | 0.104323 | 0.078466 |
import pytest
import math
from timeit import timeit
import time
from astropy import units as u
from pocs.filterwheel.simulator import FilterWheel as SimFilterWheel
from pocs.camera.simulator import Camera as SimCamera
from pocs.utils import error
@pytest.fixture(scope='module')
def filterwheel():
    """Module-scoped simulated filter wheel shared by the tests below."""
    return SimFilterWheel(
        filter_names=['one', 'deux', 'drei', 'quattro'],
        move_time=0.1 * u.second,
        timeout=0.5 * u.second,
    )
# initialisation
def test_init(filterwheel):
    """The fixture yields a connected SimFilterWheel."""
    assert isinstance(filterwheel, SimFilterWheel)
    assert filterwheel.is_connected
def test_camera_init():
    """A camera created with a filterwheel config owns a connected wheel
    whose back-reference points at the camera."""
    sim_camera = SimCamera(filterwheel={'model': 'simulator',
                                        'filter_names': ['one', 'deux', 'drei', 'quattro']})
    assert isinstance(sim_camera.filterwheel, SimFilterWheel)
    assert sim_camera.filterwheel.is_connected
    assert sim_camera.filterwheel.uid
    assert sim_camera.filterwheel.camera is sim_camera
def test_camera_no_filterwheel():
    """Without a filterwheel config the camera's wheel is None."""
    sim_camera = SimCamera()
    assert sim_camera.filterwheel is None
def test_camera_association_on_init():
    """Passing camera= at wheel construction sets the back-reference."""
    sim_camera = SimCamera()
    sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
                                     camera=sim_camera)
    assert sim_filterwheel.camera is sim_camera
def test_with_no_name():
    """Constructing a filter wheel without filter names must fail."""
    with pytest.raises(ValueError):
        SimFilterWheel()
# Basic property getting and (not) setting
def test_model(filterwheel):
    """model is readable and read-only."""
    model = filterwheel.model
    assert model == 'simulator'
    with pytest.raises(AttributeError):
        filterwheel.model = "Airfix"
def test_name(filterwheel):
    """name is readable and read-only."""
    name = filterwheel.name
    assert name == 'Simulated Filter Wheel'
    with pytest.raises(AttributeError):
        filterwheel.name = "Phillip"
def test_uid(filterwheel):
    """uid has the simulator's 'SWxxxx' shape and is read-only."""
    uid = filterwheel.uid
    assert uid.startswith('SW')
    assert len(uid) == 6
    with pytest.raises(AttributeError):
        filterwheel.uid = "Can't touch this"
def test_filter_names(filterwheel):
    """filter_names is a read-only list of strings."""
    names = filterwheel.filter_names
    assert isinstance(names, list)
    for name in names:
        assert isinstance(name, str)
    with pytest.raises(AttributeError):
        filterwheel.filter_names = ["Unsharp mask", "Gaussian blur"]
# Movement
def test_move_number(filterwheel):
    """Moving by position number: async (event), blocking, and assignment."""
    assert filterwheel.position == 1
    e = filterwheel.move_to(2)
    assert math.isnan(filterwheel.position)  # position is NaN while between filters
    e.wait()
    assert filterwheel.position == 2
    e = filterwheel.move_to(3, blocking=True)
    assert e.is_set()
    assert filterwheel.position == 3
    filterwheel.position = 4  # Move by assignment to position property blocks until complete
    assert filterwheel.position == 4
def test_move_bad_number(filterwheel):
    """Positions are 1-based and bounded by _n_positions."""
    with pytest.raises(ValueError):
        filterwheel.move_to(0, blocking=True)  # No zero based numbering here!
    with pytest.raises(ValueError):
        filterwheel.move_to(-1, blocking=True)  # Definitely not
    with pytest.raises(ValueError):
        filterwheel.position = 99  # Problems.
    with pytest.raises(ValueError):
        filterwheel.move_to(filterwheel._n_positions + 1, blocking=True)  # Close, but...
    filterwheel.move_to(filterwheel._n_positions, blocking=True)  # OK
def test_move_name(filterwheel, caplog):
    """Moving by filter name, including prefix matching and null moves."""
    filterwheel.position = 1  # Start from a known position
    e = filterwheel.move_to('quattro')
    assert filterwheel.current_filter == 'UNKNOWN'  # I'm between filters right now
    e.wait()
    assert filterwheel.current_filter == 'quattro'
    e = filterwheel.move_to('o', blocking=True)  # Matches leading substrings too
    assert filterwheel.current_filter == 'one'
    filterwheel.position = 'd'  # In case of multiple matches logs a warning & uses the first match
    assert filterwheel.current_filter == 'deux'
    # WARNING followed by INFO level record about the move
    assert caplog.records[-2].levelname == 'WARNING'
    assert caplog.records[-1].levelname == 'INFO'
    filterwheel.position = 'deux'  # Check null move. Earlier version of simulator failed this!
    assert filterwheel.current_filter == 'deux'
def test_move_bad_name(filterwheel):
    """Moving to a name that matches no filter raises ValueError."""
    with pytest.raises(ValueError):
        filterwheel.move_to('cinco')
def test_move_timeout(caplog):
    """A move that takes longer than the configured timeout logs an ERROR."""
    slow_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
                                      move_time=0.1,
                                      timeout=0.2)
    slow_filterwheel.position = 4  # Move should take 0.3 seconds, more than timeout.
    time.sleep(0.001)  # For some reason takes a moment for the error to get logged.
    assert caplog.records[-1].levelname == 'ERROR'  # Should have logged an ERROR by now
    # It raises a pocs.utils.error.Timeout exception too, but because it's in another
    # Thread it doesn't get passed up to the calling code.
@pytest.mark.parametrize("name,bidirectional, expected",
                         [("monodirectional", False, 0.3),
                          ("bidirectional", True, 0.1)])
def test_move_times(name, bidirectional, expected):
    """Move duration scales with the filter distance; a bidirectional wheel
    takes the short way around (4 -> 3 is one step, not three)."""
    sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
                                     move_time=0.1 * u.second,
                                     move_bidirectional=bidirectional,
                                     timeout=0.5 * u.second)
    sim_filterwheel.position = 1
    assert timeit("sim_filterwheel.position = 2", number=1, globals=locals()) == \
        pytest.approx(0.1, rel=4e-2)
    assert timeit("sim_filterwheel.position = 4", number=1, globals=locals()) == \
        pytest.approx(0.2, rel=4e-2)
    assert timeit("sim_filterwheel.position = 3", number=1, globals=locals()) == \
        pytest.approx(expected, rel=4e-2)
def test_move_exposing(tmpdir, caplog):
    """Moving the wheel while the camera is exposing raises and is refused."""
    sim_camera = SimCamera(filterwheel={'model': 'simulator',
                                        'filter_names': ['one', 'deux', 'drei', 'quattro']})
    fits_path = str(tmpdir.join('test_exposure.fits'))
    exp_event = sim_camera.take_exposure(filename=fits_path, seconds=0.1)
    with pytest.raises(error.PanError):
        sim_camera.filterwheel.move_to(2, blocking=True)  # Attempt to move while camera is exposing
    assert caplog.records[-1].levelname == 'ERROR'
    assert sim_camera.filterwheel.position == 1  # Should not have moved
    exp_event.wait()
def test_is_moving(filterwheel):
    """is_moving is True only between move start and completion."""
    filterwheel.position = 1
    assert not filterwheel.is_moving
    e = filterwheel.move_to(2)
    assert filterwheel.is_moving
    e.wait()
    # Fixed: the final assertion was corrupted with dataset-row residue
    # ("| pocs/tests/test_filterwheel.py | import pytest") appended to it.
    assert not filterwheel.is_moving
import math
from timeit import timeit
import time
from astropy import units as u
from pocs.filterwheel.simulator import FilterWheel as SimFilterWheel
from pocs.camera.simulator import Camera as SimCamera
from pocs.utils import error
@pytest.fixture(scope='module')
def filterwheel():
sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
move_time=0.1 * u.second,
timeout=0.5 * u.second)
return sim_filterwheel
# intialisation
def test_init(filterwheel):
assert isinstance(filterwheel, SimFilterWheel)
assert filterwheel.is_connected
def test_camera_init():
sim_camera = SimCamera(filterwheel={'model': 'simulator',
'filter_names': ['one', 'deux', 'drei', 'quattro']})
assert isinstance(sim_camera.filterwheel, SimFilterWheel)
assert sim_camera.filterwheel.is_connected
assert sim_camera.filterwheel.uid
assert sim_camera.filterwheel.camera is sim_camera
def test_camera_no_filterwheel():
sim_camera = SimCamera()
assert sim_camera.filterwheel is None
def test_camera_association_on_init():
sim_camera = SimCamera()
sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
camera=sim_camera)
assert sim_filterwheel.camera is sim_camera
def test_with_no_name():
with pytest.raises(ValueError):
sim_filterwheel = SimFilterWheel()
# Basic property getting and (not) setting
def test_model(filterwheel):
model = filterwheel.model
assert model == 'simulator'
with pytest.raises(AttributeError):
filterwheel.model = "Airfix"
def test_name(filterwheel):
name = filterwheel.name
assert name == 'Simulated Filter Wheel'
with pytest.raises(AttributeError):
filterwheel.name = "Phillip"
def test_uid(filterwheel):
uid = filterwheel.uid
assert uid.startswith('SW')
assert len(uid) == 6
with pytest.raises(AttributeError):
filterwheel.uid = "Can't touch this"
def test_filter_names(filterwheel):
names = filterwheel.filter_names
assert isinstance(names, list)
for name in names:
assert isinstance(name, str)
with pytest.raises(AttributeError):
filterwheel.filter_names = ["Unsharp mask", "Gaussian blur"]
# Movement
def test_move_number(filterwheel):
assert filterwheel.position == 1
e = filterwheel.move_to(2)
assert math.isnan(filterwheel.position) # position is NaN while between filters
e.wait()
assert filterwheel.position == 2
e = filterwheel.move_to(3, blocking=True)
assert e.is_set()
assert filterwheel.position == 3
filterwheel.position = 4 # Move by assignment to position property blocks until complete
assert filterwheel.position == 4
def test_move_bad_number(filterwheel):
with pytest.raises(ValueError):
filterwheel.move_to(0, blocking=True) # No zero based numbering here!
with pytest.raises(ValueError):
filterwheel.move_to(-1, blocking=True) # Definitely not
with pytest.raises(ValueError):
filterwheel.position = 99 # Problems.
with pytest.raises(ValueError):
filterwheel.move_to(filterwheel._n_positions + 1, blocking=True) # Close, but...
filterwheel.move_to(filterwheel._n_positions, blocking=True) # OK
def test_move_name(filterwheel, caplog):
filterwheel.position = 1 # Start from a known position
e = filterwheel.move_to('quattro')
assert filterwheel.current_filter == 'UNKNOWN' # I'm between filters right now
e.wait()
assert filterwheel.current_filter == 'quattro'
e = filterwheel.move_to('o', blocking=True) # Matches leading substrings too
assert filterwheel.current_filter == 'one'
filterwheel.position = 'd' # In case of multiple matches logs a warning & uses the first match
assert filterwheel.current_filter == 'deux'
# WARNING followed by INFO level record about the move
assert caplog.records[-2].levelname == 'WARNING'
assert caplog.records[-1].levelname == 'INFO'
filterwheel.position = 'deux' # Check null move. Earlier version of simulator failed this!
assert filterwheel.current_filter == 'deux'
def test_move_bad_name(filterwheel):
with pytest.raises(ValueError):
filterwheel.move_to('cinco')
def test_move_timeout(caplog):
slow_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
move_time=0.1,
timeout=0.2)
slow_filterwheel.position = 4 # Move should take 0.3 seconds, more than timeout.
time.sleep(0.001) # For some reason takes a moment for the error to get logged.
assert caplog.records[-1].levelname == 'ERROR' # Should have logged an ERROR by now
# It raises a pocs.utils.error.Timeout exception too, but because it's in another Thread it
# doesn't get passes up to the calling code.
@pytest.mark.parametrize("name,bidirectional, expected",
[("monodirectional", False, 0.3),
("bidirectional", True, 0.1)])
def test_move_times(name, bidirectional, expected):
sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],
move_time=0.1 * u.second,
move_bidirectional=bidirectional,
timeout=0.5 * u.second)
sim_filterwheel.position = 1
assert timeit("sim_filterwheel.position = 2", number=1, globals=locals()) == \
pytest.approx(0.1, rel=4e-2)
assert timeit("sim_filterwheel.position = 4", number=1, globals=locals()) == \
pytest.approx(0.2, rel=4e-2)
assert timeit("sim_filterwheel.position = 3", number=1, globals=locals()) == \
pytest.approx(expected, rel=4e-2)
def test_move_exposing(tmpdir, caplog):
sim_camera = SimCamera(filterwheel={'model': 'simulator',
'filter_names': ['one', 'deux', 'drei', 'quattro']})
fits_path = str(tmpdir.join('test_exposure.fits'))
exp_event = sim_camera.take_exposure(filename=fits_path, seconds=0.1)
with pytest.raises(error.PanError):
sim_camera.filterwheel.move_to(2, blocking=True) # Attempt to move while camera is exposing
assert caplog.records[-1].levelname == 'ERROR'
assert sim_camera.filterwheel.position == 1 # Should not have moved
exp_event.wait()
def test_is_moving(filterwheel):
filterwheel.position = 1
assert not filterwheel.is_moving
e = filterwheel.move_to(2)
assert filterwheel.is_moving
e.wait()
assert not filterwheel.is_moving | 0.67662 | 0.683756 |
from collections import defaultdict
from typing import List, Set, Dict
from overrides import overrides
import torch
from allennlp.nn.decoding import DecoderState, DecoderStep
class SimpleDecoderState(DecoderState['SimpleDecoderState']):
    """Minimal DecoderState for testing: each "action" appends an integer to
    the history, and decoding is finished once the last element reaches 4."""

    def __init__(self,
                 batch_indices: List[int],
                 action_history: List[List[int]],
                 score: List[torch.autograd.Variable],
                 start_values: List[int] = None) -> None:
        super().__init__(batch_indices, action_history, score)
        # Per-batch-element starting value; defaults to 0 for every element.
        self.start_values = start_values or [0] * len(batch_indices)

    def is_finished(self) -> bool:
        # Only inspects the first (assumed single) batch element's history;
        # 4 is the hard-coded terminal value for these tests.
        return self.action_history[0][-1] == 4

    @classmethod
    def combine_states(cls, states) -> 'SimpleDecoderState':
        """Flatten several states into one grouped state, concatenating the
        per-element lists in order."""
        batch_indices = [batch_index for state in states for batch_index in state.batch_indices]
        action_histories = [action_history for state in states for action_history in
                            state.action_history]
        scores = [score for state in states for score in state.score]
        start_values = [start_value for state in states for start_value in state.start_values]
        return SimpleDecoderState(batch_indices, action_histories, scores, start_values)

    def __repr__(self):
        return f"{self.action_history}"
class SimpleDecoderStep(DecoderStep[SimpleDecoderState]):
    """Toy transition function: from the last number emitted, the next
    candidates are ``last + a`` for each valid action ``a``."""

    def __init__(self,
                 valid_actions: Set[int] = None,
                 include_value_in_score: bool = False):
        # The default allowed actions are adding 1 or 2 to the last element.
        self._valid_actions = valid_actions or {1, 2}
        # If True, we will add a small multiple of the action taken to the score, to
        # encourage getting higher numbers first (and to differentiate action sequences).
        self._include_value_in_score = include_value_in_score

    @overrides
    def take_step(self,
                  state: SimpleDecoderState,
                  max_actions: int = None,
                  allowed_actions: List[Set] = None) -> List[SimpleDecoderState]:
        """Expand *state* by one action, group successors per batch instance,
        sort them best-first, and truncate to *max_actions* per instance."""
        indexed_next_states: Dict[int, List[SimpleDecoderState]] = defaultdict(list)
        if not allowed_actions:
            allowed_actions = [None] * len(state.batch_indices)
        for batch_index, action_history, score, start_value, actions in zip(state.batch_indices,
                                                                            state.action_history,
                                                                            state.score,
                                                                            state.start_values,
                                                                            allowed_actions):
            prev_action = action_history[-1] if action_history else start_value
            for action in self._valid_actions:
                next_item = int(prev_action + action)
                if actions and next_item not in actions:
                    continue
                new_history = action_history + [next_item]
                # For every action taken, we reduce the score by 1.
                new_score = score - 1
                if self._include_value_in_score:
                    new_score += 0.01 * next_item
                new_state = SimpleDecoderState([batch_index],
                                               [new_history],
                                               [new_score])
                indexed_next_states[batch_index].append(new_state)
        next_states: List[SimpleDecoderState] = []
        for batch_next_states in indexed_next_states.values():
            # Sort best (highest) score first: scores are negated so a plain
            # ascending sort does the job.
            sorted_next_states = [(-state.score[0].data[0], state) for state in batch_next_states]
            sorted_next_states.sort(key=lambda x: x[0])
            if max_actions is not None:
                sorted_next_states = sorted_next_states[:max_actions]
            next_states.extend(state[1] for state in sorted_next_states)
        # Fixed: the return line was corrupted with dataset-row residue
        # ("| tests/nn/decoding/simple_transition_system.py | ...") appended to it.
        return next_states
from typing import List, Set, Dict
from overrides import overrides
import torch
from allennlp.nn.decoding import DecoderState, DecoderStep
class SimpleDecoderState(DecoderState['SimpleDecoderState']):
def __init__(self,
batch_indices: List[int],
action_history: List[List[int]],
score: List[torch.autograd.Variable],
start_values: List[int] = None) -> None:
super().__init__(batch_indices, action_history, score)
self.start_values = start_values or [0] * len(batch_indices)
def is_finished(self) -> bool:
return self.action_history[0][-1] == 4
@classmethod
def combine_states(cls, states) -> 'SimpleDecoderState':
batch_indices = [batch_index for state in states for batch_index in state.batch_indices]
action_histories = [action_history for state in states for action_history in
state.action_history]
scores = [score for state in states for score in state.score]
start_values = [start_value for state in states for start_value in state.start_values]
return SimpleDecoderState(batch_indices, action_histories, scores, start_values)
def __repr__(self):
return f"{self.action_history}"
class SimpleDecoderStep(DecoderStep[SimpleDecoderState]):
def __init__(self,
valid_actions: Set[int] = None,
include_value_in_score: bool = False):
# The default allowed actions are adding 1 or 2 to the last element.
self._valid_actions = valid_actions or {1, 2}
# If True, we will add a small multiple of the action take to the score, to encourage
# getting higher numbers first (and to differentiate action sequences).
self._include_value_in_score = include_value_in_score
@overrides
def take_step(self,
state: SimpleDecoderState,
max_actions: int = None,
allowed_actions: List[Set] = None) -> List[SimpleDecoderState]:
indexed_next_states: Dict[int, List[SimpleDecoderState]] = defaultdict(list)
if not allowed_actions:
allowed_actions = [None] * len(state.batch_indices)
for batch_index, action_history, score, start_value, actions in zip(state.batch_indices,
state.action_history,
state.score,
state.start_values,
allowed_actions):
prev_action = action_history[-1] if action_history else start_value
for action in self._valid_actions:
next_item = int(prev_action + action)
if actions and next_item not in actions:
continue
new_history = action_history + [next_item]
# For every action taken, we reduce the score by 1.
new_score = score - 1
if self._include_value_in_score:
new_score += 0.01 * next_item
new_state = SimpleDecoderState([batch_index],
[new_history],
[new_score])
indexed_next_states[batch_index].append(new_state)
next_states: List[SimpleDecoderState] = []
for batch_next_states in indexed_next_states.values():
sorted_next_states = [(-state.score[0].data[0], state) for state in batch_next_states]
sorted_next_states.sort(key=lambda x: x[0])
if max_actions is not None:
sorted_next_states = sorted_next_states[:max_actions]
next_states.extend(state[1] for state in sorted_next_states)
return next_states | 0.876555 | 0.529932 |
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from ydb.public.api.protos import ydb_operation_pb2 as ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2
class OperationServiceStub(object):
"""All rpc calls to YDB are allowed to be asynchronous. Response message
of an rpc call contains Operation structure and OperationService
is used for polling operation completion.
Operation has a field 'ready' to notify client if operation has been
completed or not. If result is ready a client has to handle 'result' field,
otherwise it is expected that client continues polling result via
GetOperation rpc of OperationService. Polling is made via unique
operation id provided in 'id' field of Operation.
Note: Currently some operations have synchronous implementation and their result
is available when response is obtained. But a client must not make any
assumptions about synchronous or asynchronous nature of any operation and
be ready to poll operation status.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetOperation = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/GetOperation',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationResponse.FromString,
)
self.CancelOperation = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/CancelOperation',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationResponse.FromString,
)
self.ForgetOperation = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/ForgetOperation',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationResponse.FromString,
)
self.ListOperations = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/ListOperations',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsResponse.FromString,
)
class OperationServiceServicer(object):
"""All rpc calls to YDB are allowed to be asynchronous. Response message
of an rpc call contains Operation structure and OperationService
is used for polling operation completion.
Operation has a field 'ready' to notify client if operation has been
completed or not. If result is ready a client has to handle 'result' field,
otherwise it is expected that client continues polling result via
GetOperation rpc of OperationService. Polling is made via unique
operation id provided in 'id' field of Operation.
Note: Currently some operations have synchronous implementation and their result
is available when response is obtained. But a client must not make any
assumptions about synchronous or asynchronous nature of any operation and
be ready to poll operation status.
"""
def GetOperation(self, request, context):
"""Check status for a given operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Starts cancellation of a long-running operation,
Clients can use GetOperation to check whether the cancellation succeeded
or whether the operation completed despite cancellation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ForgetOperation(self, request, context):
"""Forgets long-running operation. It does not cancel the operation and returns
an error if operation was not completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""Lists operations that match the specified filter in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OperationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationResponse.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationResponse.SerializeToString,
),
'ForgetOperation': grpc.unary_unary_rpc_method_handler(
servicer.ForgetOperation,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationResponse.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Ydb.Operation.V1.OperationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OperationService(object):
"""All rpc calls to YDB are allowed to be asynchronous. Response message
of an rpc call contains Operation structure and OperationService
is used for polling operation completion.
Operation has a field 'ready' to notify client if operation has been
completed or not. If result is ready a client has to handle 'result' field,
otherwise it is expected that client continues polling result via
GetOperation rpc of OperationService. Polling is made via unique
operation id provided in 'id' field of Operation.
Note: Currently some operations have synchronous implementation and their result
is available when response is obtained. But a client must not make any
assumptions about synchronous or asynchronous nature of any operation and
be ready to poll operation status.
"""
@staticmethod
def GetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/GetOperation',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CancelOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/CancelOperation',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ForgetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/ForgetOperation',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/ListOperations',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | ydb/public/api/grpc/ydb_operation_v1_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
from ydb.public.api.protos import ydb_operation_pb2 as ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2
class OperationServiceStub(object):
"""All rpc calls to YDB are allowed to be asynchronous. Response message
of an rpc call contains Operation structure and OperationService
is used for polling operation completion.
Operation has a field 'ready' to notify client if operation has been
completed or not. If result is ready a client has to handle 'result' field,
otherwise it is expected that client continues polling result via
GetOperation rpc of OperationService. Polling is made via unique
operation id provided in 'id' field of Operation.
Note: Currently some operations have synchronous implementation and their result
is available when response is obtained. But a client must not make any
assumptions about synchronous or asynchronous nature of any operation and
be ready to poll operation status.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetOperation = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/GetOperation',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationResponse.FromString,
)
self.CancelOperation = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/CancelOperation',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationResponse.FromString,
)
self.ForgetOperation = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/ForgetOperation',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationResponse.FromString,
)
self.ListOperations = channel.unary_unary(
'/Ydb.Operation.V1.OperationService/ListOperations',
request_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsResponse.FromString,
)
class OperationServiceServicer(object):
"""All rpc calls to YDB are allowed to be asynchronous. Response message
of an rpc call contains Operation structure and OperationService
is used for polling operation completion.
Operation has a field 'ready' to notify client if operation has been
completed or not. If result is ready a client has to handle 'result' field,
otherwise it is expected that client continues polling result via
GetOperation rpc of OperationService. Polling is made via unique
operation id provided in 'id' field of Operation.
Note: Currently some operations have synchronous implementation and their result
is available when response is obtained. But a client must not make any
assumptions about synchronous or asynchronous nature of any operation and
be ready to poll operation status.
"""
def GetOperation(self, request, context):
"""Check status for a given operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Starts cancellation of a long-running operation,
Clients can use GetOperation to check whether the cancellation succeeded
or whether the operation completed despite cancellation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ForgetOperation(self, request, context):
"""Forgets long-running operation. It does not cancel the operation and returns
an error if operation was not completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""Lists operations that match the specified filter in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OperationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationResponse.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationResponse.SerializeToString,
),
'ForgetOperation': grpc.unary_unary_rpc_method_handler(
servicer.ForgetOperation,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationResponse.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsRequest.FromString,
response_serializer=ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Ydb.Operation.V1.OperationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OperationService(object):
"""All rpc calls to YDB are allowed to be asynchronous. Response message
of an rpc call contains Operation structure and OperationService
is used for polling operation completion.
Operation has a field 'ready' to notify client if operation has been
completed or not. If result is ready a client has to handle 'result' field,
otherwise it is expected that client continues polling result via
GetOperation rpc of OperationService. Polling is made via unique
operation id provided in 'id' field of Operation.
Note: Currently some operations have synchronous implementation and their result
is available when response is obtained. But a client must not make any
assumptions about synchronous or asynchronous nature of any operation and
be ready to poll operation status.
"""
@staticmethod
def GetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/GetOperation',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.GetOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CancelOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/CancelOperation',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.CancelOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ForgetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/ForgetOperation',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ForgetOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Ydb.Operation.V1.OperationService/ListOperations',
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsRequest.SerializeToString,
ydb_dot_public_dot_api_dot_protos_dot_ydb__operation__pb2.ListOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | 0.727007 | 0.130784 |
from objects.modulebase import ModuleBase
from objects.permissions import PermissionEmbedLinks
from objects.paginators import Paginator
from discord import Embed, Colour
import aiohttp
import random
import json
from bs4 import BeautifulSoup
BASE_URL = 'https://www.google.com/search?'
USERAGENTS = (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'
)
class Module(ModuleBase):
usage_doc = '{prefix}{aliases} <query>'
short_doc = 'Search for images on google'
name = 'img'
aliases = (name, 'image')
category = 'Services'
bot_perms = (PermissionEmbedLinks(), )
min_args = 1
ratelimit = (1, 5)
async def on_call(self, ctx, args, **flags):
query = args[1:]
params = {
'q': query,
'tbm': 'isch',
'safe': 'off' if ctx.is_nsfw else 'strict'
}
headers = {'User-Agent': random.choice(USERAGENTS)}
proxy = random.choice([None] + list(self.bot.proxies.keys()))
async with self.bot.sess.get(BASE_URL, params=params, headers=headers, proxy=proxy) as r:
if r.status != 200:
return await ctx.error(f'Request failed: {r.status}')
soup = await self.bot.loop.run_in_executor(
None, BeautifulSoup, await r.read(), 'lxml')
elements = soup.find_all('div', class_= 'rg_meta')
metas = [json.loads(e.text) for e in elements]
images = [f'{m["ou"]}' for m in metas if m['ity'] != 'svg']
if len(images) == 0:
return await ctx.warn('No results found')
p = Paginator(self.bot)
def make_embed(page, url):
e = Embed(colour=Colour.gold(), title=query[:128], url=url)
e.set_image(url=url)
e.set_footer(
text=f'Page {page} / {len(images)}',
icon_url=ctx.author.avatar_url
)
return e
for i, url in enumerate(images):
p.add_page(embed=make_embed(i + 1, url))
await p.run(ctx) | modules/apis/module_img.py | from objects.modulebase import ModuleBase
from objects.permissions import PermissionEmbedLinks
from objects.paginators import Paginator
from discord import Embed, Colour
import aiohttp
import random
import json
from bs4 import BeautifulSoup
BASE_URL = 'https://www.google.com/search?'
USERAGENTS = (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'
)
class Module(ModuleBase):
usage_doc = '{prefix}{aliases} <query>'
short_doc = 'Search for images on google'
name = 'img'
aliases = (name, 'image')
category = 'Services'
bot_perms = (PermissionEmbedLinks(), )
min_args = 1
ratelimit = (1, 5)
async def on_call(self, ctx, args, **flags):
query = args[1:]
params = {
'q': query,
'tbm': 'isch',
'safe': 'off' if ctx.is_nsfw else 'strict'
}
headers = {'User-Agent': random.choice(USERAGENTS)}
proxy = random.choice([None] + list(self.bot.proxies.keys()))
async with self.bot.sess.get(BASE_URL, params=params, headers=headers, proxy=proxy) as r:
if r.status != 200:
return await ctx.error(f'Request failed: {r.status}')
soup = await self.bot.loop.run_in_executor(
None, BeautifulSoup, await r.read(), 'lxml')
elements = soup.find_all('div', class_= 'rg_meta')
metas = [json.loads(e.text) for e in elements]
images = [f'{m["ou"]}' for m in metas if m['ity'] != 'svg']
if len(images) == 0:
return await ctx.warn('No results found')
p = Paginator(self.bot)
def make_embed(page, url):
e = Embed(colour=Colour.gold(), title=query[:128], url=url)
e.set_image(url=url)
e.set_footer(
text=f'Page {page} / {len(images)}',
icon_url=ctx.author.avatar_url
)
return e
for i, url in enumerate(images):
p.add_page(embed=make_embed(i + 1, url))
await p.run(ctx) | 0.343122 | 0.151122 |
from string import Template
import models
def index(app, environ, start_response):
# ...
return app.render('index.html',start_response=start_response)
def stat(app, environ, start_response):
tmplRow = '''
<tr>
<td style="text-align: center;">${region_id}</td>
<td style="text-align: center;">
<a target="_blank" href="/stat/region/${region_id}">
${region_name}
</a>
</td>
<td style="text-align: center;">${cnt}</td>
</tr>
'''
query = """
SELECT
Region.id as region_id,
Region.name as region_name,
count(*) as cnt
FROM
Feedback
JOIN City ON City.id = Feedback.city_id
JOIN Region ON Region.id = City.region_id
GROUP BY
Region.id
HAVING count(*) > 5
ORDER BY count(*) ASC
"""
cursor = app.dbpool.cursor()
cursor.execute(query)
records = cursor.fetchall()
records = [{
'region_id': item[0],
'region_name': item[1],
'cnt': item[2],
} for item in records]
# print(records)
tmpl = Template(tmplRow)
tbody = []
for record in records:
tbody.append(tmpl.substitute({
'region_id':record['region_id'],
'region_name':record['region_name'].encode('utf-8'),
'cnt':record['cnt'],
}))
# ..
context = {
'tbody': ''.join(tbody),
}
return app.render('stat.html', context=context, start_response=start_response)
def stat_region(app, environ, start_response):
# ..
(region_id, ) = environ['app.url_args']
# ..
cursor = app.dbpool.cursor()
# ..
region = models.Region(cursor)
region = region.get(id=region_id)
region = region.as_dict()
# ..
tmplRow = '''
<tr>
<td style="text-align: center;">${city_id}</td>
<td style="text-align: center;"> ${city_name}</td>
<td style="text-align: center;">${cnt}</td>
</tr>
'''
query = """
SELECT
City.id as city_id,
City.name as city_name,
count(*) as cnt
FROM
Feedback
JOIN City ON City.id = Feedback.city_id
WHERE
City.region_id = {region_id}
GROUP BY
City.id
-- HAVING count(*) > 5
ORDER BY count(*) ASC
""".format(region_id=region_id)
# ..
cursor.execute(query)
records = cursor.fetchall()
records = [{
'city_id': item[0],
'city_name': item[1].encode('utf-8'),
'cnt': item[2],
} for item in records]
# print(records)
tmpl = Template(tmplRow)
tbody = [tmpl.substitute(record) for record in records]
# ...
context = {
'region_name':region['name'].encode('utf-8'),
'tbody':''.join(tbody),
}
return app.render('stat_region.html',context=context, start_response=start_response) | images/frontface/src/views.py | from string import Template
import models
def index(app, environ, start_response):
# ...
return app.render('index.html',start_response=start_response)
def stat(app, environ, start_response):
tmplRow = '''
<tr>
<td style="text-align: center;">${region_id}</td>
<td style="text-align: center;">
<a target="_blank" href="/stat/region/${region_id}">
${region_name}
</a>
</td>
<td style="text-align: center;">${cnt}</td>
</tr>
'''
query = """
SELECT
Region.id as region_id,
Region.name as region_name,
count(*) as cnt
FROM
Feedback
JOIN City ON City.id = Feedback.city_id
JOIN Region ON Region.id = City.region_id
GROUP BY
Region.id
HAVING count(*) > 5
ORDER BY count(*) ASC
"""
cursor = app.dbpool.cursor()
cursor.execute(query)
records = cursor.fetchall()
records = [{
'region_id': item[0],
'region_name': item[1],
'cnt': item[2],
} for item in records]
# print(records)
tmpl = Template(tmplRow)
tbody = []
for record in records:
tbody.append(tmpl.substitute({
'region_id':record['region_id'],
'region_name':record['region_name'].encode('utf-8'),
'cnt':record['cnt'],
}))
# ..
context = {
'tbody': ''.join(tbody),
}
return app.render('stat.html', context=context, start_response=start_response)
def stat_region(app, environ, start_response):
# ..
(region_id, ) = environ['app.url_args']
# ..
cursor = app.dbpool.cursor()
# ..
region = models.Region(cursor)
region = region.get(id=region_id)
region = region.as_dict()
# ..
tmplRow = '''
<tr>
<td style="text-align: center;">${city_id}</td>
<td style="text-align: center;"> ${city_name}</td>
<td style="text-align: center;">${cnt}</td>
</tr>
'''
query = """
SELECT
City.id as city_id,
City.name as city_name,
count(*) as cnt
FROM
Feedback
JOIN City ON City.id = Feedback.city_id
WHERE
City.region_id = {region_id}
GROUP BY
City.id
-- HAVING count(*) > 5
ORDER BY count(*) ASC
""".format(region_id=region_id)
# ..
cursor.execute(query)
records = cursor.fetchall()
records = [{
'city_id': item[0],
'city_name': item[1].encode('utf-8'),
'cnt': item[2],
} for item in records]
# print(records)
tmpl = Template(tmplRow)
tbody = [tmpl.substitute(record) for record in records]
# ...
context = {
'region_name':region['name'].encode('utf-8'),
'tbody':''.join(tbody),
}
return app.render('stat_region.html',context=context, start_response=start_response) | 0.149004 | 0.140661 |
from reactors.runtime import Reactor
import datetime
import simplejson as json
import os
import requests
import shutil
import time
def slack_notify(message, reactor):
if reactor.settings.get('workflow', {}).get('notify', True):
try:
reactor.client.actors.sendMessage(
actorId=reactor.settings.links.slackbot,
body={
'message': '{0}: {1}'.format(reactor.actor_name, message)
})
except Exception as exc:
reactor.logger.warn(
'Failed to send Slack notification from {0}: {0}'.format(
exc, reactor.actor_name))
else:
reactor.logger.info(
'Skipped sending Slack notification from {0}'.format(
reactor.actor_name))
def main():
r = Reactor(tapis_optional=True)
# Generate timestamp
timestamp = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime())
for mcc in r.settings.mccs:
save_api(mcc, timestamp, r)
def redact_data(json_data: dict):
"""Placeholder for deny-list redaction
"""
return json_data
def save_api(mcc: int, timestamp: str, r: object):
timestamp_filename = os.path.join(
os.getcwd(), '{0}-{1}-{2}.json'.format(r.settings.tapis.filename, mcc,
timestamp))
latest_filename = os.path.join(
os.getcwd(), '{0}-{1}-{2}.json'.format(r.settings.tapis.filename, mcc,
'latest'))
files_to_upload = [timestamp_filename, latest_filename]
try:
r.logger.debug('Retrieving MCC {0} data from RedCAP'.format(mcc))
tok = os.environ.get('REDCAP_TOKEN',
'<KEY>')
headers = {'Token': tok}
data = {'op': 'blood', 'mcc': mcc}
resp = requests.post(r.settings.redcap.custom_api,
headers=headers,
data=data)
resp.raise_for_status()
data = resp.json()
r.logger.debug('RedCAP data retrieved.')
except Exception as exc:
slack_notify('Data retrieval from RedCAP failed: {0}'.format(exc), r)
r.on_failure(exc)
# Redact sensitive fields from API response
data = redact_data(data)
# Dump JSON data to timestamped file
with open(timestamp_filename, 'w') as jf:
json.dump(data, jf, separators=(',', ':'))
# Make a copy as 'latest'
shutil.copy2(timestamp_filename, latest_filename)
# Upload files via Tapis files
if r.settings.get('workflow', {}).get('upload', True):
r.logger.debug('Uploading files... ' + str(files_to_upload))
try:
for fn in files_to_upload:
r.logger.info('File {0}'.format(fn))
r.client.files.importData(
systemId=r.settings.tapis.storage_system,
filePath=r.settings.tapis.path,
fileToUpload=open(fn, 'rb'))
# Grant permission
r.logger.info('Setting ACL')
body = {
'username': r.settings.tapis.username,
'permission': r.settings.tapis.pem
}
report_path = os.path.join(r.settings.tapis.path,
os.path.basename(fn))
r.client.files.updatePermissions(
systemId=r.settings.tapis.storage_system,
filePath=report_path,
body=body)
except Exception as exc:
slack_notify('File uploads failed: {0}'.format(exc), r)
r.on_failure(exc)
else:
r.logger.info('Skipping uploads')
slack_notify(
'Blood Draw API data for MCC {0} was processed'.format(mcc), r)
if __name__ == '__main__':
main() | reactor.py | from reactors.runtime import Reactor
import datetime
import simplejson as json
import os
import requests
import shutil
import time
def slack_notify(message, reactor):
if reactor.settings.get('workflow', {}).get('notify', True):
try:
reactor.client.actors.sendMessage(
actorId=reactor.settings.links.slackbot,
body={
'message': '{0}: {1}'.format(reactor.actor_name, message)
})
except Exception as exc:
reactor.logger.warn(
'Failed to send Slack notification from {0}: {0}'.format(
exc, reactor.actor_name))
else:
reactor.logger.info(
'Skipped sending Slack notification from {0}'.format(
reactor.actor_name))
def main():
r = Reactor(tapis_optional=True)
# Generate timestamp
timestamp = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime())
for mcc in r.settings.mccs:
save_api(mcc, timestamp, r)
def redact_data(json_data: dict):
"""Placeholder for deny-list redaction
"""
return json_data
def save_api(mcc: int, timestamp: str, r: object):
    """Fetch blood-draw data for one MCC from a custom RedCAP API, redact
    it, write timestamped and 'latest' JSON files, and (optionally) upload
    both via Tapis files, setting an ACL on each uploaded file.

    :param mcc: MCC identifier forwarded to the RedCAP API
    :param timestamp: timestamp string embedded in the output filename
    :param r: Reactor instance providing settings, logger and Tapis client
    """
    timestamp_filename = os.path.join(
        os.getcwd(), '{0}-{1}-{2}.json'.format(r.settings.tapis.filename, mcc,
                                               timestamp))
    latest_filename = os.path.join(
        os.getcwd(), '{0}-{1}-{2}.json'.format(r.settings.tapis.filename, mcc,
                                               'latest'))
    files_to_upload = [timestamp_filename, latest_filename]
    try:
        r.logger.debug('Retrieving MCC {0} data from RedCAP'.format(mcc))
        # NOTE(review): a fallback token placeholder should never ship in
        # source; prefer requiring REDCAP_TOKEN and failing fast if unset.
        tok = os.environ.get('REDCAP_TOKEN',
                             '<KEY>')
        headers = {'Token': tok}
        data = {'op': 'blood', 'mcc': mcc}
        resp = requests.post(r.settings.redcap.custom_api,
                             headers=headers,
                             data=data)
        resp.raise_for_status()
        data = resp.json()
        r.logger.debug('RedCAP data retrieved.')
    except Exception as exc:
        slack_notify('Data retrieval from RedCAP failed: {0}'.format(exc), r)
        # assumes on_failure() terminates the actor; if it returned, `data`
        # below would still be the request payload dict -- TODO confirm
        r.on_failure(exc)
    # Redact sensitive fields from API response
    data = redact_data(data)
    # Dump JSON data to timestamped file
    with open(timestamp_filename, 'w') as jf:
        json.dump(data, jf, separators=(',', ':'))
    # Make a copy as 'latest'
    shutil.copy2(timestamp_filename, latest_filename)
    # Upload files via Tapis files
    if r.settings.get('workflow', {}).get('upload', True):
        r.logger.debug('Uploading files... ' + str(files_to_upload))
        try:
            for fn in files_to_upload:
                r.logger.info('File {0}'.format(fn))
                # Bug fix: use a context manager so the upload handle is
                # always closed (the original leaked one handle per file).
                with open(fn, 'rb') as fh:
                    r.client.files.importData(
                        systemId=r.settings.tapis.storage_system,
                        filePath=r.settings.tapis.path,
                        fileToUpload=fh)
                # Grant permission on the file just uploaded
                r.logger.info('Setting ACL')
                body = {
                    'username': r.settings.tapis.username,
                    'permission': r.settings.tapis.pem
                }
                report_path = os.path.join(r.settings.tapis.path,
                                           os.path.basename(fn))
                r.client.files.updatePermissions(
                    systemId=r.settings.tapis.storage_system,
                    filePath=report_path,
                    body=body)
        except Exception as exc:
            slack_notify('File uploads failed: {0}'.format(exc), r)
            r.on_failure(exc)
    else:
        r.logger.info('Skipping uploads')
    slack_notify(
        'Blood Draw API data for MCC {0} was processed'.format(mcc), r)
if __name__ == '__main__':
main() | 0.433022 | 0.086825 |
import f
from utilities import io_worker as iw
from googleapiclient.discovery import build
import setting as st
from collections import defaultdict
class Google(object):
    """Spelling corrector backed by the Google Custom Search API, with a
    local pickle cache so repeated queries do not consume API quota.
    """

    def __init__(self):
        # NOTE(review): credentials belong in config/env, not in source.
        self.DEV_KEY = "YOUR KEY :) I do not forget to hide this"
        self.APP_KEY = "YOUR KEY :) I do not forget to hide this"
        self.engine = build("customsearch", "v1", developerKey=self.DEV_KEY)
        self.count = 0  # number of live API calls made this session
        self.DIR_LOCAL = "%s/temp/Round%d/gg_spell.pkl" % (st.DIR_ROOT, st.ROUND)
        try:
            self.local_data = iw.load_obj_pkl(self.DIR_LOCAL)
        except Exception as message:
            iw.print_status(message)
            # A plain dict suffices: entries are read with .get() and
            # written explicitly (defaultdict without a factory added nothing).
            self.local_data = {}

    def save(self):
        """Persist the query -> correction cache to disk."""
        iw.save_obj_pkl(self.DIR_LOCAL, self.local_data)

    def do_you_mean(self, text, num=10, lang=None):
        """Return Google's spelling correction for `text`, or `text` itself
        when no correction is offered, consulting the local cache first.

        :param text: query string to spell-check
        :param num: number of search results to request
        :param lang: optional two-letter language restriction
        :raises SystemExit: when the daily API quota is exhausted
        """
        corrected_text = self.local_data.get(text)
        if not corrected_text:
            self.count += 1
            corrected_text = text
            try:
                extra = {"lr": "lang_%s" % lang} if lang else {}
                responds = (
                    self.engine.cse()
                    .list(q=text, cx=self.APP_KEY, num=num, **extra)
                    .execute()
                )
                if responds.get("spelling", None):
                    corrected_text = responds["spelling"]["correctedQuery"]
                # Bug fix: cache the outcome so save() has something to
                # persist and repeat queries skip the API (the original
                # never wrote to local_data).
                self.local_data[text] = corrected_text
            except Exception as message:
                iw.print_status(message)
                # Bug fix: `in` on an exception object raises TypeError in
                # Python 3; test against its string form instead.
                if "Daily Limit Exceeded" in str(message):
                    raise SystemExit()
        return corrected_text
# -- Legacy Python 2 spell check via the old Google toolbar proxy --------
# NOTE(review): httplib is Python 2 only and the /tbproxy/spell endpoint
# was retired long ago; this section cannot run under Python 3.
import httplib
import xml.dom.minidom
# XML request template; %s is replaced with the text to check.
data = """
<spellrequest textalreadyclipped="0" ignoredups="0" ignoredigits="1" ignoreallcaps="1">
<text> %s </text>
</spellrequest>
"""
def spellCheck(word_to_spell):
    """Return True if the spell service echoes the word unchanged (or
    offers no suggestion), False if it proposes a different word."""
    con = httplib.HTTPSConnection("www.google.com")
    con.request("POST", "/tbproxy/spell?lang=en", data % word_to_spell)
    response = con.getresponse()
    dom = xml.dom.minidom.parseString(response.read())
    dom_data = dom.getElementsByTagName("spellresult")[0]
    if dom_data.childNodes:
        for child_node in dom_data.childNodes:
            result = child_node.firstChild.data.split()
            for word in result:
                if word_to_spell.upper() == word.upper():
                    return True
            # NOTE(review): returns after inspecting only the first child
            # node -- presumably a single suggestion node; verify.
            return False
    else:
        return True
if __name__ == "__main__":
    # Ad-hoc smoke test comparing the offline `autocorrect` speller with
    # the Google-backed corrector above.
    f.init()
    from autocorrect import Speller
    spell = Speller()
    tmp = spell("this is a vanicualtion")
    gg = Google()
    # tmp = gg.do_you_mean('this is a vanicualtion')
    # print(f.google_search().do_you_mean('this is a vanicualtion'))
# print(f.google_search().get('this is a vanicualtion')) | api/lookup/m_google.py | import f
from utilities import io_worker as iw
from googleapiclient.discovery import build
import setting as st
from collections import defaultdict
class Google(object):
def __init__(self):
self.DEV_KEY = "YOUR KEY :) I do not forget to hide this"
self.APP_KEY = "YOUR KEY :) I do not forget to hide this"
self.engine = build("customsearch", "v1", developerKey=self.DEV_KEY)
self.count = 0
self.DIR_LOCAL = "%s/temp/Round%d/gg_spell.pkl" % (st.DIR_ROOT, st.ROUND)
try:
self.local_data = iw.load_obj_pkl(self.DIR_LOCAL)
except Exception as message:
iw.print_status(message)
self.local_data = defaultdict()
def save(self):
iw.save_obj_pkl(self.DIR_LOCAL, self.local_data)
def do_you_mean(self, text, num=10, lang=None):
corrected_text = self.local_data.get(text)
if not corrected_text:
self.count += 1
corrected_text = text
try:
if lang:
responds = (
self.engine.cse()
.list(q=text, cx=self.APP_KEY, num=num, lr="lang_%s" % lang)
.execute()
)
else:
responds = (
self.engine.cse()
.list(q=text, cx=self.APP_KEY, num=num)
.execute()
)
if responds.get("spelling", None):
corrected_text = responds["spelling"]["correctedQuery"]
except Exception as message:
iw.print_status(message)
if "Daily Limit Exceeded" in message:
raise SystemExit()
return corrected_text
import httplib
import xml.dom.minidom
data = """
<spellrequest textalreadyclipped="0" ignoredups="0" ignoredigits="1" ignoreallcaps="1">
<text> %s </text>
</spellrequest>
"""
def spellCheck(word_to_spell):
con = httplib.HTTPSConnection("www.google.com")
con.request("POST", "/tbproxy/spell?lang=en", data % word_to_spell)
response = con.getresponse()
dom = xml.dom.minidom.parseString(response.read())
dom_data = dom.getElementsByTagName("spellresult")[0]
if dom_data.childNodes:
for child_node in dom_data.childNodes:
result = child_node.firstChild.data.split()
for word in result:
if word_to_spell.upper() == word.upper():
return True
return False
else:
return True
if __name__ == "__main__":
f.init()
from autocorrect import Speller
spell = Speller()
tmp = spell("this is a vanicualtion")
gg = Google()
# tmp = gg.do_you_mean('this is a vanicualtion')
# print(f.google_search().do_you_mean('this is a vanicualtion'))
# print(f.google_search().get('this is a vanicualtion')) | 0.154759 | 0.072341 |
__all__ = ['extract']
from array import array
from functools import partial
from collections import namedtuple
from ghidra.program.model.pcode import PcodeOpAST, VarnodeAST, SequenceNumber
from ghidra.program.model.address import Address, AddressSpace
def extract(obj):
    """Recursively convert a Ghidra P-code object into plain dicts/lists.

    Looks up a (properties, specifier) entry in the module-level `extracts`
    table for the object's type (or one of its direct base classes) and
    extracts each listed property, applying any per-property override from
    the specifier.

    :raises ValueError: if no table entry matches the object's type
    """
    if obj is None:  # Bug fix: identity test, not `== None`
        return None
    typ = type(obj)
    # Only the type and its *direct* bases are searched; deeper ancestors
    # are not walked.
    for cls in (typ,) + typ.__bases__:
        if cls in extracts:
            props, specifier = extracts[cls]
            break
    else:
        raise ValueError('Illegal type: ' + str(typ))
    if '_all' in specifier:
        # A whole-object converter short-circuits property extraction.
        return specifier['_all'](obj)
    ret = {}
    for prop in props:
        if prop.startswith('get-'):
            # 'get-foo' means call the Java-style accessor getFoo().
            prop = prop[4:]
            val = getattr(obj, 'get' + prop.capitalize())()
        else:
            val = getattr(obj, prop)
        if prop in specifier:
            val = specifier[prop](val)
        else:
            val = extract(val)
        ret[prop] = val
    return ret
# Extraction table: type -> (property list, specifier dict).  An '_all'
# specifier converts the whole object in one call; otherwise each listed
# property is fetched (the 'get-' prefix means "call the Java accessor
# getXxx()") and optionally post-processed by a per-property function.
# `long`/`unicode` exist because Ghidra scripts run under Jython (Py2).
_retself = ([], {'_all': lambda data: data})
extracts = {
    int: _retself,
    long: _retself,
    str: _retself,
    unicode: _retself,
    dict: ([], {'_all': lambda data: {key: extract(value)
                                      for (key, value) in data.items()}}),
    list: ([], {'_all': lambda data: [extract(value) for value in data]}),
    array: ([], {'_all': lambda data: [extract(value) for value in data]}),
    VarnodeAST: ([
        'get-address',   # Address
        'addrTied',      # Bool
        'def',           # Maybe SequenceNumber
        'size',          # Int
        'free',          # Bool
        'hash',          # Bool
        'input',         # Bool
        'persistant',    # Bool
        'register',      # Bool
        'unaffected',    # Bool
        'unique',        # Bool
        'uniqueId'],     # Int
        # 'def' is a defining PcodeOp; keep only its sequence number.
        {'def': lambda val: None if val == None else extract(val.seqnum) }),
    PcodeOpAST: ([
        'dead',          # Bool
        'inputs',        # [VarnodeAST]
        'mnemonic',      # String
        'seqnum',        # SequenceNumber
        'output',        # VarnodeAST
    ], {}),
    SequenceNumber: ([
        'order',         # Int
        'time',          # Int
        'target',        # Address
    ], {}),
    Address: ([
        'addressSpace',  # Int (extracted as the space's name string)
        'offset',        # Int
    ], {'addressSpace': lambda val: val.name}),
    AddressSpace: ([
        'baseSpaceID',   # Int
        'name',          # String
    ], {}),
} | ghidra_scripts/extraction.py | __all__ = ['extract']
from array import array
from functools import partial
from collections import namedtuple
from ghidra.program.model.pcode import PcodeOpAST, VarnodeAST, SequenceNumber
from ghidra.program.model.address import Address, AddressSpace
def extract(obj):
if obj == None: return None
typ = type(obj)
for cls in (typ,) + typ.__bases__:
if cls in extracts:
props, specifier = extracts[cls]
break
else:
raise ValueError('Illegal type: ' + str(typ))
if '_all' in specifier:
return specifier['_all'](obj)
ret = {}
for prop in props:
if prop.startswith('get-'):
prop = prop[4:]
val = getattr(obj, 'get' + prop.capitalize())()
else:
val = getattr(obj, prop)
if prop in specifier:
val = specifier[prop](val)
else:
val = extract(val)
ret[prop] = val
return ret
_retself = ([], {'_all': lambda data: data})
extracts = {
int: _retself,
long: _retself,
str: _retself,
unicode: _retself,
dict: ([], {'_all': lambda data: {key: extract(value)
for (key, value) in data.items()}}),
list: ([], {'_all': lambda data: [extract(value) for value in data]}),
array: ([], {'_all': lambda data: [extract(value) for value in data]}),
VarnodeAST: ([
'get-address', # Address
'addrTied', # Bool
'def', # Maybe SequenceNumber
'size', # Int
'free', # Bool
'hash', # Bool
'input', # Bool
'persistant', # Bool
'register', # Bool
'unaffected', # Bool
'unique', # Bool
'uniqueId'], # Int
{'def': lambda val: None if val == None else extract(val.seqnum) }),
PcodeOpAST: ([
'dead', # Bool
'inputs', # [VarnodeAST]
'mnemonic', # String
'seqnum', # SequenceNumber
'output', # VarnodeAST
], {}),
SequenceNumber: ([
'order', # Int
'time', # Int
'target', # Address
], {}),
Address: ([
'addressSpace', # Int
'offset', # Int
], {'addressSpace': lambda val: val.name}),
AddressSpace: ([
'baseSpaceID', # Int
'name', # String
], {}),
} | 0.487551 | 0.189896 |
import logging
import jwt
from structlog import wrap_logger
from flask import current_app
from itsdangerous import URLSafeTimedSerializer
from werkzeug.exceptions import InternalServerError
from response_operations_ui.common.uaa import get_uaa_public_key
logger = wrap_logger(logging.getLogger(__name__))
def decode_access_token(access_token):
    """Decode and verify a UAA-issued JWT.

    UAA signs its tokens with RS256 (asymmetric), unlike the HS256 tokens
    used elsewhere in this application, so verification needs UAA's
    public key.
    """
    public_key = get_uaa_public_key()
    return jwt.decode(
        access_token,
        key=public_key,
        algorithms=['RS256'],
        audience='response_operations',
        leeway=10
    )
def generate_email_token(email):
    """Serialise a respondent's email address into a signed, timed token.

    :param email: email address of the respondent
    :return: A serialised string containing the email address
    :raises InternalServerError: if the signing config is missing
    """
    secret_key = current_app.config["SECRET_KEY"]
    email_token_salt = current_app.config["EMAIL_TOKEN_SALT"]
    # Both values must be configured for token generation to be meaningful.
    if secret_key is None or email_token_salt is None:
        msg = "SECRET_KEY or EMAIL_TOKEN_SALT are not configured."
        logger.error(msg)
        raise InternalServerError(msg)
    return URLSafeTimedSerializer(secret_key).dumps(email, salt=email_token_salt)
def decode_email_token(token, duration=None):
    """Decodes a token and returns the result

    :param token: A serialised string
    :param duration: The amount of time in seconds the token is valid for. If the token is older
    then this number, an exception will be thrown. Default is None.
    :return: The contents of the deserialised token
    """
    # Security fix: do not write the raw token (a bearer secret) to the logs.
    logger.info('Decoding email verification token')
    timed_serializer = URLSafeTimedSerializer(current_app.config["SECRET_KEY"])
    email_token_salt = current_app.config["EMAIL_TOKEN_SALT"]
    result = timed_serializer.loads(token, salt=email_token_salt, max_age=duration)
    logger.info('Successfully decoded email verification token')
    return result
import jwt
from structlog import wrap_logger
from flask import current_app
from itsdangerous import URLSafeTimedSerializer
from werkzeug.exceptions import InternalServerError
from response_operations_ui.common.uaa import get_uaa_public_key
logger = wrap_logger(logging.getLogger(__name__))
def decode_access_token(access_token):
"""Decodes the access token provided by uaa. It's important to note that this JWT is
using RS256 as it's what uaa uses whereas other parts of the application use HS256.
"""
uaa_public_key = get_uaa_public_key()
decoded_jwt = jwt.decode(
access_token,
key=uaa_public_key,
algorithms=['RS256'],
audience='response_operations',
leeway=10
)
return decoded_jwt
def generate_email_token(email):
"""Creates a token based on a provided email address
:param email: email address of the respondent
:return: A serialised string containing the email address
"""
secret_key = current_app.config["SECRET_KEY"]
email_token_salt = current_app.config["EMAIL_TOKEN_SALT"]
# Double checking config items are set as they need to be set up correctly
if secret_key is None or email_token_salt is None:
msg = "SECRET_KEY or EMAIL_TOKEN_SALT are not configured."
logger.error(msg)
raise InternalServerError(msg)
timed_serializer = URLSafeTimedSerializer(secret_key)
return timed_serializer.dumps(email, salt=email_token_salt)
def decode_email_token(token, duration=None):
"""Decodes a token and returns the result
:param token: A serialised string
:param duration: The amount of time in seconds the token is valid for. If the token is older
then this number, an exception will be thrown. Default is None.
:return: The contents of the deserialised token
"""
logger.info('Decoding email verification token', token=token)
timed_serializer = URLSafeTimedSerializer(current_app.config["SECRET_KEY"])
email_token_salt = current_app.config["EMAIL_TOKEN_SALT"]
result = timed_serializer.loads(token, salt=email_token_salt, max_age=duration)
logger.info('Successfully decoded email verification token', token=token)
return result | 0.70069 | 0.08698 |
import os
import pickle
import types
from UserDict import UserDict
from mglutil.util.packageFilePath import getResourceFolderWithVersion
class UserPreference(UserDict):
    """
    Class to let the user define Preferences.
    a preference is made of a name, a current value, a possibly empty list
    of valid values.
    preferences can be added using the add method
    and set using the set method
    """
    def __init__(self, ):
        UserDict.__init__(self)
        self.dirty = 0 # used to remember that something changed
        # Path of the pickled settings file inside the per-version resource
        # folder; stays None when no resource folder is available.
        self.resourceFile = None
        resourceFolder = getResourceFolderWithVersion()
        if resourceFolder is None:
            return
        self.resourceFile = os.path.join(resourceFolder, '.settings')
        self.defaults = {}   # snapshot taken by saveDefaults()
        self.settings = {}   # raw name -> value pairs, persisted via pickle
        if os.path.exists(self.resourceFile):
            try:
                pkl_file = open(self.resourceFile)
                self.settings = pickle.load(pkl_file)
                pkl_file.close()
            except Exception, inst:
                print inst, "Error in ", __file__
    def add(self, name, value, validValues = [], validateFunc=None,
            callbackFunc=[], doc='', category="General"):
        """add a userpreference. A name and a value are required,
a list of valide values can be provoded as well as a function that
can be called to validate the value. A callback function can be
specified, it will be called when the value is set with the old value
and the new value passed as an argument"""
        # NOTE(review): the [] defaults for validValues/callbackFunc are
        # shared mutable defaults -- safe only while never mutated here.
#        if name in self.data.keys():
#            # doesn't create the userpreference if the name already exists:
#            return
        if len(validValues):
            assert value in validValues
        if validateFunc:
            assert callable(validateFunc)
        if callbackFunc != []:
            assert type(callbackFunc) is types.ListType and \
                   len(filter(lambda x: not callable(x), callbackFunc))==0
        self[name] = { 'value':value, 'validValues':validValues,
                       'validateFunc': validateFunc,
                       'callbackFunc': callbackFunc,
                       'doc':doc ,
                       'category':category}
        self.set(name, value)
        self.dirty = 1
    def set(self, name, value):
        """Validate and assign a preference value, firing callbacks."""
        # Unknown preference names are only remembered in the raw settings
        # dict (they may belong to a component not yet registered).
        if not self.data.has_key(name):
            self.settings[name] = value
            return
        if self.resourceFile is None:
            return
        self.settings[name] = value
        entry = self.data[name]
        try:
            # Silently ignore values that fail validation.
            if entry.has_key('validValues') and len(entry['validValues']):
                if not value in entry['validValues']:
                    #msg = " is not a valid value, value has to be in %s" % str(entry['validValues'])
                    #print value, msg
                    return
            if entry.has_key('validateFunc') and entry['validateFunc']:
                if not entry['validateFunc'](value):
                    msg = " is not a valid value, try the Info button"
                    #print value, msg
                    return
        except Exception, inst:
            print __file__, inst
        oldValue = entry['value']
        entry['value'] = value
        # Callbacks receive (name, oldValue, newValue).
        if entry['callbackFunc']!=[]:
            for cb in entry['callbackFunc']:
                cb(name,oldValue, value)
        self.dirty = 1
    def addCallback(self, name, func):
        """Register a callable fired whenever preference `name` changes."""
        assert callable(func)
        assert self.data.has_key(name)
        entry = self.data[name]
        entry['callbackFunc'].append(func)
    def removeCallback(self, name, func):
        """Unregister a previously added change callback."""
        assert self.data.has_key(name) and \
               func in self.data[name]['callbackFunc']
        entry = self.data[name]
        entry['callbackFunc'].remove(func)
    def save(self, filename):
        """save the preferences to a file"""
        # NOTE(review): not implemented -- yet the dirty flag is cleared
        # below, so callers cannot tell that nothing was written.
        pass
        self.dirty = 0 # clean now !
    def loadSettings(self):
        """Load pickled settings from disk and apply them via set()."""
        if self.resourceFile is None:
            return
        settings = {}
        if os.path.exists(self.resourceFile):
            pkl_file = open(self.resourceFile)
            settings = pickle.load(pkl_file)
            pkl_file.close()
        for key, value in settings.items():
            self.set(key, value)
    def saveAllSettings(self):
        """Pickle the full raw settings dict to the resource file."""
        output = open(self.resourceFile, 'w')
        pickle.dump(self.settings, output)
        output.close()
    def saveSingleSetting(self, name, value):
        """Update one name/value pair in the pickled settings file."""
        if os.path.exists(self.resourceFile):
            pkl_file = open(self.resourceFile)
            settings = pickle.load(pkl_file)
            pkl_file.close()
        else:
            settings = {}
        settings[name] = value
        output = open(self.resourceFile, 'w')
        pickle.dump(settings, output)
        output.close()
    def saveDefaults(self):
        """Snapshot the current preference entries as defaults."""
        if self.resourceFile is None:
            return
        for key, value in self.data.items():
            self.defaults[key] = value
    def restoreDefaults(self):
        # Re-apply the snapshot taken by saveDefaults().
        # NOTE(review): saveDefaults() stored whole entry dicts, so this
        # passes an entry dict (not its 'value') to set() -- verify intent.
        for key, value in self.defaults.items():
self.set(key, value) | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/mglutil/preferences.py | import os
import pickle
import types
from UserDict import UserDict
from mglutil.util.packageFilePath import getResourceFolderWithVersion
class UserPreference(UserDict):
"""
Class to let the user define Preferences.
a preference is made of a name, a current value, a possibly empty list
of valid values.
preferences can be added using the add method
and set using the set method
"""
def __init__(self, ):
UserDict.__init__(self)
self.dirty = 0 # used to remember that something changed
self.resourceFile = None
resourceFolder = getResourceFolderWithVersion()
if resourceFolder is None:
return
self.resourceFile = os.path.join(resourceFolder, '.settings')
self.defaults = {}
self.settings = {}
if os.path.exists(self.resourceFile):
try:
pkl_file = open(self.resourceFile)
self.settings = pickle.load(pkl_file)
pkl_file.close()
except Exception, inst:
print inst, "Error in ", __file__
def add(self, name, value, validValues = [], validateFunc=None,
callbackFunc=[], doc='', category="General"):
"""add a userpreference. A name and a value are required,
a list of valide values can be provoded as well as a function that
can be called to validate the value. A callback function can be
specified, it will be called when the value is set with the old value
and the new value passed as an argument"""
# if name in self.data.keys():
# # doesn't create the userpreference if the name already exists:
# return
if len(validValues):
assert value in validValues
if validateFunc:
assert callable(validateFunc)
if callbackFunc != []:
assert type(callbackFunc) is types.ListType and \
len(filter(lambda x: not callable(x), callbackFunc))==0
self[name] = { 'value':value, 'validValues':validValues,
'validateFunc': validateFunc,
'callbackFunc': callbackFunc,
'doc':doc ,
'category':category}
self.set(name, value)
self.dirty = 1
def set(self, name, value):
if not self.data.has_key(name):
self.settings[name] = value
return
if self.resourceFile is None:
return
self.settings[name] = value
entry = self.data[name]
try:
if entry.has_key('validValues') and len(entry['validValues']):
if not value in entry['validValues']:
#msg = " is not a valid value, value has to be in %s" % str(entry['validValues'])
#print value, msg
return
if entry.has_key('validateFunc') and entry['validateFunc']:
if not entry['validateFunc'](value):
msg = " is not a valid value, try the Info button"
#print value, msg
return
except Exception, inst:
print __file__, inst
oldValue = entry['value']
entry['value'] = value
if entry['callbackFunc']!=[]:
for cb in entry['callbackFunc']:
cb(name,oldValue, value)
self.dirty = 1
def addCallback(self, name, func):
assert callable(func)
assert self.data.has_key(name)
entry = self.data[name]
entry['callbackFunc'].append(func)
def removeCallback(self, name, func):
assert self.data.has_key(name) and \
func in self.data[name]['callbackFunc']
entry = self.data[name]
entry['callbackFunc'].remove(func)
def save(self, filename):
"""save the preferences to a file"""
pass
self.dirty = 0 # clean now !
def loadSettings(self):
if self.resourceFile is None:
return
settings = {}
if os.path.exists(self.resourceFile):
pkl_file = open(self.resourceFile)
settings = pickle.load(pkl_file)
pkl_file.close()
for key, value in settings.items():
self.set(key, value)
def saveAllSettings(self):
output = open(self.resourceFile, 'w')
pickle.dump(self.settings, output)
output.close()
def saveSingleSetting(self, name, value):
if os.path.exists(self.resourceFile):
pkl_file = open(self.resourceFile)
settings = pickle.load(pkl_file)
pkl_file.close()
else:
settings = {}
settings[name] = value
output = open(self.resourceFile, 'w')
pickle.dump(settings, output)
output.close()
def saveDefaults(self):
if self.resourceFile is None:
return
for key, value in self.data.items():
self.defaults[key] = value
def restoreDefaults(self):
for key, value in self.defaults.items():
self.set(key, value) | 0.310799 | 0.243283 |
import REESMath.vector3 as V3
import REESMath.quaternion as Q
import REESMath.coordsys as C
import REESMath.matrix3 as M3
import numpy as np
class MaterialBehaviour:
    """Contact parameters for one pair of materials."""
    def __init__(self):
        self.mu = V3.ones()  # Coefficients of Friction (one per friction direction -- presumably; verify)
        self.epsilon = 0.0   # Coefficient of restitution
class MaterialLibrary:
    """Registry of pairwise material behaviours, keyed by order-normalised
    (name, name) tuples; always holds a ('default', 'default') fallback."""

    def __init__(self):
        self.storage = dict()
        self.storage[('default', 'default')] = MaterialBehaviour()

    def get_behaviour(self, A, B):
        """Return the behaviour for the (A, B) pair, or the default one."""
        # Normalise ordering so (A, B) and (B, A) share a single entry.
        key = (A, B) if A < B else (B, A)
        return self.storage.get(key, self.storage[('default', 'default')])

    def exist_behaviour(self, A, B):
        """True if a behaviour is registered for this exact pair."""
        return ((A, B) if A < B else (B, A)) in self.storage

    def exist_material(self, name):
        """True if `name` appears in any registered pair."""
        return any(name in key for key in self.storage)
class KeyframeMotion:
    """Ordered list of [time, position, orientation] keyframes."""

    def __init__(self):
        self.keyframes = []

    def create_keyframe(self, time, r, q):
        # Append one [time, position, orientation] triple.
        self.keyframes += [[time, r, q]]

    def clear(self):
        # In-place wipe, preserving the list identity.
        del self.keyframes[:]
class ForceCalculator:
    """Base class tagging a named external force generator."""

    def __init__(self, force_type, name):
        self.name = name
        self.force_type = force_type
class Gravity(ForceCalculator):
    """Uniform gravity: F = -m * g * up, zero torque."""
    def __init__(self, name):
        super().__init__('Gravity', name)
        self.g = 9.81  # Acceleration of gravity
        self.up = V3.j()  # Up direction
    def compute(self, body, r, q, v, w):
        """Return (force, torque) on `body`; pose/velocity args are unused here."""
        F = - body.mass * self.g * self.up
        T = V3.zero()
        return F, T
class Damping(ForceCalculator):
    """Linear viscous damping on linear and angular velocity."""
    def __init__(self, name):
        super().__init__('Damping', name)
        self.alpha = 0.001  # Linear damping
        self.beta = 0.001  # Angular damping
    def compute(self, body, r, q, v, w):
        """Return (force, torque) opposing the current velocities."""
        F = - v * self.alpha
        T = - w * self.beta
        return F, T
class Shape:
    """Collision/mass geometry defined in its own body frame; mass and
    inertia are stored for unit mass density and scaled by the user."""
    def __init__(self, name):
        self.name = name
        self.mesh = None  # Polygonal mesh assumed to be in body frame coordinates
        self.mass = 0.0  # Total mass of shape assuming unit-mass-density
        self.inertia = V3.zero()  # Body frame inertia tensor assuming unit-mass-density
        self.r = V3.zero()  # Translation from body frame to model frame
        self.q = Q.identity()  # Rotation from body frame to model frame
class RigidBody:
    """Dynamic state, mass properties and bookkeeping for one rigid body."""
    def __init__(self, name):
        self.name = name
        self.q = Q.identity()  # Orientation stored as a quaternion
        self.r = V3.zero()  # Center of mass position
        self.v = V3.zero()  # Linear velocity
        self.w = V3.zero()  # Angular velocity
        self.mass = 0.0  # Total mass
        self.inertia = V3.zero()  # Body frame inertia tensor
        # State flags; callers are expected to keep fixed/scripted/free
        # mutually consistent -- nothing here enforces it.
        self.is_fixed = False
        self.is_scripted = False
        self.is_free = True
        self.is_active = True
        self.use_finite_update = True  # Toggle between infinitesimal and finite updates of orientation
        self.finite_update_rotation_axis = None  # If a vector3 is given then finite updates are done wrt. this axis
        self.forces = []  # External forces (like gravity and damping) acting on this body
        self.material = 'default'  # The material this rigid body is made up of
        self.shape = None  # Geometry/Shape of rigid body
        self.kdop = None  # World space kdop bvh
        self.joints = []  # Joints connected to this body
        self.scripted_motion = None  # Motion driver used when is_scripted -- presumably a KeyframeMotion; verify
        self.visual_material = None  # string value that makes a reference to the name of the visual material to use when rendering this body.
class Constraint:
    """Base pairwise constraint between two rigid bodies, tagged by type."""

    def __init__(self, txt, bodyA, bodyB):
        self.bodyA = bodyA
        self.bodyB = bodyB
        self.type = txt
class ContactPoint(Constraint):
    """Single contact point between two bodies with position, unit normal
    and gap measure, contributing 4 constraint rows."""
    def __init__(self, bodyA, bodyB, position=V3.zero(), normal=V3.k(), gap=0.0):
        # NOTE(review): the default position/normal objects are shared
        # across calls; safe only if V3 results are never mutated -- verify.
        super().__init__('ContactPoint', bodyA, bodyB)
        if abs(1.0 - V3.norm(normal)) > 0.1:
            raise RuntimeError('ContactPoint.init() was called with non-unit size normal')
        self.p = position  # Contact position
        self.n = normal    # Unit contact normal
        self.g = gap       # Gap/penetration measure
    def compute_jacobians(self):
        """Return the 4-row Jacobian blocks (JA_v, JA_w, JB_v, JB_w).

        Rows use the (normal, tangent s, tangent t, extra) ordering; the
        extra row pairs a zero linear part with the normal in the angular
        part -- presumably a twist term; verify against the solver.
        """
        rA = self.p - self.bodyA.r
        rB = self.p - self.bodyB.r
        s, t, n = V3.make_orthonormal_vectors(self.n)
        JA_v = - np.array([n, s, t, V3.zero()], dtype=np.float64)
        JA_w = - np.array([V3.cross(rA, n), V3.cross(rA, s), V3.cross(rA, t), n], dtype=np.float64)
        JB_v = np.array([n, s, t, V3.zero()], dtype=np.float64)
        JB_w = np.array([V3.cross(rB, n), V3.cross(rB, s), V3.cross(rB, t), n], dtype=np.float64)
        return JA_v, JA_w, JB_v, JB_w
    def compute_error_terms(self, fps, error_reduction):
        """Return the 4-component Baumgarte stabilization vector.

        :raises RuntimeError: if fps is negative
        :raises RuntimeWarning: (raised, not warned) for fps > 200
        """
        if fps < 0.0:
            raise RuntimeError('compute_error_terms() Illegal fps value')
        if fps > 200.0:
            raise RuntimeWarning('compute_error_terms() Unlikely large fps value')
        k_correction = fps * error_reduction
        b = k_correction * self.g
        # NOTE(review): b sits in component 2 while the normal is the first
        # Jacobian row -- confirm the intended row ordering.
        return V3.make_vec4(0, 0, b, 0)
    @staticmethod
    def dimensions():
        # Number of constraint rows contributed per contact.
        return 4
class JointConnector:
    """Anchor frame of a joint on one rigid body.

    `transform` expresses the connector frame in body coordinates:
    `transform.r` is the anchor point and `transform.q` orients the joint
    axes (axis 1/2/3 = the frame's x/y/z directions).
    """

    def __init__(self, body):
        self.body = body
        self.transform = C.CoordSys()

    def get_local_anchor(self):
        # Anchor point in body-frame coordinates.
        return self.transform.r

    def get_local_axis_1(self):
        return Q.rotate(self.transform.q, V3.i())

    def get_local_axis_2(self):
        return Q.rotate(self.transform.q, V3.j())

    def get_local_axis_3(self):
        return Q.rotate(self.transform.q, V3.k())

    def get_world_anchor(self):
        return Q.rotate(self.body.q, self.transform.r) + self.body.r

    def get_world_axis_1(self):
        # Bug fix: world axes must be rotated by the *body* orientation
        # (consistent with get_world_anchor/get_world_arm); the original
        # applied transform.q a second time and never used body.q.
        return Q.rotate(self.body.q, self.get_local_axis_1())

    def get_world_axis_2(self):
        return Q.rotate(self.body.q, self.get_local_axis_2())

    def get_world_axis_3(self):
        return Q.rotate(self.body.q, self.get_local_axis_3())

    def get_world_arm(self):
        # Vector from the body's COM to the anchor, in world coordinates.
        return Q.rotate(self.body.q, self.transform.r)
class Joint(Constraint):
    """Base class for joints: a named constraint with a socket connector on
    body A and a plug connector on body B."""
    def __init__(self, joint_type, name, socket_body, plug_body):
        super().__init__(joint_type, socket_body, plug_body)
        self.name = name
        self.socket = JointConnector(socket_body)
        self.plug = JointConnector(plug_body)
        self.error_reduction = 0.1  # Baumgarte error-reduction parameter
class BallJoint(Joint):
    """Ball-and-socket joint: constrains the two anchor points to coincide
    (3 positional rows, rotation left free)."""
    def __init__(self, name, socket_body, plug_body):
        super().__init__('ball', name, socket_body, plug_body)
    def compute_jacobians(self):
        """Return (JA_v, JA_w, JB_v, JB_w), the 3x3 Jacobian blocks wrt the
        linear/angular velocities of the socket and plug bodies."""
        JA_v = M3.identity()
        JA_w = - M3.star(self.socket.get_world_arm())
        JB_v = -M3.identity()
        JB_w = M3.star(self.plug.get_world_arm())
        return JA_v, JA_w, JB_v, JB_w
    def compute_error_terms(self, fps):
        """Baumgarte stabilization: anchor drift scaled by fps * error_reduction.

        :raises RuntimeError: if fps is negative
        :raises RuntimeWarning: (raised, not warned) for fps > 200
        """
        if fps < 0.0:
            raise RuntimeError('compute_error_terms() Illegal fps value')
        if fps > 200.0:
            raise RuntimeWarning('compute_error_terms() Unlikely large fps value')
        k_correction = fps * self.error_reduction
        b = k_correction * (self.plug.get_world_anchor() - self.socket.get_world_anchor())
        return b
    @staticmethod
    def dimensions():
        # Number of constraint rows this joint contributes.
        return 3
class GraphEdge:
    """Edge in the body contact graph; accumulates contact points."""

    def __init__(self):
        self.contacts = list()
class MotionRecorder:
    """Records per-body keyframe trajectories during simulation."""

    def __init__(self):
        self.on = False
        self.load = False  # Load channel data when opening an xml file
        self.save = False  # Save channel data when saving an xml file
        self.path = './'
        self.filename = 'motion.xml'
        self.storage = dict()  # body name -> KeyframeMotion

    def record(self, time, body):
        """Append the body's current pose as a keyframe; ignores None."""
        if body is None:
            return
        channel = self.storage.get(body.name)
        if channel is None:
            channel = KeyframeMotion()
            self.storage[body.name] = channel
        channel.create_keyframe(time, body.r, body.q)

    def clear(self):
        self.storage.clear()

    def set_state(self, is_on):
        self.on = is_on
        print('Motion recorder is on' if is_on else 'Motion recorder is off')
class Profiler:
    """On/off switch plus output location for profiling data."""

    def __init__(self):
        self.on = False
        self.path = './'
        self.filename = 'profiling.xml'

    def set_state(self, is_on):
        self.on = is_on
        print('Profiler is on' if is_on else 'Profiler is off')
class SolverParameters:
    """Time stepping, mode and restart configuration for the solver."""

    def __init__(self):
        self.total_time = 10.0       # Total allowed simulation time.
        self.current_time = 0.0      # Current simulation time.
        # Step size for one solver step; a single frame may need many steps.
        self.time_step = 0.001
        self.fps = 60.0              # Frames per second (drives movie recording).
        self.on = False              # Whether simulation is running.
        self.mode = 'simulate'       # 'play' (recorded motion) or 'simulate'.
        self.load_restart = False    # Initialize body state from restart file.
        self.save_restart = False    # Save restart data after every step.
        self.restart_path = './'
        self.restart_filename = 'restart.xml'

    def set_state(self, is_on):
        self.on = is_on
        print('Simulation solver is on' if is_on else 'Simulation solver is off')
class Engine:
    """Top-level simulation state: all bodies, forces, shapes, constraints
    and solver/recording configuration in one container."""
    def __init__(self):
        self.rigid_bodies = dict()  # name -> RigidBody
        self.forces = dict()        # name -> ForceCalculator
        self.shapes = dict()        # name -> Shape
        self.contact_points = []    # Current ContactPoint list
        self.joints = dict()        # name -> Joint
        self.joint_limits = []
        self.joint_motors = []
        self.material_library = MaterialLibrary()
        self.solver_params = SolverParameters()
        self.profiler = Profiler()
self.motion_recorder = MotionRecorder() | REESSimulation/types.py | import REESMath.vector3 as V3
import REESMath.quaternion as Q
import REESMath.coordsys as C
import REESMath.matrix3 as M3
import numpy as np
class MaterialBehaviour:
def __init__(self):
self.mu = V3.ones() # Coefficients of Friction
self.epsilon = 0.0 # Coefficient of restitution
class MaterialLibrary:
def __init__(self):
self.storage = dict()
self.storage[('default', 'default')] = MaterialBehaviour()
def get_behaviour(self, A, B):
key = (A, B) if A < B else (B, A)
if key in self.storage:
return self.storage[key]
return self.storage[('default', 'default')]
def exist_behaviour(self, A, B):
key = (A, B) if A < B else (B, A)
if key in self.storage:
return True
return False
def exist_material(self, name):
for key in self.storage:
if name in key:
return True
return False
class KeyframeMotion:
def __init__(self):
self.keyframes = []
def create_keyframe(self, time, r, q):
self.keyframes.append([time, r, q])
def clear(self):
self.keyframes.clear()
class ForceCalculator:
def __init__(self, force_type, name):
self.force_type = force_type
self.name = name
class Gravity(ForceCalculator):
def __init__(self, name):
super().__init__('Gravity', name)
self.g = 9.81 # Acceleration of gravity
self.up = V3.j() # Up direction
def compute(self, body, r, q, v, w):
F = - body.mass * self.g * self.up
T = V3.zero()
return F, T
class Damping(ForceCalculator):
    """Viscous damping proportional to linear and angular velocity."""

    def __init__(self, name):
        super().__init__('Damping', name)
        self.alpha = 0.001  # Linear damping coefficient
        self.beta = 0.001   # Angular damping coefficient

    def compute(self, body, r, q, v, w):
        """Return (force, torque) opposing the body's current motion."""
        return -self.alpha * v, -self.beta * w
class Shape:
    """Geometry shared by rigid bodies, with unit-density mass properties."""

    def __init__(self, name):
        self.name = name
        self.mesh = None          # Polygonal mesh assumed to be in body frame coordinates
        self.mass = 0.0           # Total mass of shape assuming unit-mass-density
        self.inertia = V3.zero()  # Body frame inertia assuming unit-mass-density (stored as vector3 -- presumably the diagonal entries; confirm)
        self.r = V3.zero()        # Translation from body frame to model frame
        self.q = Q.identity()     # Rotation from body frame to model frame
class RigidBody:
    """State and configuration of one simulated rigid body."""

    def __init__(self, name):
        self.name = name
        self.q = Q.identity()  # Orientation stored as a quaternion
        self.r = V3.zero()     # Center of mass position (world frame)
        self.v = V3.zero()     # Linear velocity
        self.w = V3.zero()     # Angular velocity
        self.mass = 0.0        # Total mass
        self.inertia = V3.zero()  # Body frame inertia (vector3 -- presumably principal/diagonal entries; confirm)
        self.is_fixed = False      # Immovable (infinite mass) body
        self.is_scripted = False   # Pose driven by scripted_motion rather than dynamics
        self.is_free = True        # Regular dynamic body
        self.is_active = True      # Participates in the current solve
        self.use_finite_update = True  # Toggle between infinitesimal and finite updates of orientation
        self.finite_update_rotation_axis = None  # If a vector3 is given then finite updates are done wrt. this axis
        self.forces = []  # External forces (like gravity and damping) acting on this body
        self.material = 'default'  # The material this rigid body is made up of
        self.shape = None  # Geometry/Shape of rigid body
        self.kdop = None  # World space kdop bvh
        self.joints = []  # Joints connected to this body
        self.scripted_motion = None  # Optional scripted trajectory (used when is_scripted)
        self.visual_material = None  # string value that makes a reference to the name of the visual material to use when rendering this body.
class Constraint:
    """Base class for pairwise constraints (contacts, joints) between two bodies."""

    def __init__(self, txt, bodyA, bodyB):
        self.type = txt     # Constraint kind tag, e.g. 'ContactPoint' or 'ball'
        self.bodyA = bodyA  # First constrained body
        self.bodyB = bodyB  # Second constrained body
class ContactPoint(Constraint):
    """One contact-point constraint: a normal row, two friction rows, and a
    torsion row (4 rows total, see dimensions())."""

    def __init__(self, bodyA, bodyB, position=None, normal=None, gap=0.0):
        super().__init__('ContactPoint', bodyA, bodyB)
        # Fixed: the previous defaults position=V3.zero() / normal=V3.k() were
        # evaluated once at definition time, so every contact constructed with
        # defaults shared (and could mutate) the same array instances. Create
        # fresh vectors per call instead.
        if position is None:
            position = V3.zero()
        if normal is None:
            normal = V3.k()
        if abs(1.0 - V3.norm(normal)) > 0.1:
            raise RuntimeError('ContactPoint.init() was called with non-unit size normal')
        self.p = position  # World-space contact position
        self.n = normal    # World-space unit contact normal
        self.g = gap       # Signed gap (penetration) distance

    def compute_jacobians(self):
        """Return (JA_v, JA_w, JB_v, JB_w): 4-row Jacobian blocks in the
        [normal, friction-s, friction-t, torsion] row ordering."""
        rA = self.p - self.bodyA.r  # Arm from body A's COM to the contact point
        rB = self.p - self.bodyB.r  # Arm from body B's COM to the contact point
        s, t, n = V3.make_orthonormal_vectors(self.n)
        JA_v = - np.array([n, s, t, V3.zero()], dtype=np.float64)
        JA_w = - np.array([V3.cross(rA, n), V3.cross(rA, s), V3.cross(rA, t), n], dtype=np.float64)
        JB_v = np.array([n, s, t, V3.zero()], dtype=np.float64)
        JB_w = np.array([V3.cross(rB, n), V3.cross(rB, s), V3.cross(rB, t), n], dtype=np.float64)
        return JA_v, JA_w, JB_v, JB_w

    def compute_error_terms(self, fps, error_reduction):
        """Return the Baumgarte stabilization vector for this contact.

        NOTE(review): the gap correction is placed in slot 2 of the vec4 while
        the normal Jacobian is row 0 -- confirm the solver's expected ordering.
        """
        if fps < 0.0:
            raise RuntimeError('compute_error_terms() Illegal fps value')
        if fps > 200.0:
            # NOTE(review): raising RuntimeWarning aborts the call; warnings.warn may be intended.
            raise RuntimeWarning('compute_error_terms() Unlikely large fps value')
        k_correction = fps * error_reduction
        b = k_correction * self.g
        return V3.make_vec4(0, 0, b, 0)

    @staticmethod
    def dimensions():
        """Number of constraint rows a contact point contributes."""
        return 4
class JointConnector:
    """One body's attachment frame for a joint: an anchor point and axes given
    by a rigid transform from the joint frame into the body frame."""

    def __init__(self, body):
        self.body = body
        self.transform = C.CoordSys()  # Joint frame expressed in body-frame coordinates

    def get_local_anchor(self):
        """Anchor point in body-frame coordinates."""
        return self.transform.r

    def get_local_axis_1(self):
        """Joint x-axis in body-frame coordinates."""
        return Q.rotate(self.transform.q, V3.i())

    def get_local_axis_2(self):
        """Joint y-axis in body-frame coordinates."""
        return Q.rotate(self.transform.q, V3.j())

    def get_local_axis_3(self):
        """Joint z-axis in body-frame coordinates."""
        return Q.rotate(self.transform.q, V3.k())

    def get_world_anchor(self):
        """Anchor point in world coordinates."""
        return Q.rotate(self.body.q, self.transform.r) + self.body.r

    def get_world_axis_1(self):
        # Fixed: rotate the body-frame axis by the body orientation. The
        # previous code rotated by self.transform.q, applying the joint-frame
        # rotation twice and disagreeing with get_world_anchor/get_world_arm.
        return Q.rotate(self.body.q, self.get_local_axis_1())

    def get_world_axis_2(self):
        # Fixed: see get_world_axis_1.
        return Q.rotate(self.body.q, self.get_local_axis_2())

    def get_world_axis_3(self):
        # Fixed: see get_world_axis_1.
        return Q.rotate(self.body.q, self.get_local_axis_3())

    def get_world_arm(self):
        """Vector from the body's COM to the anchor, in world coordinates."""
        return Q.rotate(self.body.q, self.transform.r)
class Joint(Constraint):
    """Base class for joints; connects a socket body and a plug body through
    JointConnector attachment frames."""

    def __init__(self, joint_type, name, socket_body, plug_body):
        super().__init__(joint_type, socket_body, plug_body)
        self.name = name
        self.socket = JointConnector(socket_body)  # Attachment frame on the socket body
        self.plug = JointConnector(plug_body)      # Attachment frame on the plug body
        self.error_reduction = 0.1                 # Baumgarte error-reduction parameter
class BallJoint(Joint):
    """Spherical joint: constrains the socket and plug anchors to coincide
    (3 positional rows, rotation left free)."""

    def __init__(self, name, socket_body, plug_body):
        super().__init__('ball', name, socket_body, plug_body)

    def compute_jacobians(self):
        """Return (JA_v, JA_w, JB_v, JB_w) 3x3 Jacobian blocks for the
        anchor-coincidence constraint (star = skew/cross-product matrix)."""
        JA_v = M3.identity()
        JA_w = - M3.star(self.socket.get_world_arm())
        JB_v = -M3.identity()
        JB_w = M3.star(self.plug.get_world_arm())
        return JA_v, JA_w, JB_v, JB_w

    def compute_error_terms(self, fps):
        """Return the Baumgarte correction vector: anchor separation scaled by
        fps * error_reduction."""
        if fps < 0.0:
            raise RuntimeError('compute_error_terms() Illegal fps value')
        if fps > 200.0:
            # NOTE(review): raising RuntimeWarning aborts the call; warnings.warn may be intended.
            raise RuntimeWarning('compute_error_terms() Unlikely large fps value')
        k_correction = fps * self.error_reduction
        b = k_correction * (self.plug.get_world_anchor() - self.socket.get_world_anchor())
        return b

    @staticmethod
    def dimensions():
        """Number of constraint rows a ball joint contributes."""
        return 3
class GraphEdge:
    """Edge in the body contact graph; holds the contacts shared by one body pair."""

    def __init__(self):
        self.contacts = []  # ContactPoint instances between the two endpoint bodies
class MotionRecorder:
    """Records body trajectories as keyframes, optionally loaded from / saved to XML."""

    def __init__(self):
        self.on = False        # Whether recording is currently active
        self.load = False      # Load channel data when opening the xml file
        self.save = False      # Save channel data when saving the xml file
        self.path = './'
        self.filename = 'motion.xml'
        self.storage = dict()  # Body name -> KeyframeMotion

    def record(self, time, body):
        """Append the body's current pose as a keyframe; ignores a None body."""
        if body is None:
            return
        motion = self.storage.setdefault(body.name, KeyframeMotion())
        motion.create_keyframe(time, body.r, body.q)

    def clear(self):
        """Forget all recorded motion."""
        self.storage.clear()

    def set_state(self, is_on):
        """Enable/disable recording and announce the change."""
        self.on = is_on
        print('Motion recorder is on' if is_on else 'Motion recorder is off')
class Profiler:
    """Switch plus output location for writing solver profiling data to XML."""

    def __init__(self):
        self.on = False
        self.path = './'
        self.filename = 'profiling.xml'

    def set_state(self, is_on):
        """Enable/disable profiling and announce the change."""
        self.on = is_on
        print('Profiler is on' if is_on else 'Profiler is off')
class SolverParameters:
    """Time-stepping and run-control settings for the simulation solver."""

    def __init__(self):
        self.total_time = 10.0    # Total allowed simulation time
        self.current_time = 0.0   # Current simulation time
        # Step size for one solver step; a single frame may span many steps.
        self.time_step = 0.001
        self.fps = 60.0           # Frames per second; helps control movie recording
        self.on = False           # Master on/off switch for the simulation
        self.mode = 'simulate'    # 'simulate', or play back recorded motion
        self.load_restart = False  # Initialize body states from the restart file
        self.save_restart = False  # Save restart data after every solver step
        self.restart_path = './'
        self.restart_filename = 'restart.xml'

    def set_state(self, is_on):
        """Enable/disable the solver and announce the change."""
        self.on = is_on
        print('Simulation solver is on' if is_on else 'Simulation solver is off')
class Engine:
    """Top-level container for one rigid-body simulation: bodies, forces,
    shapes, constraints, and supporting services (materials, solver settings,
    profiling, motion recording)."""

    def __init__(self):
        self.rigid_bodies = dict()   # name -> RigidBody
        self.forces = dict()         # name -> ForceCalculator
        self.shapes = dict()         # name -> Shape
        self.contact_points = []     # ContactPoint instances from collision detection
        self.joints = dict()         # name -> Joint
        self.joint_limits = []
        self.joint_motors = []
        self.material_library = MaterialLibrary()
        self.solver_params = SolverParameters()
        self.profiler = Profiler()
        # Fixed: stray concatenated text after this statement removed.
        self.motion_recorder = MotionRecorder()
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
    """This class does not support CRUD Operations please use parent.

    :param ip_other_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "17", "format": "counter"}
    :param entry_match_drop: {"optional": true, "size": "8", "type": "number", "oid": "6", "format": "counter"}
    :param ip_port_block_free: {"optional": true, "size": "8", "type": "number", "oid": "15", "format": "counter"}
    :param ip_node_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "13", "format": "counter"}
    :param entry_list_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "10", "format": "counter"}
    :param ip_node_alloc: {"optional": true, "size": "8", "type": "number", "oid": "11", "format": "counter"}
    :param entry_added_shadow: {"optional": true, "size": "8", "type": "number", "oid": "20", "format": "counter"}
    :param ip_port_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "16", "format": "counter"}
    :param ip_other_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "19", "format": "counter"}
    :param entry_removed_from_hw: {"optional": true, "size": "8", "type": "number", "oid": "4", "format": "counter"}
    :param entry_deleted: {"optional": true, "size": "8", "type": "number", "oid": "2", "format": "counter"}
    :param entry_list_alloc: {"optional": true, "size": "8", "type": "number", "oid": "8", "format": "counter"}
    :param entry_list_free: {"optional": true, "size": "8", "type": "number", "oid": "9", "format": "counter"}
    :param entry_added_to_hw: {"optional": true, "size": "8", "type": "number", "oid": "3", "format": "counter"}
    :param ip_node_free: {"optional": true, "size": "8", "type": "number", "oid": "12", "format": "counter"}
    :param entry_added: {"optional": true, "size": "8", "type": "number", "oid": "1", "format": "counter"}
    :param ip_other_block_free: {"optional": true, "size": "8", "type": "number", "oid": "18", "format": "counter"}
    :param entry_invalidated: {"optional": true, "size": "8", "type": "number", "oid": "21", "format": "counter"}
    :param ip_port_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "14", "format": "counter"}
    :param entry_match_drop_hw: {"optional": true, "size": "8", "type": "number", "oid": "7", "format": "counter"}
    :param hw_out_of_entries: {"optional": true, "size": "8", "type": "number", "oid": "5", "format": "counter"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    # All counter attribute names documented above.
    _COUNTER_FIELDS = (
        "ip_other_block_alloc", "entry_match_drop", "ip_port_block_free",
        "ip_node_alloc_failure", "entry_list_alloc_failure", "ip_node_alloc",
        "entry_added_shadow", "ip_port_block_alloc_failure",
        "ip_other_block_alloc_failure", "entry_removed_from_hw",
        "entry_deleted", "entry_list_alloc", "entry_list_free",
        "entry_added_to_hw", "ip_node_free", "entry_added",
        "ip_other_block_free", "entry_invalidated", "ip_port_block_alloc",
        "entry_match_drop_hw", "hw_out_of_entries",
    )

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        self.DeviceProxy = ""
        # Counters default to the empty string until populated from the device.
        for field in self._COUNTER_FIELDS:
            setattr(self, field, "")
        # Caller-supplied keyword arguments override any attribute.
        for keys, value in kwargs.items():
            setattr(self, keys, value)
class DdosProtection(A10BaseClass):
    """Class Description::
    Statistics for the object ddos-protection.

    Class ddos-protection supports CRUD Operations and inherits from
    `common/A10BaseClass`. This class is the `"PARENT"` class for this module.

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.

    URL for this object::
        `https://<Hostname|Ip address>//axapi/v3/cgnv6/ddos-protection/stats`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "ddos-protection"
        self.a10_url = "/axapi/v3/cgnv6/ddos-protection/stats"
        self.DeviceProxy = ""
        self.stats = {}
        # Fixed: stray concatenated text after the setattr line removed.
        # Caller-supplied keyword arguments override any attribute.
        for keys, value in kwargs.items():
            setattr(self, keys, value)
class Stats(A10BaseClass):
    """This class does not support CRUD Operations please use parent.

    :param ip_other_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "17", "format": "counter"}
    :param entry_match_drop: {"optional": true, "size": "8", "type": "number", "oid": "6", "format": "counter"}
    :param ip_port_block_free: {"optional": true, "size": "8", "type": "number", "oid": "15", "format": "counter"}
    :param ip_node_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "13", "format": "counter"}
    :param entry_list_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "10", "format": "counter"}
    :param ip_node_alloc: {"optional": true, "size": "8", "type": "number", "oid": "11", "format": "counter"}
    :param entry_added_shadow: {"optional": true, "size": "8", "type": "number", "oid": "20", "format": "counter"}
    :param ip_port_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "16", "format": "counter"}
    :param ip_other_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "19", "format": "counter"}
    :param entry_removed_from_hw: {"optional": true, "size": "8", "type": "number", "oid": "4", "format": "counter"}
    :param entry_deleted: {"optional": true, "size": "8", "type": "number", "oid": "2", "format": "counter"}
    :param entry_list_alloc: {"optional": true, "size": "8", "type": "number", "oid": "8", "format": "counter"}
    :param entry_list_free: {"optional": true, "size": "8", "type": "number", "oid": "9", "format": "counter"}
    :param entry_added_to_hw: {"optional": true, "size": "8", "type": "number", "oid": "3", "format": "counter"}
    :param ip_node_free: {"optional": true, "size": "8", "type": "number", "oid": "12", "format": "counter"}
    :param entry_added: {"optional": true, "size": "8", "type": "number", "oid": "1", "format": "counter"}
    :param ip_other_block_free: {"optional": true, "size": "8", "type": "number", "oid": "18", "format": "counter"}
    :param entry_invalidated: {"optional": true, "size": "8", "type": "number", "oid": "21", "format": "counter"}
    :param ip_port_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "14", "format": "counter"}
    :param entry_match_drop_hw: {"optional": true, "size": "8", "type": "number", "oid": "7", "format": "counter"}
    :param hw_out_of_entries: {"optional": true, "size": "8", "type": "number", "oid": "5", "format": "counter"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "stats"
        self.DeviceProxy = ""
        # Counter attributes (documented above) default to "" until populated
        # from the device.
        counter_fields = [
            "ip_other_block_alloc", "entry_match_drop", "ip_port_block_free",
            "ip_node_alloc_failure", "entry_list_alloc_failure",
            "ip_node_alloc", "entry_added_shadow",
            "ip_port_block_alloc_failure", "ip_other_block_alloc_failure",
            "entry_removed_from_hw", "entry_deleted", "entry_list_alloc",
            "entry_list_free", "entry_added_to_hw", "ip_node_free",
            "entry_added", "ip_other_block_free", "entry_invalidated",
            "ip_port_block_alloc", "entry_match_drop_hw", "hw_out_of_entries",
        ]
        for field in counter_fields:
            setattr(self, field, "")
        # Caller-supplied keyword arguments override any attribute.
        for keys, value in kwargs.items():
            setattr(self, keys, value)
class DdosProtection(A10BaseClass):
    """Class Description::
    Statistics for the object ddos-protection.

    Class ddos-protection supports CRUD Operations and inherits from
    `common/A10BaseClass`. This class is the `"PARENT"` class for this module.

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.

    URL for this object::
        `https://<Hostname|Ip address>//axapi/v3/cgnv6/ddos-protection/stats`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "ddos-protection"
        self.a10_url = "/axapi/v3/cgnv6/ddos-protection/stats"
        self.DeviceProxy = ""
        self.stats = {}
        # Fixed: stray concatenated text after the setattr line removed.
        # Caller-supplied keyword arguments override any attribute.
        for keys, value in kwargs.items():
            setattr(self, keys, value)
class MonitoringStation:
    """This class represents a river level monitoring station"""

    def __init__(self, station_id, measure_id, label, coord, typical_range,
                 river, town):
        self.station_id = station_id
        self.measure_id = measure_id
        # Handle case of erroneous data where data system returns
        # '[label, label]' rather than 'label'
        self.name = label
        if isinstance(label, list):
            self.name = label[0]
        self.coord = coord                  # (latitude, longitude)
        self.typical_range = typical_range  # (typical low, typical high) or None
        self.river = river
        self.town = town
        self.latest_level = None            # Populated later from live readings

    def __repr__(self):
        d = "Station name: {}\n".format(self.name)
        d += "   id: {}\n".format(self.station_id)
        d += "   measure id: {}\n".format(self.measure_id)
        d += "   coordinate: {}\n".format(self.coord)
        d += "   town: {}\n".format(self.town)
        d += "   river: {}\n".format(self.river)
        d += "   typical range: {}".format(self.typical_range)
        return d

    def typical_range_consistent(self):
        """Return True when a typical range is available and its low bound
        does not exceed its high bound."""
        # Fixed idiom: identity comparison with None (was `== None`).
        if self.typical_range is None:
            return False
        return self.typical_range[0] <= self.typical_range[1]

    def relative_water_level(self):
        """Return the latest water level rescaled so that 0.0 corresponds to
        the typical low and 1.0 to the typical high, or None when it cannot
        be computed.

        None is returned when the typical range is absent or inconsistent,
        when no latest level is available, or when the ratio is anomalously
        large (>= 100).
        """
        if not self.typical_range_consistent():
            return None
        low = self.typical_range[0]
        high = self.typical_range[1]
        try:
            ratio = (self.latest_level - low) / (high - low)
        except TypeError:
            # latest_level is None (or otherwise non-numeric)
            return None
        # Filter out anomalously large values. NOTE(review): large negative
        # anomalies are not filtered -- confirm whether they should be.
        if ratio < 100:
            return ratio
        return None
    # Create a list for stations that do not have consistent typical ranges.
def inconsistent_typical_range_stations(stations):
inconsistent_data_list = []
for station in stations:
if station.typical_range_consistent() == False:
inconsistent_data_list.append(station)
return inconsistent_data_list | floodsystem/station.py | class MonitoringStation:
"""This class represents a river level monitoring station"""
def __init__(self, station_id, measure_id, label, coord, typical_range,
river, town):
self.station_id = station_id
self.measure_id = measure_id
# Handle case of erroneous data where data system returns
# '[label, label]' rather than 'label'
self.name = label
if isinstance(label, list):
self.name = label[0]
self.coord = coord
self.typical_range = typical_range
self.river = river
self.town = town
self.latest_level = None
def __repr__(self):
d = "Station name: {}\n".format(self.name)
d += " id: {}\n".format(self.station_id)
d += " measure id: {}\n".format(self.measure_id)
d += " coordinate: {}\n".format(self.coord)
d += " town: {}\n".format(self.town)
d += " river: {}\n".format(self.river)
d += " typical range: {}".format(self.typical_range)
return d
# Check that the typical range values for a river are valid (lower value must be less than higher value)
def typical_range_consistent(self):
if self.typical_range == None:
return False
elif self.typical_range[0] <= self.typical_range[1]:
return True
else:
return False
def relative_water_level(self):
"""This function returns the relative water level, given by the ratio of the latest water level
minus the typical minimum to the typical range i.e. a ratio of 1.0 corresponds to a level at the
typical high and a ratio of 0.0 corresponds to a level at the typical low """
# If the typical range is consistent, it calculates the ratio, and if this causes an error e.g.
# a NoneType error, it creates an exception and returns None. Otherwise, it returns the ratio, and
# and if the range is not consistent, it returns None.
if self.typical_range_consistent() == True:
try:
ratio = (self.latest_level - self.typical_range[0])/(self.typical_range[1]-self.typical_range[0])
except TypeError:
return None
# Filter out anomalous values.
if ratio < 100:
return ratio
else:
return None
    # Create a list for stations that do not have consistent typical ranges.
def inconsistent_typical_range_stations(stations):
inconsistent_data_list = []
for station in stations:
if station.typical_range_consistent() == False:
inconsistent_data_list.append(station)
return inconsistent_data_list | 0.868227 | 0.582669 |
import logging
logging.info("Customizing with SlicerMorphRC.py")
# setting presets
moduleDir = os.path.dirname(slicer.util.modulePath("MorphPreferences"))
presetsScenePath = os.path.join(moduleDir, 'Resources/SM_presets.mrml')
# Read presets scene
customPresetsScene = slicer.vtkMRMLScene()
vrPropNode = slicer.vtkMRMLVolumePropertyNode()
customPresetsScene.RegisterNodeClass(vrPropNode)
customPresetsScene.SetURL(presetsScenePath)
customPresetsScene.Connect()
# Add presets to volume rendering logic
vrLogic = slicer.modules.volumerendering.logic()
presetsScene = vrLogic.GetPresetsScene()
vrNodes = customPresetsScene.GetNodesByClass("vtkMRMLVolumePropertyNode")
vrNodes.UnRegister(None)
for itemNum in range(vrNodes.GetNumberOfItems()):
node = vrNodes.GetItemAsObject(itemNum)
vrLogic.AddPreset(node)
#
#set the default volume storage to not compress by default
#
defaultVolumeStorageNode = slicer.vtkMRMLVolumeArchetypeStorageNode()
defaultVolumeStorageNode.SetUseCompression(0)
slicer.mrmlScene.AddDefaultNode(defaultVolumeStorageNode)
logging.info(" Volume nodes will be stored uncompressed by default")
#
#set the default volume storage to not compress by default
#
defaultVolumeStorageNode = slicer.vtkMRMLSegmentationStorageNode()
defaultVolumeStorageNode.SetUseCompression(0)
slicer.mrmlScene.AddDefaultNode(defaultVolumeStorageNode)
logging.info(" Segmentation nodes will be stored uncompressed")
#
#set the default model save format to ply (from vtk)
#
defaultModelStorageNode = slicer.vtkMRMLModelStorageNode()
defaultModelStorageNode.SetUseCompression(0)
defaultModelStorageNode.SetDefaultWriteFileExtension('ply')
slicer.mrmlScene.AddDefaultNode(defaultModelStorageNode)
#
#disable interpolation of the volumes by default
#
def NoInterpolate(caller, event):
    """Scene observer: force nearest-neighbour display (no interpolation) on
    every scalar-volume display node whenever a node is added."""
    allNodes = slicer.util.getNodes('*').values()
    displayNodes = (n for n in allNodes if n.IsA('vtkMRMLScalarVolumeDisplayNode'))
    for displayNode in displayNodes:
        displayNode.SetInterpolate(0)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent, NoInterpolate)
#
#hide SLicer logo in module tab
#
slicer.util.findChild(slicer.util.mainWindow(), 'LogoLabel').visible = False
#
#collapse Data Probe tab by default to save space modules tab
#
slicer.util.findChild(slicer.util.mainWindow(), name='DataProbeCollapsibleWidget').collapsed = True
#
#set the default module from Welcome to Data
#
qt.QSettings().setValue("Modules/HomeModule", "Data")
#
# set volume rendering modes
#
settings = slicer.app.settings()
settings.setValue("VolumeRendering/RenderingMethod", "vtkMRMLGPURayCastVolumeRenderingDisplayNode")
settings.setValue("VolumeRendering/DefaultQuality", "Normal")
#
# orthographic view mode and turn on rulers
#
settings = slicer.app.settings()
settings.setValue("Default3DView/UseOrthographicProjection", True)
settings.setValue("Default3DView/RulerType", "thin")
settings.setValue("DefaultSliceView/RulerType", "thin")
#
# units settings
#
revisionUserSettings = slicer.app.revisionUserSettings()
revisionUserSettings.setValue("length/precision", 10)
#
# Keyboard shortcuts
#
#customize keystrokes for segment editor to cycle through effects
# ` goes to previous and ~ skips to next effect
def cycleEffect(delta=1):
    """Step the active Segment Editor effect forward/backward by *delta*.

    Builds the cycling order as: the editor's preferred effect order, then any
    remaining available effects, with None ("no active effect") prepended;
    the index wraps around. Silently does nothing when the Segment Editor
    widget is not active.
    """
    try:
        orderedNames = list(slicer.modules.SegmentEditorWidget.editor.effectNameOrder())
        allNames = slicer.modules.SegmentEditorWidget.editor.availableEffectNames()
        for name in allNames:
            try:
                orderedNames.index(name)
            except ValueError:
                orderedNames.append(name)  # available but missing from the preferred order
        orderedNames.insert(0, None)  # slot 0 = no effect selected
        activeEffect = slicer.modules.SegmentEditorWidget.editor.activeEffect()
        if activeEffect:
            activeName = slicer.modules.SegmentEditorWidget.editor.activeEffect().name
        else:
            activeName = None
        newIndex = (orderedNames.index(activeName) + delta) % len(orderedNames)
        slicer.modules.SegmentEditorWidget.editor.setActiveEffectByName(orderedNames[newIndex])
    except AttributeError:
        # module not active
        pass
def cycleEffectForward():
    """Advance to the next Segment Editor effect."""
    cycleEffect(delta=1)
def cycleEffectBackward():
    """Step back to the previous Segment Editor effect."""
    cycleEffect(delta=-1)
# change the main window layout
def setLayout(layoutID):
    """Switch the Slicer main window to the given layout ID."""
    slicer.app.layoutManager().setLayout(layoutID)
def setLayoutOneUpRedSliceView():
    """Show only the red slice view."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView)
def setLayoutOneUpYellowSliceView():
    """Show only the yellow slice view."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpYellowSliceView)
def setLayoutOneUpGreenSliceView():
    """Show only the green slice view."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpGreenSliceView)
def setLayoutFourUpView():
    """Show the standard four-up layout."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView)
# operate on landmarks
def placeFiducial():
    """Drop a control point at the current mouse cursor position.

    Enters place mode, then synthesizes a mouse-move plus left click on the
    view widget under the cursor so the point lands where the cursor is
    (keyboard-driven placement).
    """
    interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
    interactionNode.SetCurrentInteractionMode(interactionNode.Place)
    cursorPosition = qt.QCursor.pos()
    widget = slicer.app.widgetAt(cursorPosition)
    mousePosition = widget.mapFromGlobal(cursorPosition)
    interactor = widget.parent().interactor()
    # Qt's y axis points down while the interactor's points up -- flip it.
    point = (mousePosition.x(), widget.height - mousePosition.y())
    interactor.SetEventPosition(*point)
    interactor.MouseMoveEvent()
    interactor.LeftButtonPressEvent()
    interactor.LeftButtonReleaseEvent()
def togglePlaceModePersistence():
    """Flip whether point-placement mode stays active after each placement."""
    node = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
    node.SetPlaceModePersistence(not node.GetPlaceModePersistence())
def toggleMarkupLocks():
    """Lock/unlock every control point of the active markup node.

    The first point's lock state is taken as the current state for the whole
    node; all points are set to its inverse inside one modify batch to avoid
    per-point update events.
    """
    selectionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLSelectionNodeSingleton")
    placeNode = slicer.mrmlScene.GetNodeByID(selectionNode.GetActivePlaceNodeID())
    if placeNode:
        wasLocked = placeNode.GetNthControlPointLocked(0)
        wasModifying = placeNode.StartModify()
        for index in range(placeNode.GetNumberOfControlPoints()):
            placeNode.SetNthControlPointLocked(index, not wasLocked)
        placeNode.EndModify(wasModifying)
# setup shortcut keys
# NOTE(review): the comment earlier in this file says ` goes to previous, but
# ` is bound to cycleEffectForward here -- confirm the intended direction.
shortcuts = [
    ('`', cycleEffectForward),
    ('~', cycleEffectBackward),
    ('z', setLayoutOneUpRedSliceView),
    ('x', setLayoutOneUpYellowSliceView),
    ('c', setLayoutOneUpGreenSliceView),
    ('y', setLayoutFourUpView),
    ('p', placeFiducial),
    ('t', togglePlaceModePersistence),
    ('l', toggleMarkupLocks),
]
for (shortcutKey, callback) in shortcuts:
    shortcut = qt.QShortcut(slicer.util.mainWindow())
    shortcut.setKey(qt.QKeySequence(shortcutKey))
    # Old-style signal connect returns False on failure; report, don't raise.
    if not shortcut.connect('activated()', callback):
        print(f"Couldn't set up {shortcutKey}")
logging.info(f"  {len(shortcuts)} keyboard shortcuts installed")
logging.info("Done customizing with SlicerMorphRC.py")
# Fixed: stray concatenated text after this final statement removed.
logging.info("On first load of customization, restart Slicer to take effect.")
logging.info("Customizing with SlicerMorphRC.py")
# setting presets
moduleDir = os.path.dirname(slicer.util.modulePath("MorphPreferences"))
presetsScenePath = os.path.join(moduleDir, 'Resources/SM_presets.mrml')
# Read presets scene
customPresetsScene = slicer.vtkMRMLScene()
vrPropNode = slicer.vtkMRMLVolumePropertyNode()
customPresetsScene.RegisterNodeClass(vrPropNode)
customPresetsScene.SetURL(presetsScenePath)
customPresetsScene.Connect()
# Add presets to volume rendering logic
vrLogic = slicer.modules.volumerendering.logic()
presetsScene = vrLogic.GetPresetsScene()
vrNodes = customPresetsScene.GetNodesByClass("vtkMRMLVolumePropertyNode")
vrNodes.UnRegister(None)
for itemNum in range(vrNodes.GetNumberOfItems()):
node = vrNodes.GetItemAsObject(itemNum)
vrLogic.AddPreset(node)
#
#set the default volume storage to not compress by default
#
defaultVolumeStorageNode = slicer.vtkMRMLVolumeArchetypeStorageNode()
defaultVolumeStorageNode.SetUseCompression(0)
slicer.mrmlScene.AddDefaultNode(defaultVolumeStorageNode)
logging.info(" Volume nodes will be stored uncompressed by default")
#
#set the default volume storage to not compress by default
#
defaultVolumeStorageNode = slicer.vtkMRMLSegmentationStorageNode()
defaultVolumeStorageNode.SetUseCompression(0)
slicer.mrmlScene.AddDefaultNode(defaultVolumeStorageNode)
logging.info(" Segmentation nodes will be stored uncompressed")
#
#set the default model save format to ply (from vtk)
#
defaultModelStorageNode = slicer.vtkMRMLModelStorageNode()
defaultModelStorageNode.SetUseCompression(0)
defaultModelStorageNode.SetDefaultWriteFileExtension('ply')
slicer.mrmlScene.AddDefaultNode(defaultModelStorageNode)
#
#disable interpolation of the volumes by default
#
def NoInterpolate(caller, event):
    """Scene observer: force nearest-neighbour display (no interpolation) on
    every scalar-volume display node whenever a node is added."""
    allNodes = slicer.util.getNodes('*').values()
    displayNodes = (n for n in allNodes if n.IsA('vtkMRMLScalarVolumeDisplayNode'))
    for displayNode in displayNodes:
        displayNode.SetInterpolate(0)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent, NoInterpolate)
#
# Hide the Slicer logo in the module panel to free vertical space.
#
slicer.util.findChild(slicer.util.mainWindow(), 'LogoLabel').visible = False
#
# Collapse the Data Probe tab by default to save space in the modules tab.
#
slicer.util.findChild(slicer.util.mainWindow(), name='DataProbeCollapsibleWidget').collapsed = True
#
# Set the startup module from Welcome to Data.
#
qt.QSettings().setValue("Modules/HomeModule", "Data")
#
# Volume rendering defaults: GPU ray casting at normal quality.
#
settings = slicer.app.settings()
settings.setValue("VolumeRendering/RenderingMethod", "vtkMRMLGPURayCastVolumeRenderingDisplayNode")
settings.setValue("VolumeRendering/DefaultQuality", "Normal")
#
# Orthographic projection in 3D views; thin rulers in 3D and slice views.
#
settings = slicer.app.settings()
settings.setValue("Default3DView/UseOrthographicProjection", True)
settings.setValue("Default3DView/RulerType", "thin")
settings.setValue("DefaultSliceView/RulerType", "thin")
#
# Units: higher precision for length measurements.
#
revisionUserSettings = slicer.app.revisionUserSettings()
revisionUserSettings.setValue("length/precision", 10)
#
# Keyboard shortcuts
#
# Customize keystrokes for the Segment Editor to cycle through effects.
# NOTE(review): this said "` goes to previous and ~ skips to next", but the
# bindings below map ` to cycleEffectForward -- confirm the intended direction.
def cycleEffect(delta=1):
    """Step the active Segment Editor effect forward/backward by *delta*.

    Builds the cycling order as: the editor's preferred effect order, then any
    remaining available effects, with None ("no active effect") prepended;
    the index wraps around. Silently does nothing when the Segment Editor
    widget is not active.
    """
    try:
        orderedNames = list(slicer.modules.SegmentEditorWidget.editor.effectNameOrder())
        allNames = slicer.modules.SegmentEditorWidget.editor.availableEffectNames()
        for name in allNames:
            try:
                orderedNames.index(name)
            except ValueError:
                orderedNames.append(name)  # available but missing from the preferred order
        orderedNames.insert(0, None)  # slot 0 = no effect selected
        activeEffect = slicer.modules.SegmentEditorWidget.editor.activeEffect()
        if activeEffect:
            activeName = slicer.modules.SegmentEditorWidget.editor.activeEffect().name
        else:
            activeName = None
        newIndex = (orderedNames.index(activeName) + delta) % len(orderedNames)
        slicer.modules.SegmentEditorWidget.editor.setActiveEffectByName(orderedNames[newIndex])
    except AttributeError:
        # module not active
        pass
def cycleEffectForward():
    """Advance to the next Segment Editor effect."""
    cycleEffect(delta=1)
def cycleEffectBackward():
    """Step back to the previous Segment Editor effect."""
    cycleEffect(delta=-1)
# change the main window layout
def setLayout(layoutID):
    """Switch the Slicer main window to the given layout ID."""
    slicer.app.layoutManager().setLayout(layoutID)
def setLayoutOneUpRedSliceView():
    """Show only the red slice view."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView)
def setLayoutOneUpYellowSliceView():
    """Show only the yellow slice view."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpYellowSliceView)
def setLayoutOneUpGreenSliceView():
    """Show only the green slice view."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpGreenSliceView)
def setLayoutFourUpView():
    """Show the standard four-up layout."""
    setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView)
# operate on landmarks
def placeFiducial():
    """Drop a control point at the current mouse cursor position.

    Enters place mode, then synthesizes a mouse-move plus left click on the
    view widget under the cursor so the point lands where the cursor is
    (keyboard-driven placement).
    """
    interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
    interactionNode.SetCurrentInteractionMode(interactionNode.Place)
    cursorPosition = qt.QCursor.pos()
    widget = slicer.app.widgetAt(cursorPosition)
    mousePosition = widget.mapFromGlobal(cursorPosition)
    interactor = widget.parent().interactor()
    # Qt's y axis points down while the interactor's points up -- flip it.
    point = (mousePosition.x(), widget.height - mousePosition.y())
    interactor.SetEventPosition(*point)
    interactor.MouseMoveEvent()
    interactor.LeftButtonPressEvent()
    interactor.LeftButtonReleaseEvent()
def togglePlaceModePersistence():
    """Flip whether point-placement mode stays active after each placement."""
    node = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
    node.SetPlaceModePersistence(not node.GetPlaceModePersistence())
def toggleMarkupLocks():
    """Lock/unlock every control point of the active markup node.

    The first point's lock state is taken as the current state for the whole
    node; all points are set to its inverse inside one modify batch to avoid
    per-point update events.
    """
    selectionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLSelectionNodeSingleton")
    placeNode = slicer.mrmlScene.GetNodeByID(selectionNode.GetActivePlaceNodeID())
    if placeNode:
        wasLocked = placeNode.GetNthControlPointLocked(0)
        wasModifying = placeNode.StartModify()
        for index in range(placeNode.GetNumberOfControlPoints()):
            placeNode.SetNthControlPointLocked(index, not wasLocked)
        placeNode.EndModify(wasModifying)
# setup shortcut keys
# NOTE(review): the comment earlier in this file says ` goes to previous, but
# ` is bound to cycleEffectForward here -- confirm the intended direction.
shortcuts = [
    ('`', cycleEffectForward),
    ('~', cycleEffectBackward),
    ('z', setLayoutOneUpRedSliceView),
    ('x', setLayoutOneUpYellowSliceView),
    ('c', setLayoutOneUpGreenSliceView),
    ('y', setLayoutFourUpView),
    ('p', placeFiducial),
    ('t', togglePlaceModePersistence),
    ('l', toggleMarkupLocks),
]
for (shortcutKey, callback) in shortcuts:
    shortcut = qt.QShortcut(slicer.util.mainWindow())
    shortcut.setKey(qt.QKeySequence(shortcutKey))
    # Old-style signal connect returns False on failure; report, don't raise.
    if not shortcut.connect('activated()', callback):
        print(f"Couldn't set up {shortcutKey}")
logging.info(f"  {len(shortcuts)} keyboard shortcuts installed")
logging.info("Done customizing with SlicerMorphRC.py")
# Fixed: stray concatenated numeric residue after this final statement removed.
logging.info("On first load of customization, restart Slicer to take effect.")
import csv
import io
import logging
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass
import numpy as np
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.audio_utils import (
get_fbank,
get_waveform,
read_from_stored_zip,
is_npy_data,
is_sf_audio_data,
parse_path,
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS,
)
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.audio.data_cfg import S2TDataConfig
logger = logging.getLogger(__name__)
def get_features_from_npy_or_audio(path):
ext = Path(path).suffix
if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f'Unsupported file format for "{path}"')
return np.load(path) if ext == ".npy" else get_fbank(path)
def get_features_or_waveform_from_stored_zip(
path, byte_offset, byte_size, need_waveform=False, use_sample_rate=None,
):
assert path.endswith(".zip")
data = read_from_stored_zip(path, byte_offset, byte_size)
f = io.BytesIO(data)
if is_npy_data(data):
features_or_waveform = np.load(f)
elif is_sf_audio_data(data):
features_or_waveform = \
get_waveform(
f, always_2d=False, output_sample_rate=use_sample_rate
)[0] if need_waveform else get_fbank(f)
else:
raise ValueError(f'Unknown file format for "{path}"')
return features_or_waveform
def get_features_or_waveform(
path: str, need_waveform=False, use_sample_rate=None
):
"""Get speech features from .npy file or waveform from .wav/.flac file.
The file may be inside an uncompressed ZIP file and is accessed via byte
offset and length.
Args:
path (str): File path in the format of "<.npy/.wav/.flac path>" or
"<zip path>:<byte offset>:<byte length>".
need_waveform (bool): return waveform instead of features.
use_sample_rate (int): change sample rate for the input wave file
Returns:
features_or_waveform (numpy.ndarray): speech features or waveform.
"""
_path, slice_ptr = parse_path(path)
if len(slice_ptr) == 0:
if need_waveform:
return get_waveform(
_path, always_2d=False, output_sample_rate=use_sample_rate
)[0]
return get_features_from_npy_or_audio(_path)
elif len(slice_ptr) == 2:
features_or_waveform = get_features_or_waveform_from_stored_zip(
_path, slice_ptr[0], slice_ptr[1], need_waveform=need_waveform,
use_sample_rate=use_sample_rate
)
else:
raise ValueError(f"Invalid path: {path}")
return features_or_waveform
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
) -> torch.Tensor:
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
@dataclass
class SpeechToTextDatasetItem(object):
index: int
source: torch.Tensor
target: Optional[torch.Tensor] = None
speaker_id: Optional[int] = None
class SpeechToTextDataset(FairseqDataset):
LANG_TAG_TEMPLATE = "<lang:{}>"
def __init__(
self,
split: str,
is_train_split: bool,
cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
n_frames_per_step=1,
speaker_to_id=None
):
self.split, self.is_train_split = split, is_train_split
self.cfg = cfg
self.audio_paths, self.n_frames = audio_paths, n_frames
self.n_samples = len(audio_paths)
assert len(n_frames) == self.n_samples > 0
assert src_texts is None or len(src_texts) == self.n_samples
assert tgt_texts is None or len(tgt_texts) == self.n_samples
assert speakers is None or len(speakers) == self.n_samples
assert src_langs is None or len(src_langs) == self.n_samples
assert tgt_langs is None or len(tgt_langs) == self.n_samples
assert ids is None or len(ids) == self.n_samples
assert (tgt_dict is None and tgt_texts is None) or (
tgt_dict is not None and tgt_texts is not None
)
self.src_texts, self.tgt_texts = src_texts, tgt_texts
self.src_langs, self.tgt_langs = src_langs, tgt_langs
self.speakers = speakers
self.tgt_dict = tgt_dict
self.check_tgt_lang_tag()
self.ids = ids
self.shuffle = cfg.shuffle if is_train_split else False
self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
self.cfg.get_feature_transforms(split, is_train_split)
)
self.pre_tokenizer = pre_tokenizer
self.bpe_tokenizer = bpe_tokenizer
self.n_frames_per_step = n_frames_per_step
self.speaker_to_id = speaker_to_id
self.tgt_lens = self.get_tgt_lens_and_check_oov()
logger.info(self.__repr__())
def get_tgt_lens_and_check_oov(self):
if self.tgt_texts is None:
return [0 for _ in range(self.n_samples)]
tgt_lens = []
n_tokens, n_oov_tokens = 0, 0
for i in range(self.n_samples):
tokenized = self.get_tokenized_tgt_text(i).split(" ")
oov_tokens = [
t
for t in tokenized
if self.tgt_dict.index(t) == self.tgt_dict.unk_index
]
n_tokens += len(tokenized)
n_oov_tokens += len(oov_tokens)
tgt_lens.append(len(tokenized))
logger.info(f"'{self.split}' has {n_oov_tokens / n_tokens * 100:.2f}% OOV")
return tgt_lens
def __repr__(self):
return (
self.__class__.__name__
+ f'(split="{self.split}", n_samples={self.n_samples:_}, '
f"prepend_tgt_lang_tag={self.cfg.prepend_tgt_lang_tag}, "
f"shuffle={self.shuffle}, transforms={self.feature_transforms}, "
f"n_frames_per_step={self.n_frames_per_step}"
)
@classmethod
def is_lang_tag(cls, token):
pattern = cls.LANG_TAG_TEMPLATE.replace("{}", "(.*)")
return re.match(pattern, token)
def check_tgt_lang_tag(self):
if self.cfg.prepend_tgt_lang_tag:
assert self.tgt_langs is not None and self.tgt_dict is not None
tgt_lang_tags = [
self.LANG_TAG_TEMPLATE.format(t) for t in set(self.tgt_langs)
]
assert all(t in self.tgt_dict for t in tgt_lang_tags)
@classmethod
def tokenize(cls, tokenizer, text: str):
return text if tokenizer is None else tokenizer.encode(text)
def get_tokenized_tgt_text(self, index: int):
text = self.tokenize(self.pre_tokenizer, self.tgt_texts[index])
text = self.tokenize(self.bpe_tokenizer, text)
return text
def pack_frames(self, feature: torch.Tensor):
if self.n_frames_per_step == 1:
return feature
n_packed_frames = feature.shape[0] // self.n_frames_per_step
feature = feature[:self.n_frames_per_step * n_packed_frames]
return feature.reshape(n_packed_frames, -1)
@classmethod
def get_lang_tag_idx(cls, lang: str, dictionary: Dictionary):
lang_tag_idx = dictionary.index(cls.LANG_TAG_TEMPLATE.format(lang))
assert lang_tag_idx != dictionary.unk()
return lang_tag_idx
def __getitem__(self, index: int) -> SpeechToTextDatasetItem:
source = get_features_or_waveform(
self.audio_paths[index],
need_waveform=self.cfg.use_audio_input,
use_sample_rate=self.cfg.use_sample_rate,
)
if self.feature_transforms is not None:
assert not self.cfg.use_audio_input
source = self.feature_transforms(source)
source = torch.from_numpy(source).float()
source = self.pack_frames(source)
target = None
if self.tgt_texts is not None:
tokenized = self.get_tokenized_tgt_text(index)
target = self.tgt_dict.encode_line(
tokenized, add_if_not_exist=False, append_eos=True
).long()
if self.cfg.prepend_tgt_lang_tag:
lang_tag_idx = self.get_lang_tag_idx(
self.tgt_langs[index], self.tgt_dict
)
target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
speaker_id = None
if self.speaker_to_id is not None:
speaker_id = self.speaker_to_id[self.speakers[index]]
return SpeechToTextDatasetItem(
index=index, source=source, target=target, speaker_id=speaker_id
)
def __len__(self):
return self.n_samples
def collater(
self, samples: List[SpeechToTextDatasetItem], return_order: bool = False
) -> Dict:
if len(samples) == 0:
return {}
indices = torch.tensor([x.index for x in samples], dtype=torch.long)
frames = _collate_frames([x.source for x in samples], self.cfg.use_audio_input)
# sort samples by descending number of frames
n_frames = torch.tensor([x.source.size(0) for x in samples], dtype=torch.long)
n_frames, order = n_frames.sort(descending=True)
indices = indices.index_select(0, order)
frames = frames.index_select(0, order)
target, target_lengths = None, None
prev_output_tokens = None
ntokens = None
if self.tgt_texts is not None:
target = fairseq_data_utils.collate_tokens(
[x.target for x in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, order)
target_lengths = torch.tensor(
[x.target.size(0) for x in samples], dtype=torch.long
).index_select(0, order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[x.target for x in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, order)
ntokens = sum(x.target.size(0) for x in samples)
speaker = None
if self.speaker_to_id is not None:
speaker = torch.tensor(
[s.speaker_id for s in samples], dtype=torch.long
).index_select(0, order).view(-1, 1)
net_input = {
"src_tokens": frames,
"src_lengths": n_frames,
"prev_output_tokens": prev_output_tokens,
}
out = {
"id": indices,
"net_input": net_input,
"speaker": speaker,
"target": target,
"target_lengths": target_lengths,
"ntokens": ntokens,
"nsentences": len(samples),
}
if return_order:
out["order"] = order
return out
def num_tokens(self, index):
return self.n_frames[index]
def size(self, index):
return self.n_frames[index], self.tgt_lens[index]
@property
def sizes(self):
return np.array(self.n_frames)
@property
def can_reuse_epoch_itr_across_epochs(self):
return True
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
# first by descending order of # of frames then by original/random order
order.append([-n for n in self.n_frames])
return np.lexsort(order)
def prefetch(self, indices):
raise False
class SpeechToTextDatasetCreator(object):
# mandatory columns
KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames"
KEY_TGT_TEXT = "tgt_text"
# optional columns
KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text"
KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang"
# default values
DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = ""
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[Dict],
cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id
) -> SpeechToTextDataset:
audio_root = Path(cfg.audio_root)
ids = [s[cls.KEY_ID] for s in samples]
audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]
n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]
tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples]
src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]
speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]
src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]
tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]
return SpeechToTextDataset(
split_name,
is_train_split,
cfg,
audio_paths,
n_frames,
src_texts=src_texts,
tgt_texts=tgt_texts,
speakers=speakers,
src_langs=src_langs,
tgt_langs=tgt_langs,
ids=ids,
tgt_dict=tgt_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
n_frames_per_step=n_frames_per_step,
speaker_to_id=speaker_to_id
)
@classmethod
def get_size_ratios(
cls, datasets: List[SpeechToTextDataset], alpha: float = 1.0
) -> List[float]:
"""Size ratios for temperature-based sampling
(https://arxiv.org/abs/1907.05019)"""
id_to_lp, lp_to_sz = {}, defaultdict(int)
for ds in datasets:
lang_pairs = {f"{s}->{t}" for s, t in zip(ds.src_langs, ds.tgt_langs)}
assert len(lang_pairs) == 1
lang_pair = list(lang_pairs)[0]
id_to_lp[ds.split] = lang_pair
lp_to_sz[lang_pair] += sum(ds.n_frames)
sz_sum = sum(v for v in lp_to_sz.values())
lp_to_prob = {k: v / sz_sum for k, v in lp_to_sz.items()}
lp_to_tgt_prob = {k: v ** alpha for k, v in lp_to_prob.items()}
prob_sum = sum(v for v in lp_to_tgt_prob.values())
lp_to_tgt_prob = {k: v / prob_sum for k, v in lp_to_tgt_prob.items()}
lp_to_sz_ratio = {
k: (lp_to_tgt_prob[k] * sz_sum) / v for k, v in lp_to_sz.items()
}
size_ratio = [lp_to_sz_ratio[id_to_lp[ds.split]] for ds in datasets]
p_formatted = {
k: f"{lp_to_prob[k]:.3f}->{lp_to_tgt_prob[k]:.3f}" for k in lp_to_sz
}
logger.info(f"sampling probability balancing: {p_formatted}")
sr_formatted = {ds.split: f"{r:.3f}" for ds, r in zip(datasets, size_ratio)}
logger.info(f"balanced sampling size ratio: {sr_formatted}")
return size_ratio
@classmethod
def _load_samples_from_tsv(cls, root: str, split: str):
tsv_path = Path(root) / f"{split}.tsv"
if not tsv_path.is_file():
raise FileNotFoundError(f"Dataset not found: {tsv_path}")
with open(tsv_path) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
samples = [dict(e) for e in reader]
if len(samples) == 0:
raise ValueError(f"Empty manifest: {tsv_path}")
return samples
@classmethod
def _from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
split: str,
tgt_dict,
is_train_split: bool,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id
) -> SpeechToTextDataset:
samples = cls._load_samples_from_tsv(root, split)
return cls._from_list(
split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer,
bpe_tokenizer, n_frames_per_step, speaker_to_id
)
@classmethod
def from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
splits: str,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split: bool,
epoch: int,
seed: int,
n_frames_per_step: int = 1,
speaker_to_id=None
) -> SpeechToTextDataset:
datasets = [
cls._from_tsv(
root, cfg, split, tgt_dict, is_train_split, pre_tokenizer,
bpe_tokenizer, n_frames_per_step, speaker_to_id
)
for split in splits.split(",")
]
if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0:
# temperature-based sampling
size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha)
datasets = [
ResamplingDataset(
d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
)
for r, d in zip(size_ratios, datasets)
]
return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0] | fairseq/fairseq/data/audio/speech_to_text_dataset.py |
import csv
import io
import logging
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass
import numpy as np
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.audio_utils import (
get_fbank,
get_waveform,
read_from_stored_zip,
is_npy_data,
is_sf_audio_data,
parse_path,
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS,
)
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.audio.data_cfg import S2TDataConfig
logger = logging.getLogger(__name__)
def get_features_from_npy_or_audio(path):
ext = Path(path).suffix
if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f'Unsupported file format for "{path}"')
return np.load(path) if ext == ".npy" else get_fbank(path)
def get_features_or_waveform_from_stored_zip(
path, byte_offset, byte_size, need_waveform=False, use_sample_rate=None,
):
assert path.endswith(".zip")
data = read_from_stored_zip(path, byte_offset, byte_size)
f = io.BytesIO(data)
if is_npy_data(data):
features_or_waveform = np.load(f)
elif is_sf_audio_data(data):
features_or_waveform = \
get_waveform(
f, always_2d=False, output_sample_rate=use_sample_rate
)[0] if need_waveform else get_fbank(f)
else:
raise ValueError(f'Unknown file format for "{path}"')
return features_or_waveform
def get_features_or_waveform(
path: str, need_waveform=False, use_sample_rate=None
):
"""Get speech features from .npy file or waveform from .wav/.flac file.
The file may be inside an uncompressed ZIP file and is accessed via byte
offset and length.
Args:
path (str): File path in the format of "<.npy/.wav/.flac path>" or
"<zip path>:<byte offset>:<byte length>".
need_waveform (bool): return waveform instead of features.
use_sample_rate (int): change sample rate for the input wave file
Returns:
features_or_waveform (numpy.ndarray): speech features or waveform.
"""
_path, slice_ptr = parse_path(path)
if len(slice_ptr) == 0:
if need_waveform:
return get_waveform(
_path, always_2d=False, output_sample_rate=use_sample_rate
)[0]
return get_features_from_npy_or_audio(_path)
elif len(slice_ptr) == 2:
features_or_waveform = get_features_or_waveform_from_stored_zip(
_path, slice_ptr[0], slice_ptr[1], need_waveform=need_waveform,
use_sample_rate=use_sample_rate
)
else:
raise ValueError(f"Invalid path: {path}")
return features_or_waveform
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
) -> torch.Tensor:
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
@dataclass
class SpeechToTextDatasetItem(object):
index: int
source: torch.Tensor
target: Optional[torch.Tensor] = None
speaker_id: Optional[int] = None
class SpeechToTextDataset(FairseqDataset):
LANG_TAG_TEMPLATE = "<lang:{}>"
def __init__(
self,
split: str,
is_train_split: bool,
cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
n_frames_per_step=1,
speaker_to_id=None
):
self.split, self.is_train_split = split, is_train_split
self.cfg = cfg
self.audio_paths, self.n_frames = audio_paths, n_frames
self.n_samples = len(audio_paths)
assert len(n_frames) == self.n_samples > 0
assert src_texts is None or len(src_texts) == self.n_samples
assert tgt_texts is None or len(tgt_texts) == self.n_samples
assert speakers is None or len(speakers) == self.n_samples
assert src_langs is None or len(src_langs) == self.n_samples
assert tgt_langs is None or len(tgt_langs) == self.n_samples
assert ids is None or len(ids) == self.n_samples
assert (tgt_dict is None and tgt_texts is None) or (
tgt_dict is not None and tgt_texts is not None
)
self.src_texts, self.tgt_texts = src_texts, tgt_texts
self.src_langs, self.tgt_langs = src_langs, tgt_langs
self.speakers = speakers
self.tgt_dict = tgt_dict
self.check_tgt_lang_tag()
self.ids = ids
self.shuffle = cfg.shuffle if is_train_split else False
self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
self.cfg.get_feature_transforms(split, is_train_split)
)
self.pre_tokenizer = pre_tokenizer
self.bpe_tokenizer = bpe_tokenizer
self.n_frames_per_step = n_frames_per_step
self.speaker_to_id = speaker_to_id
self.tgt_lens = self.get_tgt_lens_and_check_oov()
logger.info(self.__repr__())
def get_tgt_lens_and_check_oov(self):
if self.tgt_texts is None:
return [0 for _ in range(self.n_samples)]
tgt_lens = []
n_tokens, n_oov_tokens = 0, 0
for i in range(self.n_samples):
tokenized = self.get_tokenized_tgt_text(i).split(" ")
oov_tokens = [
t
for t in tokenized
if self.tgt_dict.index(t) == self.tgt_dict.unk_index
]
n_tokens += len(tokenized)
n_oov_tokens += len(oov_tokens)
tgt_lens.append(len(tokenized))
logger.info(f"'{self.split}' has {n_oov_tokens / n_tokens * 100:.2f}% OOV")
return tgt_lens
def __repr__(self):
return (
self.__class__.__name__
+ f'(split="{self.split}", n_samples={self.n_samples:_}, '
f"prepend_tgt_lang_tag={self.cfg.prepend_tgt_lang_tag}, "
f"shuffle={self.shuffle}, transforms={self.feature_transforms}, "
f"n_frames_per_step={self.n_frames_per_step}"
)
@classmethod
def is_lang_tag(cls, token):
pattern = cls.LANG_TAG_TEMPLATE.replace("{}", "(.*)")
return re.match(pattern, token)
def check_tgt_lang_tag(self):
if self.cfg.prepend_tgt_lang_tag:
assert self.tgt_langs is not None and self.tgt_dict is not None
tgt_lang_tags = [
self.LANG_TAG_TEMPLATE.format(t) for t in set(self.tgt_langs)
]
assert all(t in self.tgt_dict for t in tgt_lang_tags)
@classmethod
def tokenize(cls, tokenizer, text: str):
return text if tokenizer is None else tokenizer.encode(text)
def get_tokenized_tgt_text(self, index: int):
text = self.tokenize(self.pre_tokenizer, self.tgt_texts[index])
text = self.tokenize(self.bpe_tokenizer, text)
return text
def pack_frames(self, feature: torch.Tensor):
if self.n_frames_per_step == 1:
return feature
n_packed_frames = feature.shape[0] // self.n_frames_per_step
feature = feature[:self.n_frames_per_step * n_packed_frames]
return feature.reshape(n_packed_frames, -1)
@classmethod
def get_lang_tag_idx(cls, lang: str, dictionary: Dictionary):
lang_tag_idx = dictionary.index(cls.LANG_TAG_TEMPLATE.format(lang))
assert lang_tag_idx != dictionary.unk()
return lang_tag_idx
def __getitem__(self, index: int) -> SpeechToTextDatasetItem:
source = get_features_or_waveform(
self.audio_paths[index],
need_waveform=self.cfg.use_audio_input,
use_sample_rate=self.cfg.use_sample_rate,
)
if self.feature_transforms is not None:
assert not self.cfg.use_audio_input
source = self.feature_transforms(source)
source = torch.from_numpy(source).float()
source = self.pack_frames(source)
target = None
if self.tgt_texts is not None:
tokenized = self.get_tokenized_tgt_text(index)
target = self.tgt_dict.encode_line(
tokenized, add_if_not_exist=False, append_eos=True
).long()
if self.cfg.prepend_tgt_lang_tag:
lang_tag_idx = self.get_lang_tag_idx(
self.tgt_langs[index], self.tgt_dict
)
target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
speaker_id = None
if self.speaker_to_id is not None:
speaker_id = self.speaker_to_id[self.speakers[index]]
return SpeechToTextDatasetItem(
index=index, source=source, target=target, speaker_id=speaker_id
)
def __len__(self):
return self.n_samples
def collater(
self, samples: List[SpeechToTextDatasetItem], return_order: bool = False
) -> Dict:
if len(samples) == 0:
return {}
indices = torch.tensor([x.index for x in samples], dtype=torch.long)
frames = _collate_frames([x.source for x in samples], self.cfg.use_audio_input)
# sort samples by descending number of frames
n_frames = torch.tensor([x.source.size(0) for x in samples], dtype=torch.long)
n_frames, order = n_frames.sort(descending=True)
indices = indices.index_select(0, order)
frames = frames.index_select(0, order)
target, target_lengths = None, None
prev_output_tokens = None
ntokens = None
if self.tgt_texts is not None:
target = fairseq_data_utils.collate_tokens(
[x.target for x in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, order)
target_lengths = torch.tensor(
[x.target.size(0) for x in samples], dtype=torch.long
).index_select(0, order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[x.target for x in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, order)
ntokens = sum(x.target.size(0) for x in samples)
speaker = None
if self.speaker_to_id is not None:
speaker = torch.tensor(
[s.speaker_id for s in samples], dtype=torch.long
).index_select(0, order).view(-1, 1)
net_input = {
"src_tokens": frames,
"src_lengths": n_frames,
"prev_output_tokens": prev_output_tokens,
}
out = {
"id": indices,
"net_input": net_input,
"speaker": speaker,
"target": target,
"target_lengths": target_lengths,
"ntokens": ntokens,
"nsentences": len(samples),
}
if return_order:
out["order"] = order
return out
def num_tokens(self, index):
return self.n_frames[index]
def size(self, index):
return self.n_frames[index], self.tgt_lens[index]
@property
def sizes(self):
return np.array(self.n_frames)
@property
def can_reuse_epoch_itr_across_epochs(self):
return True
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
# first by descending order of # of frames then by original/random order
order.append([-n for n in self.n_frames])
return np.lexsort(order)
def prefetch(self, indices):
raise False
class SpeechToTextDatasetCreator(object):
# mandatory columns
KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames"
KEY_TGT_TEXT = "tgt_text"
# optional columns
KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text"
KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang"
# default values
DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = ""
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[Dict],
cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id
) -> SpeechToTextDataset:
audio_root = Path(cfg.audio_root)
ids = [s[cls.KEY_ID] for s in samples]
audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]
n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]
tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples]
src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]
speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]
src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]
tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]
return SpeechToTextDataset(
split_name,
is_train_split,
cfg,
audio_paths,
n_frames,
src_texts=src_texts,
tgt_texts=tgt_texts,
speakers=speakers,
src_langs=src_langs,
tgt_langs=tgt_langs,
ids=ids,
tgt_dict=tgt_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
n_frames_per_step=n_frames_per_step,
speaker_to_id=speaker_to_id
)
@classmethod
def get_size_ratios(
cls, datasets: List[SpeechToTextDataset], alpha: float = 1.0
) -> List[float]:
"""Size ratios for temperature-based sampling
(https://arxiv.org/abs/1907.05019)"""
id_to_lp, lp_to_sz = {}, defaultdict(int)
for ds in datasets:
lang_pairs = {f"{s}->{t}" for s, t in zip(ds.src_langs, ds.tgt_langs)}
assert len(lang_pairs) == 1
lang_pair = list(lang_pairs)[0]
id_to_lp[ds.split] = lang_pair
lp_to_sz[lang_pair] += sum(ds.n_frames)
sz_sum = sum(v for v in lp_to_sz.values())
lp_to_prob = {k: v / sz_sum for k, v in lp_to_sz.items()}
lp_to_tgt_prob = {k: v ** alpha for k, v in lp_to_prob.items()}
prob_sum = sum(v for v in lp_to_tgt_prob.values())
lp_to_tgt_prob = {k: v / prob_sum for k, v in lp_to_tgt_prob.items()}
lp_to_sz_ratio = {
k: (lp_to_tgt_prob[k] * sz_sum) / v for k, v in lp_to_sz.items()
}
size_ratio = [lp_to_sz_ratio[id_to_lp[ds.split]] for ds in datasets]
p_formatted = {
k: f"{lp_to_prob[k]:.3f}->{lp_to_tgt_prob[k]:.3f}" for k in lp_to_sz
}
logger.info(f"sampling probability balancing: {p_formatted}")
sr_formatted = {ds.split: f"{r:.3f}" for ds, r in zip(datasets, size_ratio)}
logger.info(f"balanced sampling size ratio: {sr_formatted}")
return size_ratio
@classmethod
def _load_samples_from_tsv(cls, root: str, split: str):
tsv_path = Path(root) / f"{split}.tsv"
if not tsv_path.is_file():
raise FileNotFoundError(f"Dataset not found: {tsv_path}")
with open(tsv_path) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
samples = [dict(e) for e in reader]
if len(samples) == 0:
raise ValueError(f"Empty manifest: {tsv_path}")
return samples
@classmethod
def _from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
split: str,
tgt_dict,
is_train_split: bool,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id
) -> SpeechToTextDataset:
samples = cls._load_samples_from_tsv(root, split)
return cls._from_list(
split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer,
bpe_tokenizer, n_frames_per_step, speaker_to_id
)
@classmethod
def from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
splits: str,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split: bool,
epoch: int,
seed: int,
n_frames_per_step: int = 1,
speaker_to_id=None
) -> SpeechToTextDataset:
datasets = [
cls._from_tsv(
root, cfg, split, tgt_dict, is_train_split, pre_tokenizer,
bpe_tokenizer, n_frames_per_step, speaker_to_id
)
for split in splits.split(",")
]
if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0:
# temperature-based sampling
size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha)
datasets = [
ResamplingDataset(
d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
)
for r, d in zip(size_ratios, datasets)
]
return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0] | 0.840046 | 0.296361 |
import time
import subprocess
import logging
logger = logging.getLogger()
import os.path
import subprocess
import hostapd
from wlantest import Wlantest
def hs20_ap_params():
    """Build a hostapd configuration for a Hotspot 2.0 (HS 2.0) test AP.

    Starts from a WPA2-EAP base config and layers on Interworking/ANQP
    elements (venue info, roaming consortium, NAI realms, 3GPP cell info)
    plus HS 2.0 elements (WAN metrics, connection capability, operating
    class).  Returns the parameter dict ready for hostapd.add_ap().
    """
    params = hostapd.wpa2_params(ssid="test-hs20")
    params.update({
        'wpa_key_mgmt': "WPA-EAP",
        'ieee80211w': "1",
        'ieee8021x': "1",
        # RADIUS authentication server (local auth_serv instance)
        'auth_server_addr': "127.0.0.1",
        'auth_server_port': "1812",
        'auth_server_shared_secret': "radius",
        # Interworking (IEEE 802.11u) elements
        'interworking': "1",
        'access_network_type': "14",
        'internet': "1",
        'asra': "0",
        'esr': "0",
        'uesa': "0",
        'venue_group': "7",
        'venue_type': "1",
        'venue_name': ["eng:Example venue", "fin:Esimerkkipaikka"],
        'roaming_consortium': ["112233", "1020304050", "010203040506",
                               "fedcba"],
        'domain_name': "example.com,another.example.com",
        'nai_realm': ["0,example.com,13[5:6],21[2:4][5:7]",
                      "0,another.example.com"],
        # Hotspot 2.0 elements
        'hs20': "1",
        'hs20_wan_metrics': "01:8000:1000:80:240:3000",
        'hs20_conn_capab': ["1:0:2", "6:22:1", "17:5060:0"],
        'hs20_operating_class': "5173",
        'anqp_3gpp_cell_net': "244,91",
    })
    return params
def interworking_select(dev, bssid, type=None, no_match=False, freq=None):
    """Run INTERWORKING_SELECT on dev and validate the selection result.

    dev: wpa_supplicant device under test
    bssid: expected BSSID in the match event (skipped if falsy)
    type: expected network type ("home"/"roaming"); note this parameter
        shadows the builtin, but the name is kept for caller compatibility
    no_match: if True, expect INTERWORKING-NO-MATCH instead of a match
    freq: optional frequency (MHz, as string) to limit the scan

    Raises Exception on timeout or on any mismatch with the expectations.
    """
    dev.dump_monitor()
    freq_extra = " freq=" + freq if freq else ""
    dev.request("INTERWORKING_SELECT" + freq_extra)
    ev = dev.wait_event(["INTERWORKING-AP", "INTERWORKING-NO-MATCH"],
                        timeout=15)
    if ev is None:
        raise Exception("Network selection timed out")
    if no_match:
        if "INTERWORKING-NO-MATCH" not in ev:
            raise Exception("Unexpected network match")
        return
    if "INTERWORKING-NO-MATCH" in ev:
        raise Exception("Matching network not found")
    if bssid and bssid not in ev:
        raise Exception("Unexpected BSSID in match")
    if type and "type=" + type not in ev:
        raise Exception("Network type not recognized correctly")
def check_sp_type(dev, sp_type):
    """Verify that STATUS reports the expected service-provider type.

    dev: wpa_supplicant device under test
    sp_type: expected sp_type value (e.g. "home" or "roaming")

    Raises Exception if sp_type is missing or differs from the expected
    value.  The original message always claimed "home network" even when
    a different sp_type was expected; report the expected value instead.
    """
    cur_type = dev.get_status_field("sp_type")
    if cur_type is None:
        raise Exception("sp_type not available")
    if cur_type != sp_type:
        raise Exception("sp_type did not indicate " + sp_type + " network")
def hlr_auc_gw_available():
    """Return True if the hlr_auc_gw helper can be used for SIM auth.

    Requires both the running daemon's control socket and the built
    binary to be present; logs and returns False if either is missing.
    """
    # Both paths must exist: the UNIX socket of a running instance and
    # the binary used for the external GSM-AUTH requests.
    for path in ("/tmp/hlr_auc_gw.sock", "../../hostapd/hlr_auc_gw"):
        if not os.path.exists(path):
            logger.info("No hlr_auc_gw available")
            return False
    return True
def interworking_ext_sim_connect(dev, bssid, method):
    """Connect to bssid using external SIM processing: answer the
    CTRL-REQ-SIM GSM-AUTH challenge by running the hlr_auc_gw helper and
    feeding its response back via CTRL-RSP-SIM."""
    dev.request("INTERWORKING_CONNECT " + bssid)
    ev = dev.wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
    if ev is None:
        raise Exception("Network connected timed out")
    if "(" + method + ")" not in ev:
        raise Exception("Unexpected EAP method selection")
    ev = dev.wait_event(["CTRL-REQ-SIM"], timeout=15)
    if ev is None:
        raise Exception("Wait for external SIM processing request timed out")
    # Event format: CTRL-REQ-SIM-<id>:GSM-AUTH:<rand> ... -- split into at
    # most three parts so the RAND payload survives intact.
    p = ev.split(':', 2)
    if p[1] != "GSM-AUTH":
        raise Exception("Unexpected CTRL-REQ-SIM type")
    id = p[0].split('-')[3]
    rand = p[2].split(' ')[0]  # first RAND challenge only
    # NOTE(review): subprocess.check_output returns bytes on Python 3; the
    # "in" test and str.split below assume Python 2 str output -- confirm
    # which interpreter this suite targets.
    res = subprocess.check_output(["../../hostapd/hlr_auc_gw",
                                   "-m",
                                   "auth_serv/hlr_auc_gw.milenage_db",
                                   "GSM-AUTH-REQ 232010000000000 " + rand])
    if "GSM-AUTH-RESP" not in res:
        raise Exception("Unexpected hlr_auc_gw response")
    resp = res.split(' ')[2].rstrip()
    dev.request("CTRL-RSP-SIM-" + id + ":GSM-AUTH:" + resp)
    ev = dev.wait_event(["CTRL-EVENT-CONNECTED"], timeout=15)
    if ev is None:
        raise Exception("Connection timed out")
def interworking_connect(dev, bssid, method):
    """Connect to bssid via INTERWORKING_CONNECT and verify that the
    expected EAP method is selected and the connection completes."""
    dev.request("INTERWORKING_CONNECT " + bssid)
    event = dev.wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
    if event is None:
        raise Exception("Network connected timed out")
    expected = "(" + method + ")"
    if expected not in event:
        raise Exception("Unexpected EAP method selection")
    if dev.wait_event(["CTRL-EVENT-CONNECTED"], timeout=15) is None:
        raise Exception("Connection timed out")
def check_probe_resp(wt, bssid_unexpected, bssid_expected):
    """Verify Probe Response counters collected by wlantest.

    bssid_unexpected: if set, this AP must NOT have sent any Probe
    Responses; bssid_expected: if set, this AP must have sent at least
    one. Either argument may be None to skip that check.
    """
    if bssid_unexpected and \
       wt.get_bss_counter("probe_response", bssid_unexpected) > 0:
        raise Exception("Unexpected Probe Response frame from AP")
    if bssid_expected and \
       wt.get_bss_counter("probe_response", bssid_expected) == 0:
        raise Exception("No Probe Response frame from AP")
def test_ap_anqp_sharing(dev, apdev):
    """ANQP sharing within ESS and explicit unshare"""
    # Two APs with the same HESSID (same homogeneous ESS) but AP1
    # advertises a reduced NAI Realm list. Automatic network selection
    # fetches ANQP once and shares the result across BSSes of the ESS; an
    # explicit per-BSS ANQP_GET then replaces the shared copy.
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    bssid2 = apdev[1]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid  # same HESSID as the first AP on purpose
    params['nai_realm'] = [ "0,example.com,13[5:6],21[2:4][5:7]" ]
    hostapd.add_ap(apdev[1]['ifname'], params)
    dev[0].hs20_enable()
    # NOTE(review): "<PASSWORD>" looks like a redaction placeholder rather
    # than a real test credential -- confirm against the RADIUS user db.
    id = dev[0].add_cred_values({ 'realm': "example.com", 'username': "test",
                                  'password': "<PASSWORD>",
                                  'domain': "example.com" })
    logger.info("Normal network selection with shared ANQP results")
    interworking_select(dev[0], None, "home", freq="2412")
    dev[0].dump_monitor()
    # Shared ANQP: both BSS entries should carry identical NAI Realm data.
    res1 = dev[0].get_bss(bssid)
    res2 = dev[0].get_bss(bssid2)
    if res1['anqp_nai_realm'] != res2['anqp_nai_realm']:
        raise Exception("ANQP results were not shared between BSSes")
    logger.info("Explicit ANQP request to unshare ANQP results")
    # 263 is the ANQP info ID requested per-BSS; after this the two BSS
    # entries must diverge, since AP1 advertises a different realm list.
    dev[0].request("ANQP_GET " + bssid + " 263")
    ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
    if ev is None:
        raise Exception("ANQP operation timed out")
    dev[0].request("ANQP_GET " + bssid2 + " 263")
    ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
    if ev is None:
        raise Exception("ANQP operation timed out")
    res1 = dev[0].get_bss(bssid)
    res2 = dev[0].get_bss(bssid2)
    if res1['anqp_nai_realm'] == res2['anqp_nai_realm']:
        raise Exception("ANQP results were not unshared")
def test_ap_interworking_scan_filtering(dev, apdev):
    """Interworking scan filtering with HESSID and access network type"""
    # AP1 keeps the default HS2.0 parameters (access_network_type "14");
    # AP2 uses access_network_type "1" and drops the venue info. Wlantest
    # Probe Response counters verify that probe requests carrying a
    # HESSID/ANT filter are only answered by the matching AP.
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    ssid = "test-hs20-ap1"
    params['ssid'] = ssid
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    bssid2 = apdev[1]['bssid']
    params = hs20_ap_params()
    ssid2 = "test-hs20-ap2"
    params['ssid'] = ssid2
    params['hessid'] = bssid2
    params['access_network_type'] = "1"
    del params['venue_group']
    del params['venue_type']
    hostapd.add_ap(apdev[1]['ifname'], params)
    dev[0].hs20_enable()
    wt = Wlantest()
    wt.flush()
    logger.info("Check probe request filtering based on HESSID")
    dev[0].request("SET hessid " + bssid2)
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, bssid2)  # only AP2 may respond
    logger.info("Check probe request filtering based on access network type")
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid 00:00:00:00:00:00")  # wildcard HESSID
    dev[0].request("SET access_network_type 14")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid2, bssid)
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid 00:00:00:00:00:00")
    dev[0].request("SET access_network_type 1")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, bssid2)
    logger.info("Check probe request filtering based on HESSID and ANT")
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid " + bssid)
    dev[0].request("SET access_network_type 14")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid2, bssid)
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    # Mismatched HESSID/ANT combinations: neither AP should respond.
    dev[0].request("SET hessid " + bssid2)
    dev[0].request("SET access_network_type 14")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, None)
    check_probe_resp(wt, bssid2, None)
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid " + bssid)
    dev[0].request("SET access_network_type 1")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, None)
    check_probe_resp(wt, bssid2, None)
def test_ap_hs20_select(dev, apdev):
    """Hotspot 2.0 network selection"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    # NOTE(review): "<PASSWORD>" looks like a redaction placeholder --
    # confirm against the RADIUS user database.
    id = dev[0].add_cred_values({ 'realm': "example.com", 'username': "test",
                                  'password': "<PASSWORD>",
                                  'domain': "example.com" })
    # Matching realm and domain -> sp type "home".
    # NOTE(review): unlike the later calls this one omits freq="2412",
    # forcing a full scan -- confirm whether that is intentional.
    interworking_select(dev[0], bssid, "home")
    dev[0].remove_cred(id)
    # Realm matches, domain does not -> "roaming".
    id = dev[0].add_cred_values({ 'realm': "example.com", 'username': "test",
                                  'password': "<PASSWORD>",
                                  'domain': "no.match.example.com" })
    interworking_select(dev[0], bssid, "roaming", freq="2412")
    # Neither realm nor domain matches -> no match at all.
    dev[0].set_cred_quoted(id, "realm", "no.match.example.com");
    interworking_select(dev[0], bssid, no_match=True, freq="2412")
def hs20_simulated_sim(dev, ap, method):
    """Set up a Hotspot 2.0 AP advertising 3GPP network MCC 555 / MNC 444
    and connect with a simulated (U)SIM credential using the given EAP
    method ("SIM", "AKA" or "AKA'")."""
    bssid = ap['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    params['anqp_3gpp_cell_net'] = "555,444"
    params['domain_name'] = "wlan.mnc444.mcc555.3gppnetwork.org"
    hostapd.add_ap(ap['ifname'], params)
    dev.hs20_enable()
    dev.add_cred_values({ 'imsi': "555444-333222111", 'eap': method,
                          'milenage': "5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123"})
    # Fix: "home" was previously passed positionally as the *bssid*
    # argument of interworking_select(), so the network type was never
    # actually validated (and the BSSID check only matched "home" inside
    # the "type=home" text by coincidence). Pass both explicitly.
    interworking_select(dev, bssid, "home", freq="2412")
    interworking_connect(dev, bssid, method)
    check_sp_type(dev, "home")
def test_ap_hs20_sim(dev, apdev):
    """Hotspot 2.0 with simulated SIM and EAP-SIM"""
    # Requires the hlr_auc_gw helper from the hostapd build.
    if hlr_auc_gw_available():
        hs20_simulated_sim(dev[0], apdev[0], "SIM")
    else:
        return "skip"
def test_ap_hs20_aka(dev, apdev):
    """Hotspot 2.0 with simulated USIM and EAP-AKA"""
    # Requires the hlr_auc_gw helper from the hostapd build.
    if hlr_auc_gw_available():
        hs20_simulated_sim(dev[0], apdev[0], "AKA")
    else:
        return "skip"
def test_ap_hs20_aka_prime(dev, apdev):
    """Hotspot 2.0 with simulated USIM and EAP-AKA'"""
    # Requires the hlr_auc_gw helper from the hostapd build.
    if hlr_auc_gw_available():
        hs20_simulated_sim(dev[0], apdev[0], "AKA'")
    else:
        return "skip"
def test_ap_hs20_ext_sim(dev, apdev):
    """Hotspot 2.0 with external SIM processing"""
    if not hlr_auc_gw_available():
        return "skip"
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    # Advertise the credential's own PLMN (MCC 232 / MNC 01) so the
    # match is a home network.
    params['anqp_3gpp_cell_net'] = "232,01"
    params['domain_name'] = "wlan.mnc001.mcc232.3gppnetwork.org"
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    dev[0].request("SET external_sim 1")
    dev[0].add_cred_values({ 'imsi': "23201-0000000000", 'eap': "SIM" })
    # Fix: "home" used to be passed positionally as the *bssid* argument
    # of interworking_select(), leaving the network type unvalidated;
    # pass BSSID and expected type explicitly.
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_ext_sim_connect(dev[0], bssid, "SIM")
    check_sp_type(dev[0], "home")
def test_ap_hs20_ext_sim_roaming(dev, apdev):
    """Hotspot 2.0 with external SIM processing in roaming network"""
    if not hlr_auc_gw_available():
        return "skip"
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    # The credential's PLMN (232,01) is in the advertised list but the
    # AP's domain is a different operator -> roaming match.
    params['anqp_3gpp_cell_net'] = "244,91;310,026;232,01;234,56"
    params['domain_name'] = "wlan.mnc091.mcc244.3gppnetwork.org"
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    dev[0].request("SET external_sim 1")
    dev[0].add_cred_values({ 'imsi': "23201-0000000000", 'eap': "SIM" })
    # Fix: "roaming" used to be passed positionally as the *bssid*
    # argument of interworking_select(), leaving the network type
    # unvalidated; pass BSSID and expected type explicitly.
    interworking_select(dev[0], bssid, "roaming", freq="2412")
    interworking_ext_sim_connect(dev[0], bssid, "SIM")
    check_sp_type(dev[0], "roaming")
def test_ap_hs20_username(dev, apdev):
    """Hotspot 2.0 connection in username/password credential"""
    bssid = apdev[0]['bssid']
    ap_conf = hs20_ap_params()
    ap_conf['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], ap_conf)

    dev[0].hs20_enable()
    cred = { 'realm': "example.com",
             'username': "hs20-test",
             'password': "password",
             'domain': "example.com" }
    id = dev[0].add_cred_values(cred)

    # Matching realm+domain -> home network; TTLS is expected from the
    # advertised NAI realm data.
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    check_sp_type(dev[0], "home")
def eap_test(dev, ap, eap_params, method, user):
    """Bring up an AP advertising the given NAI Realm EAP descriptor and
    connect with a username/password credential using the given EAP
    method and user name."""
    bssid = ap['bssid']
    ap_conf = hs20_ap_params()
    ap_conf['nai_realm'] = [ "0,example.com," + eap_params ]
    hostapd.add_ap(ap['ifname'], ap_conf)

    dev.hs20_enable()
    # NOTE(review): "<EMAIL>" looks like a redaction placeholder for the
    # credential realm -- confirm the intended value.
    dev.add_cred_values({ 'realm': "<EMAIL>",
                          'username': user,
                          'password': "password" })
    interworking_select(dev, bssid, freq="2412")
    interworking_connect(dev, bssid, method)
# NAI Realm EAP descriptor syntax used below: "<EAP-method>[<auth-param>:<value>]".
# As used in this file: 25 = PEAP, 21 = TTLS, 43 = FAST; auth-param 3 pairs
# with an inner EAP method (26 = MSCHAPv2, 6 = GTC) and auth-param 2 with a
# non-EAP inner method (2 = CHAP, 3 = MSCHAP) -- see the matching
# docstrings/user names.
def test_ap_hs20_eap_peap_mschapv2(dev, apdev):
    """Hotspot 2.0 connection with PEAP/MSCHAPV2"""
    eap_test(dev[0], apdev[0], "25[3:26]", "PEAP", "user")

def test_ap_hs20_eap_peap_gtc(dev, apdev):
    """Hotspot 2.0 connection with PEAP/GTC"""
    eap_test(dev[0], apdev[0], "25[3:6]", "PEAP", "user")

def test_ap_hs20_eap_ttls_chap(dev, apdev):
    """Hotspot 2.0 connection with TTLS/CHAP"""
    eap_test(dev[0], apdev[0], "21[2:2]", "TTLS", "chap user")

def test_ap_hs20_eap_ttls_mschap(dev, apdev):
    """Hotspot 2.0 connection with TTLS/MSCHAP"""
    eap_test(dev[0], apdev[0], "21[2:3]", "TTLS", "mschap user")

def test_ap_hs20_eap_ttls_eap_mschapv2(dev, apdev):
    """Hotspot 2.0 connection with TTLS/EAP-MSCHAPv2"""
    eap_test(dev[0], apdev[0], "21[3:26]", "TTLS", "user")

def test_ap_hs20_eap_fast_mschapv2(dev, apdev):
    """Hotspot 2.0 connection with FAST/EAP-MSCHAPV2"""
    eap_test(dev[0], apdev[0], "43[3:26]", "FAST", "user")

def test_ap_hs20_eap_fast_gtc(dev, apdev):
    """Hotspot 2.0 connection with FAST/EAP-GTC"""
    eap_test(dev[0], apdev[0], "43[3:6]", "FAST", "user")
def test_ap_hs20_eap_tls(dev, apdev):
    """Hotspot 2.0 connection with EAP-TLS"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    # 13 = EAP-TLS; [5:6] = credential type certificate.
    params['nai_realm'] = [ "0,example.com,13[5:6]" ]
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    # Certificate credential (no password); files come from the local
    # auth_serv test data.
    dev[0].add_cred_values({ 'realm': "example.com",
                             'username': "certificate-user",
                             'ca_cert': "auth_serv/ca.pem",
                             'client_cert': "auth_serv/user.pem",
                             'private_key': "auth_serv/user.key"})
    interworking_select(dev[0], bssid, freq="2412")
    interworking_connect(dev[0], bssid, "TLS")
def test_ap_hs20_nai_realms(dev, apdev):
    """Hotspot 2.0 connection and multiple NAI realms and TTLS/PAP"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    # Single NAI Realm entry listing several ';'-separated realms; the
    # credential must match the middle one. 21[2:1] = TTLS with PAP.
    params['nai_realm'] = [ "0,no.match.here;example.com;no.match.here.either,21[2:1][5:7]" ]
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "pap user",
                                  'password': "password",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    check_sp_type(dev[0], "home")
def test_ap_hs20_roaming_consortium(dev, apdev):
    """Hotspot 2.0 connection based on roaming consortium match"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    # 'roaming_consortium' "fedcba" matches one of the OIs the AP
    # advertises (see hs20_ap_params). The EAP method is given explicitly
    # in the credential -- presumably because consortium-based matching
    # cannot derive it from NAI Realm data; confirm.
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "user",
                                  'password': "password",
                                  'domain': "example.com",
                                  'roaming_consortium': "fedcba",
                                  'eap': "PEAP" })
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_connect(dev[0], bssid, "PEAP")
    check_sp_type(dev[0], "home")
def test_ap_hs20_username_roaming(dev, apdev):
    """Hotspot 2.0 connection in username/password credential (roaming)"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    # The credential realm (roaming.example.com) is in the advertised NAI
    # Realm list, but the AP's domain is another.example.com and the
    # credential's domain is example.com -> roaming match.
    params['nai_realm'] = [ "0,example.com,13[5:6],21[2:4][5:7]",
                            "0,roaming.example.com,21[2:4][5:7]",
                            "0,another.example.com" ]
    params['domain_name'] = "another.example.com"
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "roaming.example.com",
                                  'username': "hs20-test",
                                  'password': "password",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "roaming", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    check_sp_type(dev[0], "roaming")
def test_ap_hs20_username_unknown(dev, apdev):
    """Hotspot 2.0 connection in username/password credential (no domain in cred)"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    # Credential has no 'domain', so home/roaming cannot be determined ->
    # sp type "unknown".
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "hs20-test",
                                  'password': "password" })
    interworking_select(dev[0], bssid, "unknown", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    check_sp_type(dev[0], "unknown")
def test_ap_hs20_username_unknown2(dev, apdev):
    """Hotspot 2.0 connection in username/password credential (no domain advertized)"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    # AP advertises no Domain Name ANQP element, so the credential's
    # domain cannot be compared -> sp type "unknown".
    del params['domain_name']
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "hs20-test",
                                  'password': "password",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "unknown", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    check_sp_type(dev[0], "unknown")
def test_ap_hs20_gas_while_associated(dev, apdev):
    """Hotspot 2.0 connection with GAS query while associated"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "hs20-test",
                                  'password': "password",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    logger.info("Verifying GAS query while associated")
    # FETCH_ANQP retrieves the full ANQP element set while the connection
    # is up; expect at least 6 RX-ANQP events.
    dev[0].request("FETCH_ANQP")
    for i in range(0, 6):
        ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
        if ev is None:
            raise Exception("Operation timed out")
def test_ap_hs20_gas_frag_while_associated(dev, apdev):
    """Hotspot 2.0 connection with fragmented GAS query while associated"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    # Force a tiny GAS fragmentation limit so the ANQP responses are
    # delivered as GAS comeback fragments.
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    hapd.set("gas_frag_limit", "50")
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "hs20-test",
                                  'password': "password",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    logger.info("Verifying GAS query while associated")
    # Same check as the unfragmented variant: expect at least 6 RX-ANQP
    # events while associated.
    dev[0].request("FETCH_ANQP")
    for i in range(0, 6):
        ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
        if ev is None:
            raise Exception("Operation timed out")
def test_ap_hs20_multiple_connects(dev, apdev):
    """Hotspot 2.0 connection through multiple network selections"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    values = { 'realm': "example.com",
               'username': "hs20-test",
               'password': "password",
               'domain': "example.com" }
    id = dev[0].add_cred_values(values)
    # Three auto-selection rounds: round 0 ends with an explicit
    # DISCONNECT, round 1 reconnects, and round 2 may legitimately report
    # INTERWORKING-ALREADY-CONNECTED since the link is still up.
    for i in range(0, 3):
        logger.info("Starting Interworking network selection")
        dev[0].request("INTERWORKING_SELECT auto freq=2412")
        while True:
            ev = dev[0].wait_event(["INTERWORKING-NO-MATCH",
                                    "INTERWORKING-ALREADY-CONNECTED",
                                    "CTRL-EVENT-CONNECTED"], timeout=15)
            if ev is None:
                raise Exception("Connection timed out")
            if "INTERWORKING-NO-MATCH" in ev:
                raise Exception("Matching AP not found")
            if "CTRL-EVENT-CONNECTED" in ev:
                break
            if i == 2 and "INTERWORKING-ALREADY-CONNECTED" in ev:
                break
        if i == 0:
            dev[0].request("DISCONNECT")
        dev[0].dump_monitor()
    # Repeated selections must reuse the same network block instead of
    # creating one per round.
    networks = dev[0].list_networks()
    if len(networks) > 1:
        raise Exception("Duplicated network block detected")
def test_ap_hs20_disallow_aps(dev, apdev):
    """Hotspot 2.0 connection and disallow_aps"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    values = { 'realm': "example.com",
               'username': "hs20-test",
               'password': "password",
               'domain': "example.com" }
    id = dev[0].add_cred_values(values)
    logger.info("Verify disallow_aps bssid")
    # NOTE(review): str.translate(None, ':') is the Python 2 delete-chars
    # form (strips colons from the BSSID); Python 3 would need
    # bssid.replace(':', '') -- confirm the target interpreter.
    dev[0].request("SET disallow_aps bssid " + bssid.translate(None, ':'))
    dev[0].request("INTERWORKING_SELECT auto")
    ev = dev[0].wait_event(["INTERWORKING-NO-MATCH"], timeout=15)
    if ev is None:
        raise Exception("Network selection timed out")
    dev[0].dump_monitor()
    logger.info("Verify disallow_aps ssid")
    # "746573742d68733230" is hex for "test-hs20" (the AP's SSID).
    dev[0].request("SET disallow_aps ssid 746573742d68733230")
    dev[0].request("INTERWORKING_SELECT auto freq=2412")
    ev = dev[0].wait_event(["INTERWORKING-NO-MATCH"], timeout=15)
    if ev is None:
        raise Exception("Network selection timed out")
    dev[0].dump_monitor()
    logger.info("Verify disallow_aps clear")
    dev[0].request("SET disallow_aps ")
    interworking_select(dev[0], bssid, "home", freq="2412")
    # Re-disallow the AP after selection: the explicit connect must fail.
    dev[0].request("SET disallow_aps bssid " + bssid.translate(None, ':'))
    ret = dev[0].request("INTERWORKING_CONNECT " + bssid)
    if "FAIL" not in ret:
        raise Exception("INTERWORKING_CONNECT to disallowed BSS not rejected")
def policy_test(dev, ap, values, only_one=True):
    """Add the given credential values and run automatic Interworking
    selection, expecting the connection to end up on the given AP.

    only_one: when True, fail if any other AP is even reported as an
    acceptable candidate. The credential is removed before returning.
    """
    dev.dump_monitor()
    logger.info("Verify network selection to AP " + ap['ifname'])
    bssid = ap['bssid']
    dev.hs20_enable()
    id = dev.add_cred_values(values)
    dev.request("INTERWORKING_SELECT auto freq=2412")
    while True:
        ev = dev.wait_event(["INTERWORKING-AP", "INTERWORKING-NO-MATCH",
                             "CTRL-EVENT-CONNECTED"], timeout=15)
        if ev is None:
            raise Exception("Connection timed out")
        if "INTERWORKING-NO-MATCH" in ev:
            raise Exception("Matching AP not found")
        # Each candidate AP is reported via INTERWORKING-AP before the
        # connection attempt; with only_one, any other BSSID is an error.
        if only_one and "INTERWORKING-AP" in ev and bssid not in ev:
            raise Exception("Unexpected AP claimed acceptable")
        if "CTRL-EVENT-CONNECTED" in ev:
            if bssid not in ev:
                raise Exception("Connected to incorrect BSS")
            break
    # Double-check via STATUS that we really associated with the
    # expected BSS.
    conn_bssid = dev.get_status_field("bssid")
    if conn_bssid != bssid:
        raise Exception("bssid information points to incorrect BSS")
    dev.remove_cred(id)
    dev.dump_monitor()
def default_cred():
    """Return a baseline username/password credential for example.com."""
    cred = {}
    cred['realm'] = "example.com"
    cred['username'] = "hs20-test"
    cred['password'] = "password"
    return cred
def test_ap_hs20_req_roaming_consortium(dev, apdev):
    """Hotspot 2.0 required roaming consortium"""
    # AP0 advertises the default OI list (includes "112233"); AP1
    # advertises only "223344".
    params = hs20_ap_params()
    hostapd.add_ap(apdev[0]['ifname'], params)
    params = hs20_ap_params()
    params['ssid'] = "test-hs20-other"
    params['roaming_consortium'] = [ "223344" ]
    hostapd.add_ap(apdev[1]['ifname'], params)
    values = default_cred()
    # required_roaming_consortium restricts selection to APs advertising
    # the given OI: first only AP1 qualifies, then only AP0.
    values['required_roaming_consortium'] = "223344"
    policy_test(dev[0], apdev[1], values)
    values['required_roaming_consortium'] = "112233"
    policy_test(dev[0], apdev[0], values)
def test_ap_hs20_excluded_ssid(dev, apdev):
    """Hotspot 2.0 exclusion based on SSID"""
    # Two otherwise-acceptable APs; excluded_ssid in the credential must
    # steer selection away from the excluded SSID in both directions.
    params = hs20_ap_params()
    hostapd.add_ap(apdev[0]['ifname'], params)
    params = hs20_ap_params()
    params['ssid'] = "test-hs20-other"
    params['roaming_consortium'] = [ "223344" ]
    hostapd.add_ap(apdev[1]['ifname'], params)
    values = default_cred()
    values['excluded_ssid'] = "test-hs20"
    policy_test(dev[0], apdev[1], values)
    values['excluded_ssid'] = "test-hs20-other"
    # Fix: this final call had extraction junk appended to the line
    # ("| tests/hwsim/test_ap_hs20.py |"), which made it a syntax error.
    policy_test(dev[0], apdev[0], values)
# Standard library (fix: 'subprocess' was imported twice).
import logging
import os.path
import subprocess
import time

# hwsim test-framework local modules.
import hostapd
from wlantest import Wlantest

# Module-wide logger shared by all test cases in this file.
logger = logging.getLogger()
def hs20_ap_params():
    """Return a hostapd parameter set for a standard Hotspot 2.0 test AP.

    Starts from a WPA2-Enterprise base configuration and layers the
    Interworking/HS2.0 ANQP elements on top; individual tests tweak or
    delete entries as needed.
    """
    params = hostapd.wpa2_params(ssid="test-hs20")
    params.update({
        'wpa_key_mgmt': "WPA-EAP",
        'ieee80211w': "1",
        # Local RADIUS authentication server.
        'ieee8021x': "1",
        'auth_server_addr': "127.0.0.1",
        'auth_server_port': "1812",
        'auth_server_shared_secret': "radius",
        # Interworking (IEEE 802.11u) elements.
        'interworking': "1",
        'access_network_type': "14",
        'internet': "1",
        'asra': "0",
        'esr': "0",
        'uesa': "0",
        'venue_group': "7",
        'venue_type': "1",
        'venue_name': [ "eng:Example venue", "fin:Esimerkkipaikka" ],
        'roaming_consortium': [ "112233", "1020304050", "010203040506",
                                "fedcba" ],
        'domain_name': "example.com,another.example.com",
        'nai_realm': [ "0,example.com,13[5:6],21[2:4][5:7]",
                       "0,another.example.com" ],
        # Hotspot 2.0 specific elements.
        'hs20': "1",
        'hs20_wan_metrics': "01:8000:1000:80:240:3000",
        'hs20_conn_capab': [ "1:0:2", "6:22:1", "17:5060:0" ],
        'hs20_operating_class': "5173",
        'anqp_3gpp_cell_net': "244,91",
    })
    return params
# NOTE(review): from here to the end of the file the content
# byte-duplicates definitions that already appear earlier in this file
# (apparent file-concatenation artifact). At import time these later
# copies silently rebind the earlier ones -- confirm and deduplicate.
def interworking_select(dev, bssid, type=None, no_match=False, freq=None):
    """Run INTERWORKING_SELECT on dev and validate the match result."""
    dev.dump_monitor()
    freq_extra = " freq=" + freq if freq else ""
    dev.request("INTERWORKING_SELECT" + freq_extra)
    ev = dev.wait_event(["INTERWORKING-AP", "INTERWORKING-NO-MATCH"],
                        timeout=15)
    if ev is None:
        raise Exception("Network selection timed out");
    if no_match:
        if "INTERWORKING-NO-MATCH" not in ev:
            raise Exception("Unexpected network match")
        return
    if "INTERWORKING-NO-MATCH" in ev:
        raise Exception("Matching network not found")
    if bssid and bssid not in ev:
        raise Exception("Unexpected BSSID in match")
    if type and "type=" + type not in ev:
        raise Exception("Network type not recognized correctly")

def check_sp_type(dev, sp_type):
    """Verify that STATUS sp_type equals the expected value."""
    type = dev.get_status_field("sp_type")
    if type is None:
        raise Exception("sp_type not available")
    # NOTE(review): message hard-codes "home" even when the expected type
    # is roaming/unknown.
    if type != sp_type:
        raise Exception("sp_type did not indicate home network")

def hlr_auc_gw_available():
    """Return True if the hlr_auc_gw helper socket and binary exist."""
    if not os.path.exists("/tmp/hlr_auc_gw.sock"):
        logger.info("No hlr_auc_gw available");
        return False
    if not os.path.exists("../../hostapd/hlr_auc_gw"):
        logger.info("No hlr_auc_gw available");
        return False
    return True
# NOTE(review): duplicates of helper functions defined earlier in this
# file; these later copies rebind the earlier ones at import time.
def interworking_ext_sim_connect(dev, bssid, method):
    """Connect with external SIM processing via the hlr_auc_gw helper
    (see the earlier copy of this helper for details)."""
    dev.request("INTERWORKING_CONNECT " + bssid)
    ev = dev.wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
    if ev is None:
        raise Exception("Network connected timed out")
    if "(" + method + ")" not in ev:
        raise Exception("Unexpected EAP method selection")
    ev = dev.wait_event(["CTRL-REQ-SIM"], timeout=15)
    if ev is None:
        raise Exception("Wait for external SIM processing request timed out")
    # CTRL-REQ-SIM-<id>:GSM-AUTH:<rand> ...
    p = ev.split(':', 2)
    if p[1] != "GSM-AUTH":
        raise Exception("Unexpected CTRL-REQ-SIM type")
    id = p[0].split('-')[3]
    rand = p[2].split(' ')[0]
    res = subprocess.check_output(["../../hostapd/hlr_auc_gw",
                                   "-m",
                                   "auth_serv/hlr_auc_gw.milenage_db",
                                   "GSM-AUTH-REQ 232010000000000 " + rand])
    if "GSM-AUTH-RESP" not in res:
        raise Exception("Unexpected hlr_auc_gw response")
    resp = res.split(' ')[2].rstrip()
    dev.request("CTRL-RSP-SIM-" + id + ":GSM-AUTH:" + resp)
    ev = dev.wait_event(["CTRL-EVENT-CONNECTED"], timeout=15)
    if ev is None:
        raise Exception("Connection timed out")

def interworking_connect(dev, bssid, method):
    """Connect via INTERWORKING_CONNECT, verifying the EAP method."""
    dev.request("INTERWORKING_CONNECT " + bssid)
    ev = dev.wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
    if ev is None:
        raise Exception("Network connected timed out")
    if "(" + method + ")" not in ev:
        raise Exception("Unexpected EAP method selection")
    ev = dev.wait_event(["CTRL-EVENT-CONNECTED"], timeout=15)
    if ev is None:
        raise Exception("Connection timed out")

def check_probe_resp(wt, bssid_unexpected, bssid_expected):
    """Verify wlantest Probe Response counters (either bssid may be None)."""
    if bssid_unexpected:
        count = wt.get_bss_counter("probe_response", bssid_unexpected)
        if count > 0:
            raise Exception("Unexpected Probe Response frame from AP")
    if bssid_expected:
        count = wt.get_bss_counter("probe_response", bssid_expected)
        if count == 0:
            raise Exception("No Probe Response frame from AP")
# NOTE(review): duplicate of the test_ap_anqp_sharing defined earlier in
# this file.
def test_ap_anqp_sharing(dev, apdev):
    """ANQP sharing within ESS and explicit unshare"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    bssid2 = apdev[1]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid  # same HESSID -> same homogeneous ESS
    params['nai_realm'] = [ "0,example.com,13[5:6],21[2:4][5:7]" ]
    hostapd.add_ap(apdev[1]['ifname'], params)
    dev[0].hs20_enable()
    # NOTE(review): "<PASSWORD>" looks like a redaction placeholder.
    id = dev[0].add_cred_values({ 'realm': "example.com", 'username': "test",
                                  'password': "<PASSWORD>",
                                  'domain': "example.com" })
    logger.info("Normal network selection with shared ANQP results")
    interworking_select(dev[0], None, "home", freq="2412")
    dev[0].dump_monitor()
    res1 = dev[0].get_bss(bssid)
    res2 = dev[0].get_bss(bssid2)
    if res1['anqp_nai_realm'] != res2['anqp_nai_realm']:
        raise Exception("ANQP results were not shared between BSSes")
    logger.info("Explicit ANQP request to unshare ANQP results")
    dev[0].request("ANQP_GET " + bssid + " 263")
    ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
    if ev is None:
        raise Exception("ANQP operation timed out")
    dev[0].request("ANQP_GET " + bssid2 + " 263")
    ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
    if ev is None:
        raise Exception("ANQP operation timed out")
    res1 = dev[0].get_bss(bssid)
    res2 = dev[0].get_bss(bssid2)
    if res1['anqp_nai_realm'] == res2['anqp_nai_realm']:
        raise Exception("ANQP results were not unshared")
# NOTE(review): duplicate of the test_ap_interworking_scan_filtering
# defined earlier in this file.
def test_ap_interworking_scan_filtering(dev, apdev):
    """Interworking scan filtering with HESSID and access network type"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    ssid = "test-hs20-ap1"
    params['ssid'] = ssid
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    bssid2 = apdev[1]['bssid']
    params = hs20_ap_params()
    ssid2 = "test-hs20-ap2"
    params['ssid'] = ssid2
    params['hessid'] = bssid2
    params['access_network_type'] = "1"
    del params['venue_group']
    del params['venue_type']
    hostapd.add_ap(apdev[1]['ifname'], params)
    dev[0].hs20_enable()
    wt = Wlantest()
    wt.flush()
    logger.info("Check probe request filtering based on HESSID")
    dev[0].request("SET hessid " + bssid2)
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, bssid2)
    logger.info("Check probe request filtering based on access network type")
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid 00:00:00:00:00:00")
    dev[0].request("SET access_network_type 14")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid2, bssid)
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid 00:00:00:00:00:00")
    dev[0].request("SET access_network_type 1")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, bssid2)
    logger.info("Check probe request filtering based on HESSID and ANT")
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid " + bssid)
    dev[0].request("SET access_network_type 14")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid2, bssid)
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    # Mismatched HESSID/ANT combinations: neither AP should respond.
    dev[0].request("SET hessid " + bssid2)
    dev[0].request("SET access_network_type 14")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, None)
    check_probe_resp(wt, bssid2, None)
    wt.clear_bss_counters(bssid)
    wt.clear_bss_counters(bssid2)
    dev[0].request("SET hessid " + bssid)
    dev[0].request("SET access_network_type 1")
    dev[0].scan(freq="2412")
    check_probe_resp(wt, bssid, None)
    check_probe_resp(wt, bssid2, None)
# NOTE(review): duplicates of test_ap_hs20_select / hs20_simulated_sim
# defined earlier in this file.
def test_ap_hs20_select(dev, apdev):
    """Hotspot 2.0 network selection"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "example.com", 'username': "test",
                                  'password': "<PASSWORD>",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "home")
    dev[0].remove_cred(id)
    id = dev[0].add_cred_values({ 'realm': "example.com", 'username': "test",
                                  'password': "<PASSWORD>",
                                  'domain': "no.match.example.com" })
    interworking_select(dev[0], bssid, "roaming", freq="2412")
    dev[0].set_cred_quoted(id, "realm", "no.match.example.com");
    interworking_select(dev[0], bssid, no_match=True, freq="2412")

def hs20_simulated_sim(dev, ap, method):
    """Connect with a simulated (U)SIM credential (see earlier copy)."""
    bssid = ap['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    params['anqp_3gpp_cell_net'] = "555,444"
    params['domain_name'] = "wlan.mnc444.mcc555.3gppnetwork.org"
    hostapd.add_ap(ap['ifname'], params)
    dev.hs20_enable()
    dev.add_cred_values({ 'imsi': "555444-333222111", 'eap': method,
                          'milenage': "5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123"})
    # NOTE(review): "home" is passed as the *bssid* positional argument,
    # so the network type is not actually validated here -- should
    # probably be interworking_select(dev, bssid, "home", freq="2412").
    interworking_select(dev, "home", freq="2412")
    interworking_connect(dev, bssid, method)
    check_sp_type(dev, "home")
# NOTE(review): duplicates of the SIM/AKA test cases defined earlier in
# this file.
def test_ap_hs20_sim(dev, apdev):
    """Hotspot 2.0 with simulated SIM and EAP-SIM"""
    if not hlr_auc_gw_available():
        return "skip"
    hs20_simulated_sim(dev[0], apdev[0], "SIM")

def test_ap_hs20_aka(dev, apdev):
    """Hotspot 2.0 with simulated USIM and EAP-AKA"""
    if not hlr_auc_gw_available():
        return "skip"
    hs20_simulated_sim(dev[0], apdev[0], "AKA")

def test_ap_hs20_aka_prime(dev, apdev):
    """Hotspot 2.0 with simulated USIM and EAP-AKA'"""
    if not hlr_auc_gw_available():
        return "skip"
    hs20_simulated_sim(dev[0], apdev[0], "AKA'")

def test_ap_hs20_ext_sim(dev, apdev):
    """Hotspot 2.0 with external SIM processing"""
    if not hlr_auc_gw_available():
        return "skip"
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    params['anqp_3gpp_cell_net'] = "232,01"
    params['domain_name'] = "wlan.mnc001.mcc232.3gppnetwork.org"
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    dev[0].request("SET external_sim 1")
    dev[0].add_cred_values({ 'imsi': "23201-0000000000", 'eap': "SIM" })
    # NOTE(review): "home" passed as the *bssid* positional argument of
    # interworking_select(), leaving the network type unvalidated.
    interworking_select(dev[0], "home", freq="2412")
    interworking_ext_sim_connect(dev[0], bssid, "SIM")
    check_sp_type(dev[0], "home")

def test_ap_hs20_ext_sim_roaming(dev, apdev):
    """Hotspot 2.0 with external SIM processing in roaming network"""
    if not hlr_auc_gw_available():
        return "skip"
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    params['anqp_3gpp_cell_net'] = "244,91;310,026;232,01;234,56"
    params['domain_name'] = "wlan.mnc091.mcc244.3gppnetwork.org"
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    dev[0].request("SET external_sim 1")
    dev[0].add_cred_values({ 'imsi': "23201-0000000000", 'eap': "SIM" })
    # NOTE(review): "roaming" passed as the *bssid* positional argument of
    # interworking_select(), leaving the network type unvalidated.
    interworking_select(dev[0], "roaming", freq="2412")
    interworking_ext_sim_connect(dev[0], bssid, "SIM")
    check_sp_type(dev[0], "roaming")
# NOTE(review): duplicates of the username/EAP-method test cases defined
# earlier in this file.
def test_ap_hs20_username(dev, apdev):
    """Hotspot 2.0 connection in username/password credential"""
    bssid = apdev[0]['bssid']
    params = hs20_ap_params()
    params['hessid'] = bssid
    hostapd.add_ap(apdev[0]['ifname'], params)
    dev[0].hs20_enable()
    id = dev[0].add_cred_values({ 'realm': "example.com",
                                  'username': "hs20-test",
                                  'password': "password",
                                  'domain': "example.com" })
    interworking_select(dev[0], bssid, "home", freq="2412")
    interworking_connect(dev[0], bssid, "TTLS")
    check_sp_type(dev[0], "home")

def eap_test(dev, ap, eap_params, method, user):
    """AP with the given NAI Realm EAP descriptor + credential connect."""
    bssid = ap['bssid']
    params = hs20_ap_params()
    params['nai_realm'] = [ "0,example.com," + eap_params ]
    hostapd.add_ap(ap['ifname'], params)
    dev.hs20_enable()
    # NOTE(review): "<EMAIL>" looks like a redaction placeholder.
    dev.add_cred_values({ 'realm': "<EMAIL>",
                          'username': user,
                          'password': "password" })
    interworking_select(dev, bssid, freq="2412")
    interworking_connect(dev, bssid, method)

def test_ap_hs20_eap_peap_mschapv2(dev, apdev):
    """Hotspot 2.0 connection with PEAP/MSCHAPV2"""
    eap_test(dev[0], apdev[0], "25[3:26]", "PEAP", "user")

def test_ap_hs20_eap_peap_gtc(dev, apdev):
    """Hotspot 2.0 connection with PEAP/GTC"""
    eap_test(dev[0], apdev[0], "25[3:6]", "PEAP", "user")

def test_ap_hs20_eap_ttls_chap(dev, apdev):
    """Hotspot 2.0 connection with TTLS/CHAP"""
    eap_test(dev[0], apdev[0], "21[2:2]", "TTLS", "chap user")

def test_ap_hs20_eap_ttls_mschap(dev, apdev):
    """Hotspot 2.0 connection with TTLS/MSCHAP"""
    eap_test(dev[0], apdev[0], "21[2:3]", "TTLS", "mschap user")

def test_ap_hs20_eap_ttls_eap_mschapv2(dev, apdev):
    """Hotspot 2.0 connection with TTLS/EAP-MSCHAPv2"""
    eap_test(dev[0], apdev[0], "21[3:26]", "TTLS", "user")

def test_ap_hs20_eap_fast_mschapv2(dev, apdev):
    """Hotspot 2.0 connection with FAST/EAP-MSCHAPV2"""
    eap_test(dev[0], apdev[0], "43[3:26]", "FAST", "user")

def test_ap_hs20_eap_fast_gtc(dev, apdev):
    """Hotspot 2.0 connection with FAST/EAP-GTC"""
    eap_test(dev[0], apdev[0], "43[3:6]", "FAST", "user")
def test_ap_hs20_eap_tls(dev, apdev):
"""Hotspot 2.0 connection with EAP-TLS"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['nai_realm'] = [ "0,example.com,13[5:6]" ]
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
dev[0].add_cred_values({ 'realm': "example.com",
'username': "certificate-user",
'ca_cert': "auth_serv/ca.pem",
'client_cert': "auth_serv/user.pem",
'private_key': "auth_serv/user.key"})
interworking_select(dev[0], bssid, freq="2412")
interworking_connect(dev[0], bssid, "TLS")
def test_ap_hs20_nai_realms(dev, apdev):
"""Hotspot 2.0 connection and multiple NAI realms and TTLS/PAP"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
params['nai_realm'] = [ "0,no.match.here;example.com;no.match.here.either,21[2:1][5:7]" ]
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "example.com",
'username': "pap user",
'password': "password",
'domain': "example.com" })
interworking_select(dev[0], bssid, "home", freq="2412")
interworking_connect(dev[0], bssid, "TTLS")
check_sp_type(dev[0], "home")
def test_ap_hs20_roaming_consortium(dev, apdev):
"""Hotspot 2.0 connection based on roaming consortium match"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "example.com",
'username': "user",
'password': "password",
'domain': "example.com",
'roaming_consortium': "fedcba",
'eap': "PEAP" })
interworking_select(dev[0], bssid, "home", freq="2412")
interworking_connect(dev[0], bssid, "PEAP")
check_sp_type(dev[0], "home")
def test_ap_hs20_username_roaming(dev, apdev):
"""Hotspot 2.0 connection in username/password credential (roaming)"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['nai_realm'] = [ "0,example.com,13[5:6],21[2:4][5:7]",
"0,roaming.example.com,21[2:4][5:7]",
"0,another.example.com" ]
params['domain_name'] = "another.example.com"
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "roaming.example.com",
'username': "hs20-test",
'password': "password",
'domain': "example.com" })
interworking_select(dev[0], bssid, "roaming", freq="2412")
interworking_connect(dev[0], bssid, "TTLS")
check_sp_type(dev[0], "roaming")
def test_ap_hs20_username_unknown(dev, apdev):
"""Hotspot 2.0 connection in username/password credential (no domain in cred)"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "example.com",
'username': "hs20-test",
'password': "password" })
interworking_select(dev[0], bssid, "unknown", freq="2412")
interworking_connect(dev[0], bssid, "TTLS")
check_sp_type(dev[0], "unknown")
def test_ap_hs20_username_unknown2(dev, apdev):
"""Hotspot 2.0 connection in username/password credential (no domain advertized)"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
del params['domain_name']
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "example.com",
'username': "hs20-test",
'password': "password",
'domain': "example.com" })
interworking_select(dev[0], bssid, "unknown", freq="2412")
interworking_connect(dev[0], bssid, "TTLS")
check_sp_type(dev[0], "unknown")
def test_ap_hs20_gas_while_associated(dev, apdev):
"""Hotspot 2.0 connection with GAS query while associated"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "example.com",
'username': "hs20-test",
'password': "password",
'domain': "example.com" })
interworking_select(dev[0], bssid, "home", freq="2412")
interworking_connect(dev[0], bssid, "TTLS")
logger.info("Verifying GAS query while associated")
dev[0].request("FETCH_ANQP")
for i in range(0, 6):
ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
if ev is None:
raise Exception("Operation timed out")
def test_ap_hs20_gas_frag_while_associated(dev, apdev):
"""Hotspot 2.0 connection with fragmented GAS query while associated"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
hapd = hostapd.Hostapd(apdev[0]['ifname'])
hapd.set("gas_frag_limit", "50")
dev[0].hs20_enable()
id = dev[0].add_cred_values({ 'realm': "example.com",
'username': "hs20-test",
'password': "password",
'domain': "example.com" })
interworking_select(dev[0], bssid, "home", freq="2412")
interworking_connect(dev[0], bssid, "TTLS")
logger.info("Verifying GAS query while associated")
dev[0].request("FETCH_ANQP")
for i in range(0, 6):
ev = dev[0].wait_event(["RX-ANQP"], timeout=5)
if ev is None:
raise Exception("Operation timed out")
def test_ap_hs20_multiple_connects(dev, apdev):
"""Hotspot 2.0 connection through multiple network selections"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
values = { 'realm': "example.com",
'username': "hs20-test",
'password': "password",
'domain': "example.com" }
id = dev[0].add_cred_values(values)
for i in range(0, 3):
logger.info("Starting Interworking network selection")
dev[0].request("INTERWORKING_SELECT auto freq=2412")
while True:
ev = dev[0].wait_event(["INTERWORKING-NO-MATCH",
"INTERWORKING-ALREADY-CONNECTED",
"CTRL-EVENT-CONNECTED"], timeout=15)
if ev is None:
raise Exception("Connection timed out")
if "INTERWORKING-NO-MATCH" in ev:
raise Exception("Matching AP not found")
if "CTRL-EVENT-CONNECTED" in ev:
break
if i == 2 and "INTERWORKING-ALREADY-CONNECTED" in ev:
break
if i == 0:
dev[0].request("DISCONNECT")
dev[0].dump_monitor()
networks = dev[0].list_networks()
if len(networks) > 1:
raise Exception("Duplicated network block detected")
def test_ap_hs20_disallow_aps(dev, apdev):
"""Hotspot 2.0 connection and disallow_aps"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].hs20_enable()
values = { 'realm': "example.com",
'username': "hs20-test",
'password': "password",
'domain': "example.com" }
id = dev[0].add_cred_values(values)
logger.info("Verify disallow_aps bssid")
dev[0].request("SET disallow_aps bssid " + bssid.translate(None, ':'))
dev[0].request("INTERWORKING_SELECT auto")
ev = dev[0].wait_event(["INTERWORKING-NO-MATCH"], timeout=15)
if ev is None:
raise Exception("Network selection timed out")
dev[0].dump_monitor()
logger.info("Verify disallow_aps ssid")
dev[0].request("SET disallow_aps ssid 746573742d68733230")
dev[0].request("INTERWORKING_SELECT auto freq=2412")
ev = dev[0].wait_event(["INTERWORKING-NO-MATCH"], timeout=15)
if ev is None:
raise Exception("Network selection timed out")
dev[0].dump_monitor()
logger.info("Verify disallow_aps clear")
dev[0].request("SET disallow_aps ")
interworking_select(dev[0], bssid, "home", freq="2412")
dev[0].request("SET disallow_aps bssid " + bssid.translate(None, ':'))
ret = dev[0].request("INTERWORKING_CONNECT " + bssid)
if "FAIL" not in ret:
raise Exception("INTERWORKING_CONNECT to disallowed BSS not rejected")
def policy_test(dev, ap, values, only_one=True):
dev.dump_monitor()
logger.info("Verify network selection to AP " + ap['ifname'])
bssid = ap['bssid']
dev.hs20_enable()
id = dev.add_cred_values(values)
dev.request("INTERWORKING_SELECT auto freq=2412")
while True:
ev = dev.wait_event(["INTERWORKING-AP", "INTERWORKING-NO-MATCH",
"CTRL-EVENT-CONNECTED"], timeout=15)
if ev is None:
raise Exception("Connection timed out")
if "INTERWORKING-NO-MATCH" in ev:
raise Exception("Matching AP not found")
if only_one and "INTERWORKING-AP" in ev and bssid not in ev:
raise Exception("Unexpected AP claimed acceptable")
if "CTRL-EVENT-CONNECTED" in ev:
if bssid not in ev:
raise Exception("Connected to incorrect BSS")
break
conn_bssid = dev.get_status_field("bssid")
if conn_bssid != bssid:
raise Exception("bssid information points to incorrect BSS")
dev.remove_cred(id)
dev.dump_monitor()
def default_cred():
return { 'realm': "example.com",
'username': "hs20-test",
'password': "password" }
def test_ap_hs20_req_roaming_consortium(dev, apdev):
"""Hotspot 2.0 required roaming consortium"""
params = hs20_ap_params()
hostapd.add_ap(apdev[0]['ifname'], params)
params = hs20_ap_params()
params['ssid'] = "test-hs20-other"
params['roaming_consortium'] = [ "223344" ]
hostapd.add_ap(apdev[1]['ifname'], params)
values = default_cred()
values['required_roaming_consortium'] = "223344"
policy_test(dev[0], apdev[1], values)
values['required_roaming_consortium'] = "112233"
policy_test(dev[0], apdev[0], values)
def test_ap_hs20_excluded_ssid(dev, apdev):
"""Hotspot 2.0 exclusion based on SSID"""
params = hs20_ap_params()
hostapd.add_ap(apdev[0]['ifname'], params)
params = hs20_ap_params()
params['ssid'] = "test-hs20-other"
params['roaming_consortium'] = [ "223344" ]
hostapd.add_ap(apdev[1]['ifname'], params)
values = default_cred()
values['excluded_ssid'] = "test-hs20"
policy_test(dev[0], apdev[1], values)
values['excluded_ssid'] = "test-hs20-other"
policy_test(dev[0], apdev[0], values) | 0.338186 | 0.084682 |
import asyncio
from typing import Any, Callable, Coroutine, Dict, List, Optional
from typing import TYPE_CHECKING
from pyppeteer.connection import CDPSession
from pyppeteer.page import Page
if TYPE_CHECKING:
from pyppeteer.browser import Browser, BrowserContext # noqa: F401
class Target(object):
"""Browser's target class."""
def __init__(self, targetInfo: Dict, browserContext: 'BrowserContext',
sessionFactory: Callable[[], Coroutine[Any, Any, CDPSession]],
ignoreHTTPSErrors: bool, defaultViewport: Optional[Dict],
screenshotTaskQueue: List, loop: asyncio.AbstractEventLoop
) -> None:
self._targetInfo = targetInfo
self._browserContext = browserContext
self._targetId = targetInfo.get('targetId', '')
self._sessionFactory = sessionFactory
self._ignoreHTTPSErrors = ignoreHTTPSErrors
self._defaultViewport = defaultViewport
self._screenshotTaskQueue = screenshotTaskQueue
self._loop = loop
self._page: Optional[Page] = None
self._initializedPromise = self._loop.create_future()
self._isClosedPromise = self._loop.create_future()
self._isInitialized = (self._targetInfo['type'] != 'page'
or self._targetInfo['url'] != '')
if self._isInitialized:
self._initializedCallback(True)
def _initializedCallback(self, bl: bool) -> None:
# TODO: this may cause error on page close
if self._initializedPromise.done():
self._initializedPromise = self._loop.create_future()
self._initializedPromise.set_result(bl)
def _closedCallback(self) -> None:
self._isClosedPromise.set_result(None)
async def createCDPSession(self) -> CDPSession:
"""Create a Chrome Devtools Protocol session attached to the target."""
return await self._sessionFactory()
async def page(self) -> Optional[Page]:
"""Get page of this target.
If the target is not of type "page" or "background_page", return
``None``.
"""
if (self._targetInfo['type'] in ['page', 'background_page'] and
self._page is None):
client = await self._sessionFactory()
new_page = await Page.create(
client, self,
self._ignoreHTTPSErrors,
self._defaultViewport,
self._screenshotTaskQueue,
)
self._page = new_page
return new_page
return self._page
@property
def url(self) -> str:
"""Get url of this target."""
return self._targetInfo['url']
@property
def type(self) -> str:
"""Get type of this target.
Type can be ``'page'``, ``'background_page'``, ``'service_worker'``,
``'browser'``, or ``'other'``.
"""
_type = self._targetInfo['type']
if _type in ['page', 'background_page', 'service_worker', 'browser']:
return _type
return 'other'
@property
def browser(self) -> 'Browser':
"""Get the browser the target belongs to."""
return self._browserContext.browser
@property
def browserContext(self) -> 'BrowserContext':
"""Return the browser context the target belongs to."""
return self._browserContext
@property
def opener(self) -> Optional['Target']:
"""Get the target that opened this target.
Top-level targets return ``None``.
"""
openerId = self._targetInfo.get('openerId')
if openerId is None:
return None
return self.browser._targets.get(openerId)
def _targetInfoChanged(self, targetInfo: Dict) -> None:
self._targetInfo = targetInfo
if not self._isInitialized and (self._targetInfo['type'] != 'page' or
self._targetInfo['url'] != ''):
self._isInitialized = True
self._initializedCallback(True)
return | pyppeteer/target.py | import asyncio
from typing import Any, Callable, Coroutine, Dict, List, Optional
from typing import TYPE_CHECKING
from pyppeteer.connection import CDPSession
from pyppeteer.page import Page
if TYPE_CHECKING:
from pyppeteer.browser import Browser, BrowserContext # noqa: F401
class Target(object):
"""Browser's target class."""
def __init__(self, targetInfo: Dict, browserContext: 'BrowserContext',
sessionFactory: Callable[[], Coroutine[Any, Any, CDPSession]],
ignoreHTTPSErrors: bool, defaultViewport: Optional[Dict],
screenshotTaskQueue: List, loop: asyncio.AbstractEventLoop
) -> None:
self._targetInfo = targetInfo
self._browserContext = browserContext
self._targetId = targetInfo.get('targetId', '')
self._sessionFactory = sessionFactory
self._ignoreHTTPSErrors = ignoreHTTPSErrors
self._defaultViewport = defaultViewport
self._screenshotTaskQueue = screenshotTaskQueue
self._loop = loop
self._page: Optional[Page] = None
self._initializedPromise = self._loop.create_future()
self._isClosedPromise = self._loop.create_future()
self._isInitialized = (self._targetInfo['type'] != 'page'
or self._targetInfo['url'] != '')
if self._isInitialized:
self._initializedCallback(True)
def _initializedCallback(self, bl: bool) -> None:
# TODO: this may cause error on page close
if self._initializedPromise.done():
self._initializedPromise = self._loop.create_future()
self._initializedPromise.set_result(bl)
def _closedCallback(self) -> None:
self._isClosedPromise.set_result(None)
async def createCDPSession(self) -> CDPSession:
"""Create a Chrome Devtools Protocol session attached to the target."""
return await self._sessionFactory()
async def page(self) -> Optional[Page]:
"""Get page of this target.
If the target is not of type "page" or "background_page", return
``None``.
"""
if (self._targetInfo['type'] in ['page', 'background_page'] and
self._page is None):
client = await self._sessionFactory()
new_page = await Page.create(
client, self,
self._ignoreHTTPSErrors,
self._defaultViewport,
self._screenshotTaskQueue,
)
self._page = new_page
return new_page
return self._page
@property
def url(self) -> str:
"""Get url of this target."""
return self._targetInfo['url']
@property
def type(self) -> str:
"""Get type of this target.
Type can be ``'page'``, ``'background_page'``, ``'service_worker'``,
``'browser'``, or ``'other'``.
"""
_type = self._targetInfo['type']
if _type in ['page', 'background_page', 'service_worker', 'browser']:
return _type
return 'other'
@property
def browser(self) -> 'Browser':
"""Get the browser the target belongs to."""
return self._browserContext.browser
@property
def browserContext(self) -> 'BrowserContext':
"""Return the browser context the target belongs to."""
return self._browserContext
@property
def opener(self) -> Optional['Target']:
"""Get the target that opened this target.
Top-level targets return ``None``.
"""
openerId = self._targetInfo.get('openerId')
if openerId is None:
return None
return self.browser._targets.get(openerId)
def _targetInfoChanged(self, targetInfo: Dict) -> None:
self._targetInfo = targetInfo
if not self._isInitialized and (self._targetInfo['type'] != 'page' or
self._targetInfo['url'] != ''):
self._isInitialized = True
self._initializedCallback(True)
return | 0.764364 | 0.089694 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import webapp2
import webtest
from dashboard import migrate_test_names
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.models import histogram
from tracing.value.diagnostics import generic_set
# Masters, bots and test names to add to the mock datastore.
_MOCK_DATA = [['ChromiumPerf'], ['win7', 'mac'], {
'SunSpider': {
'Total': {
't': {},
't_ref': {},
't_extwr': {},
},
'3d-cube': {
't': {}
},
},
'moz': {
'read_op_b': {
'r_op_b': {}
},
},
}]
class MigrateTestNamesTest(testing_common.TestCase):
def setUp(self):
super(MigrateTestNamesTest, self).setUp()
app = webapp2.WSGIApplication([
('/migrate_test_names', migrate_test_names.MigrateTestNamesHandler)
])
self.testapp = webtest.TestApp(app)
# Make sure puts get split up into multiple calls.
migrate_test_names._MAX_DATASTORE_PUTS_PER_PUT_MULTI_CALL = 30
self.SetCurrentUser('<EMAIL>')
testing_common.SetIsInternalUser('<EMAIL>', True)
def _AddMockData(self):
"""Adds sample TestMetadata, Row, and Anomaly entities."""
testing_common.AddTests(*_MOCK_DATA)
# Add 50 Row entities to one of the tests.
# Also add 2 Anomaly entities.
test_path = 'ChromiumPerf/mac/SunSpider/Total/t'
test_key = utils.TestKey(test_path)
test_container_key = utils.GetTestContainerKey(test_key)
for rev in range(15000, 15100, 2):
graph_data.Row(id=rev, parent=test_container_key, value=(rev * 2)).put()
if rev % 50 == 0:
data = generic_set.GenericSet(['foo_%s' % rev])
data = data.AsDict()
anomaly.Anomaly(
start_revision=(rev - 2),
end_revision=rev,
median_before_anomaly=100,
median_after_anomaly=50,
test=test_key).put()
histogram.SparseDiagnostic(
test=test_key,
start_revision=rev - 50,
end_revision=rev - 1,
data=data).put()
histogram.Histogram(test=test_key).put()
def _CheckRows(self, test_path, multiplier=2):
"""Checks the rows match the expected sample data for a given test.
The expected revisions that should be present are based on the sample data
added in _AddMockData.
Args:
test_path: Test path of the test to get rows for.
multiplier: Number to multiply with revision to get expected value.
"""
rows = graph_data.Row.query(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_path)).fetch()
self.assertEqual(50, len(rows))
self.assertEqual(15000, rows[0].revision)
self.assertEqual(15000 * multiplier, rows[0].value)
self.assertEqual(15098, rows[49].revision)
self.assertEqual(15098 * multiplier, rows[49].value)
t = utils.TestKey(test_path).get()
self.assertTrue(t.has_rows)
def _CheckAnomalies(self, test_path, r1=15000, r2=15050):
"""Checks whether the anomalies match the ones added in _AddMockData.
Args:
test_path: The test path for the TestMetadata which the Anomalies are on.
r1: Expected end revision of first Anomaly.
r2: Expected end revision of second Anomaly.
"""
key = utils.TestKey(test_path)
anomalies = anomaly.Anomaly.query(anomaly.Anomaly.test == key).fetch()
self.assertEqual(2, len(anomalies))
self.assertEqual(r1, anomalies[0].end_revision)
self.assertEqual(r2, anomalies[1].end_revision)
def _CheckHistogramData(self, test_path):
diagnostics = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == utils.TestKey(test_path)).fetch()
self.assertEqual(2, len(diagnostics))
histograms = histogram.SparseDiagnostic.query(
histogram.Histogram.test == utils.TestKey(test_path)).fetch()
self.assertEqual(2, len(histograms))
def _CheckTests(self, expected_tests):
"""Checks whether the current TestMetadata entities match the expected list.
Args:
expected_tests: List of test paths without the master/bot part.
"""
for master in _MOCK_DATA[0]:
for bot in _MOCK_DATA[1]:
expected = ['%s/%s/%s' % (master, bot, t) for t in expected_tests]
tests = graph_data.TestMetadata.query(
graph_data.TestMetadata.master_name == master,
graph_data.TestMetadata.bot_name == bot).fetch()
actual = [t.test_path for t in tests]
self.assertEqual(expected, actual)
def testPost_MigrateTraceLevelTest(self):
self._AddMockData()
self.testapp.post('/migrate_test_names', {
'old_pattern': '*/*/*/*/t',
'new_pattern': '*/*/*/*/time',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
expected_tests = [
'SunSpider',
'SunSpider/3d-cube',
'SunSpider/3d-cube/time',
'SunSpider/Total',
'SunSpider/Total/t_extwr',
'SunSpider/Total/t_ref',
'SunSpider/Total/time',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_MigrateSkippingIdentityMigrations(self):
self._AddMockData()
# Matches both t_ref and t_extwr, but only t_extwr gets migrated and
# t_ref is left untouched (otherwise t_ref would be deleted!).
self.testapp.post('/migrate_test_names', {
'old_pattern': '*/*/*/*/t_*',
'new_pattern': '*/*/*/*/t_ref',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
expected_tests = [
'SunSpider',
'SunSpider/3d-cube',
'SunSpider/3d-cube/t',
'SunSpider/Total',
'SunSpider/Total/t',
'SunSpider/Total/t_ref',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_RenameTraceWithPartialWildCardsInNewPattern_Fails(self):
# If there's a wildcard in a part of the new pattern, it should
# just be a single wildcard by itself, and it just means "copy
# over whatever was in the old test path". Wildcards mixed with
# substrings should be rejected.
self._AddMockData()
self.testapp.post(
'/migrate_test_names', {
'old_pattern': '*/*/Sun*/*/t',
'new_pattern': '*/*/Sun*/*/time',
},
status=400)
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
# Nothing was renamed since there was an error.
expected_tests = [
'SunSpider',
'SunSpider/3d-cube',
'SunSpider/3d-cube/t',
'SunSpider/Total',
'SunSpider/Total/t',
'SunSpider/Total/t_extwr',
'SunSpider/Total/t_ref',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_MigrateChartLevelTest(self):
self._AddMockData()
self.testapp.post(
'/migrate_test_names', {
'old_pattern': '*/*/SunSpider/Total',
'new_pattern': '*/*/SunSpider/OverallScore',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
self._CheckRows('ChromiumPerf/mac/SunSpider/OverallScore/t')
self._CheckAnomalies('ChromiumPerf/mac/SunSpider/OverallScore/t')
self._CheckHistogramData('ChromiumPerf/mac/SunSpider/OverallScore/t')
expected_tests = [
'SunSpider',
'SunSpider/3d-cube',
'SunSpider/3d-cube/t',
'SunSpider/OverallScore',
'SunSpider/OverallScore/t',
'SunSpider/OverallScore/t_extwr',
'SunSpider/OverallScore/t_ref',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_MigrateSuiteLevelTest(self):
self._AddMockData()
self.testapp.post('/migrate_test_names', {
'old_pattern': '*/*/SunSpider',
'new_pattern': '*/*/SunSpider1.0',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
self._CheckRows('ChromiumPerf/mac/SunSpider1.0/Total/t')
self._CheckAnomalies('ChromiumPerf/mac/SunSpider1.0/Total/t')
self._CheckHistogramData('ChromiumPerf/mac/SunSpider1.0/Total/t')
expected_tests = [
'SunSpider1.0',
'SunSpider1.0/3d-cube',
'SunSpider1.0/3d-cube/t',
'SunSpider1.0/Total',
'SunSpider1.0/Total/t',
'SunSpider1.0/Total/t_extwr',
'SunSpider1.0/Total/t_ref',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_MigrateSeriesToChartLevelTest(self):
self._AddMockData()
self.testapp.post(
'/migrate_test_names', {
'old_pattern': '*/*/SunSpider/Total/t',
'new_pattern': '*/*/SunSpider/Total',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
# The Row and Anomaly entities have been moved.
self._CheckRows('ChromiumPerf/mac/SunSpider/Total')
self._CheckAnomalies('ChromiumPerf/mac/SunSpider/Total')
self._CheckHistogramData('ChromiumPerf/mac/SunSpider/Total')
# There is no SunSpider/Total/time any more.
expected_tests = [
'SunSpider',
'SunSpider/3d-cube',
'SunSpider/3d-cube/t',
'SunSpider/Total',
'SunSpider/Total/t_extwr',
'SunSpider/Total/t_ref',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_MigrationFinished_EmailsSheriff(self):
self._AddMockData()
# Add a sheriff for one test.
test_path = 'ChromiumPerf/mac/moz/read_op_b/r_op_b'
test = utils.TestKey(test_path).get()
test.put()
# Add another sheriff for another test.
test_path = 'ChromiumPerf/win7/moz/read_op_b/r_op_b'
test = utils.TestKey(test_path).get()
test.put()
# Make a request to t migrate a test and then execute tasks on the queue.
self.testapp.post(
'/migrate_test_names', {
'old_pattern': '*/*/moz/read_op_b/r_op_b',
'new_pattern': '*/*/moz/read_operations_browser',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
# Check the emails that were sent.
messages = self.mail_stub.get_sent_messages()
self.assertEqual(2, len(messages))
self.assertEqual('<EMAIL>', messages[0].sender)
self.assertEqual('<EMAIL>',
messages[0].to)
self.assertEqual('Sheriffed Test Migrated', messages[0].subject)
body = str(messages[0].body)
self.assertIn(
'test ChromiumPerf/mac/moz/read_op_b/r_op_b has been migrated', body)
self.assertIn('migrated to ChromiumPerf/mac/moz/read_operations_browser',
body)
self.assertEqual('<EMAIL>', messages[1].sender)
self.assertEqual('<EMAIL>',
messages[1].to)
self.assertEqual('Sheriffed Test Migrated', messages[1].subject)
body = str(messages[1].body)
self.assertIn(
'test ChromiumPerf/win7/moz/read_op_b/r_op_b has been migrated', body)
self.assertIn('migrated to ChromiumPerf/win7/moz/read_operations_browser',
body)
def testGetNewTestPath_WithAsterisks(self):
self.assertEqual(
'A/b/c/X',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/d', '*/*/*/X'))
self.assertEqual(
'A/b/c/d',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/d', '*/*/*/*'))
self.assertEqual(
'A/b/c',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/d', '*/*/*'))
def testGetNewTestPath_WithBrackets(self):
# Brackets are just used to delete parts of names, no other functionality.
self.assertEqual(
'A/b/c/x',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/xxxx',
'*/*/*/[xxx]'))
self.assertEqual(
'A/b/c',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/xxxx',
'*/*/*/[xxxx]'))
self.assertEqual(
'A/b/c/x',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/x', '*/*/*/[]'))
self.assertEqual(
'A/b/c/d',
migrate_test_names._ValidateAndGetNewTestPath('AA/bb/cc/dd',
'[A]/[b]/[c]/[d]'))
def testGetNewTestPath_NewPathHasDifferentLength(self):
self.assertEqual(
'A/b/c',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c/d', 'A/*/c'))
self.assertEqual(
'A/b/c/d',
migrate_test_names._ValidateAndGetNewTestPath('A/b/c', 'A/*/c/d'))
self.assertRaises(migrate_test_names.BadInputPatternError,
migrate_test_names._ValidateAndGetNewTestPath, 'A/b/c',
'A/b/c/*')
def testGetNewTestPath_InvalidArgs(self):
self.assertRaises(AssertionError,
migrate_test_names._ValidateAndGetNewTestPath, 'A/b/*/d',
'A/b/c/d')
self.assertRaises(migrate_test_names.BadInputPatternError,
migrate_test_names._ValidateAndGetNewTestPath, 'A/b/c/d',
'A/b/c/d*')
if __name__ == '__main__':
unittest.main() | dashboard/dashboard/migrate_test_names_test.py |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import webapp2
import webtest
from dashboard import migrate_test_names
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.models import histogram
from tracing.value.diagnostics import generic_set
# Masters, bots and test names to add to the mock datastore.
_MOCK_DATA = [['ChromiumPerf'], ['win7', 'mac'], {
'SunSpider': {
'Total': {
't': {},
't_ref': {},
't_extwr': {},
},
'3d-cube': {
't': {}
},
},
'moz': {
'read_op_b': {
'r_op_b': {}
},
},
}]
class MigrateTestNamesTest(testing_common.TestCase):
def setUp(self):
super(MigrateTestNamesTest, self).setUp()
app = webapp2.WSGIApplication([
('/migrate_test_names', migrate_test_names.MigrateTestNamesHandler)
])
self.testapp = webtest.TestApp(app)
# Make sure puts get split up into multiple calls.
migrate_test_names._MAX_DATASTORE_PUTS_PER_PUT_MULTI_CALL = 30
self.SetCurrentUser('<EMAIL>')
testing_common.SetIsInternalUser('<EMAIL>', True)
def _AddMockData(self):
"""Adds sample TestMetadata, Row, and Anomaly entities."""
testing_common.AddTests(*_MOCK_DATA)
# Add 50 Row entities to one of the tests.
# Also add 2 Anomaly entities.
test_path = 'ChromiumPerf/mac/SunSpider/Total/t'
test_key = utils.TestKey(test_path)
test_container_key = utils.GetTestContainerKey(test_key)
for rev in range(15000, 15100, 2):
graph_data.Row(id=rev, parent=test_container_key, value=(rev * 2)).put()
if rev % 50 == 0:
data = generic_set.GenericSet(['foo_%s' % rev])
data = data.AsDict()
anomaly.Anomaly(
start_revision=(rev - 2),
end_revision=rev,
median_before_anomaly=100,
median_after_anomaly=50,
test=test_key).put()
histogram.SparseDiagnostic(
test=test_key,
start_revision=rev - 50,
end_revision=rev - 1,
data=data).put()
histogram.Histogram(test=test_key).put()
def _CheckRows(self, test_path, multiplier=2):
"""Checks the rows match the expected sample data for a given test.
The expected revisions that should be present are based on the sample data
added in _AddMockData.
Args:
test_path: Test path of the test to get rows for.
multiplier: Number to multiply with revision to get expected value.
"""
rows = graph_data.Row.query(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_path)).fetch()
self.assertEqual(50, len(rows))
self.assertEqual(15000, rows[0].revision)
self.assertEqual(15000 * multiplier, rows[0].value)
self.assertEqual(15098, rows[49].revision)
self.assertEqual(15098 * multiplier, rows[49].value)
t = utils.TestKey(test_path).get()
self.assertTrue(t.has_rows)
def _CheckAnomalies(self, test_path, r1=15000, r2=15050):
"""Checks whether the anomalies match the ones added in _AddMockData.
Args:
test_path: The test path for the TestMetadata which the Anomalies are on.
r1: Expected end revision of first Anomaly.
r2: Expected end revision of second Anomaly.
"""
key = utils.TestKey(test_path)
anomalies = anomaly.Anomaly.query(anomaly.Anomaly.test == key).fetch()
self.assertEqual(2, len(anomalies))
self.assertEqual(r1, anomalies[0].end_revision)
self.assertEqual(r2, anomalies[1].end_revision)
def _CheckHistogramData(self, test_path):
diagnostics = histogram.SparseDiagnostic.query(
histogram.SparseDiagnostic.test == utils.TestKey(test_path)).fetch()
self.assertEqual(2, len(diagnostics))
histograms = histogram.SparseDiagnostic.query(
histogram.Histogram.test == utils.TestKey(test_path)).fetch()
self.assertEqual(2, len(histograms))
def _CheckTests(self, expected_tests):
"""Checks whether the current TestMetadata entities match the expected list.
Args:
expected_tests: List of test paths without the master/bot part.
"""
for master in _MOCK_DATA[0]:
for bot in _MOCK_DATA[1]:
expected = ['%s/%s/%s' % (master, bot, t) for t in expected_tests]
tests = graph_data.TestMetadata.query(
graph_data.TestMetadata.master_name == master,
graph_data.TestMetadata.bot_name == bot).fetch()
actual = [t.test_path for t in tests]
self.assertEqual(expected, actual)
def testPost_MigrateTraceLevelTest(self):
self._AddMockData()
self.testapp.post('/migrate_test_names', {
'old_pattern': '*/*/*/*/t',
'new_pattern': '*/*/*/*/time',
})
self.ExecuteTaskQueueTasks('/migrate_test_names',
migrate_test_names._TASK_QUEUE_NAME)
expected_tests = [
'SunSpider',
'SunSpider/3d-cube',
'SunSpider/3d-cube/time',
'SunSpider/Total',
'SunSpider/Total/t_extwr',
'SunSpider/Total/t_ref',
'SunSpider/Total/time',
'moz',
'moz/read_op_b',
'moz/read_op_b/r_op_b',
]
self._CheckTests(expected_tests)
def testPost_MigrateSkippingIdentityMigrations(self):
  """A rename whose source equals its destination must be skipped."""
  self._AddMockData()
  # Matches both t_ref and t_extwr, but only t_extwr gets migrated and
  # t_ref is left untouched (otherwise t_ref would be deleted!).
  self.testapp.post('/migrate_test_names', {
      'old_pattern': '*/*/*/*/t_*',
      'new_pattern': '*/*/*/*/t_ref',
  })
  self.ExecuteTaskQueueTasks('/migrate_test_names',
                             migrate_test_names._TASK_QUEUE_NAME)
  # t_extwr has been merged into t_ref; t_ref itself still exists.
  expected_tests = [
      'SunSpider',
      'SunSpider/3d-cube',
      'SunSpider/3d-cube/t',
      'SunSpider/Total',
      'SunSpider/Total/t',
      'SunSpider/Total/t_ref',
      'moz',
      'moz/read_op_b',
      'moz/read_op_b/r_op_b',
  ]
  self._CheckTests(expected_tests)
def testPost_RenameTraceWithPartialWildCardsInNewPattern_Fails(self):
  """A new pattern mixing a wildcard with literal text is rejected."""
  # If there's a wildcard in a part of the new pattern, it should
  # just be a single wildcard by itself, and it just means "copy
  # over whatever was in the old test path". Wildcards mixed with
  # substrings should be rejected.
  self._AddMockData()
  # Expect an HTTP 400 because 'Sun*' mixes a literal with a wildcard.
  self.testapp.post(
      '/migrate_test_names', {
          'old_pattern': '*/*/Sun*/*/t',
          'new_pattern': '*/*/Sun*/*/time',
      },
      status=400)
  self.ExecuteTaskQueueTasks('/migrate_test_names',
                             migrate_test_names._TASK_QUEUE_NAME)
  # Nothing was renamed since there was an error.
  expected_tests = [
      'SunSpider',
      'SunSpider/3d-cube',
      'SunSpider/3d-cube/t',
      'SunSpider/Total',
      'SunSpider/Total/t',
      'SunSpider/Total/t_extwr',
      'SunSpider/Total/t_ref',
      'moz',
      'moz/read_op_b',
      'moz/read_op_b/r_op_b',
  ]
  self._CheckTests(expected_tests)
def testPost_MigrateChartLevelTest(self):
  """Renaming a chart-level test moves its whole subtree and data."""
  self._AddMockData()
  self.testapp.post(
      '/migrate_test_names', {
          'old_pattern': '*/*/SunSpider/Total',
          'new_pattern': '*/*/SunSpider/OverallScore',
      })
  self.ExecuteTaskQueueTasks('/migrate_test_names',
                             migrate_test_names._TASK_QUEUE_NAME)
  # Rows, Anomalies and histogram data follow the renamed subtree.
  self._CheckRows('ChromiumPerf/mac/SunSpider/OverallScore/t')
  self._CheckAnomalies('ChromiumPerf/mac/SunSpider/OverallScore/t')
  self._CheckHistogramData('ChromiumPerf/mac/SunSpider/OverallScore/t')
  expected_tests = [
      'SunSpider',
      'SunSpider/3d-cube',
      'SunSpider/3d-cube/t',
      'SunSpider/OverallScore',
      'SunSpider/OverallScore/t',
      'SunSpider/OverallScore/t_extwr',
      'SunSpider/OverallScore/t_ref',
      'moz',
      'moz/read_op_b',
      'moz/read_op_b/r_op_b',
  ]
  self._CheckTests(expected_tests)
def testPost_MigrateSuiteLevelTest(self):
  """Renaming a suite-level test moves the entire suite subtree."""
  self._AddMockData()
  self.testapp.post('/migrate_test_names', {
      'old_pattern': '*/*/SunSpider',
      'new_pattern': '*/*/SunSpider1.0',
  })
  self.ExecuteTaskQueueTasks('/migrate_test_names',
                             migrate_test_names._TASK_QUEUE_NAME)
  # Rows, Anomalies and histogram data follow the renamed suite.
  self._CheckRows('ChromiumPerf/mac/SunSpider1.0/Total/t')
  self._CheckAnomalies('ChromiumPerf/mac/SunSpider1.0/Total/t')
  self._CheckHistogramData('ChromiumPerf/mac/SunSpider1.0/Total/t')
  expected_tests = [
      'SunSpider1.0',
      'SunSpider1.0/3d-cube',
      'SunSpider1.0/3d-cube/t',
      'SunSpider1.0/Total',
      'SunSpider1.0/Total/t',
      'SunSpider1.0/Total/t_extwr',
      'SunSpider1.0/Total/t_ref',
      'moz',
      'moz/read_op_b',
      'moz/read_op_b/r_op_b',
  ]
  self._CheckTests(expected_tests)
def testPost_MigrateSeriesToChartLevelTest(self):
  """A series can be migrated up into its parent chart-level path."""
  self._AddMockData()
  self.testapp.post(
      '/migrate_test_names', {
          'old_pattern': '*/*/SunSpider/Total/t',
          'new_pattern': '*/*/SunSpider/Total',
      })
  self.ExecuteTaskQueueTasks('/migrate_test_names',
                             migrate_test_names._TASK_QUEUE_NAME)
  # The Row and Anomaly entities have been moved.
  self._CheckRows('ChromiumPerf/mac/SunSpider/Total')
  self._CheckAnomalies('ChromiumPerf/mac/SunSpider/Total')
  self._CheckHistogramData('ChromiumPerf/mac/SunSpider/Total')
  # There is no SunSpider/Total/t any more.
  expected_tests = [
      'SunSpider',
      'SunSpider/3d-cube',
      'SunSpider/3d-cube/t',
      'SunSpider/Total',
      'SunSpider/Total/t_extwr',
      'SunSpider/Total/t_ref',
      'moz',
      'moz/read_op_b',
      'moz/read_op_b/r_op_b',
  ]
  self._CheckTests(expected_tests)
def testPost_MigrationFinished_EmailsSheriff(self):
  """When sheriffed tests are migrated, one notification email is sent each."""
  self._AddMockData()
  # Add a sheriff for one test.
  # NOTE(review): the entity is re-put unmodified; presumably sheriff
  # assignment was elided from this excerpt — confirm against _AddMockData.
  test_path = 'ChromiumPerf/mac/moz/read_op_b/r_op_b'
  test = utils.TestKey(test_path).get()
  test.put()
  # Add another sheriff for another test.
  test_path = 'ChromiumPerf/win7/moz/read_op_b/r_op_b'
  test = utils.TestKey(test_path).get()
  test.put()
  # Make a request to migrate a test and then execute tasks on the queue.
  self.testapp.post(
      '/migrate_test_names', {
          'old_pattern': '*/*/moz/read_op_b/r_op_b',
          'new_pattern': '*/*/moz/read_operations_browser',
      })
  self.ExecuteTaskQueueTasks('/migrate_test_names',
                             migrate_test_names._TASK_QUEUE_NAME)
  # Check the emails that were sent: one per migrated sheriffed test,
  # naming both the old and the new test path.
  messages = self.mail_stub.get_sent_messages()
  self.assertEqual(2, len(messages))
  self.assertEqual('<EMAIL>', messages[0].sender)
  self.assertEqual('<EMAIL>',
                   messages[0].to)
  self.assertEqual('Sheriffed Test Migrated', messages[0].subject)
  body = str(messages[0].body)
  self.assertIn(
      'test ChromiumPerf/mac/moz/read_op_b/r_op_b has been migrated', body)
  self.assertIn('migrated to ChromiumPerf/mac/moz/read_operations_browser',
                body)
  self.assertEqual('<EMAIL>', messages[1].sender)
  self.assertEqual('<EMAIL>',
                   messages[1].to)
  self.assertEqual('Sheriffed Test Migrated', messages[1].subject)
  body = str(messages[1].body)
  self.assertIn(
      'test ChromiumPerf/win7/moz/read_op_b/r_op_b has been migrated', body)
  self.assertIn('migrated to ChromiumPerf/win7/moz/read_operations_browser',
                body)
def testGetNewTestPath_WithAsterisks(self):
  """An asterisk in the new pattern copies the old path part through."""
  validate = migrate_test_names._ValidateAndGetNewTestPath
  # (old test path, new pattern, expected result)
  cases = [
      ('A/b/c/d', '*/*/*/X', 'A/b/c/X'),
      ('A/b/c/d', '*/*/*/*', 'A/b/c/d'),
      ('A/b/c/d', '*/*/*', 'A/b/c'),
  ]
  for old_path, pattern, expected in cases:
    self.assertEqual(expected, validate(old_path, pattern))
def testGetNewTestPath_WithBrackets(self):
  """Bracketed text in the new pattern is deleted from the old name."""
  # Brackets are just used to delete parts of names, no other functionality.
  self.assertEqual(
      'A/b/c/x',
      migrate_test_names._ValidateAndGetNewTestPath('A/b/c/xxxx',
                                                    '*/*/*/[xxx]'))
  # Deleting the whole component removes that path level entirely.
  self.assertEqual(
      'A/b/c',
      migrate_test_names._ValidateAndGetNewTestPath('A/b/c/xxxx',
                                                    '*/*/*/[xxxx]'))
  # Empty brackets delete nothing.
  self.assertEqual(
      'A/b/c/x',
      migrate_test_names._ValidateAndGetNewTestPath('A/b/c/x', '*/*/*/[]'))
  self.assertEqual(
      'A/b/c/d',
      migrate_test_names._ValidateAndGetNewTestPath('AA/bb/cc/dd',
                                                    '[A]/[b]/[c]/[d]'))
def testGetNewTestPath_NewPathHasDifferentLength(self):
  """The new pattern may be shorter, or longer with literal components."""
  # A shorter new pattern truncates the path.
  self.assertEqual(
      'A/b/c',
      migrate_test_names._ValidateAndGetNewTestPath('A/b/c/d', 'A/*/c'))
  # A longer new pattern may append a literal component.
  self.assertEqual(
      'A/b/c/d',
      migrate_test_names._ValidateAndGetNewTestPath('A/b/c', 'A/*/c/d'))
  # But a wildcard cannot extend beyond the old path's length.
  self.assertRaises(migrate_test_names.BadInputPatternError,
                    migrate_test_names._ValidateAndGetNewTestPath, 'A/b/c',
                    'A/b/c/*')
def testGetNewTestPath_InvalidArgs(self):
  """Wildcards in the old path and trailing '*' in a literal are invalid."""
  # The old argument must be a concrete test path, not a pattern.
  self.assertRaises(AssertionError,
                    migrate_test_names._ValidateAndGetNewTestPath, 'A/b/*/d',
                    'A/b/c/d')
  # A literal component mixed with a wildcard is rejected.
  self.assertRaises(migrate_test_names.BadInputPatternError,
                    migrate_test_names._ValidateAndGetNewTestPath, 'A/b/c/d',
                    'A/b/c/d*')
# Standard test-module entry point. (Fix: the original line had dataset-join
# residue appended after unittest.main(), which was a syntax error.)
if __name__ == '__main__':
  unittest.main()
# Copyright (c) 2014-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
# The default command to import just layers is:
# layers2es --interfaces desktop mobile --no-folders --no-blocks --no-themes \
# --recreate-index --app-config development.ini
import sys
import yaml
import requests
import json
from argparse import ArgumentParser
from pyramid.paster import get_app, bootstrap
from pyramid.i18n import TranslationStringFactory, make_localizer
from pyramid.interfaces import ITranslationDirectories
from geoportailv3.lib.search import get_elasticsearch, get_index, ensure_index
from elasticsearch import helpers
from elasticsearch.helpers import BulkIndexError
from elasticsearch.exceptions import ConnectionTimeout
def statuslog(text):
    """Write *text* to stdout and flush so progress appears immediately."""
    out = sys.stdout
    out.write(text)
    out.flush()
def main():
    """Parse command-line options, load the Pyramid app and run the import.

    Entry point for the layers2es script; see the module header comment for
    a typical invocation.
    """
    parser = ArgumentParser(
        prog=sys.argv[0], add_help=True,
        description="Tool to fill the tsearch table (Full-Text Search) "
        "from the theme informations.",
    )
    parser.add_argument(
        "--interfaces",
        nargs='+',
        required=True,
        help="the interfaces to export",
    )
    parser.add_argument(
        "--duplicate-name",
        action="store_true",
        dest="name",
        help="allows to add a name more than one time,\n"
        "by default if we find more than one element with the same name "
        "only one will be imported",
    )
    parser.add_argument(
        "--no-themes",
        action="store_false",
        dest="themes",
        help="don't import the themes",
    )
    parser.add_argument(
        "--no-blocks",
        action="store_false",
        dest="blocks",
        help="don't import the blocks (first level layer groups)",
    )
    parser.add_argument(
        "--no-folders",
        action="store_false",
        dest="folders",
        help="don't import the folders (tree folders)",
    )
    parser.add_argument(
        "--no-layers",
        action="store_false",
        dest="layers",
        help="don't import the layers (tree leaf)",
    )
    parser.add_argument(
        "--recreate-index",
        action="store_true",
        dest="recreate_index",
        help="recreate the index",
    )
    parser.add_argument(
        "--package",
        help="the application package",
    )
    parser.add_argument(
        "-i", "--app-config",
        default="production.ini",
        dest="app_config",
        help="the application .ini config file "
        "(optional, default is 'production.ini')"
    )
    parser.add_argument(
        "-n", "--app-name",
        default="app",
        dest="app_name",
        help="the application name (optional, default is 'app')"
    )
    options = parser.parse_args()
    app_config = options.app_config
    app_name = options.app_name
    # NOTE(review): --app-name defaults to "app", so app_name is never None
    # and this "config.ini#name" split is dead code — confirm intent.
    if app_name is None and "#" in app_config:
        app_config, app_name = app_config.split("#", 1)
    # Loading the app initializes the global configuration (side effect).
    get_app(app_config, name=app_name)
    Import(options)
class Import:
    """Indexes themes, blocks, folders and layers into Elasticsearch.

    Walks the theme/group/layer tree for each requested interface — once for
    the public tree, then once per role — and bulk-indexes one document per
    (layer, language, role) triple into the "<index>_layers" ES index.
    """

    def __init__(self, options):
        self.options = options
        # Keys already indexed, to avoid duplicates: (name-or-id, interface, role).
        self.imported = set()
        # Accumulated ES bulk actions.
        self.layers = []
        settings = {}
        with open(".build/config.yaml") as f:
            # SECURITY: yaml.load() without an explicit Loader can construct
            # arbitrary Python objects. The file is build-generated and
            # trusted here, but yaml.safe_load(f) would be preferable.
            settings = yaml.load(f)
        self.languages = settings["available_locale_names"]
        # must be done only once we have loaded the project config
        from c2cgeoportal.models import DBSession, Interface, Theme, Role
        self.session = DBSession()
        self._ = {}
        self.metadata_service_url = \
            'http://shop.geoportail.lu/Portail/inspire/webservices/getMD.jsp'
        # FIX: bootstrap the Pyramid environment once — the original called
        # bootstrap() twice (once for the registry, once for the request),
        # paying the full application start-up cost a second time.
        env = bootstrap(self.options.app_config)
        registry = env['registry']
        request = env['request']
        self.es_layer_index = get_index(request) + '_layers'
        self.tdirs = registry.queryUtility(ITranslationDirectories, default=[])
        self.tsf = TranslationStringFactory('geoportailv3-client')
        self.interfaces = self.session.query(Interface).filter(
            Interface.name.in_(options.interfaces)
        ).all()
        # Track which theme/group ids were indexed as public, per interface.
        self.public_theme = {}
        self.public_group = {}
        for interface in self.interfaces:
            self.public_theme[interface.id] = []
            self.public_group[interface.id] = []
        for theme in self.session.query(Theme).filter_by(public=True).all():
            self._add_theme(theme)
        for role in self.session.query(Role).all():
            for theme in self.session.query(Theme).all():
                self._add_theme(theme, role)
        ensure_index(
            get_elasticsearch(request),
            self.es_layer_index,
            options.recreate_index
        )
        try:
            helpers.bulk(actions=self.layers,
                         client=get_elasticsearch(request),
                         raise_on_error=True)
        except (BulkIndexError, ConnectionTimeout) as e:
            statuslog("\n %s" % e)

    def _update_document(self, obj=None):
        """Wrap a document dict into an ES bulk-index action."""
        doc = {
            "_index": self.es_layer_index,
            "_type": 'layer',
            # Unique per (layer, language, role).
            "_id": str(obj['layer_id']) + "_" +
                   obj['language'] + "_" + str(obj['role_id']),
            "_source": obj
        }
        return doc

    def _add_fts(self, item, interface, action, role):
        """Queue one FTS document per language for a theme/group/layer.

        Args:
            item: theme, group or layer ORM object.
            interface: the Interface the item is exported for.
            action: document "type" ('add_theme', 'add_group', 'add_layer').
            role: Role granting access, or None for public items.
        """
        key = (
            item.name if self.options.name else item.id,
            interface.id,
            role.id if role is not None else None
        )
        if key not in self.imported:
            self.imported.add(key)
            for lang in self.languages:
                localizer = make_localizer(lang, self.tdirs)
                translated_name = localizer.translate(self.tsf(item.name))
                if role is None:
                    role_id = None
                else:
                    role_id = role.id
                fts = {
                    'layer_id': item.id,
                    'name_translated': translated_name,
                    'name': item.name,
                    'role_id': role_id,
                    'interface': interface.name,
                    'language': lang,
                    'public': role is None,
                    'type': action,
                    'keywords': '',
                    'description': '',
                    'metadata_name': ''
                }
                # Enrich from the metadata web service when the item carries
                # a 'metadata_id' UI metadata entry.
                for metadata in item.ui_metadata:
                    if metadata.name == 'metadata_id':
                        params = dict(
                            uid=metadata.value,
                            lang=lang
                        )
                        try:
                            resp = requests.get(url=self.metadata_service_url,
                                                params=params)
                            data = json.loads(resp.text)
                            try:
                                fts['keywords'] = data['root'][0]['keywords']
                                fts['description'] = \
                                    data['root'][0]['description']
                                fts['metadata_name'] = data['root'][0]['name']
                            except KeyError as e:
                                statuslog("\n %s" % e)
                        except requests.exceptions.RequestException as e:
                            # A network failure aborts the whole import.
                            statuslog("\n %s" % e)
                            sys.exit(1)
                doc = self._update_document(fts)
                self.layers.append(doc)

    def _add_theme(self, theme, role=None):
        """Recurse into a theme; index it when any of its children matched."""
        fill = False
        for interface in self.interfaces:
            if interface in theme.interfaces:
                for child in theme.children:
                    fill = self._add_block(child, interface, role) or fill
                if fill and self.options.themes:
                    if role is None:
                        self.public_theme[interface.id].append(theme.id)
                    # Role-specific docs are skipped when already public.
                    if role is None or \
                            theme.id not in self.public_theme[interface.id]:
                        self._add_fts(theme, interface, "add_theme", role)

    def _add_block(self, group, interface, role):
        """Index a first-level layer group ("block")."""
        return self._add_group(group, interface, self.options.blocks, role)

    def _add_folder(self, group, interface, role):
        """Index an intermediate layer group ("folder")."""
        return self._add_group(group, interface, self.options.folders, role)

    def _add_group(self, group, interface, export, role):
        """Recurse into a group; index it when *export* is set and it has content.

        Returns:
            True when any descendant layer was indexed.
        """
        from c2cgeoportal.models import LayerGroup
        fill = False
        if hasattr(group, 'children'):
            for child in group.children:
                if isinstance(child, LayerGroup):
                    fill = self._add_folder(child, interface, role) or fill
                else:
                    fill = self._add_layer(child, interface, role) or fill
        else:
            fill = self._add_layer(group, interface, role) or fill
        if fill and export:
            if role is None:
                self.public_group[interface.id].append(group.id)
            if role is None or group.id not in self.public_group[interface.id]:
                self._add_fts(group, interface, "add_group", role)
        return fill

    def _layer_visible(self, layer, role):
        """Return True if *role* can see *layer* via a restriction area."""
        for restrictionarea in layer.restrictionareas:
            if role in restrictionarea.roles:
                return True
        return False

    def _add_layer(self, layer, interface, role):
        """Index a layer; return True when it was eligible for this run."""
        from c2cgeoportal.models import LayerV1
        # Old-style (v1) layers are never indexed.
        if isinstance(layer, LayerV1):
            return False
        if role is None:
            fill = layer.public and interface in layer.interfaces
        else:
            fill = interface in layer.interfaces and not layer.public and \
                self._layer_visible(layer, role)
        if fill and self.options.layers:
            self._add_fts(layer, interface, "add_layer", role)
        return fill
# Copyright (c) 2014-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
# The default command to import just layers is:
# layers2es --interfaces desktop mobile --no-folders --no-blocks --no-themes \
# --recreate-index --app-config development.ini
import sys
import yaml
import requests
import json
from argparse import ArgumentParser
from pyramid.paster import get_app, bootstrap
from pyramid.i18n import TranslationStringFactory, make_localizer
from pyramid.interfaces import ITranslationDirectories
from geoportailv3.lib.search import get_elasticsearch, get_index, ensure_index
from elasticsearch import helpers
from elasticsearch.helpers import BulkIndexError
from elasticsearch.exceptions import ConnectionTimeout
def statuslog(text):
    """Write progress text to stdout and flush so it appears immediately."""
    sys.stdout.write(text)
    sys.stdout.flush()
def main():
    """Parse command-line options, load the Pyramid app and run the import.

    Entry point for the layers2es script; see the module header comment for
    a typical invocation.
    """
    parser = ArgumentParser(
        prog=sys.argv[0], add_help=True,
        description="Tool to fill the tsearch table (Full-Text Search) "
        "from the theme informations.",
    )
    parser.add_argument(
        "--interfaces",
        nargs='+',
        required=True,
        help="the interfaces to export",
    )
    parser.add_argument(
        "--duplicate-name",
        action="store_true",
        dest="name",
        help="allows to add a name more than one time,\n"
        "by default if we find more than one element with the same name "
        "only one will be imported",
    )
    parser.add_argument(
        "--no-themes",
        action="store_false",
        dest="themes",
        help="don't import the themes",
    )
    parser.add_argument(
        "--no-blocks",
        action="store_false",
        dest="blocks",
        help="don't import the blocks (first level layer groups)",
    )
    parser.add_argument(
        "--no-folders",
        action="store_false",
        dest="folders",
        help="don't import the folders (tree folders)",
    )
    parser.add_argument(
        "--no-layers",
        action="store_false",
        dest="layers",
        help="don't import the layers (tree leaf)",
    )
    parser.add_argument(
        "--recreate-index",
        action="store_true",
        dest="recreate_index",
        help="recreate the index",
    )
    parser.add_argument(
        "--package",
        help="the application package",
    )
    parser.add_argument(
        "-i", "--app-config",
        default="production.ini",
        dest="app_config",
        help="the application .ini config file "
        "(optional, default is 'production.ini')"
    )
    parser.add_argument(
        "-n", "--app-name",
        default="app",
        dest="app_name",
        help="the application name (optional, default is 'app')"
    )
    options = parser.parse_args()
    app_config = options.app_config
    app_name = options.app_name
    # NOTE(review): --app-name defaults to "app", so app_name is never None
    # and this "config.ini#name" split is dead code — confirm intent.
    if app_name is None and "#" in app_config:
        app_config, app_name = app_config.split("#", 1)
    # Loading the app initializes the global configuration (side effect).
    get_app(app_config, name=app_name)
    Import(options)
class Import:
    """Indexes themes, blocks, folders and layers into Elasticsearch.

    (Fix: the final line of this copy carried dataset-join residue after
    `return fill`, which was a syntax error; it has been removed. The double
    bootstrap() call is also collapsed into one.)
    """

    def __init__(self, options):
        self.options = options
        # Keys already indexed, to avoid duplicates: (name-or-id, interface, role).
        self.imported = set()
        # Accumulated ES bulk actions.
        self.layers = []
        settings = {}
        with open(".build/config.yaml") as f:
            # SECURITY: yaml.load() without a Loader can construct arbitrary
            # objects; the file is build-generated and trusted here, but
            # yaml.safe_load(f) would be preferable.
            settings = yaml.load(f)
        self.languages = settings["available_locale_names"]
        # must be done only once we have loaded the project config
        from c2cgeoportal.models import DBSession, Interface, Theme, Role
        self.session = DBSession()
        self._ = {}
        self.metadata_service_url = \
            'http://shop.geoportail.lu/Portail/inspire/webservices/getMD.jsp'
        # Bootstrap the Pyramid environment once (was done twice originally).
        env = bootstrap(self.options.app_config)
        registry = env['registry']
        request = env['request']
        self.es_layer_index = get_index(request) + '_layers'
        self.tdirs = registry.queryUtility(ITranslationDirectories, default=[])
        self.tsf = TranslationStringFactory('geoportailv3-client')
        self.interfaces = self.session.query(Interface).filter(
            Interface.name.in_(options.interfaces)
        ).all()
        # Track which theme/group ids were indexed as public, per interface.
        self.public_theme = {}
        self.public_group = {}
        for interface in self.interfaces:
            self.public_theme[interface.id] = []
            self.public_group[interface.id] = []
        for theme in self.session.query(Theme).filter_by(public=True).all():
            self._add_theme(theme)
        for role in self.session.query(Role).all():
            for theme in self.session.query(Theme).all():
                self._add_theme(theme, role)
        ensure_index(
            get_elasticsearch(request),
            self.es_layer_index,
            options.recreate_index
        )
        try:
            helpers.bulk(actions=self.layers,
                         client=get_elasticsearch(request),
                         raise_on_error=True)
        except (BulkIndexError, ConnectionTimeout) as e:
            statuslog("\n %s" % e)

    def _update_document(self, obj=None):
        """Wrap a document dict into an ES bulk-index action."""
        doc = {
            "_index": self.es_layer_index,
            "_type": 'layer',
            # Unique per (layer, language, role).
            "_id": str(obj['layer_id']) + "_" +
                   obj['language'] + "_" + str(obj['role_id']),
            "_source": obj
        }
        return doc

    def _add_fts(self, item, interface, action, role):
        """Queue one FTS document per language for a theme/group/layer."""
        key = (
            item.name if self.options.name else item.id,
            interface.id,
            role.id if role is not None else None
        )
        if key not in self.imported:
            self.imported.add(key)
            for lang in self.languages:
                localizer = make_localizer(lang, self.tdirs)
                translated_name = localizer.translate(self.tsf(item.name))
                if role is None:
                    role_id = None
                else:
                    role_id = role.id
                fts = {
                    'layer_id': item.id,
                    'name_translated': translated_name,
                    'name': item.name,
                    'role_id': role_id,
                    'interface': interface.name,
                    'language': lang,
                    'public': role is None,
                    'type': action,
                    'keywords': '',
                    'description': '',
                    'metadata_name': ''
                }
                # Enrich from the metadata web service when available.
                for metadata in item.ui_metadata:
                    if metadata.name == 'metadata_id':
                        params = dict(
                            uid=metadata.value,
                            lang=lang
                        )
                        try:
                            resp = requests.get(url=self.metadata_service_url,
                                                params=params)
                            data = json.loads(resp.text)
                            try:
                                fts['keywords'] = data['root'][0]['keywords']
                                fts['description'] = \
                                    data['root'][0]['description']
                                fts['metadata_name'] = data['root'][0]['name']
                            except KeyError as e:
                                statuslog("\n %s" % e)
                        except requests.exceptions.RequestException as e:
                            # A network failure aborts the whole import.
                            statuslog("\n %s" % e)
                            sys.exit(1)
                doc = self._update_document(fts)
                self.layers.append(doc)

    def _add_theme(self, theme, role=None):
        """Recurse into a theme; index it when any of its children matched."""
        fill = False
        for interface in self.interfaces:
            if interface in theme.interfaces:
                for child in theme.children:
                    fill = self._add_block(child, interface, role) or fill
                if fill and self.options.themes:
                    if role is None:
                        self.public_theme[interface.id].append(theme.id)
                    if role is None or \
                            theme.id not in self.public_theme[interface.id]:
                        self._add_fts(theme, interface, "add_theme", role)

    def _add_block(self, group, interface, role):
        """Index a first-level layer group ("block")."""
        return self._add_group(group, interface, self.options.blocks, role)

    def _add_folder(self, group, interface, role):
        """Index an intermediate layer group ("folder")."""
        return self._add_group(group, interface, self.options.folders, role)

    def _add_group(self, group, interface, export, role):
        """Recurse into a group; index it when *export* is set and it has content."""
        from c2cgeoportal.models import LayerGroup
        fill = False
        if hasattr(group, 'children'):
            for child in group.children:
                if isinstance(child, LayerGroup):
                    fill = self._add_folder(child, interface, role) or fill
                else:
                    fill = self._add_layer(child, interface, role) or fill
        else:
            fill = self._add_layer(group, interface, role) or fill
        if fill and export:
            if role is None:
                self.public_group[interface.id].append(group.id)
            if role is None or group.id not in self.public_group[interface.id]:
                self._add_fts(group, interface, "add_group", role)
        return fill

    def _layer_visible(self, layer, role):
        """Return True if *role* can see *layer* via a restriction area."""
        for restrictionarea in layer.restrictionareas:
            if role in restrictionarea.roles:
                return True
        return False

    def _add_layer(self, layer, interface, role):
        """Index a layer; return True when it was eligible for this run."""
        from c2cgeoportal.models import LayerV1
        # Old-style (v1) layers are never indexed.
        if isinstance(layer, LayerV1):
            return False
        if role is None:
            fill = layer.public and interface in layer.interfaces
        else:
            fill = interface in layer.interfaces and not layer.public and \
                self._layer_visible(layer, role)
        if fill and self.options.layers:
            self._add_fts(layer, interface, "add_layer", role)
        return fill
import pyaudio
import time
import numpy as np
import pygame
import math
from scipy.ndimage import gaussian_filter1d
from scipy.special import softmax
from dfts import do_dfts
from util import *
# --- Audio / DFT configuration ---
samplerate = 44100
n_samples = 1024
# Width in Hz of one DFT bin for this window size.
bin_width = samplerate / n_samples
# Setup pygame
pygame.init()
window_w = 865
window_h = 512
screen = pygame.display.set_mode((window_w, window_h))
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
# Open pyaudio input stream
stream = p.open(format=pyaudio.paFloat32,
                channels=1,
                rate=samplerate,
                input=True)
# start the stream
stream.start_stream()
# Piano key numbers
n_skipped = 1
octaves = 6
#pianokeys = np.arange(25, (octaves * 12) + 1, 0.5)
# Half-key (quarter-tone) steps over `octaves` octaves, skipping the first
# `n_skipped` octave(s) of the keyboard.
pianokeys = np.arange((12 * n_skipped) + 1, (12 * n_skipped) + 1 + (octaves * 12), 0.5)
# Gather frequencies to DFT at
freqs = np.array([getfreq(key_n) for key_n in pianokeys])
# ---- create scaling based on how often frequency bins overlap -----
# Need previous two octaves to get bins correct
prev_pianokeys = np.arange((12 * n_skipped) + 1 - 24, (12 * n_skipped) + 1, 0.5)
prev_freqs = np.array([getfreq(key_n) for key_n in prev_pianokeys])
allfreqs = np.concatenate((prev_freqs, freqs))
# Create scaling based on bin width
bins = []
for freq in allfreqs:
    halfbinw = bin_width / 2
    lower = freq - halfbinw
    upper = freq + halfbinw
    bins.append((lower, upper))
# Count the amount of bins a certain frequency is in
scale_count = np.zeros(len(freqs))
for idx, freq in enumerate(freqs):
    for (l, h) in bins:
        if freq > l and freq <= h:
            # How far from the sides are we?
            ldist = freq - l
            rdist = h - freq
            #normalize
            ldist_n = ldist / (ldist + rdist)
            rdist_n = rdist / (ldist + rdist)
            # 1.0 at the bin centre, 0.0 at the bin edge.
            dist = min(ldist_n, rdist_n) * 2
            scale_count[idx] += dist
# Frequencies covered by many overlapping bins are scaled down.
scaler = 1 - (scale_count / np.max(scale_count))
# Memory to keep rolling average
n_keep = 5
avg_mem_idx = 0
avg_mem = np.zeros((n_keep, len(freqs)))
# Keep notes
n_keep_notes = 1
actual_notes_idx = 0
actual_notes_mem = np.zeros((n_keep_notes, 12))
# MAIN LOOP: read audio, run a chromatic DFT, detect note peaks, draw.
should_quit = False
while stream.is_active() and not should_quit:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            should_quit = True
    start_time = time.time()
    # Read samples
    samples = stream.read(n_samples)
    samples = np.frombuffer(samples, dtype=np.float32)
    # Apply windowing function
    samples = np.blackman(len(samples)) * samples
    # Perform DFT
    # We cannot do FFT because we need the frequency bins to be chromatic
    dftime = time.time()
    dfts = np.abs(do_dfts(freqs, samples, samplerate))
    print(f'DFT took {time.time() - dftime} s, { 1/ (time.time() - dftime)} Hz')
    #Taper first octave
    taper = np.ones(dfts.size)
    taper[0:24] = np.linspace(0, 1, 24)
    #taper[0:48] = 1 - np.flip(np.geomspace(0.0001, 1, 48))
    dfts = dfts * scaler
    #dfts = dfts * taper
    # Add dft result to rolling average
    avg_mem[avg_mem_idx, :] = dfts
    avg_mem_idx = (avg_mem_idx + 1) % n_keep
    avged_dfts = np.mean(avg_mem, axis=0)
    # Fold dft output
    notes = foldfft(avged_dfts, 24, 24)
    # Filter notes
    sigma = 1
    filtered_notes = gaussian_filter1d(notes, sigma, mode='wrap')
    # Find peaks (local maxima over a circular triplet window)
    peaks = []
    # Include wraparound for first and last bin
    wrapback = [np.array([filtered_notes[-1], filtered_notes[0], filtered_notes[1]])]
    wrapfront = [np.array([filtered_notes[-2], filtered_notes[-1], filtered_notes[0]])]
    triplets = [filtered_notes[x: x + 3] for x in range(len(filtered_notes) - 2)]
    triplets = wrapback + triplets + wrapfront
    for idx, (prev, cur, next) in enumerate(triplets):
        if prev < cur and cur > next:
            peaks.append((idx, cur))
    # Only keep n largest peaks
    n_keep_peaks = 5
    peaks = sorted(peaks, key=lambda x: x[1], reverse=True)[0:n_keep_peaks]
    # Put peaks in note bins (even indices are exact semitones)
    for (idx, amplitude) in peaks:
        if idx in range(0, 24, 2):
            actual_notes_mem[actual_notes_idx, idx // 2] = amplitude
        else:
            # Quarter-tone peak: split the energy over both neighbours.
            prev = math.floor(idx / 2)
            next = math.ceil(idx / 2)
            actual_notes_mem[actual_notes_idx, prev] = 0.5 * amplitude
            actual_notes_mem[actual_notes_idx, next % 12] = 0.5 * amplitude
    # Decay note bins
    # NOTE(review): indentation was lost in this copy; the decay is placed
    # after the peak loop (once per frame) — confirm against the original.
    actual_notes_mem[actual_notes_idx, :] *= 0.8
    # # Try softmax to see if it helps isolate notes better?
    # actual_notes_mem[actual_notes_idx, :] = softmax(actual_notes_mem[actual_notes_idx])
    # actual_notes_mem[actual_notes_idx, :] -= np.min(actual_notes_mem[actual_notes_idx])
    actual_notes_idx = (actual_notes_idx + 1) % n_keep_notes
    actual_notes = np.mean(actual_notes_mem, axis=0) * 0.1
    # --------------Draw visualization-----------------
    screen.fill((0, 0, 0))
    # Draw folded dft (loudest bin highlighted white)
    per_note = window_w // len(notes)
    for i, note in enumerate(notes):
        color = (255, 255, 255) if np.argmax(notes) == i else bin2color(i, 24)
        #color = bin2color(i, 24)
        pygame.draw.rect(screen, color, pygame.Rect(i * per_note + 1, 0, per_note - 1, note * 2000))
    # Draw filtered note distribution
    amp = 4000
    per_note = window_w // len(filtered_notes)
    # Calc line points
    coords = [(per_note * i + 0.5 * per_note, note * amp) for i, note in enumerate(filtered_notes)]
    # Loop around edges
    coords = [(-1.5 * per_note, filtered_notes[23] * amp)] + coords + [((len(filtered_notes) + 1.5) * per_note, filtered_notes[0] * amp)]
    pygame.draw.lines(screen, (255, 255, 255), False, coords)
    # Draw peak points
    for (idx, amplitude) in peaks:
        pygame.draw.circle(screen, bin2color(idx, 24), (int(per_note * idx + .5 * per_note), int(amplitude * amp)), int(amplitude * 1000))
    # Draw complete dft (from the bottom edge, upward)
    per_note = window_w // len(avged_dfts)
    for i, note in enumerate(avged_dfts):
        color = bin2color(i, 24)
        pygame.draw.rect(screen, color, pygame.Rect(i * per_note + 1, window_h, per_note - 1, note * -2000))
    # Draw actual notes
    per_note = window_w // len(actual_notes)
    for i, note in enumerate(actual_notes):
        color = bin2color(i, 12)
        pygame.draw.rect(screen, color, pygame.Rect(i * per_note + 1, window_h - 100, per_note - 1, note * -100000))
    end_time = time.time()
    print('Total time', end_time - start_time, 1 / (end_time - start_time), 'Fps')
    pygame.display.flip()
# Cleanup: release the audio input stream before terminating PyAudio.
stream.stop_stream()
stream.close()
p.terminate()
import time
import numpy as np
import pygame
import math
from scipy.ndimage import gaussian_filter1d
from scipy.special import softmax
from dfts import do_dfts
from util import *
# Audio analysis parameters: 1024 samples at 44.1 kHz gives a DFT bin width
# of ~43 Hz.
samplerate = 44100
n_samples = 1024
bin_width = samplerate / n_samples
# Setup pygame
pygame.init()
window_w = 865
window_h = 512
screen = pygame.display.set_mode((window_w, window_h))
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
# Open pyaudio input stream (mono float32 at the analysis sample rate)
stream = p.open(format=pyaudio.paFloat32,
                channels=1,
                rate=samplerate,
                input=True)
# start the stream
stream.start_stream()
# Piano key numbers: analyse `octaves` octaves starting `n_skipped` octaves
# up, at half-semitone resolution (hence the 0.5 step).
n_skipped = 1
octaves = 6
#pianokeys = np.arange(25, (octaves * 12) + 1, 0.5)
pianokeys = np.arange((12 * n_skipped) + 1, (12 * n_skipped) + 1 + (octaves * 12), 0.5)
# Gather frequencies to DFT at
freqs = np.array([getfreq(key_n) for key_n in pianokeys])
# ---- create scaling based on how often frequency bins overlap -----
# Need previous two octaves to get bins correct
prev_pianokeys = np.arange((12 * n_skipped) + 1 - 24, (12 * n_skipped) + 1, 0.5)
prev_freqs = np.array([getfreq(key_n) for key_n in prev_pianokeys])
allfreqs = np.concatenate((prev_freqs, freqs))
# Create scaling based on bin width: each analysed frequency owns a window
# of +/- half a DFT bin around itself.
bins = []
for freq in allfreqs:
    halfbinw = bin_width / 2
    lower = freq - halfbinw
    upper = freq + halfbinw
    bins.append((lower, upper))
# Count the amount of bins a certain frequency is in. Low piano keys sit
# closer together than one DFT bin, so several analysis frequencies share
# the same underlying spectral energy; the weighted count measures that.
scale_count = np.zeros(len(freqs))
for idx, freq in enumerate(freqs):
    for (l, h) in bins:
        if freq > l and freq <= h:
            # How far from the sides are we?
            ldist = freq - l
            rdist = h - freq
            #normalize
            ldist_n = ldist / (ldist + rdist)
            rdist_n = rdist / (ldist + rdist)
            dist = min(ldist_n, rdist_n) * 2
            scale_count[idx] += dist
# Frequencies covered by many overlapping bins get scaled down toward 0.
scaler = 1 - (scale_count / np.max(scale_count))
# Memory to keep rolling average of the last n_keep DFT frames
n_keep = 5
avg_mem_idx = 0
avg_mem = np.zeros((n_keep, len(freqs)))
# Keep notes: ring buffer of per-chroma (12-bin) note amplitudes
n_keep_notes = 1
actual_notes_idx = 0
actual_notes_mem = np.zeros((n_keep_notes, 12))
# MAIN LOOP
should_quit = False
while stream.is_active() and not should_quit:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            should_quit = True
    start_time = time.time()
    # Read samples (blocking read of one analysis frame)
    samples = stream.read(n_samples)
    samples = np.frombuffer(samples, dtype=np.float32)
    # Apply windowing function (Blackman) to reduce spectral leakage
    samples = np.blackman(len(samples)) * samples
    # Perform DFT
    # We cannot do FFT because we need the frequency bins to be chromatic
    dftime = time.time()
    dfts = np.abs(do_dfts(freqs, samples, samplerate))
    print(f'DFT took {time.time() - dftime} s, { 1/ (time.time() - dftime)} Hz')
    #Taper first octave
    # NOTE(review): `taper` is computed but its application below is
    # commented out -- dead code kept for experimentation.
    taper = np.ones(dfts.size)
    taper[0:24] = np.linspace(0, 1, 24)
    #taper[0:48] = 1 - np.flip(np.geomspace(0.0001, 1, 48))
    dfts = dfts * scaler
    #dfts = dfts * taper
    # Add dft result to rolling average
    avg_mem[avg_mem_idx, :] = dfts
    avg_mem_idx = (avg_mem_idx + 1) % n_keep
    avged_dfts = np.mean(avg_mem, axis=0)
    # Fold dft output down to one octave (24 half-semitone bins)
    notes = foldfft(avged_dfts, 24, 24)
    # Filter notes with a circular gaussian (the chroma wheel wraps around)
    sigma = 1
    filtered_notes = gaussian_filter1d(notes, sigma, mode='wrap')
    # Find peaks: local maxima over sliding triplets
    peaks = []
    # Include wraparound for first and last bin
    wrapback = [np.array([filtered_notes[-1], filtered_notes[0], filtered_notes[1]])]
    wrapfront = [np.array([filtered_notes[-2], filtered_notes[-1], filtered_notes[0]])]
    triplets = [filtered_notes[x: x + 3] for x in range(len(filtered_notes) - 2)]
    triplets = wrapback + triplets + wrapfront
    # NOTE(review): `next` shadows the builtin inside this loop (harmless here)
    for idx, (prev, cur, next) in enumerate(triplets):
        if prev < cur and cur > next:
            peaks.append((idx, cur))
    # Only keep n largest peaks
    n_keep_peaks = 5
    peaks = sorted(peaks, key=lambda x: x[1], reverse=True)[0:n_keep_peaks]
    # Put peaks in note bins: even indices sit exactly on a semitone; odd
    # indices fall between two semitones and split their amplitude 50/50.
    for (idx, amplitude) in peaks:
        if idx in range(0, 24, 2):
            actual_notes_mem[actual_notes_idx, idx // 2] = amplitude
        else:
            prev = math.floor(idx / 2)
            next = math.ceil(idx / 2)
            actual_notes_mem[actual_notes_idx, prev] = 0.5 * amplitude
            actual_notes_mem[actual_notes_idx, next % 12] = 0.5 * amplitude
    # Decay note bins so stale detections fade out
    actual_notes_mem[actual_notes_idx, :] *= 0.8
    # # Try softmax to see if it helps isolate notes better?
    # actual_notes_mem[actual_notes_idx, :] = softmax(actual_notes_mem[actual_notes_idx])
    # actual_notes_mem[actual_notes_idx, :] -= np.min(actual_notes_mem[actual_notes_idx])
    actual_notes_idx = (actual_notes_idx + 1) % n_keep_notes
    actual_notes = np.mean(actual_notes_mem, axis=0) * 0.1
    # --------------Draw visualization-----------------
    screen.fill((0, 0, 0))
    # Draw folded dft (top of window); the strongest bin is highlighted white
    per_note = window_w // len(notes)
    for i, note in enumerate(notes):
        color = (255, 255, 255) if np.argmax(notes) == i else bin2color(i, 24)
        #color = bin2color(i, 24)
        pygame.draw.rect(screen, color, pygame.Rect(i * per_note + 1, 0, per_note - 1, note * 2000))
    # Draw filtered note distribution as a polyline
    amp = 4000
    per_note = window_w // len(filtered_notes)
    # Calc line points
    coords = [(per_note * i + 0.5 * per_note, note * amp) for i, note in enumerate(filtered_notes)]
    # Loop around edges
    coords = [(-1.5 * per_note, filtered_notes[23] * amp)] + coords + [((len(filtered_notes) + 1.5) * per_note, filtered_notes[0] * amp)]
    pygame.draw.lines(screen, (255, 255, 255), False, coords)
    # Draw peak points (circle radius scales with amplitude)
    for (idx, amplitude) in peaks:
        pygame.draw.circle(screen, bin2color(idx, 24), (int(per_note * idx + .5 * per_note), int(amplitude * amp)), int(amplitude * 1000))
    # Draw complete dft growing up from the bottom edge (negative height)
    per_note = window_w // len(avged_dfts)
    for i, note in enumerate(avged_dfts):
        color = bin2color(i, 24)
        pygame.draw.rect(screen, color, pygame.Rect(i * per_note + 1, window_h, per_note - 1, note * -2000))
    # Draw actual notes (12 chroma bins) just above the bottom strip
    per_note = window_w // len(actual_notes)
    for i, note in enumerate(actual_notes):
        color = bin2color(i, 12)
        pygame.draw.rect(screen, color, pygame.Rect(i * per_note + 1, window_h - 100, per_note - 1, note * -100000))
    end_time = time.time()
    print('Total time', end_time - start_time, 1 / (end_time - start_time), 'Fps')
    pygame.display.flip()
# Cleanup
stream.stop_stream()
stream.close()
p.terminate() | 0.5083 | 0.332581 |
import numpy as np
import pandas as pd
from avato import Client
from avato import Secret
from avato_training import Training_Instance, Configuration
import time
data_filenames = ("test-data/wine-dataowner1.csv", "test-data/wine-dataowner2.csv")
expected_measurement = "71b81c5d4a1879fd75905bd207b079274fdcd095f2ff145d0b560574f5733df3"
backend_host = "api.decentriq.ch"
backend_port = 15005
def load_data():
    """Read both data-owner CSV files and return (features, labels) arrays.

    The last column of the concatenated table is the label; every column
    before it is a feature.
    """
    frames = [pd.read_csv(path) for path in data_filenames]
    combined = np.array(pd.concat(frames))
    features = combined[:, 0:-1]
    labels = combined[:, -1]
    return features, labels
def compute_accuracy(classifier, X, y):
    """Return the fraction of samples that `classifier` predicts correctly.

    :param classifier: any object with a ``predict(X)`` method returning one
        label per row of X.
    :param X: feature matrix (one row per sample).
    :param y: true labels; must match the length of ``classifier.predict(X)``.
    :return: accuracy in [0.0, 1.0]; 0.0 for an empty label set (the
        original implementation raised ZeroDivisionError in that case).
    """
    y_hat = classifier.predict(X)
    assert len(y) == len(y_hat)
    if len(y) == 0:
        return 0.0
    # Compare as floats so e.g. 1 and 1.0 count as a match.
    n_correct = sum(1 for yi, yi_hat in zip(y, y_hat) if float(yi) == float(yi_hat))
    return n_correct / len(y)
def analyst_set_up_instance(analyst_api_token, analyst_password, data_owner_usernames, feature_columns, label_column):
    """Create, verify and configure a training instance as the analyst.

    :param analyst_api_token: API token identifying the analyst.
    :param analyst_password: password stored in the instance configuration.
    :param data_owner_usernames: users allowed to participate in the instance.
    :param feature_columns: column names used as model features.
    :param label_column: column name used as the training label.
    :return: the configured analyst instance.
    """
    # Create client.
    analyst_client = Client(
        api_token=analyst_api_token,
        instance_types=[Training_Instance],
        backend_host=backend_host,
        backend_port=backend_port,
        use_ssl=True
    )
    # Spin up an instance. Set who can participate in the instance.
    analyst_instance = analyst_client.create_instance(
        "Training",
        Training_Instance.type,
        data_owner_usernames
    )
    print("Created Instance with ID: {}".format(analyst_instance.id))
    # Check the enclave's attestation against the pinned measurement.
    analyst_instance.validate_fatquote(
        expected_measurement=expected_measurement,
        accept_debug=True,
        accept_group_out_of_date=True
    )
    # Set the configuration
    configuration = Configuration(
        feature_columns=feature_columns,
        label_column=label_column,
        # Fix: the `<PASSWORD>` redaction placeholder was a syntax error; the
        # otherwise-unused `analyst_password` parameter is the intended value.
        password=analyst_password
    )
    print("\nConfigured instance with feature columns \n{}\n and label column \n{}".format(
        "\n".join(["    '{}'".format(c) for c in configuration.feature_columns]),
        "    '{}'".format(configuration.label_column)
    ))
    # Create and set public-private keypair for secure communication.
    analyst_secret = Secret()
    analyst_instance.set_secret(analyst_secret)
    # Upload
    analyst_instance.upload_configuration(configuration)
    return analyst_instance
# This function submits for a given data owner a data file to the instance.
def data_owner_submit_data(dataowner_api_token, instance_id, data_file):
    """Connect as a data owner, verify the enclave, and submit a CSV.

    :param dataowner_api_token: API token identifying the data owner.
    :param instance_id: id of the instance created by the analyst.
    :param data_file: path to the CSV file to upload.
    """
    # Create client
    data_owner_client = Client(
        api_token=dataowner_api_token,
        instance_types=[Training_Instance],
        backend_host=backend_host,
        backend_port=backend_port,
        use_ssl=True
    )
    # Connect to instance (using ID from the analyst user)
    data_owner_instance = data_owner_client.get_instance(instance_id)
    # Check security guarantees.
    data_owner_instance.validate_fatquote(
        expected_measurement=expected_measurement,
        accept_debug=True,
        accept_group_out_of_date=True
    )
    # NOTE(review): the sleeps below are cosmetic pacing for demo output only;
    # the actual verification already happened in validate_fatquote above.
    print("Verifying security...")
    time.sleep(0.3)
    print("... signature verified.")
    time.sleep(0.3)
    print("... code hash verified.")
    time.sleep(0.3)
    print("Security successfully verified.\n")
    # Create and set public-private keypair for secure communication.
    data_owner_instance.set_secret(Secret())
    print("Created random keypair for e2e encryption.\n")
    # Get data format from the enclave
    data_format = data_owner_instance.get_data_format()
    # print("Data format:\n{}".format(data_format))
    # Load data
    df = pd.read_csv(data_file)
    print("Loaded data:")
    print(df.head(2))
    # Submit data
    print("\nEncrypting data...")
    time.sleep(0.3)
    print("Submitting encrypted data...")
    time.sleep(0.3)
    (ingested_rows, failed_rows) = data_owner_instance.submit_data(df)
    print("\nNumber of successfully ingested rows: {}, number of failed rows: {}".format(ingested_rows, len(failed_rows))) | examples/example.py | import numpy as np
import pandas as pd
from avato import Client
from avato import Secret
from avato_training import Training_Instance, Configuration
import time
data_filenames = ("test-data/wine-dataowner1.csv", "test-data/wine-dataowner2.csv")
expected_measurement = "71b81c5d4a1879fd75905bd207b079274fdcd095f2ff145d0b560574f5733df3"
backend_host = "api.decentriq.ch"
backend_port = 15005
def load_data():
Xy = np.array(
pd.concat([
pd.read_csv(data_filenames[0]),
pd.read_csv(data_filenames[1])
])
);
X = Xy[:,0:-1]
y = Xy[:,-1]
return X, y
def compute_accuracy(classifier, X, y):
y_hat = classifier.predict(X)
assert len(y) == len(y_hat)
n = len(y)
n_eq = 0
for yi, yi_hat in zip(y, y_hat):
if float(yi) == float(yi_hat):
n_eq = n_eq + 1
accuracy = float(n_eq)/n
return accuracy
def analyst_set_up_instance(analyst_api_token, analyst_password, data_owner_usernames, feature_columns, label_column):
# Create client.
analyst_client = Client(
api_token=analyst_api_token,
instance_types=[Training_Instance],
backend_host=backend_host,
backend_port=backend_port,
use_ssl=True
)
# Spin up an instance. Set who can participate in the instance.
analyst_instance = analyst_client.create_instance(
"Training",
Training_Instance.type,
data_owner_usernames
)
print("Created Instance with ID: {}".format(analyst_instance.id))
analyst_instance.validate_fatquote(
expected_measurement=expected_measurement,
accept_debug=True,
accept_group_out_of_date=True
)
# Set the configuration
configuration = Configuration(
feature_columns=feature_columns,
label_column=label_column,
password=<PASSWORD>
)
print("\nConfigured instance with feature columns \n{}\n and label column \n{}".format(
"\n".join([" '{}'".format(c) for c in configuration.feature_columns]),
" '{}'".format(configuration.label_column)
))
# Create and set public-private keypair for secure communication.
analyst_secret = Secret()
analyst_instance.set_secret(analyst_secret)
# Upload
analyst_instance.upload_configuration(configuration)
return analyst_instance
# This function submits for a given data owner a data file to the instance.
def data_owner_submit_data(dataowner_api_token, instance_id, data_file):
# Create client
data_owner_client = Client(
api_token=dataowner_api_token,
instance_types=[Training_Instance],
backend_host=backend_host,
backend_port=backend_port,
use_ssl=True
)
# Connect to instance (using ID from the analyst user)
data_owner_instance = data_owner_client.get_instance(instance_id)
# Check security guarantees.
data_owner_instance.validate_fatquote(
expected_measurement=expected_measurement,
accept_debug=True,
accept_group_out_of_date=True
)
print("Verifying security...")
time.sleep(0.3)
print("... signature verified.")
time.sleep(0.3)
print("... code hash verified.")
time.sleep(0.3)
print("Security successfully verified.\n")
# Create and set public-private keypair for secure communication.
data_owner_instance.set_secret(Secret())
print("Created random keypair for e2e encryption.\n")
# Get data format from the enclave
data_format = data_owner_instance.get_data_format()
# print("Data format:\n{}".format(data_format))
# Load data
df = pd.read_csv(data_file)
print("Loaded data:")
print(df.head(2))
# Submit data
print("\nEncrypting data...")
time.sleep(0.3)
print("Submitting encrypted data...")
time.sleep(0.3)
(ingested_rows, failed_rows) = data_owner_instance.submit_data(df)
print("\nNumber of successfully ingested rows: {}, number of failed rows: {}".format(ingested_rows, len(failed_rows))) | 0.58948 | 0.358269 |
from copy import copy
import random
import os
import psycopg2
__all__ = ['Database']
connection = psycopg2.connect(
host=os.getenv('POSTGRES_HOST'),
port=os.getenv('POSTGRES_PORT'),
database=os.getenv('POSTGRES_DATABASE'),
user=os.getenv('POSTGRES_USER'),
password=os.getenv('POSTGRES_PASSWORD'))
class Model:
    """Minimal active-record base: classmethod helpers over the shared
    module-level psycopg2 `connection`."""
    @classmethod
    def _insert(cls, table, data, update_data, constraint):
        """Build and execute an INSERT ... ON CONFLICT statement (no commit).

        :param table: table name. Interpolated directly into the SQL text,
            so it must never come from untrusted input.
        :param data: list of (column, value) pairs to insert.
        :param update_data: optional (column, value) pairs applied as
            DO UPDATE SET on conflict; None means ON CONFLICT DO NOTHING.
        :param constraint: iterable of column names forming the conflict
            target; required when update_data is given.
        """
        columns_count = len(data)
        columns, values = zip(*data)
        columns = map(str, columns)
        # NOTE(review): every value is stringified before binding; numeric
        # columns rely on Postgres casting the text back to the column type.
        values = list(map(str, values))
        if update_data is None:
            conflict_query = 'DO NOTHING'
        else:
            update_columns, update_values = zip(*update_data)
            strs = map(lambda _: '{} = %s'.format(_), update_columns)
            values.extend(update_values)
            conflict_query = '({}) DO UPDATE SET {}'.format(','.join(constraint), ', '.join(strs))
        # Values are bound as query parameters; only identifiers (table and
        # column names) are formatted into the SQL text.
        query = 'INSERT INTO {} ({}) VALUES ({}) ON CONFLICT {}'.format(table,
            ','.join(columns), ','.join(['%s'] * columns_count),
            conflict_query)
        connection.cursor().execute(query, values)
    @classmethod
    def insert_one(cls, table, data, update_data=None, constraint=None):
        """Insert a single row and commit."""
        cls._insert(table, data, update_data, constraint)
        connection.commit()
    @classmethod
    def insert_all(cls, table, data_list, update_data=None, constraint=None):
        """Insert several rows in one transaction, committing once at the end."""
        for data in data_list:
            cls._insert(table, data, update_data, constraint)
        connection.commit()
    @classmethod
    def commit(cls, query, params):
        """Execute a parameterized statement and commit."""
        db = connection.cursor()
        db.execute(query, params)
        connection.commit()
    @classmethod
    def fetch_all(cls, query, params):
        """Execute a parameterized query and return all rows."""
        db = connection.cursor()
        db.execute(query, params)
        return db.fetchall()
    @classmethod
    def fetch_one(cls, query, params):
        """Execute a parameterized query and return the first row, or None."""
        db = connection.cursor()
        db.execute(query, params)
        return db.fetchone()
class User(Model):
    """Row wrapper for the `users` table."""
    def __init__(self, id, first_name, second_name, telegram_id):
        self._id = id
        self._first_name = first_name
        self._second_name = second_name
        self._telegram_id = telegram_id
    @property
    def id(self):
        return self._id
    @property
    def first_name(self):
        return self._first_name
    @property
    def second_name(self):
        return self._second_name
    @property
    def telegram_id(self):
        return self._telegram_id
    def is_approved(self):
        """Return the approval flag, or False when no approval row exists."""
        query = 'SELECT is_approved FROM approved WHERE user_id = %s'
        row = self.fetch_one(query, [self.id])
        if row is None:
            return False
        return row[0]
    def is_admin(self):
        """Return the admin flag from the users table (the row must exist)."""
        query = 'SELECT is_admin FROM users WHERE id = %s'
        row = self.fetch_one(query, [self.id])
        return row[0]
    @classmethod
    def from_id(cls, user_id):
        """Load a user by primary key, or None when missing."""
        query = 'SELECT id, first_name, second_name, telegram_id FROM users WHERE id = %s'
        row = cls.fetch_one(query, [user_id])
        return None if row is None else cls(*row)
    @classmethod
    def from_telegram_id(cls, telegram_id):
        """Load a user by Telegram id, or None when missing."""
        query = 'SELECT id, first_name, second_name, telegram_id FROM users WHERE telegram_id = %s'
        row = cls.fetch_one(query, [telegram_id])
        return None if row is None else cls(*row)
class Event(Model):
    """Row wrapper for the `events` table plus its participants, victim
    pairings and interests."""
    def __init__(self, id, name):
        self._id = id
        self._name = name
    @classmethod
    def from_id(cls, event_id):
        """Load an event by primary key, or None when missing."""
        query = 'SELECT id, name FROM events WHERE id = %s'
        row = cls.fetch_one(query, [event_id])
        if row is None:
            return None
        return cls(*row)
    @property
    def id(self):
        return self._id
    def was_build(self):
        """True once santa/victim pairs have been generated for this event."""
        query = 'SELECT from_id FROM victims WHERE event_id = %s'
        row = self.fetch_one(query, [self.id])
        return row is not None
    def _check_build(self, from_ids, to_ids):
        # A valid assignment is a derangement: nobody may be their own victim.
        for from_id, to_id in zip(from_ids, to_ids):
            if from_id == to_id:
                return False
        return True
    def build(self):
        """Randomly pair every participant with a distinct victim and persist.

        :return: list of (from_id, to_id) pairs.
        :raises ValueError: with fewer than two participants no derangement
            exists and the retry loop below would never terminate.
        """
        query = 'SELECT user_id FROM participants WHERE event_id = %s'
        rows = self.fetch_all(query, [self.id])
        from_ids = [_[0] for _ in rows]
        if len(from_ids) < 2:
            raise ValueError('need at least two participants to build pairs')
        # Rejection-sample shuffles until nobody maps to themselves.
        while True:
            to_ids = copy(from_ids)
            random.shuffle(to_ids)
            if self._check_build(from_ids, to_ids):
                break
        data_list = [[('event_id', self.id), ('from_id', _[0]), ('to_id', _[1])] for _ in zip(from_ids, to_ids)]
        self.insert_all('victims', data_list)
        return list(zip(from_ids, to_ids))
    def add_participant(self, user):
        """Register `user` for this event (idempotent)."""
        query = 'INSERT INTO participants (event_id, user_id) VALUES (%s, %s) ON CONFLICT DO NOTHING'
        self.commit(query, [self.id, user.id])
    def get_participants(self):
        """Return the list of participant user ids."""
        query = 'SELECT user_id FROM participants WHERE event_id = %s'
        rows = self.fetch_all(query, [self.id])
        if rows is None:
            return []
        return [_[0] for _ in rows]
    def has_participant(self, user):
        """True when `user` is registered for this event."""
        query = 'SELECT event_id, user_id FROM participants WHERE event_id = %s AND user_id = %s'
        row = self.fetch_one(query, [self.id, user.id])
        return row is not None
    def find_victim(self, user):
        """Return the User that `user` gives a present to, or None."""
        query = 'SELECT to_id FROM victims WHERE event_id = %s AND from_id = %s'
        row = self.fetch_one(query, [self.id, user.id])
        if row is None:
            return None
        return User.from_id(row[0])
    def find_santa(self, user):
        """Return the User giving a present to `user`, or None."""
        query = 'SELECT from_id FROM victims WHERE event_id = %s AND to_id = %s'
        row = self.fetch_one(query, [self.id, user.id])
        if row is None:
            return None
        return User.from_id(row[0])
    def find_interests(self, user):
        """Return `user`'s stored interests text for this event, or None."""
        query = 'SELECT interests FROM interests WHERE event_id = %s AND user_id = %s'
        row = self.fetch_one(query, [self.id, user.id])
        if row is None:
            return None
        return row[0]
    def save_interests(self, user, interests):
        """Insert or update `user`'s interests for this event.

        Fix: the previous revision also assigned an unused `query` string
        whose SQL was invalid (three columns with two placeholders and a
        bare ON CONFLICT DO UPDATE); the upsert below is what actually runs,
        so the dead statement was removed.
        """
        data = [('event_id', self.id), ('user_id', user.id), ('interests', interests)]
        self.insert_one('interests', data, [('interests', interests)], ('event_id', 'user_id'))
class Database:
    """Facade used by the bot: create/find users and events, approvals."""
    def create_user(self, telegram_user):
        """Insert a user row from a Telegram user object and return the User."""
        data = [('telegram_id', telegram_user.id),
                ('first_name', telegram_user.first_name),
                ('second_name', telegram_user.last_name)]
        Model.insert_one('users', data)
        # Re-read so the caller gets the generated primary key.
        return self.find_user(telegram_id=telegram_user.id)
    def create_event(self, name):
        """Insert a new event with the given name."""
        data = [('name', name)]
        Model.insert_one('events', data)
    def find_user(self, user_id=None, telegram_id=None):
        """Look up a user by internal id or Telegram id; None when absent."""
        if user_id is not None:
            return User.from_id(user_id)
        elif telegram_id is not None:
            return User.from_telegram_id(telegram_id)
        return None
    def find_event(self, event_id):
        """Look up an event by id; None when absent."""
        return Event.from_id(event_id)
    def approve_user(self, user):
        """Mark `user` as approved."""
        Model.insert_one('approved', [('user_id', user.id)]) | secret_santaclaus/database.py | from copy import copy
import random
import os
import psycopg2
__all__ = ['Database']
connection = psycopg2.connect(
host=os.getenv('POSTGRES_HOST'),
port=os.getenv('POSTGRES_PORT'),
database=os.getenv('POSTGRES_DATABASE'),
user=os.getenv('POSTGRES_USER'),
password=os.getenv('POSTGRES_PASSWORD'))
class Model:
@classmethod
def _insert(cls, table, data, update_data, constraint):
columns_count = len(data)
columns, values = zip(*data)
columns = map(str, columns)
values = list(map(str, values))
if update_data == None:
conflict_query = 'DO NOTHING'
else:
update_columns, update_values = zip(*update_data)
strs = map(lambda _: '{} = %s'.format(_), update_columns)
values.extend(update_values)
conflict_query = '({}) DO UPDATE SET {}'.format(','.join(constraint), ', '.join(strs))
query = 'INSERT INTO {} ({}) VALUES ({}) ON CONFLICT {}'.format(table,
','.join(columns), ','.join(['%s'] * columns_count),
conflict_query)
connection.cursor().execute(query, values)
@classmethod
def insert_one(cls, table, data, update_data=None, constraint=None):
cls._insert(table, data, update_data, constraint)
connection.commit()
@classmethod
def insert_all(cls, table, data_list, update_data=None, constraint=None):
for data in data_list:
cls._insert(table, data, update_data, constraint)
connection.commit()
@classmethod
def commit(cls, query, params):
db = connection.cursor()
db.execute(query, params)
connection.commit()
@classmethod
def fetch_all(cls, query, params):
db = connection.cursor()
db.execute(query, params)
return db.fetchall()
@classmethod
def fetch_one(cls, query, params):
db = connection.cursor()
db.execute(query, params)
return db.fetchone()
class User(Model):
def __init__(self, id, first_name, second_name, telegram_id):
self._id = id
self._first_name = first_name
self._second_name = second_name
self._telegram_id = telegram_id
@property
def id(self):
return self._id
@property
def first_name(self):
return self._first_name
@property
def second_name(self):
return self._second_name
@property
def telegram_id(self):
return self._telegram_id
def is_approved(self):
query = 'SELECT is_approved FROM approved WHERE user_id = %s'
row = self.fetch_one(query, [self.id])
return False if row is None else row[0]
def is_admin(self):
query = 'SELECT is_admin FROM users WHERE id = %s'
row = self.fetch_one(query, [self.id])
return row[0]
@classmethod
def from_id(cls, user_id):
query = 'SELECT id, first_name, second_name, telegram_id FROM users WHERE id = %s'
row = cls.fetch_one(query, [user_id])
if row is None:
return None
return cls(*row)
@classmethod
def from_telegram_id(cls, telegram_id):
query = 'SELECT id, first_name, second_name, telegram_id FROM users WHERE telegram_id = %s'
row = cls.fetch_one(query, [telegram_id])
if row is None:
return None
return cls(*row)
class Event(Model):
def __init__(self, id, name):
self._id = id
self._name = name
@classmethod
def from_id(cls, event_id):
query = 'SELECT id, name FROM events WHERE id = %s'
row = cls.fetch_one(query, [event_id])
if row is None:
return None
return cls(*row)
@property
def id(self):
return self._id
def was_build(self):
query = 'SELECT from_id FROM victims WHERE event_id = %s'
row = self.fetch_one(query, [self.id])
return row != None
def _check_build(self, from_ids, to_ids):
for from_id, to_id in zip(from_ids, to_ids):
if from_id == to_id:
return False
return True
def build(self):
query = 'SELECT user_id FROM participants WHERE event_id = %s'
rows = self.fetch_all(query, [self.id])
from_ids = [_[0] for _ in rows]
while True:
to_ids = copy(from_ids)
random.shuffle(to_ids)
if self._check_build(from_ids, to_ids):
break
data_list = [[('event_id', self.id), ('from_id', _[0]), ('to_id', _[1])] for _ in zip(from_ids, to_ids)]
self.insert_all('victims', data_list)
return list(zip(from_ids, to_ids))
def add_participant(self, user):
query = 'INSERT INTO participants (event_id, user_id) VALUES (%s, %s) ON CONFLICT DO NOTHING'
self.commit(query, [self.id, user.id])
def get_participants(self):
query = 'SELECT user_id FROM participants WHERE event_id = %s'
rows = self.fetch_all(query, [self.id])
if rows == None:
return []
return [_[0] for _ in rows]
def has_participant(self, user):
query = 'SELECT event_id, user_id FROM participants WHERE event_id = %s AND user_id = %s'
row = self.fetch_one(query, [self.id, user.id])
return row != None
def find_victim(self, user):
query = 'SELECT to_id FROM victims WHERE event_id = %s AND from_id = %s'
row = self.fetch_one(query, [self.id, user.id])
if row is None:
return None
return User.from_id(row[0])
def find_santa(self, user):
query = 'SELECT from_id FROM victims WHERE event_id = %s AND to_id = %s'
row = self.fetch_one(query, [self.id, user.id])
if row is None:
return None
return User.from_id(row[0])
def find_interests(self, user):
query = 'SELECT interests FROM interests WHERE event_id = %s AND user_id = %s'
row = self.fetch_one(query, [self.id, user.id])
if row is None:
return None
return row[0]
def save_interests(self, user, interests):
query = 'INSERT INTO interests (event_id, user_id, interests) VALUES (%s, %s) ON CONFLICT DO UPDATE'
data = [('event_id', self.id), ('user_id', user.id), ('interests', interests)]
self.insert_one('interests', data, [('interests', interests)], ('event_id', 'user_id'))
class Database:
def create_user(self, telegram_user):
data = [('telegram_id', telegram_user.id),
('first_name', telegram_user.first_name),
('second_name', telegram_user.last_name)]
Model.insert_one('users', data)
return self.find_user(telegram_id=telegram_user.id)
def create_event(self, name):
data = [('name', name)]
Model.insert_one('events', data)
def find_user(self, user_id=None, telegram_id=None):
if user_id is not None:
return User.from_id(user_id)
elif telegram_id is not None:
return User.from_telegram_id(telegram_id)
return None
def find_event(self, event_id):
return Event.from_id(event_id)
def approve_user(self, user):
Model.insert_one('approved', [('user_id', user.id)]) | 0.488039 | 0.115312 |
from cogs.Permissions import command_channels, dm_commands
from discord.ext import commands
from typing import Dict, List
import discord
import random
import os
# Template embed reused for every hangman game; render_hangman_embed copies
# it per message so fields never accumulate on this shared instance.
hangman_embed = discord.Embed(
    title="Reaction Hangman",
    color=discord.Color.red()
).set_footer(text='Tip: search "regional" in the reaction menu')
# Emoji rendered for a mine cell on the minesweeper board.
bomb = ":anger:"
# Digit emoji indexed by value 0-9; used for minesweeper neighbor counts.
numbers = [
    ":zero:",
    ":one:",
    ":two:",
    ":three:",
    ":four:",
    ":five:",
    ":six:",
    ":seven:",
    ":eight:",
    ":nine:"
]
class HangmanGame:  # Credit: https://github.com/TheUnlocked
    """
    Reaction Hangman Game Instance
    """
    word: str
    visible: str
    errors: int
    guesses: List[str]
    def __init__(self, word):
        # print(word)
        self.word = word
        self.guesses = []
        self.visible = '*' * len(word)
        self.errors = 0
    def guess(self, letter):
        """Record a guessed letter (repeats are ignored) and refresh state."""
        if letter in self.guesses:
            return
        self.guesses.append(letter)
        self.update_status()
    def update_status(self):
        """Recompute the error count and the partially revealed word."""
        self.errors = sum(1 for guessed in self.guesses if guessed not in self.word)
        if self.errors > 5:
            # Game over: reveal the whole word.
            self.visible = self.word
        else:
            self.visible = ''.join(c if c in self.guesses else '*' for c in self.word)
class Games(commands.Cog):
    """Cog bundling small chat games: hangman, minesweeper and dice rolls."""
    # In-progress hangman games keyed by the id of the message showing them
    # (keys are message ids, i.e. ints).
    hangman_games: Dict[int, HangmanGame]
    # Candidate hangman words loaded from config/dictionary.txt.
    hangman_words: List[str]
    def __init__(self, bot):
        self.bot = bot
        self.hangman_games = {}
        self.hangman_words = []
    @commands.Cog.listener()
    async def on_ready(self):
        # Defer loading the dictionary until the bot is connected.
        await self.load_dict()
    # Hangman (Credit: https://github.com/TheUnlocked)
    async def load_dict(self):
        """Populate `hangman_words` from the bundled dictionary file."""
        with open(os.path.join("config", "dictionary.txt"), "r") as dictionary:
            # fewer than 6 letters doesn't make for as fun of a game!
            self.hangman_words = [s.lower() for s in dictionary.read().splitlines() if len(s) >= 6]
    @commands.command(pass_context=True, name="hangman")
    @commands.check(command_channels)
    async def hangman(self, ctx):
        """
        Starts a game of hangman
        Usage: .hangman
        :param ctx: context object
        """
        hangman = HangmanGame(random.choice(self.hangman_words))
        msg = await ctx.send(embed=self.render_hangman_embed(hangman))
        # Track the game by message id so reaction events can find it later.
        self.hangman_games[msg.id] = hangman
    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """
        Adds a guess and updates game state
        """
        guild = self.bot.get_guild(payload.guild_id)
        # Only react to single-codepoint emoji on messages hosting a game.
        if payload.message_id in self.hangman_games and guild is not None and len(payload.emoji.name) == 1:
            # Regional-indicator emoji sit exactly 127365 code points above
            # their ASCII letter (ord('🇦') - ord('a') == 127365).
            letter = chr(ord(payload.emoji.name) - 127365)
            if 'a' <= letter <= 'z':
                channel = guild.get_channel(payload.channel_id)
                message = await channel.fetch_message(payload.message_id)
                hangman = self.hangman_games[payload.message_id]
                hangman.guess(letter)
                if '*' not in hangman.visible:
                    # Word fully revealed (win, or loss revealing the word):
                    # stop tracking this game.
                    del self.hangman_games[payload.message_id]
                await message.edit(embed=self.render_hangman_embed(hangman))
                # Clear the reaction so the board stays tidy between guesses.
                await message.clear_reaction(payload.emoji)
def render_hangman_embed(self, hangman: HangmanGame):
"""
Generates a new embed representing the state of the hangman game
"""
global hangman_embed
embed = hangman_embed.copy()
head = '()' if hangman.errors > 0 else ' '
torso = '||' if hangman.errors > 1 else ' '
left_arm = '/' if hangman.errors > 2 else ' '
right_arm = '\\' if hangman.errors > 3 else ' '
left_leg = '/' if hangman.errors > 4 else ' '
right_leg = '\\' if hangman.errors > 5 else ' '
diagram = f"``` {head}\n{left_arm}{torso}{right_arm}\n {left_leg}{right_leg}```"
embed.add_field(name="Diagram", value=diagram)
embed.add_field(
name="Word",
value=' '.join("🟦" if c == '*' else chr(ord(c) + 127365) for c in hangman.visible)
)
# padding
embed.add_field(name="\u200b", value="\u200b")
if len(hangman.guesses) > 0:
embed.add_field(name="Guesses", value=' '.join(chr(ord(c) + 127365) for c in hangman.guesses) + "\n\n\n")
if hangman.errors > 5:
embed.add_field(name="Result", value="You lose!")
elif '*' not in hangman.visible:
embed.add_field(name="Result", value="You win!")
return embed
    # Minesweeper (Credit: https://github.com/TheUnlocked)
    @commands.command(pass_context=True, name="minesweeper")
    @commands.check(command_channels)
    @commands.guild_only()
    async def new_minesweeper(self, ctx, x_str="8", y_str="8"):
        """
        Creates a new spoiler minesweeper game
        Usage: .minesweeper (x) (y)
        :param ctx: context object
        :param x_str: (optional) x dimension
        :param y_str: (optional) y dimension
        """
        if not (x_str.isdecimal() and y_str.isdecimal()):
            await ctx.send(
                f"Either {x_str} or {y_str} is not a valid dimension for a minesweeper board. "
                f"Please use the format `minesweeper x y`."
            )
            return
        x = int(x_str)
        y = int(y_str)
        if x < 2 or y < 2:
            await ctx.send(f"{x}x{y} is too small for a minesweeper board. The minimum dimensions are 2x2.")
            return
        elif x > 10 or y > 10:
            await ctx.send(f"{x}x{y} is too large for a minesweeper board. The maximum dimensions are 10x10.")
            return
        # Bomb count scales with board area plus some randomness, capped so
        # at least one cell always stays free.
        bomb_count = min(x * y - 1, max(2, (x * y // 10) + random.randrange(0, x + y)))
        board = self.create_board(x, y)
        self.place_bombs(board, bomb_count)
        self.place_neighbors(board)
        # If any zero-neighbor cell exists, blank one at random: None cells
        # render without a spoiler, giving a guaranteed-safe starting click.
        if any(any(cell == numbers[0] for cell in row) for row in board):
            while True:
                y = random.choice(range(len(board)))
                x = random.choice(range(len(board[y])))
                if board[y][x] == numbers[0]:
                    board[y][x] = None
                    break
        embed = discord.Embed(
            title="Spoiler Minesweeper",
            color=discord.Color.red()
        ).set_footer(text=f"Find all {bomb_count} bombs!")
        embed.add_field(
            name="Minefield",
            value="\n".join(["".join([numbers[0] if cell is None else f"||{cell}||" for cell in row]) for row in board])
        )
        await ctx.send(embed=embed)
def create_board(self, x, y):
return [[None for _ in range(x)] for _ in range(y)]
    def place_bombs(self, board, num):
        """Place `num` bombs on random distinct empty cells (mutates board).

        Rejection-samples positions, so `num` must be less than the cell
        count (the caller caps it at x*y - 1).
        """
        while num > 0:
            y = random.choice(range(len(board)))
            x = random.choice(range(len(board[y])))
            if board[y][x] is not None:
                # Cell already mined: re-roll.
                continue
            board[y][x] = bomb
            num -= 1
def place_neighbors(self, board):
for y in range(len(board)):
for x in range(len(board[y])):
if board[y][x] == bomb:
continue
ct = 0
for c in range(-1, 2):
for r in range(-1, 2):
if 0 <= x + c < len(board[y]) and 0 <= y + r < len(board) and board[y + r][x + c] == bomb:
ct += 1
board[y][x] = numbers[ct]
    # Dice
    @commands.command(pass_context=True)
    @commands.check(dm_commands)
    async def roll(self, ctx, number):
        """
        Rolls a die for the number
        Usage: .roll <number>
        :param ctx: context object
        :param number: number of sides on the die, or "NdS" for N dice of S sides
        """
        # "NdS" form: roll N dice with S sides each.
        if "d" in number:
            sides = 0
            try:
                if number[:number.find("d")] != "":
                    dice = int(number[:number.find("d")])
                else:
                    # Bare "dS" means a single die.
                    dice = 1
                sides = int(number[number.find("d") + 1:])
            except ValueError:
                await ctx.send("Could not parse this roll")
                return
            if dice < 1 or sides < 1:
                await ctx.send("Not a valid number of dice/sides")
                return
            total = 0
            # Build a "(a + b + ... = total)" breakdown while summing.
            response = " ("
            for i in range(0, dice):
                roll_num = random.randint(1, sides)
                total += roll_num
                response += str(roll_num)
                if i == dice - 1:
                    break
                response += " + "
            response += " = " + str(total) + ")"
            # A single die needs no breakdown.
            if dice == 1:
                response = ""
            # Prefer the server nickname; strip "@" to avoid pings.
            if ctx.author.nick is not None:
                response = ctx.author.nick.replace("@", "") + " rolled a " + str(total) + "!" + response
            else:
                response = ctx.author.name.replace("@", "") + " rolled a " + str(total) + "!" + response
            # Drop the per-die breakdown when the message would be too long.
            if len(response) > 500:
                await ctx.send(response[:response.find("(") - 1])
            else:
                await ctx.send(response)
        else:
            # Plain number: a single die with that many sides.
            try:
                sides = int(number)
            except ValueError:
                await ctx.send("Could not parse this roll")
                return
            if sides < 1:
                await ctx.send("Not a valid number of sides")
                return
            if ctx.author.nick is not None:
                await ctx.send(ctx.author.nick.replace("@", "") + " rolled a " + str(random.randint(1, sides)) + "!")
            else:
                await ctx.send(ctx.author.name.replace("@", "") + " rolled a " + str(random.randint(1, sides)) + "!") | cogs/Games.py | from cogs.Permissions import command_channels, dm_commands
from discord.ext import commands
from typing import Dict, List
import discord
import random
import os
hangman_embed = discord.Embed(
title="Reaction Hangman",
color=discord.Color.red()
).set_footer(text='Tip: search "regional" in the reaction menu')
bomb = ":anger:"
numbers = [
":zero:",
":one:",
":two:",
":three:",
":four:",
":five:",
":six:",
":seven:",
":eight:",
":nine:"
]
class HangmanGame: # Credit: https://github.com/TheUnlocked
"""
Reaction Hangman Game Instance
"""
word: str
visible: str
errors: int
guesses: List[str]
def __init__(self, word):
# print(word)
self.word = word
self.guesses = []
self.visible = '*' * len(word)
self.errors = 0
def guess(self, letter):
if letter not in self.guesses:
self.guesses.append(letter)
self.update_status()
def update_status(self):
self.errors = len([c for c in self.guesses if c not in self.word])
if self.errors > 5:
self.visible = self.word
else:
self.visible = ''.join('*' if c not in self.guesses else c for c in self.word)
class Games(commands.Cog):
    """Cog bundling chat games: reaction hangman, spoiler minesweeper and dice rolls."""
    # Maps a hangman message's id to its game state. Keys come from
    # ``msg.id`` (an int in discord.py), hence the int annotation.
    hangman_games: Dict[int, HangmanGame]
    # Candidate words for hangman, loaded once in on_ready().
    hangman_words: List[str]
    def __init__(self, bot):
        self.bot = bot
        self.hangman_games = {}
        self.hangman_words = []
    @commands.Cog.listener()
    async def on_ready(self):
        # Load the hangman dictionary once the bot is connected.
        await self.load_dict()
    # Hangman (Credit: https://github.com/TheUnlocked)
    async def load_dict(self):
        """Populate self.hangman_words from config/dictionary.txt (6+ letter words only)."""
        with open(os.path.join("config", "dictionary.txt"), "r") as dictionary:
            # fewer than 6 letters doesn't make for as fun of a game!
            self.hangman_words = [s.lower() for s in dictionary.read().splitlines() if len(s) >= 6]
    @commands.command(pass_context=True, name="hangman")
    @commands.check(command_channels)
    async def hangman(self, ctx):
        """
        Starts a game of hangman
        Usage: .hangman
        :param ctx: context object
        """
        hangman = HangmanGame(random.choice(self.hangman_words))
        msg = await ctx.send(embed=self.render_hangman_embed(hangman))
        # Track the game by message id so reaction events can find it later.
        self.hangman_games[msg.id] = hangman
    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """
        Adds a guess and updates game state
        """
        guild = self.bot.get_guild(payload.guild_id)
        # Only consider single-codepoint emoji on messages that host a game.
        if payload.message_id in self.hangman_games and guild is not None and len(payload.emoji.name) == 1:
            # Regional indicator A is U+1F1E6 == ord('a') + 127365, so this
            # maps an indicator emoji back to its ASCII letter.
            letter = chr(ord(payload.emoji.name) - 127365)
            if 'a' <= letter <= 'z':
                channel = guild.get_channel(payload.channel_id)
                message = await channel.fetch_message(payload.message_id)
                hangman = self.hangman_games[payload.message_id]
                hangman.guess(letter)
                if '*' not in hangman.visible:
                    # Game over (win, or forced reveal after 6 errors): stop tracking.
                    del self.hangman_games[payload.message_id]
                await message.edit(embed=self.render_hangman_embed(hangman))
                await message.clear_reaction(payload.emoji)
    def render_hangman_embed(self, hangman: HangmanGame):
        """
        Generates a new embed representing the state of the hangman game
        """
        global hangman_embed
        embed = hangman_embed.copy()
        # Reveal one body part per wrong guess, in the classic order.
        head = '()' if hangman.errors > 0 else ' '
        torso = '||' if hangman.errors > 1 else ' '
        left_arm = '/' if hangman.errors > 2 else ' '
        right_arm = '\\' if hangman.errors > 3 else ' '
        left_leg = '/' if hangman.errors > 4 else ' '
        right_leg = '\\' if hangman.errors > 5 else ' '
        diagram = f"``` {head}\n{left_arm}{torso}{right_arm}\n {left_leg}{right_leg}```"
        embed.add_field(name="Diagram", value=diagram)
        embed.add_field(
            name="Word",
            # ord(c) + 127365 turns an ASCII letter into its regional-indicator emoji.
            value=' '.join("🟦" if c == '*' else chr(ord(c) + 127365) for c in hangman.visible)
        )
        # padding
        embed.add_field(name="\u200b", value="\u200b")
        if len(hangman.guesses) > 0:
            embed.add_field(name="Guesses", value=' '.join(chr(ord(c) + 127365) for c in hangman.guesses) + "\n\n\n")
        if hangman.errors > 5:
            embed.add_field(name="Result", value="You lose!")
        elif '*' not in hangman.visible:
            embed.add_field(name="Result", value="You win!")
        return embed
    # Minesweeper (Credit: https://github.com/TheUnlocked)
    @commands.command(pass_context=True, name="minesweeper")
    @commands.check(command_channels)
    @commands.guild_only()
    async def new_minesweeper(self, ctx, x_str="8", y_str="8"):
        """
        Creates a new spoiler minesweeper game
        Usage: .minesweeper (x) (y)
        :param ctx: context object
        :param x_str: (optional) x dimension
        :param y_str: (optional) y dimension
        """
        if not (x_str.isdecimal() and y_str.isdecimal()):
            await ctx.send(
                f"Either {x_str} or {y_str} is not a valid dimension for a minesweeper board. "
                f"Please use the format `minesweeper x y`."
            )
            return
        x = int(x_str)
        y = int(y_str)
        if x < 2 or y < 2:
            await ctx.send(f"{x}x{y} is too small for a minesweeper board. The minimum dimensions are 2x2.")
            return
        elif x > 10 or y > 10:
            await ctx.send(f"{x}x{y} is too large for a minesweeper board. The maximum dimensions are 10x10.")
            return
        # ~10% of the board plus a random extra, clamped so at least one cell is safe.
        bomb_count = min(x * y - 1, max(2, (x * y // 10) + random.randrange(0, x + y)))
        board = self.create_board(x, y)
        self.place_bombs(board, bomb_count)
        self.place_neighbors(board)
        # If any zero-neighbour cell exists, un-spoiler one of them (set to None)
        # so the player has a revealed starting point.
        if any(any(cell == numbers[0] for cell in row) for row in board):
            while True:
                y = random.choice(range(len(board)))
                x = random.choice(range(len(board[y])))
                if board[y][x] == numbers[0]:
                    board[y][x] = None
                    break
        embed = discord.Embed(
            title="Spoiler Minesweeper",
            color=discord.Color.red()
        ).set_footer(text=f"Find all {bomb_count} bombs!")
        embed.add_field(
            name="Minefield",
            # The None cell renders unspoilered; everything else is wrapped in ||spoilers||.
            value="\n".join(["".join([numbers[0] if cell is None else f"||{cell}||" for cell in row]) for row in board])
        )
        await ctx.send(embed=embed)
    def create_board(self, x, y):
        # y rows of x columns, indexed board[y][x].
        return [[None for _ in range(x)] for _ in range(y)]
    def place_bombs(self, board, num):
        # Rejection-sample empty cells until `num` bombs are placed.
        while num > 0:
            y = random.choice(range(len(board)))
            x = random.choice(range(len(board[y])))
            if board[y][x] is not None:
                continue
            board[y][x] = bomb
            num -= 1
    def place_neighbors(self, board):
        # Fill each non-bomb cell with the digit emoji for its 8-neighbour bomb count.
        for y in range(len(board)):
            for x in range(len(board[y])):
                if board[y][x] == bomb:
                    continue
                ct = 0
                for c in range(-1, 2):
                    for r in range(-1, 2):
                        if 0 <= x + c < len(board[y]) and 0 <= y + r < len(board) and board[y + r][x + c] == bomb:
                            ct += 1
                board[y][x] = numbers[ct]
    # Dice
    @commands.command(pass_context=True)
    @commands.check(dm_commands)
    async def roll(self, ctx, number):
        """
        Rolls a die for the number
        Usage: .roll <number>
        :param ctx: context object
        :param number: number of sides on the die, or NdS notation (e.g. 2d6)
        """
        if "d" in number:
            # NdS notation: N dice with S sides; N defaults to 1 ("d20").
            sides = 0
            try:
                if number[:number.find("d")] != "":
                    dice = int(number[:number.find("d")])
                else:
                    dice = 1
                sides = int(number[number.find("d") + 1:])
            except ValueError:
                await ctx.send("Could not parse this roll")
                return
            if dice < 1 or sides < 1:
                await ctx.send("Not a valid number of dice/sides")
                return
            total = 0
            response = " ("
            for i in range(0, dice):
                roll_num = random.randint(1, sides)
                total += roll_num
                response += str(roll_num)
                if i == dice - 1:
                    break
                response += " + "
            response += " = " + str(total) + ")"
            if dice == 1:
                # A single die needs no breakdown.
                response = ""
            # Strip '@' so the message can't ping anyone via the display name.
            if ctx.author.nick is not None:
                response = ctx.author.nick.replace("@", "") + " rolled a " + str(total) + "!" + response
            else:
                response = ctx.author.name.replace("@", "") + " rolled a " + str(total) + "!" + response
            if len(response) > 500:
                # Too long for chat: drop the per-die breakdown, keep the total.
                await ctx.send(response[:response.find("(") - 1])
            else:
                await ctx.send(response)
        else:
            try:
                sides = int(number)
            except ValueError:
                await ctx.send("Could not parse this roll")
                return
            if sides < 1:
                await ctx.send("Not a valid number of sides")
                return
            if ctx.author.nick is not None:
                await ctx.send(ctx.author.nick.replace("@", "") + " rolled a " + str(random.randint(1, sides)) + "!")
            else:
                await ctx.send(ctx.author.name.replace("@", "") + " rolled a " + str(random.randint(1, sides)) + "!")
import sys, os, time, re, json
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib.request import quote
# Team code used in output paths under ./image/ and ./data/ (ht = Hanshin Tigers).
team = "ht"
# Collect the URL of each player's detail page from the 2018 roster index.
def get_player_page_url_list():
    """Return a list of absolute player detail-page URLs scraped from the roster."""
    endpoint = 'http://hanshintigers.jp/data/player/2018/'
    response = urlopen(endpoint)
    resources = response.read()
    html = BeautifulSoup(resources, "html.parser")
    # List of player detail-page URLs.
    url_list = []
    # Each roster entry links to the player's page with a relative href.
    for player in html.find_all(attrs={"class": "player-list-name"}):
        for a_tag in player.find_all("a"):
            url = a_tag.get("href")
            # Skip anchors without an href (get() returns None for those).
            if url:
                # Reuse `endpoint` instead of repeating the URL literal.
                url_list.append(endpoint + url)
    # debug
    print("[Get] player page url")
    return url_list
# Find the player's portrait image URL on a detail page.
def get_player_image_url(player_page_url):
    """Return the absolute URL of the player's .jpg portrait, or None if not found.

    Fix: the original `else: return []` fired on the *first* non-jpg <img>,
    aborting the scan early, and the `[]` return leaked past callers that
    test `!= None` and then crashed in get_image_name(). Now every image is
    scanned and None is returned on failure.
    """
    response = urlopen(player_page_url)
    resources = response.read()
    html = BeautifulSoup(resources, "html.parser")
    for image in html.find_all(attrs={"class": "clearfix"}):
        for img_tag in image.find_all("img"):
            url = img_tag.get("src")
            if url and url.find("jpg") != -1:
                # Relative src like "../../img/player_images2018/<id>.jpg".
                num = re.search(r'\.\./\.\./img/player_images2018/(.+)\.jpg', url)
                if num is None:
                    continue
                return "http://hanshintigers.jp/data/img/player_images2018/" + num.group(1) + ".jpg"
    return None
# Scrape the player's name and profile table from a detail page.
def get_player_data(player_page_url):
    """Return [name, <td strings...>] scraped from the player's detail page.

    Fix: the original tested `span_tag.find(id="full-name") != -1`, treating
    BeautifulSoup's Tag.find() (which returns a Tag or None) like str.find();
    the comparison was always true, so the first <span> was taken blindly.
    Now the span with id="full-name" is located directly.
    """
    response = urlopen(player_page_url)
    resources = response.read()
    html = BeautifulSoup(resources, "html.parser")
    player_data = []
    # Player name: the first <span id="full-name"> found in a "clearfix" table.
    for table in html.find_all(attrs={"class": "clearfix"}):
        span_tag = table.find("span", id="full-name")
        if span_tag is not None:
            player_data.append(span_tag.string)
            break
    # Profile fields: every <td> in the "clearfix" tables.
    for table in html.find_all(attrs={"class": "clearfix"}):
        for td_tag in table.find_all("td"):
            player_data.append(td_tag.string)
    return player_data
# Derive the image's base file name from its URL.
def get_image_name(img_url):
    """Extract the file name (without extension) from a player image URL.

    E.g. ".../player_images2018/player_01.jpg" -> "player_01".

    Raises:
        ValueError: if the URL does not match the expected pattern (the
            original raised an opaque AttributeError on .group() instead).
    """
    # Dots are escaped so "." no longer matches arbitrary characters.
    match = re.search(r'http://hanshintigers\.jp/data/img/player_images2018/(.+)\.jpg', img_url)
    if match is None:
        raise ValueError("unexpected image URL: {0}".format(img_url))
    return match.group(1)
# Download one player image into ./image/<team>/.
def download_image(img_url):
    """Download img_url to ./image/<team>/<name>.jpg, creating the directory."""
    img_name = get_image_name(img_url)
    save_dir = "./image/" + team + "/"
    # Create the save directory if needed.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = save_dir + img_name + ".jpg"
    try:
        # Fetch the image.
        urlretrieve(img_url, save_path)
        # Be polite to the server between downloads.
        time.sleep(1)
        # debug
        print("[Download] {0}.jpg".format(img_name))
    except Exception as e:
        # Fix: the caught exception was bound but never reported; include it
        # so failures are diagnosable.
        print("[Error] {0}: {1}".format(img_url, e))
# Convert the scraped player list into a dict and save it as JSON.
def convert_to_dic(player_data, file_name):
    """Zip player_data onto fixed keys, save ./data/<team>/<file_name>.json,
    and return the resulting dict.

    Note: zip() silently truncates when player_data has fewer entries than
    key_list, so missing fields are simply absent from the dict.
    """
    player_dic = {}
    key_list = ["name", "birthday", "age", "hw", "pb", "home", "graduate", "year"]
    for key, player in zip(key_list, player_data):
        player_dic[key] = player
    save_dir = "./data/" + team + "/"
    # Create the save directory if needed.
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_path = save_dir + file_name + ".json"
    # Fix: the original called json.dumps() and then json.dump()ed that
    # *string*, writing a double-encoded JSON string literal to disk.
    # ensure_ascii=False keeps the Japanese field values readable.
    with open(save_path, "w") as f:
        json.dump(player_dic, f, ensure_ascii=False)
    # debug
    print("[Save] {0}".format(save_path))
    return player_dic
if __name__ == '__main__':
    # Fetch the URL of every player detail page.
    player_page_url_list = get_player_page_url_list()
    for player_page_url in player_page_url_list:
        # Fetch the portrait image URL for this player.
        player_image_url = get_player_image_url(player_page_url)
        if player_image_url != None:
            # Download the image (currently disabled).
            #download_image(player_image_url)
            # Fetch the player's profile data.
            player_data = get_player_data(player_page_url)
            print(player_data)
            # Derive the output file name from the image URL.
            file_name = get_image_name(player_image_url)
            # Convert the player data list into a dict and save it as JSON.
            player_dic = convert_to_dic(player_data, file_name)
player_dic = convert_to_dic(player_data, file_name) | npb/ht.py | import sys, os, time, re, json
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.request import urlretrieve
from urllib.request import quote
team = "ht"
#選手の詳細ページのURLを取得する関数
def get_player_page_url_list():
endpoint = 'http://hanshintigers.jp/data/player/2018/'
request = endpoint
response = urlopen(request)
resources = response.read()
html = BeautifulSoup(resources, "html.parser")
#画像のURLを入れるリストを準備
url_list = []
#選手の詳細ページのURLを抜き取る
for player in html.find_all(attrs={"class":"player-list-name"}):
for a_tag in player.find_all("a"):
url = a_tag.get("href")
url_list.append("http://hanshintigers.jp/data/player/2018/" + url)
#デバック
print("[Get] player page url")
#画像のURLのリストを返す
return url_list
#画像のURLを取得する関数
def get_player_image_url(player_page_url):
request = player_page_url
response = urlopen(request)
resources = response.read()
html = BeautifulSoup(resources, "html.parser")
#画像のURLを抜き取る
for image in html.find_all(attrs={"class":"clearfix"}):
for img_tag in image.find_all("img"):
url = img_tag.get("src")
if url != [] and url.find("jpg") != -1:
num = re.search(r'../../img/player_images2018/(.+).jpg', url)
num = num.group(1)
url = "http://hanshintigers.jp/data/img/player_images2018/" + num + ".jpg"
return url
else:
return []
#選手データを取得する関数
def get_player_data(player_page_url):
request = player_page_url
response = urlopen(request)
resources = response.read()
html = BeautifulSoup(resources, "html.parser")
player_data = []
#選手名を抜き取る
for table in html.find_all(attrs={"class":"clearfix"}):
span_tag = table.find("span")
if span_tag.find(id="full-name") != -1:
player_data.append(span_tag.string)
break
#選手データを抜き取る
for table in html.find_all(attrs={"class":"clearfix"}):
for td_tag in table.find_all("td"):
player_data.append(td_tag.string)
return player_data
#画像の名前を取得する関数
def get_image_name(img_url):
img_name = re.search(r'http://hanshintigers.jp/data/img/player_images2018/(.+).jpg', img_url)
img_name = img_name.group(1)
return img_name
#画像をダウンロードする関数
def download_image(img_url):
#画像の名前を取得
img_name = get_image_name(img_url)
save_dir = "./image/" + team + "/"
#ファイルを保存するディレクトリを作成
if not os.path.exists(save_dir): os.makedirs(save_dir)
save_path = save_dir + img_name + ".jpg"
try:
#写真をダウンロード
urlretrieve(img_url, save_path)
#1秒スリープ
time.sleep(1)
#デバック
print("[Download] {0}.jpg".format(img_name))
except Exception as e:
#デバック
print("[Error] {0}".format(img_url))
#選手データのリストを辞書に変換する関数
def convert_to_dic(player_data, file_name):
#辞書を準備
player_dic = {}
key_list = ["name", "birthday", "age", "hw", "pb", "home", "graduate", "year"]
for (key, player) in zip(key_list, player_data):
player_dic[key] = player
save_dir = "./data/" + team + "/"
#ファイルを保存するディレクトリを作成
if not os.path.exists(save_dir): os.makedirs(save_dir)
#jsonファイルを保存するパス
save_path = save_dir + file_name + ".json"
#辞書をjsonで保存
with open(save_path, "w") as f:
player_json = json.dumps(player_dic)
json.dump(player_json, f)
#デバック
print("[Save] {0}".format(save_path))
return player_dic
if __name__ == '__main__':
#選手の詳細ページのURLを取得
player_page_url_list = get_player_page_url_list()
for player_page_url in player_page_url_list:
#画像のURLを取得
player_image_url = get_player_image_url(player_page_url)
if player_image_url != None:
#画像をダウンロード
#download_image(player_image_url)
#選手データを取得
player_data = get_player_data(player_page_url)
print(player_data)
#画像の名前を取得
file_name = get_image_name(player_image_url)
#選手データのリストを辞書に変換
player_dic = convert_to_dic(player_data, file_name) | 0.109753 | 0.087097 |
# standard library
from typing import Union
# first-party
from tcex.api.tc.v3.api_endpoints import ApiEndpoints
from tcex.api.tc.v3.object_abc import ObjectABC
from tcex.api.tc.v3.object_collection_abc import ObjectCollectionABC
from tcex.api.tc.v3.security.users.user_filter import UserFilter
from tcex.api.tc.v3.security.users.user_model import UserModel, UsersModel
class Users(ObjectCollectionABC):
    """Users Collection.
    # Example of params input
    {
        'result_limit': 100, # Limit the retrieved results.
        'result_start': 10, # Starting count used for pagination.
        'fields': ['caseId', 'summary'] # Select additional return fields.
    }
    Args:
        session (Session): Session object configured with TC API Auth.
        tql_filters (list): List of TQL filters.
        params (dict): Additional query params (see example above).
    """
    def __init__(self, **kwargs) -> None:
        """Initialize class properties."""
        # Session, TQL filter, and params are consumed by the base collection;
        # any remaining kwargs seed the collection model.
        super().__init__(
            kwargs.pop('session', None), kwargs.pop('tql_filter', None), kwargs.pop('params', None)
        )
        self._model = UsersModel(**kwargs)
        self.type_ = 'users'
    def __iter__(self) -> 'User':
        """Iterate over CM objects."""
        return self.iterate(base_class=User)
    @property
    def _api_endpoint(self) -> str:
        """Return the type specific API endpoint."""
        return ApiEndpoints.USERS.value
    @property
    def filter(self) -> 'UserFilter':
        """Return the type specific filter object."""
        return UserFilter(self.tql)
class User(ObjectABC):
    """Users Object.
    Args:
        user_name (str, kwargs): The **user name** for the User.
    """
    def __init__(self, **kwargs) -> None:
        """Initialize class properties."""
        super().__init__(kwargs.pop('session', None))
        # properties
        self._model = UserModel(**kwargs)
        # Field/filter names used when this object is nested under another type.
        self._nested_field_name = 'users'
        self._nested_filter = 'has_user'
        self.type_ = 'User'
    @property
    def _api_endpoint(self) -> str:
        """Return the type specific API endpoint."""
        return ApiEndpoints.USERS.value
    @property
    def model(self) -> 'UserModel':
        """Return the model data."""
        return self._model
    @model.setter
    def model(self, data: Union['UserModel', dict]) -> None:
        """Create model using the provided data.

        Raises:
            RuntimeError: If *data* is neither a UserModel nor a dict.
        """
        if isinstance(data, type(self.model)):
            # provided data is already a model, nothing required to change
            self._model = data
        elif isinstance(data, dict):
            # provided data is raw response, load the model
            self._model = type(self.model)(**data)
        else:
            raise RuntimeError(f'Invalid data type: {type(data)} provided.')
from typing import Union
# first-party
from tcex.api.tc.v3.api_endpoints import ApiEndpoints
from tcex.api.tc.v3.object_abc import ObjectABC
from tcex.api.tc.v3.object_collection_abc import ObjectCollectionABC
from tcex.api.tc.v3.security.users.user_filter import UserFilter
from tcex.api.tc.v3.security.users.user_model import UserModel, UsersModel
class Users(ObjectCollectionABC):
"""Users Collection.
# Example of params input
{
'result_limit': 100, # Limit the retrieved results.
'result_start': 10, # Starting count used for pagination.
'fields': ['caseId', 'summary'] # Select additional return fields.
}
Args:
session (Session): Session object configured with TC API Auth.
tql_filters (list): List of TQL filters.
params (dict): Additional query params (see example above).
"""
def __init__(self, **kwargs) -> None:
"""Initialize class properties."""
super().__init__(
kwargs.pop('session', None), kwargs.pop('tql_filter', None), kwargs.pop('params', None)
)
self._model = UsersModel(**kwargs)
self.type_ = 'users'
def __iter__(self) -> 'User':
"""Iterate over CM objects."""
return self.iterate(base_class=User)
@property
def _api_endpoint(self) -> str:
"""Return the type specific API endpoint."""
return ApiEndpoints.USERS.value
@property
def filter(self) -> 'UserFilter':
"""Return the type specific filter object."""
return UserFilter(self.tql)
class User(ObjectABC):
"""Users Object.
Args:
user_name (str, kwargs): The **user name** for the User.
"""
def __init__(self, **kwargs) -> None:
"""Initialize class properties."""
super().__init__(kwargs.pop('session', None))
# properties
self._model = UserModel(**kwargs)
self._nested_field_name = 'users'
self._nested_filter = 'has_user'
self.type_ = 'User'
@property
def _api_endpoint(self) -> str:
"""Return the type specific API endpoint."""
return ApiEndpoints.USERS.value
@property
def model(self) -> 'UserModel':
"""Return the model data."""
return self._model
@model.setter
def model(self, data: Union['UserModel', dict]) -> None:
"""Create model using the provided data."""
if isinstance(data, type(self.model)):
# provided data is already a model, nothing required to change
self._model = data
elif isinstance(data, dict):
# provided data is raw response, load the model
self._model = type(self.model)(**data)
else:
raise RuntimeError(f'Invalid data type: {type(data)} provided.') | 0.927732 | 0.236296 |
import re
import os
from django.conf import settings as dj_settings
from django.utils.functional import lazy
from constance import config
from seahub.settings import SEAFILE_VERSION, SITE_TITLE, SITE_NAME, \
MAX_FILE_NAME, LOGO_PATH, BRANDING_CSS, LOGO_WIDTH, LOGO_HEIGHT,\
SHOW_REPO_DOWNLOAD_BUTTON, SITE_ROOT, ENABLE_GUEST_INVITATION, \
FAVICON_PATH, ENABLE_THUMBNAIL, THUMBNAIL_SIZE_FOR_ORIGINAL, \
MEDIA_ROOT, SHOW_LOGOUT_ICON, CUSTOM_LOGO_PATH, CUSTOM_FAVICON_PATH
from seahub.constants import DEFAULT_ADMIN
from seahub.utils import get_site_name
try:
from seahub.settings import SEACLOUD_MODE
except ImportError:
SEACLOUD_MODE = False
from seahub.utils import HAS_FILE_SEARCH, EVENTS_ENABLED, is_pro_version
try:
from seahub.settings import ENABLE_PUBFILE
except ImportError:
ENABLE_PUBFILE = False
try:
from seahub.settings import ENABLE_SYSADMIN_EXTRA
except ImportError:
ENABLE_SYSADMIN_EXTRA = False
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
def base(request):
    """
    Add seahub base configure to the context.

    Returns a dict of template context variables assembled from Django
    settings, constance (DB-backed) config, and the current request.
    """
    try:
        org = request.user.org
    except AttributeError:
        # Anonymous users / users without an org attribute.
        org = None
    # extra repo id from request path, use in search
    # (pattern matches a version-4 UUID embedded in the URL path)
    repo_id_patt = r".*/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})/.*"
    m = re.match(repo_id_patt, request.get_full_path())
    search_repo_id = m.group(1) if m is not None else None
    file_server_root = config.FILE_SERVER_ROOT
    if not file_server_root.endswith('/'):
        file_server_root += '/'
    logo_path = LOGO_PATH
    favicon_path = FAVICON_PATH
    # filter ajax/api request out
    # NOTE(review): request.is_ajax() is deprecated in Django 3.1+ — confirm
    # the project's Django version before upgrading.
    if (not request.is_ajax()) and ("api2/" not in request.path) and \
            ("api/v2.1/" not in request.path):
        # get logo path — prefer an admin-uploaded custom logo when present
        custom_logo_file = os.path.join(MEDIA_ROOT, CUSTOM_LOGO_PATH)
        if os.path.exists(custom_logo_file):
            logo_path = CUSTOM_LOGO_PATH
        # get favicon path
        custom_favicon_file = os.path.join(MEDIA_ROOT, CUSTOM_FAVICON_PATH)
        if os.path.exists(custom_favicon_file):
            favicon_path = CUSTOM_FAVICON_PATH
    result = {
        'seafile_version': SEAFILE_VERSION,
        'site_title': config.SITE_TITLE,
        'branding_css': BRANDING_CSS,
        'enable_branding_css': config.ENABLE_BRANDING_CSS,
        'favicon_path': favicon_path,
        'logo_path': logo_path,
        'logo_width': LOGO_WIDTH,
        'logo_height': LOGO_HEIGHT,
        'seacloud_mode': SEACLOUD_MODE,
        'cloud_mode': request.cloud_mode,
        'org': org,
        'site_name': get_site_name(),
        'enable_signup': config.ENABLE_SIGNUP,
        'max_file_name': MAX_FILE_NAME,
        'has_file_search': HAS_FILE_SEARCH,
        'enable_pubfile': ENABLE_PUBFILE,
        'show_repo_download_button': SHOW_REPO_DOWNLOAD_BUTTON,
        'share_link_password_min_length': config.SHARE_LINK_PASSWORD_MIN_LENGTH,
        'repo_password_min_length': config.REPO_PASSWORD_MIN_LENGTH,
        'events_enabled': EVENTS_ENABLED,
        'sysadmin_extra_enabled': ENABLE_SYSADMIN_EXTRA,
        'multi_tenancy': MULTI_TENANCY,
        'multi_institution': getattr(dj_settings, 'MULTI_INSTITUTION', False),
        'search_repo_id': search_repo_id,
        'SITE_ROOT': SITE_ROOT,
        'CSRF_COOKIE_NAME': dj_settings.CSRF_COOKIE_NAME,
        'constance_enabled': dj_settings.CONSTANCE_ENABLED,
        'FILE_SERVER_ROOT': file_server_root,
        'LOGIN_URL': dj_settings.LOGIN_URL,
        'enable_thumbnail': ENABLE_THUMBNAIL,
        'thumbnail_size_for_original': THUMBNAIL_SIZE_FOR_ORIGINAL,
        'enable_guest_invitation': ENABLE_GUEST_INVITATION,
        'enable_terms_and_conditions': config.ENABLE_TERMS_AND_CONDITIONS,
        'show_logout_icon': SHOW_LOGOUT_ICON,
        'is_pro': True if is_pro_version() else False,
        'enable_repo_wiki_mode': dj_settings.ENABLE_REPO_WIKI_MODE,
        'enable_upload_folder': dj_settings.ENABLE_UPLOAD_FOLDER,
        'enable_resumable_fileupload': dj_settings.ENABLE_RESUMABLE_FILEUPLOAD,
    }
    # Only admins need to know whether they hold the default (full) admin role.
    if request.user.is_staff:
        result['is_default_admin'] = request.user.admin_role == DEFAULT_ADMIN
    return result
def debug(request):
    """
    Returns context variables helpful for debugging.
    """
    context_extras = {}
    # `and` binds tighter than `or`, so the original chain is equivalent to
    # DEBUG and (internal IP, or ?_dev=1, or compression disabled).
    debug_enabled = dj_settings.DEBUG and (
        request.META.get('REMOTE_ADDR') in dj_settings.INTERNAL_IPS
        or request.GET.get('_dev', '') == '1'
        or not dj_settings.COMPRESS_ENABLED
    )
    if debug_enabled:
        context_extras['debug'] = True
        from django.db import connection
        # Return a lazy reference that computes connection.queries on access,
        # to ensure it contains queries triggered after this function runs.
        context_extras['sql_queries'] = lazy(lambda: connection.queries, list)
    return context_extras
import os
from django.conf import settings as dj_settings
from django.utils.functional import lazy
from constance import config
from seahub.settings import SEAFILE_VERSION, SITE_TITLE, SITE_NAME, \
MAX_FILE_NAME, LOGO_PATH, BRANDING_CSS, LOGO_WIDTH, LOGO_HEIGHT,\
SHOW_REPO_DOWNLOAD_BUTTON, SITE_ROOT, ENABLE_GUEST_INVITATION, \
FAVICON_PATH, ENABLE_THUMBNAIL, THUMBNAIL_SIZE_FOR_ORIGINAL, \
MEDIA_ROOT, SHOW_LOGOUT_ICON, CUSTOM_LOGO_PATH, CUSTOM_FAVICON_PATH
from seahub.constants import DEFAULT_ADMIN
from seahub.utils import get_site_name
try:
from seahub.settings import SEACLOUD_MODE
except ImportError:
SEACLOUD_MODE = False
from seahub.utils import HAS_FILE_SEARCH, EVENTS_ENABLED, is_pro_version
try:
from seahub.settings import ENABLE_PUBFILE
except ImportError:
ENABLE_PUBFILE = False
try:
from seahub.settings import ENABLE_SYSADMIN_EXTRA
except ImportError:
ENABLE_SYSADMIN_EXTRA = False
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
def base(request):
"""
Add seahub base configure to the context.
"""
try:
org = request.user.org
except AttributeError:
org = None
# extra repo id from request path, use in search
repo_id_patt = r".*/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})/.*"
m = re.match(repo_id_patt, request.get_full_path())
search_repo_id = m.group(1) if m is not None else None
file_server_root = config.FILE_SERVER_ROOT
if not file_server_root.endswith('/'):
file_server_root += '/'
logo_path = LOGO_PATH
favicon_path = FAVICON_PATH
# filter ajax/api request out
if (not request.is_ajax()) and ("api2/" not in request.path) and \
("api/v2.1/" not in request.path):
# get logo path
custom_logo_file = os.path.join(MEDIA_ROOT, CUSTOM_LOGO_PATH)
if os.path.exists(custom_logo_file):
logo_path = CUSTOM_LOGO_PATH
# get favicon path
custom_favicon_file = os.path.join(MEDIA_ROOT, CUSTOM_FAVICON_PATH)
if os.path.exists(custom_favicon_file):
favicon_path = CUSTOM_FAVICON_PATH
result = {
'seafile_version': SEAFILE_VERSION,
'site_title': config.SITE_TITLE,
'branding_css': BRANDING_CSS,
'enable_branding_css': config.ENABLE_BRANDING_CSS,
'favicon_path': favicon_path,
'logo_path': logo_path,
'logo_width': LOGO_WIDTH,
'logo_height': LOGO_HEIGHT,
'seacloud_mode': SEACLOUD_MODE,
'cloud_mode': request.cloud_mode,
'org': org,
'site_name': get_site_name(),
'enable_signup': config.ENABLE_SIGNUP,
'max_file_name': MAX_FILE_NAME,
'has_file_search': HAS_FILE_SEARCH,
'enable_pubfile': ENABLE_PUBFILE,
'show_repo_download_button': SHOW_REPO_DOWNLOAD_BUTTON,
'share_link_password_min_length': config.SHARE_LINK_PASSWORD_MIN_LENGTH,
'repo_password_min_length': config.REPO_PASSWORD_MIN_LENGTH,
'events_enabled': EVENTS_ENABLED,
'sysadmin_extra_enabled': ENABLE_SYSADMIN_EXTRA,
'multi_tenancy': MULTI_TENANCY,
'multi_institution': getattr(dj_settings, 'MULTI_INSTITUTION', False),
'search_repo_id': search_repo_id,
'SITE_ROOT': SITE_ROOT,
'CSRF_COOKIE_NAME': dj_settings.CSRF_COOKIE_NAME,
'constance_enabled': dj_settings.CONSTANCE_ENABLED,
'FILE_SERVER_ROOT': file_server_root,
'LOGIN_URL': dj_settings.LOGIN_URL,
'enable_thumbnail': ENABLE_THUMBNAIL,
'thumbnail_size_for_original': THUMBNAIL_SIZE_FOR_ORIGINAL,
'enable_guest_invitation': ENABLE_GUEST_INVITATION,
'enable_terms_and_conditions': config.ENABLE_TERMS_AND_CONDITIONS,
'show_logout_icon': SHOW_LOGOUT_ICON,
'is_pro': True if is_pro_version() else False,
'enable_repo_wiki_mode': dj_settings.ENABLE_REPO_WIKI_MODE,
'enable_upload_folder': dj_settings.ENABLE_UPLOAD_FOLDER,
'enable_resumable_fileupload': dj_settings.ENABLE_RESUMABLE_FILEUPLOAD,
}
if request.user.is_staff:
result['is_default_admin'] = request.user.admin_role == DEFAULT_ADMIN
return result
def debug(request):
"""
Returns context variables helpful for debugging.
"""
context_extras = {}
if dj_settings.DEBUG and request.META.get('REMOTE_ADDR') in dj_settings.INTERNAL_IPS or \
dj_settings.DEBUG and request.GET.get('_dev', '') == '1' or \
dj_settings.DEBUG and not dj_settings.COMPRESS_ENABLED:
context_extras['debug'] = True
from django.db import connection
# Return a lazy reference that computes connection.queries on access,
# to ensure it contains queries triggered after this function runs.
context_extras['sql_queries'] = lazy(lambda: connection.queries, list)
return context_extras | 0.277179 | 0.062103 |
""" Tensorflow mT5 model. """
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
# Doc-building constants: mT5 deliberately points at the T5 config/tokenizer
# docs because it reuses the T5 implementation wholesale.
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
class TFMT5Model(TFT5Model):
    r"""
    This class overrides :class:`~transformers.TFT5Model`. Please check the superclass for the appropriate
    documentation alongside usage examples.
    Examples::
        >>> from transformers import TFMT5Model, T5Tokenizer
        >>> model = TFMT5Model.from_pretrained("google/mt5-small")
        >>> tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
        >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
        >>> summary = "Weiter Verhandlung in Syrien."
        >>> batch = tokenizer.prepare_seq2seq_batch(src_texts=[article], tgt_texts=[summary], return_tensors="tf")
        >>> batch["decoder_input_ids"] = batch["labels"]
        >>> del batch["labels"]
        >>> outputs = model(batch)
        >>> hidden_states = outputs.last_hidden_state
    """
    # mT5 reuses the TF T5 architecture unchanged; only the registered
    # model_type and config class differ from the parent.
    model_type = "mt5"
    config_class = MT5Config
class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    r"""
    This class overrides :class:`~transformers.TFT5ForConditionalGeneration`. Please check the superclass for the
    appropriate documentation alongside usage examples.
    Examples::
        >>> from transformers import TFMT5ForConditionalGeneration, T5Tokenizer
        >>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        >>> tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
        >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
        >>> summary = "Weiter Verhandlung in Syrien."
        >>> batch = tokenizer.prepare_seq2seq_batch(src_texts=[article], tgt_texts=[summary], return_tensors="tf")
        >>> outputs = model(batch)
        >>> loss = outputs.loss
    """
    # mT5 reuses the TF T5 seq2seq head unchanged; only the registered
    # model_type and config class differ from the parent.
    model_type = "mt5"
    config_class = MT5Config
class TFMT5EncoderModel(TFT5EncoderModel):
    r"""
    This class overrides :class:`~transformers.TFT5EncoderModel`. Please check the superclass for the appropriate
    documentation alongside usage examples.
    Examples::
        >>> from transformers import TFMT5EncoderModel, T5Tokenizer
        >>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small")
        >>> tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
        >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
        >>> input_ids = tokenizer(article, return_tensors="tf").input_ids
        >>> outputs = model(input_ids)
        >>> hidden_state = outputs.last_hidden_state
    """
    # mT5 reuses the TF T5 encoder unchanged; only the registered
    # model_type and config class differ from the parent.
    model_type = "mt5"
    config_class = MT5Config
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
class TFMT5Model(TFT5Model):
r"""
This class overrides :class:`~transformers.TFT5Model`. Please check the superclass for the appropriate
documentation alongside usage examples.
Examples::
>>> from transformers import TFMT5Model, T5Tokenizer
>>> model = TFMT5Model.from_pretrained("google/mt5-small")
>>> tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
>>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
>>> summary = "Weiter Verhandlung in Syrien."
>>> batch = tokenizer.prepare_seq2seq_batch(src_texts=[article], tgt_texts=[summary], return_tensors="tf")
>>> batch["decoder_input_ids"] = batch["labels"]
>>> del batch["labels"]
>>> outputs = model(batch)
>>> hidden_states = outputs.last_hidden_state
"""
model_type = "mt5"
config_class = MT5Config
class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
r"""
This class overrides :class:`~transformers.TFT5ForConditionalGeneration`. Please check the superclass for the
appropriate documentation alongside usage examples.
Examples::
>>> from transformers import TFMT5ForConditionalGeneration, T5Tokenizer
>>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
>>> tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
>>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
>>> summary = "Weiter Verhandlung in Syrien."
>>> batch = tokenizer.prepare_seq2seq_batch(src_texts=[article], tgt_texts=[summary], return_tensors="tf")
>>> outputs = model(batch)
>>> loss = outputs.loss
"""
model_type = "mt5"
config_class = MT5Config
class TFMT5EncoderModel(TFT5EncoderModel):
r"""
This class overrides :class:`~transformers.TFT5EncoderModel`. Please check the superclass for the appropriate
documentation alongside usage examples.
Examples::
>>> from transformers import TFMT5EncoderModel, T5Tokenizer
>>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small")
>>> tokenizer = T5Tokenizer.from_pretrained("google/mt5-small")
>>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
>>> input_ids = tokenizer(article, return_tensors="tf").input_ids
>>> outputs = model(input_ids)
>>> hidden_state = outputs.last_hidden_state
"""
model_type = "mt5"
config_class = MT5Config | 0.884171 | 0.537709 |
try:
from .waterfall import Waterfall
except:
from waterfall import Waterfall
from optparse import OptionParser
import sys
import os
#------
# Logging set up: INFO goes to stdout with a compact format; any other level
# goes to stderr with a timing prefix.
import logging
logger = logging.getLogger(__name__)
level_log = logging.INFO
if level_log == logging.INFO:
    stream = sys.stdout
    format = '%(name)-15s %(levelname)-8s %(message)s'
else:
    stream = sys.stderr
    # Fixed: the original string had a doubled '%%' and a missing '%' before
    # '(name)', so the log lines would have shown literal text instead of
    # the relativeCreated/name fields.
    format = '%(relativeCreated)5d %(name)-15s %(levelname)-8s %(message)s'
logging.basicConfig(format=format, stream=stream, level=level_log)
#------
def bl_scrunch(filename, out_dir='./', new_filename=None, max_load=None, f_scrunch=None):
    """ Frequency scrunch (lower resolution by averaging) filename.
    Args:
        filename (str): Name of the HDF5 file to open.
        out_dir (str): Directory for the output file. Default: current dir.
        new_filename (str): Explicit output file name; when falsy it is derived
            from *filename* by replacing '.h5' with '.scrunched.h5'.
        max_load (float): Maximum amount of data to load (GB, per the CLI help).
        f_scrunch (int): Number of adjacent frequency channels to average.
    Raises:
        ValueError: if *f_scrunch* is not supplied.
    """
    # Fail early with a clear message instead of a TypeError at the '%i'
    # formatting below when f_scrunch was left as None.
    if f_scrunch is None:
        raise ValueError("f_scrunch (number of channels to average) must be given")
    fil_file = Waterfall(filename, max_load=max_load)
    if not new_filename:
        # Keep only the basename of the input when deriving the output name.
        new_filename = out_dir + filename.replace('.h5', '.scrunched.h5').split('/')[-1]
    # Use the module logger rather than print() for consistency with the
    # rest of this module's output.
    logger.info("Using fscrunch %i" % f_scrunch)
    fil_file.write_to_hdf5(new_filename, f_scrunch=f_scrunch)
def cmd_tool(args=None):
    """ Command line utility for frequency scrunching (channel averaging) an HDF5 file.
    Usage:
        bl_scrunch <FULL_PATH_TO_FIL_FILE> [options]
    Options:
        -h, --help show this help message and exit
        -o OUT_DIR, --out_dir=OUT_DIR
                        Location for output files. Default: local dir.
        -n NEW_FILENAME, --new_filename=NEW_FILENAME
                        New filename. Default: replaces extension to .scrunched.h5
        -d, --delete_input This option deletes the input file after conversion.
        -f FSCRUNCH, --fscrunch=FSCRUNCH
                        Number of adjacent channels to average together.
        -l MAX_LOAD Maximum data limit to load. Default:1GB
    """
    # NOTE(review): the original docstring/usage text was copied from the
    # h52fil tool and wrongly described HDF5 -> filterbank conversion.
    p = OptionParser()
    p.set_usage('Command line utility for frequency scrunching (channel averaging) an HDF5 file.\n'
                ' >>bl_scrunch <FULL_PATH_TO_FIL_FILE> [options]')
    p.add_option('-o', '--out_dir', dest='out_dir', type='str', default='./',
                 help='Location for output files. Default: local dir. ')
    p.add_option('-n', '--new_filename', dest='new_filename', type='str', default='',
                 help='New filename. Default: replaces extension to .scrunched.h5')
    p.add_option('-d', '--delete_input', dest='delete_input', action='store_true', default=False,
                 help='This option deletes the input file after conversion.')
    p.add_option('-f', '--fscrunch', dest='f_scrunch', default=1, type=int,
                 help='Average (aka scrunch) across frequency. Number of channels to average together.')
    p.add_option('-l', action='store', default=None, dest='max_load', type=float,
                 help='Maximum data limit to load. Default:1GB')
    if args is None:
        opts, args = p.parse_args(sys.argv[1:])
    else:
        opts, args = p.parse_args(args)
    if len(args) != 1:
        logger.info('Please specify a file name. \nExiting.')
        sys.exit()
    filename = args[0]
    # The default of 1 is a no-op scrunch, so treat it as "not supplied".
    if opts.f_scrunch == 1:
        logger.info('Please specify frequency scrunch amount with -f \nExiting.')
        sys.exit()
    bl_scrunch(filename, out_dir=opts.out_dir, new_filename=opts.new_filename,
               max_load=opts.max_load, f_scrunch=opts.f_scrunch)
    if opts.delete_input:
        # Fixed: the original message started with a stray quote character.
        logger.info("Deleting input file: %s" % filename)
        os.remove(filename)
if __name__ == "__main__":
cmd_tool() | blimpy/bl_scrunch.py | try:
from .waterfall import Waterfall
except:
from waterfall import Waterfall
from optparse import OptionParser
import sys
import os
#------
# Logging set up
import logging
logger = logging.getLogger(__name__)
level_log = logging.INFO
if level_log == logging.INFO:
stream = sys.stdout
format = '%(name)-15s %(levelname)-8s %(message)s'
else:
stream = sys.stderr
format = '%%(relativeCreated)5d (name)-15s %(levelname)-8s %(message)s'
logging.basicConfig(format=format,stream=stream,level = level_log)
#------
def bl_scrunch(filename, out_dir='./', new_filename=None, max_load=None, f_scrunch=None):
""" Frequency scrunch (lower resolution by averaging) filename
Args:
filename (str): Name of file to open
out_dir (str):
"""
fil_file = Waterfall(filename, max_load=max_load)
if not new_filename:
new_filename = out_dir+filename.replace('.h5', '.scrunched.h5').split('/')[-1]
print("Using fscrunch %i" % f_scrunch)
fil_file.write_to_hdf5(new_filename, f_scrunch=f_scrunch)
def cmd_tool(args=None):
""" Command line utility for converting HDF5 (.h5) to Sigproc filterbank (.fil) format
Usage:
h52fil <FULL_PATH_TO_FIL_FILE> [options]
Options:
-h, --help show this help message and exit
-o OUT_DIR, --out_dir=OUT_DIR
Location for output files. Default: local dir.
-n NEW_FILENAME, --new_filename=NEW_FILENAME
New filename. Default: replaces extension to .fil
-d, --delete_input This option deletes the input file after conversion.
-l MAX_LOAD Maximum data limit to load. Default:1GB
"""
p = OptionParser()
p.set_usage('Command line utility for converting HDF5 (.h5) to Sigproc filterbank (.fil) format \n >>h52fil <FULL_PATH_TO_FIL_FILE> [options]')
p.add_option('-o', '--out_dir', dest='out_dir', type='str', default='./',
help='Location for output files. Default: local dir. ')
p.add_option('-n', '--new_filename', dest='new_filename', type='str', default='',
help='New filename. Default: replaces extension to .scrunched.h5')
p.add_option('-d', '--delete_input', dest='delete_input', action='store_true', default=False,
help='This option deletes the input file after conversion.')
p.add_option('-f', '--fscrunch', dest='f_scrunch', default=1, type=int,
help='Average (aka scrunch) across frequency. Number of channels to average together.')
p.add_option('-l', action='store', default=None, dest='max_load', type=float,
help='Maximum data limit to load. Default:1GB')
if args is None:
opts, args = p.parse_args(sys.argv[1:])
else:
opts, args = p.parse_args(args)
if len(args) != 1:
logger.info('Please specify a file name. \nExiting.')
sys.exit()
else:
filename = args[0]
if opts.f_scrunch == 1:
logger.info('Please specify frequency scrunch amount with -f \nExiting.')
sys.exit()
bl_scrunch(filename, out_dir=opts.out_dir, new_filename=opts.new_filename,
max_load=opts.max_load, f_scrunch=opts.f_scrunch)
if opts.delete_input:
logger.info("'Deleting input file: %s"%(filename))
os.remove(filename)
if __name__ == "__main__":
cmd_tool() | 0.343232 | 0.088583 |
from __future__ import unicode_literals
from itertools import chain
from operator import methodcaller
import regex as re
from six.moves import zip_longest
from dateparser.utils import normalize_unicode
# Single-character separators the date parser handles natively.
PARSER_HARDCODED_TOKENS = [":", ".", " ", "-", "/"]
# Meridian and timezone tokens recognized by the parser as-is.
PARSER_KNOWN_TOKENS = ["am", "pm", "UTC", "GMT", "Z"]
# Tokens always preserved while splitting, even when formatting is dropped.
ALWAYS_KEEP_TOKENS = ["+"] + PARSER_HARDCODED_TOKENS
# Canonical English tokens that locale data may provide translations for.
KNOWN_WORD_TOKENS = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday',
                     'sunday', 'january', 'february', 'march', 'april', 'may', 'june',
                     'july', 'august', 'september', 'october', 'november', 'december',
                     'year', 'month', 'week', 'day', 'hour', 'minute', 'second', 'ago',
                     'in', 'next', 'after', 'am', 'pm']
# Strips literal parentheses from relative-regex strings before sorting them.
PARENTHESES_PATTERN = re.compile(r'[\(\)]')
# Captures runs of digits so numbers survive token splitting.
NUMERAL_PATTERN = re.compile(r'(\d+)')
# Matches any token containing at least one word character other than '_'.
KEEP_TOKEN_PATTERN = re.compile(r"^.*[^\W_].*$", flags=re.U)
class UnknownTokenError(Exception):
    """Error type for unrecognized tokens (not raised in this module's visible code)."""
    pass
class Dictionary(object):
    """
    Class that modifies and stores translations and handles splitting of date string.
    :param locale_info:
        Locale info (translation data) of the locale.
    :type locale_info: dict
    :param settings:
        Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
    :type settings: dict
    :return: a Dictionary instance.
    """
    # Class-level caches shared by all instances, keyed first by
    # settings.registry_key and then by the locale name.
    _split_regex_cache = {}
    _sorted_words_cache = {}
    _split_relative_regex_cache = {}
    _sorted_relative_strings_cache = {}
    _match_relative_regex_cache = {}
    def __init__(self, locale_info, settings=None):
        dictionary = {}
        self._settings = settings
        self.info = locale_info
        # 'skip' and 'pertain' words are recognized but carry no meaning,
        # hence they map to None.
        if 'skip' in locale_info:
            skip = map(methodcaller('lower'), locale_info['skip'])
            dictionary.update(zip_longest(skip, [], fillvalue=None))
        if 'pertain' in locale_info:
            pertain = map(methodcaller('lower'), locale_info['pertain'])
            dictionary.update(zip_longest(pertain, [], fillvalue=None))
        # Localized words map back to their canonical English token.
        for word in KNOWN_WORD_TOKENS:
            if word in locale_info:
                translations = map(methodcaller('lower'), locale_info[word])
                dictionary.update(zip_longest(translations, [], fillvalue=word))
        # Separators and parser tokens map to themselves.
        dictionary.update(zip_longest(ALWAYS_KEEP_TOKENS, ALWAYS_KEEP_TOKENS))
        dictionary.update(zip_longest(map(methodcaller('lower'),
                                          PARSER_KNOWN_TOKENS),
                                      PARSER_KNOWN_TOKENS))
        relative_type = locale_info.get('relative-type', {})
        for key, value in relative_type.items():
            relative_translations = map(methodcaller('lower'), value)
            dictionary.update(zip_longest(relative_translations, [], fillvalue=key))
        self._dictionary = dictionary
        # The locale data encodes this flag as the string 'True'/'False'.
        # Fixed: the original ran bool(eval(...)) on data, which is unsafe
        # and raises TypeError for non-string values; compare directly.
        no_word_spacing = locale_info.get('no_word_spacing', 'False')
        self._no_word_spacing = str(no_word_spacing) == 'True'
        relative_type_regex = locale_info.get("relative-type-regex", {})
        self._relative_strings = list(chain(*relative_type_regex.values()))
    def __contains__(self, key):
        # Settings-level skip tokens are always treated as known words.
        if key in self._settings.SKIP_TOKENS:
            return True
        return self._dictionary.__contains__(key)
    def __getitem__(self, key):
        # Skip tokens translate to nothing, like locale 'skip' words.
        if key in self._settings.SKIP_TOKENS:
            return None
        return self._dictionary.__getitem__(key)
    def __iter__(self):
        return chain(self._settings.SKIP_TOKENS, iter(self._dictionary))
    def are_tokens_valid(self, tokens):
        """
        Check if tokens are valid tokens for the locale.
        :param tokens:
            a list of string or unicode tokens.
        :type tokens: list
        :return: True if tokens are valid, False otherwise.
        """
        # A string made only of separators carries no date information.
        has_only_keep_tokens = not set(tokens) - set(ALWAYS_KEEP_TOKENS)
        if has_only_keep_tokens:
            return False
        match_relative_regex = self._get_match_relative_regex_cache()
        for token in tokens:
            # A token is valid if it is a relative expression, a known
            # word/separator, or a number.
            if any([match_relative_regex.match(token),
                    token in self, token.isdigit()]):
                continue
            else:
                return False
        else:
            return True
    def split(self, string, keep_formatting=False):
        """
        Split the date string using translations in locale info.
        :param string:
            Date string to be splitted.
        :type string:
            str|unicode
        :param keep_formatting:
            If True, retain formatting of the date string.
        :type keep_formatting: bool
        :return: A list of string tokens formed after splitting the date string.
        """
        if not string:
            return string
        # First isolate whole relative expressions (e.g. "2 days ago"), then
        # split every remaining fragment on known words.
        split_relative_regex = self._get_split_relative_regex_cache()
        match_relative_regex = self._get_match_relative_regex_cache()
        tokens = split_relative_regex.split(string)
        for i, token in enumerate(tokens):
            if match_relative_regex.match(token):
                tokens[i] = [token]
                continue
            tokens[i] = self._split_by_known_words(token, keep_formatting)
        return list(filter(bool, chain(*tokens)))
    def _split_by_known_words(self, string, keep_formatting):
        """Recursively split *string* around the known words it contains."""
        if not string:
            return string
        regex = self._get_split_regex_cache()
        match = regex.match(string)
        if not match:
            # No known word left: split any remaining digits off and keep
            # whatever is worth capturing.
            return (self._split_by_numerals(string, keep_formatting)
                    if self._should_capture(string, keep_formatting) else [])
        unparsed, known, unknown = match.groups()
        splitted = [known] if self._should_capture(known, keep_formatting) else []
        if unparsed and self._should_capture(unparsed, keep_formatting):
            splitted = self._split_by_numerals(unparsed, keep_formatting) + splitted
        if unknown:
            splitted.extend(self._split_by_known_words(unknown, keep_formatting))
        return splitted
    def _split_by_numerals(self, string, keep_formatting):
        """Split *string* at digit runs, keeping only capturable pieces."""
        return [token for token in NUMERAL_PATTERN.split(string)
                if self._should_capture(token, keep_formatting)]
    def _should_capture(self, token, keep_formatting):
        # Keep a token when formatting is preserved, it is a separator, or it
        # contains at least one word character.
        return (
            keep_formatting or
            (token in ALWAYS_KEEP_TOKENS) or
            KEEP_TOKEN_PATTERN.match(token)
        )
    def _get_sorted_words_from_cache(self):
        """Known words sorted longest-first so regex alternation is greedy."""
        if (
            self._settings.registry_key not in self._sorted_words_cache or
            self.info['name'] not in self._sorted_words_cache[self._settings.registry_key]
        ):
            self._sorted_words_cache.setdefault(
                self._settings.registry_key, {})[self.info['name']] = \
                sorted([key for key in self], key=len, reverse=True)
        return self._sorted_words_cache[self._settings.registry_key][self.info['name']]
    def _get_split_regex_cache(self):
        """Lazily build and cache the known-word splitting regex."""
        if (
            self._settings.registry_key not in self._split_regex_cache or
            self.info['name'] not in self._split_regex_cache[self._settings.registry_key]
        ):
            self._construct_split_regex()
        return self._split_regex_cache[self._settings.registry_key][self.info['name']]
    def _construct_split_regex(self):
        known_words_group = "|".join(map(re.escape, self._get_sorted_words_from_cache()))
        if self._no_word_spacing:
            # Locales without word spacing: a known word may appear anywhere.
            regex = r"^(.*?)({})(.*)$".format(known_words_group)
        else:
            # Otherwise require a non-word boundary around the known word.
            regex = r"^(.*?(?:\A|\W|_|\d))({})((?:\Z|\W|_|\d).*)$".format(known_words_group)
        self._split_regex_cache.setdefault(
            self._settings.registry_key, {})[self.info['name']] = \
            re.compile(regex, re.UNICODE | re.IGNORECASE)
    def _get_sorted_relative_strings_from_cache(self):
        """Relative-regex strings, parentheses stripped, sorted longest-first."""
        if (
            self._settings.registry_key not in self._sorted_relative_strings_cache or
            self.info['name'] not in self._sorted_relative_strings_cache[self._settings.registry_key]
        ):
            self._sorted_relative_strings_cache.setdefault(
                self._settings.registry_key, {})[self.info['name']] = \
                sorted([PARENTHESES_PATTERN.sub('', key) for key in
                        self._relative_strings], key=len, reverse=True)
        return self._sorted_relative_strings_cache[self._settings.registry_key][self.info['name']]
    def _get_split_relative_regex_cache(self):
        """Lazily build and cache the relative-expression splitting regex."""
        if (
            self._settings.registry_key not in self._split_relative_regex_cache or
            self.info['name'] not in self._split_relative_regex_cache[self._settings.registry_key]
        ):
            self._construct_split_relative_regex()
        return self._split_relative_regex_cache[self._settings.registry_key][self.info['name']]
    def _construct_split_relative_regex(self):
        known_relative_strings_group = "|".join(self._get_sorted_relative_strings_from_cache())
        if self._no_word_spacing:
            regex = "({})".format(known_relative_strings_group)
        else:
            regex = "(?<=(?:\\A|\\W|_))({})(?=(?:\\Z|\\W|_))".format(known_relative_strings_group)
        self._split_relative_regex_cache.setdefault(
            self._settings.registry_key, {})[self.info['name']] = \
            re.compile(regex, re.UNICODE | re.IGNORECASE)
    def _get_match_relative_regex_cache(self):
        """Lazily build and cache the full-token relative-expression matcher."""
        if (
            self._settings.registry_key not in self._match_relative_regex_cache or
            self.info['name'] not in self._match_relative_regex_cache[self._settings.registry_key]
        ):
            self._construct_match_relative_regex()
        return self._match_relative_regex_cache[self._settings.registry_key][self.info['name']]
    def _construct_match_relative_regex(self):
        known_relative_strings_group = "|".join(self._get_sorted_relative_strings_from_cache())
        regex = "^({})$".format(known_relative_strings_group)
        self._match_relative_regex_cache.setdefault(
            self._settings.registry_key, {})[self.info['name']] = \
            re.compile(regex, re.UNICODE | re.IGNORECASE)
class NormalizedDictionary(Dictionary):
    """Dictionary variant whose keys are unicode-normalized after construction."""
    def __init__(self, locale_info, settings=None):
        super(NormalizedDictionary, self).__init__(locale_info, settings)
        self._normalize()
    def _normalize(self):
        # Re-key the translation dictionary with unicode-normalized forms.
        new_dict = {}
        conflicting_keys = []
        for key, value in self._dictionary.items():
            normalized = normalize_unicode(key)
            # When normalization collides with an existing key, defer the
            # decision to the resolution pass below.
            if key != normalized and normalized in self._dictionary:
                conflicting_keys.append(key)
            else:
                new_dict[normalized] = value
        for key in conflicting_keys:
            normalized = normalize_unicode(key)
            # On collision, only 'skip'/'pertain' words are allowed to win.
            if key in (self.info.get('skip', []) + self.info.get('pertain', [])):
                new_dict[normalized] = self._dictionary[key]
        self._dictionary = new_dict
self._relative_strings = list(map(normalize_unicode, self._relative_strings)) | dateparser/languages/dictionary.py | from __future__ import unicode_literals
from itertools import chain
from operator import methodcaller
import regex as re
from six.moves import zip_longest
from dateparser.utils import normalize_unicode
PARSER_HARDCODED_TOKENS = [":", ".", " ", "-", "/"]
PARSER_KNOWN_TOKENS = ["am", "pm", "UTC", "GMT", "Z"]
ALWAYS_KEEP_TOKENS = ["+"] + PARSER_HARDCODED_TOKENS
KNOWN_WORD_TOKENS = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday',
'sunday', 'january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november', 'december',
'year', 'month', 'week', 'day', 'hour', 'minute', 'second', 'ago',
'in', 'next', 'after', 'am', 'pm']
PARENTHESES_PATTERN = re.compile(r'[\(\)]')
NUMERAL_PATTERN = re.compile(r'(\d+)')
KEEP_TOKEN_PATTERN = re.compile(r"^.*[^\W_].*$", flags=re.U)
class UnknownTokenError(Exception):
pass
class Dictionary(object):
"""
Class that modifies and stores translations and handles splitting of date string.
:param locale_info:
Locale info (translation data) of the locale.
:type language_info: dict
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: a Dictionary instance.
"""
_split_regex_cache = {}
_sorted_words_cache = {}
_split_relative_regex_cache = {}
_sorted_relative_strings_cache = {}
_match_relative_regex_cache = {}
def __init__(self, locale_info, settings=None):
dictionary = {}
self._settings = settings
self.info = locale_info
if 'skip' in locale_info:
skip = map(methodcaller('lower'), locale_info['skip'])
dictionary.update(zip_longest(skip, [], fillvalue=None))
if 'pertain' in locale_info:
pertain = map(methodcaller('lower'), locale_info['pertain'])
dictionary.update(zip_longest(pertain, [], fillvalue=None))
for word in KNOWN_WORD_TOKENS:
if word in locale_info:
translations = map(methodcaller('lower'), locale_info[word])
dictionary.update(zip_longest(translations, [], fillvalue=word))
dictionary.update(zip_longest(ALWAYS_KEEP_TOKENS, ALWAYS_KEEP_TOKENS))
dictionary.update(zip_longest(map(methodcaller('lower'),
PARSER_KNOWN_TOKENS),
PARSER_KNOWN_TOKENS))
relative_type = locale_info.get('relative-type', {})
for key, value in relative_type.items():
relative_translations = map(methodcaller('lower'), value)
dictionary.update(zip_longest(relative_translations, [], fillvalue=key))
self._dictionary = dictionary
no_word_spacing = locale_info.get('no_word_spacing', 'False')
self._no_word_spacing = bool(eval(no_word_spacing))
relative_type_regex = locale_info.get("relative-type-regex", {})
self._relative_strings = list(chain(*relative_type_regex.values()))
def __contains__(self, key):
if key in self._settings.SKIP_TOKENS:
return True
return self._dictionary.__contains__(key)
def __getitem__(self, key):
if key in self._settings.SKIP_TOKENS:
return None
return self._dictionary.__getitem__(key)
def __iter__(self):
return chain(self._settings.SKIP_TOKENS, iter(self._dictionary))
def are_tokens_valid(self, tokens):
"""
Check if tokens are valid tokens for the locale.
:param tokens:
a list of string or unicode tokens.
:type tokens: list
:return: True if tokens are valid, False otherwise.
"""
has_only_keep_tokens = not set(tokens) - set(ALWAYS_KEEP_TOKENS)
if has_only_keep_tokens:
return False
match_relative_regex = self._get_match_relative_regex_cache()
for token in tokens:
if any([match_relative_regex.match(token),
token in self, token.isdigit()]):
continue
else:
return False
else:
return True
def split(self, string, keep_formatting=False):
"""
Split the date string using translations in locale info.
:param string:
Date string to be splitted.
:type string:
str|unicode
:param keep_formatting:
If True, retain formatting of the date string.
:type keep_formatting: bool
:return: A list of string tokens formed after splitting the date string.
"""
if not string:
return string
split_relative_regex = self._get_split_relative_regex_cache()
match_relative_regex = self._get_match_relative_regex_cache()
tokens = split_relative_regex.split(string)
for i, token in enumerate(tokens):
if match_relative_regex.match(token):
tokens[i] = [token]
continue
tokens[i] = self._split_by_known_words(token, keep_formatting)
return list(filter(bool, chain(*tokens)))
def _split_by_known_words(self, string, keep_formatting):
if not string:
return string
regex = self._get_split_regex_cache()
match = regex.match(string)
if not match:
return (self._split_by_numerals(string, keep_formatting)
if self._should_capture(string, keep_formatting) else [])
unparsed, known, unknown = match.groups()
splitted = [known] if self._should_capture(known, keep_formatting) else []
if unparsed and self._should_capture(unparsed, keep_formatting):
splitted = self._split_by_numerals(unparsed, keep_formatting) + splitted
if unknown:
splitted.extend(self._split_by_known_words(unknown, keep_formatting))
return splitted
def _split_by_numerals(self, string, keep_formatting):
return [token for token in NUMERAL_PATTERN.split(string)
if self._should_capture(token, keep_formatting)]
def _should_capture(self, token, keep_formatting):
return (
keep_formatting or
(token in ALWAYS_KEEP_TOKENS) or
KEEP_TOKEN_PATTERN.match(token)
)
def _get_sorted_words_from_cache(self):
if (
self._settings.registry_key not in self._sorted_words_cache or
self.info['name'] not in self._sorted_words_cache[self._settings.registry_key]
):
self._sorted_words_cache.setdefault(
self._settings.registry_key, {})[self.info['name']] = \
sorted([key for key in self], key=len, reverse=True)
return self._sorted_words_cache[self._settings.registry_key][self.info['name']]
def _get_split_regex_cache(self):
if (
self._settings.registry_key not in self._split_regex_cache or
self.info['name'] not in self._split_regex_cache[self._settings.registry_key]
):
self._construct_split_regex()
return self._split_regex_cache[self._settings.registry_key][self.info['name']]
def _construct_split_regex(self):
known_words_group = "|".join(map(re.escape, self._get_sorted_words_from_cache()))
if self._no_word_spacing:
regex = r"^(.*?)({})(.*)$".format(known_words_group)
else:
regex = r"^(.*?(?:\A|\W|_|\d))({})((?:\Z|\W|_|\d).*)$".format(known_words_group)
self._split_regex_cache.setdefault(
self._settings.registry_key, {})[self.info['name']] = \
re.compile(regex, re.UNICODE | re.IGNORECASE)
def _get_sorted_relative_strings_from_cache(self):
if (
self._settings.registry_key not in self._sorted_relative_strings_cache or
self.info['name'] not in self._sorted_relative_strings_cache[self._settings.registry_key]
):
self._sorted_relative_strings_cache.setdefault(
self._settings.registry_key, {})[self.info['name']] = \
sorted([PARENTHESES_PATTERN.sub('', key) for key in
self._relative_strings], key=len, reverse=True)
return self._sorted_relative_strings_cache[self._settings.registry_key][self.info['name']]
def _get_split_relative_regex_cache(self):
if (
self._settings.registry_key not in self._split_relative_regex_cache or
self.info['name'] not in self._split_relative_regex_cache[self._settings.registry_key]
):
self._construct_split_relative_regex()
return self._split_relative_regex_cache[self._settings.registry_key][self.info['name']]
def _construct_split_relative_regex(self):
known_relative_strings_group = "|".join(self._get_sorted_relative_strings_from_cache())
if self._no_word_spacing:
regex = "({})".format(known_relative_strings_group)
else:
regex = "(?<=(?:\\A|\\W|_))({})(?=(?:\\Z|\\W|_))".format(known_relative_strings_group)
self._split_relative_regex_cache.setdefault(
self._settings.registry_key, {})[self.info['name']] = \
re.compile(regex, re.UNICODE | re.IGNORECASE)
def _get_match_relative_regex_cache(self):
if (
self._settings.registry_key not in self._match_relative_regex_cache or
self.info['name'] not in self._match_relative_regex_cache[self._settings.registry_key]
):
self._construct_match_relative_regex()
return self._match_relative_regex_cache[self._settings.registry_key][self.info['name']]
def _construct_match_relative_regex(self):
known_relative_strings_group = "|".join(self._get_sorted_relative_strings_from_cache())
regex = "^({})$".format(known_relative_strings_group)
self._match_relative_regex_cache.setdefault(
self._settings.registry_key, {})[self.info['name']] = \
re.compile(regex, re.UNICODE | re.IGNORECASE)
class NormalizedDictionary(Dictionary):
def __init__(self, locale_info, settings=None):
super(NormalizedDictionary, self).__init__(locale_info, settings)
self._normalize()
def _normalize(self):
new_dict = {}
conflicting_keys = []
for key, value in self._dictionary.items():
normalized = normalize_unicode(key)
if key != normalized and normalized in self._dictionary:
conflicting_keys.append(key)
else:
new_dict[normalized] = value
for key in conflicting_keys:
normalized = normalize_unicode(key)
if key in (self.info.get('skip', []) + self.info.get('pertain', [])):
new_dict[normalized] = self._dictionary[key]
self._dictionary = new_dict
self._relative_strings = list(map(normalize_unicode, self._relative_strings)) | 0.755276 | 0.131954 |
import time
import threading
import traceback
from thread import error as threadError
from lib.core import log
from lib.core.data import kb
from lib.core.settings import PYVERSION
from lib.core.exception import ZEROScanConnectionException
from lib.core.exception import ZEROScanThreadException
from lib.core.exception import ZEROScanValueException
def runThreads(numThreads, threadFunction, forwardException=True, startThreadMsg=True):
    """Run `threadFunction` in `numThreads` daemon threads and wait for all to finish.

    Shared state in `kb` (multiThreadMode/threadContinue/threadException) is set
    for the duration of the run and reset in the `finally` block. Python 2 code
    (`xrange`, `except X, e`, bare `print`).
    """
    threads = []
    numThreads = int(numThreads)
    kb.multiThreadMode = True
    kb.threadContinue = True
    kb.threadException = False
    try:
        if numThreads > 1:
            if startThreadMsg:
                infoMsg = "starting %d threads" % numThreads
                log.process(infoMsg)
        else:
            # Single-threaded case: run inline and skip all thread bookkeeping.
            threadFunction()
            return
        for numThread in xrange(numThreads):
            thread = threading.Thread(target=exceptionHandledFunction, name=str(numThread), args=[threadFunction])
            setDaemon(thread)
            try:
                thread.start()
            except threadError, errMsg:
                # Could not spawn another thread; keep whatever already started.
                errMsg = "error occurred while starting new thread ('%s')" % errMsg
                log.error(errMsg)
                break
            threads.append(thread)
        # And wait for them to all finish
        # (polling loop instead of join() so Ctrl+C is handled promptly)
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.isAlive():
                    alive = True
                    time.sleep(0.1)
    except KeyboardInterrupt:
        print
        # Signal worker threads to stop cooperatively, then wait for them.
        kb.threadContinue = False
        kb.threadException = True
        if numThreads > 1:
            log.process("waiting for threads to finish (Ctrl+C was pressed)")
        try:
            while (threading.activeCount() > 1):
                pass
        except KeyboardInterrupt:
            # Second Ctrl+C: give up waiting entirely.
            raise ZEROScanThreadException("user aborted (Ctrl+C was pressed multiple times)")
        if forwardException:
            raise
    except (ZEROScanConnectionException, ZEROScanValueException), errMsg:
        print
        kb.threadException = True
        log.process("thread %s: %s" % (threading.currentThread().getName(), errMsg))
    except:
        # Unexpected error: report with full traceback (import done lazily to
        # avoid a circular dependency at module load).
        from lib.core.common import unhandledExceptionMessage
        print
        kb.threadException = True
        errMsg = unhandledExceptionMessage()
        log.error("thread %s: %s" % (threading.currentThread().getName(), errMsg))
        traceback.print_exc()
    finally:
        # Always restore the shared flags, whatever happened above.
        kb.multiThreadMode = False
        kb.bruteMode = False
        kb.threadContinue = True
        kb.threadException = False
# Foreground (non-daemon) threads keep the process alive until they all end;
# daemon threads are killed as soon as the main thread exits.
def setDaemon(thread):
    """Mark `thread` as a daemon thread, using the API suited to the runtime."""
    # Reference: http://stackoverflow.com/questions/190010/daemon-threads-explanation
    # Fixed: the original compared version strings lexically
    # (PYVERSION >= "2.6"), which misorders two-digit minors, e.g.
    # "2.10" < "2.6". Compare numeric (major, minor) components instead.
    try:
        version = tuple(int(p) for p in PYVERSION.split(".")[:2])
    except ValueError:
        # Unparsable version string: assume a modern interpreter.
        version = (2, 6)
    if version >= (2, 6):
        thread.daemon = True
    else:
        thread.setDaemon(True)
def exceptionHandledFunction(threadFunction):
    """Thread target wrapper: run `threadFunction`, logging any exception."""
    try:
        threadFunction()
    except KeyboardInterrupt:
        # Flag the shared state so sibling threads stop cooperatively,
        # then propagate the interrupt.
        kb.threadContinue = False
        kb.threadException = True
        raise
    except Exception, errMsg:
        # thread is just going to be silently killed
log.error("thread %s: %s" % (threading.currentThread().getName(), errMsg)) | lib/core/threads.py |
import time
import threading
import traceback
from thread import error as threadError
from lib.core import log
from lib.core.data import kb
from lib.core.settings import PYVERSION
from lib.core.exception import ZEROScanConnectionException
from lib.core.exception import ZEROScanThreadException
from lib.core.exception import ZEROScanValueException
def runThreads(numThreads, threadFunction, forwardException=True, startThreadMsg=True):
threads = []
numThreads = int(numThreads)
kb.multiThreadMode = True
kb.threadContinue = True
kb.threadException = False
try:
if numThreads > 1:
if startThreadMsg:
infoMsg = "starting %d threads" % numThreads
log.process(infoMsg)
else:
threadFunction()
return
for numThread in xrange(numThreads):
thread = threading.Thread(target=exceptionHandledFunction, name=str(numThread), args=[threadFunction])
setDaemon(thread)
try:
thread.start()
except threadError, errMsg:
errMsg = "error occurred while starting new thread ('%s')" % errMsg
log.error(errMsg)
break
threads.append(thread)
# And wait for them to all finish
alive = True
while alive:
alive = False
for thread in threads:
if thread.isAlive():
alive = True
time.sleep(0.1)
except KeyboardInterrupt:
print
kb.threadContinue = False
kb.threadException = True
if numThreads > 1:
log.process("waiting for threads to finish (Ctrl+C was pressed)")
try:
while (threading.activeCount() > 1):
pass
except KeyboardInterrupt:
raise ZEROScanThreadException("user aborted (Ctrl+C was pressed multiple times)")
if forwardException:
raise
except (ZEROScanConnectionException, ZEROScanValueException), errMsg:
print
kb.threadException = True
log.process("thread %s: %s" % (threading.currentThread().getName(), errMsg))
except:
from lib.core.common import unhandledExceptionMessage
print
kb.threadException = True
errMsg = unhandledExceptionMessage()
log.error("thread %s: %s" % (threading.currentThread().getName(), errMsg))
traceback.print_exc()
finally:
kb.multiThreadMode = False
kb.bruteMode = False
kb.threadContinue = True
kb.threadException = False
#前台线程(默认),都终止才终止;后台线程,前终止后立即终止
def setDaemon(thread):
# Reference: http://stackoverflow.com/questions/190010/daemon-threads-explanation
if PYVERSION >= "2.6":
thread.daemon = True
else:
thread.setDaemon(True)
def exceptionHandledFunction(threadFunction):
try:
threadFunction()
except KeyboardInterrupt:
kb.threadContinue = False
kb.threadException = True
raise
except Exception, errMsg:
# thread is just going to be silently killed
log.error("thread %s: %s" % (threading.currentThread().getName(), errMsg)) | 0.265024 | 0.054049 |
from rich.progress import Progress
import argparse
import os
import numpy as np
import torch
import torchaudio
from spectrogram import Spectrogram
def is_audio_file(name):
    """Return True if *name* has a supported audio extension (.mp3/.wav), case-insensitively."""
    # endswith accepts a tuple of suffixes: one call instead of an `or` chain.
    return name.lower().endswith(('.mp3', '.wav'))
def save(data, path, **kwargs):
    """Persist tensor *data* to *path* after moving it to the CPU.

    Keyword args:
        clone: when truthy, save a clone instead of the original tensor.
        pt: when truthy, save with torch.save; otherwise with numpy.save
            (which appends '.npy' to the path).
    """
    tensor = data.clone() if kwargs.get('clone') else data
    tensor = tensor.cpu()
    if kwargs.get('pt'):
        torch.save(tensor, path)
    else:
        np.save(path, tensor)
def process(f, idx, output_dir, segment_length, segment_spacing, sample_rate, trim=0, transform=None, one_file=False,
            progress: Progress = None, total=None, pt=True):
    """Split one audio file into fixed-length segments and save them.

    Args:
        f: Path of the audio file to load.
        idx: Index of the file, used in output file names and progress display.
        output_dir: Directory receiving the segment files.
        segment_length: Segment length in ms.
        segment_spacing: Gap skipped after each segment, in ms.
        sample_rate: Target sample rate; the input is resampled if it differs.
        trim: ms dropped from start and end — an int (both ends) or a tuple.
        transform: Optional callable applied to each segment (e.g. Spectrogram).
        one_file: Save all segments of this file into a single output file.
        progress: Optional rich Progress for per-file reporting.
        total: Total number of files (progress label only).
        pt: Save as .pt (PyTorch) instead of .npy (NumPy).

    NOTE(review): relies on the module-level `device` assigned in the
    __main__ block — confirm before importing this function elsewhere.
    """
    if progress is not None:
        task = progress.add_task(f'{os.path.split(f)[-1]} ({idx + 1}/{total})', start=False)
    waveform, sr = torchaudio.load(f)
    # Take the first channel if more than one
    waveform = waveform[0, :].to(device)
    if sr != sample_rate:
        waveform = torchaudio.transforms.Resample(sr, sample_rate)(waveform)
    # Convert ms values to absolute number of samples
    srps = sample_rate // 1000  # samples per millisecond
    segment_length *= srps
    segment_spacing *= srps
    if not isinstance(trim, tuple):
        trim = (abs(int(trim)) * srps, abs(int(trim)) * srps)
    else:
        trim = (abs(int(trim[0]) * srps), abs(int(trim[1])) * srps)
    # <------------------Content-------------------->
    # Trimmed of start | Segment | Skipped | ... | Segment | (Skipped) | Trimmed of end
    seg_len = segment_length + segment_spacing
    n = (waveform.size()[0] - trim[0] - trim[1]) // seg_len
    if progress is not None:
        progress.update(task, total=n, completed=0)
        progress.start_task(task)
    if one_file:
        # Fixed: slicing with waveform[trim[0]:-trim[1]] produced an EMPTY
        # tensor whenever only the end trim was zero; use an explicit end
        # index instead (a full-range slice is a harmless no-op).
        end = waveform.size()[0] - trim[1]
        waveform = waveform[trim[0]:end]
        waveform = waveform[:n * seg_len].view(-1, seg_len)[:, :segment_length]
        if transform is not None:
            waveform = transform(waveform)
        save(waveform, os.path.join(output_dir, f'{idx}.{"pt" if pt else "npy"}'), clone=True, pt=pt)
    else:
        offset = trim[0]
        for i in range(n):
            # Fixed: segment_length is already in samples at this point; the
            # original sliced waveform[offset:segment_length * srps], which
            # multiplied by srps a second time and ignored `offset` for the
            # end bound, so every segment after the first was wrong or empty.
            tmp = waveform[offset:offset + segment_length]
            if transform is not None:
                tmp = transform(tmp)
            save(tmp, os.path.join(output_dir, f'{idx}-{i}.{"pt" if pt else "npy"}'), pt=pt)
            offset += seg_len
            if progress is not None:
                progress.update(task, completed=i + 1)
    if progress is not None:
        progress.update(task, completed=n)
        progress.stop_task(task)
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Prepare a dataset from raw files')
    parser.add_argument('-d',
                        '--dir',
                        default='./raw',
                        help='Root directory for the files to be processed')
    parser.add_argument('-od',
                        '--output_dir',
                        default='./dataset',
                        help='Root directory the output')
    parser.add_argument(
        '-sr',
        '--sample_rate',
        type=int,
        default=16000,
        help=
        'Sample rate of the output, input will be resampled if it does not match'
    )
    parser.add_argument(
        '-c',
        '--cpu',
        action='store_true',
        help=
        'Force CPU to be used, otherwise CUDA will be selected, if available')
    parser.add_argument('-l',
                        '--segment_length',
                        default=2000,
                        type=int,
                        help='Length in ms of each extract')
    parser.add_argument('-s',
                        '--segment_spacing',
                        default=0,
                        type=int,
                        help='Distance or offset after each extract in ms')
    parser.add_argument(
        '-t',
        '--trim',
        default=0,
        type=int,
        help='Ignore the first and last <value> ms of the file')
    parser.add_argument('-pt',
                        default=False,
                        action='store_true',
                        help='Store as .pt (Pytorch) instead of .npy (Numpy)')
    parser.add_argument('--one_file',
                        default=False,
                        action='store_true',
                        help='Store all snippets of a file in one file')
    parser.add_argument('--as_spectrogram',
                        default=None,
                        help='Transform the waveform into either a `real` or `complex` spectrogram')
    args = parser.parse_args()
    # Fixed: the original expression
    #   'cpu' if args.cpu or torch.cuda.is_available() else 'cuda'
    # selected the CPU exactly when CUDA *was* available (and 'cuda' when it
    # was not). Use CUDA when present unless --cpu forces it off.
    device = 'cuda' if torch.cuda.is_available() and not args.cpu else 'cpu'
    if not os.path.isdir(args.dir):
        raise Exception(f'`{args.dir}` is not a directory')
    if not os.path.isdir(args.output_dir):
        raise Exception(f'`{args.output_dir}` is not a directory')
    # Process all files in args.dir or any of its subdirectories
    files = list()
    for (parent_path, _, filenames) in os.walk(args.dir):
        files += [
            os.path.join(parent_path, f) for f in filenames if is_audio_file(f)
        ]
    if len(files) == 0:
        print("No files found")
        exit(0)
    transform = Spectrogram(space=args.as_spectrogram, log2=True,
                            length=args.segment_length * args.sample_rate // 1000) \
        if args.as_spectrogram is not None else None
    with Progress() as progress:
        for idx, f in enumerate(files):
            process(f, idx, args.output_dir, args.segment_length, args.segment_spacing, args.sample_rate, args.trim,
                    transform=transform, one_file=args.one_file, pt=args.pt, total=len(files), progress=progress)
import argparse
import os
import numpy as np
import torch
import torchaudio
from spectrogram import Spectrogram
def is_audio_file(name):
return name.lower().endswith('.mp3') or name.lower().endswith('.wav')
def save(data, path, **kwargs):
if 'clone' in kwargs and kwargs['clone']:
data = data.clone()
s = data.cpu()
if 'pt' in kwargs and kwargs['pt']:
torch.save(s, path)
else:
np.save(path, s)
def process(f, idx, output_dir, segment_length, segment_spacing, sample_rate, trim=0, transform=None, one_file=False,
progress: Progress = None, total=None, pt=True):
if progress is not None:
task = progress.add_task(f'{os.path.split(f)[-1]} ({idx + 1}/{total})', start=False)
waveform, sr = torchaudio.load(f)
# Take the first channel if more than one
waveform = waveform[0, :].to(device)
if sr != sample_rate:
waveform = torchaudio.transforms.Resample(sr, sample_rate)(waveform)
# Convert ms values to absolute number of samples
srps = sample_rate // 1000 # Sample rate per second
segment_length *= srps
segment_spacing *= srps
if not isinstance(trim, tuple):
trim = (abs(int(trim)) * srps, abs(int(trim)) * srps)
else:
trim = (abs(int(trim[0]) * srps), abs(int(trim[1])) * srps)
# <------------------Content-------------------->
# Trimmed of start | Segment | Skipped | ... | Segment | (Skipped) | Trimmed of end
seg_len = segment_length + segment_spacing
n = (waveform.size()[0] - trim[0] - trim[1]) // seg_len
if progress is not None:
progress.update(task, total=n, completed=0)
progress.start_task(task)
if one_file:
if not trim[0] == trim[1] == 0:
waveform = waveform[trim[0]:-trim[1]]
waveform = waveform[:n * seg_len].view(-1, seg_len)[:, :segment_length]
if transform is not None:
waveform = transform(waveform)
save(waveform, os.path.join(output_dir, f'{idx}.{"pt" if pt else "npy"}'), clone=True, pt=pt)
else:
offset = trim[0]
for i in range(n):
tmp = waveform[offset:segment_length * srps]
if transform is not None:
tmp = transform(tmp)
save(tmp, os.path.join(output_dir, f'{idx}-{i}.{"pt" if pt else "npy"}'), pt=pt)
offset += seg_len
if progress is not None:
progress.update(task, completed=i)
if progress is not None:
progress.update(task, completed=n)
progress.stop_task(task)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Prepare a dataset from raw files')
parser.add_argument('-d',
'--dir',
default='./raw',
help='Root directory for the files to be processed')
parser.add_argument('-od',
'--output_dir',
default='./dataset',
help='Root directory the output')
parser.add_argument(
'-sr',
'--sample_rate',
type=int,
default=16000,
help=
'Sample rate of the output, input will be resampled if it does not match'
)
parser.add_argument(
'-c',
'--cpu',
action='store_true',
help=
'Force CPU to be used, otherwise CUDA will be selected, if available')
parser.add_argument('-l',
'--segment_length',
default=2000,
type=int,
help='Length in ms of each extract')
parser.add_argument('-s',
'--segment_spacing',
default=0,
type=int,
help='Distance or offset after each extract in ms')
parser.add_argument(
'-t',
'--trim',
default=0,
type=int,
help='Ignore the first and last <value> ms of the file')
parser.add_argument('-pt',
default=False,
action='store_true',
help='Store as .pt (Pytorch) instead of .npy (Numpy)')
parser.add_argument('--one_file',
default=False,
action='store_true',
help='Store all snippets of a file in one file')
parser.add_argument('--as_spectrogram',
default=None,
help='Transform the waveform into either a `real` or `complex` spectrogram')
args = parser.parse_args()
device = 'cpu' if args.cpu or torch.cuda.is_available() else 'cuda'
if not os.path.isdir(args.dir):
raise Exception(f'`{args.dir}` is not a directory')
if not os.path.isdir(args.output_dir):
raise Exception(f'`{args.output_dir}` is not a directory')
# Process all files in args.dir or any of its subdirectories
files = list()
for (parent_path, _, filenames) in os.walk(args.dir):
files += [
os.path.join(parent_path, f) for f in filenames if is_audio_file(f)
]
if len(files) == 0:
print("No files found")
exit(0)
transform = Spectrogram(space=args.as_spectrogram, log2=True,
length=args.segment_length * args.sample_rate // 1000) \
if args.as_spectrogram is not None else None
with Progress() as progress:
for idx, f in enumerate(files):
process(f, idx, args.output_dir, args.segment_length, args.segment_spacing, args.sample_rate, args.trim,
transform=transform, one_file=args.one_file, pt=args.pt, total=len(files), progress=progress) | 0.596316 | 0.166777 |
import lmfit
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import numpy as np
class VarSlider:
def __init__(self, ax, valname, valmin, valmax,
valinit = None, valstep = None, callback = None):
self.ax = ax
self.valname = valname
if valinit is None:
valinit = (valmax + valmin)/2
self.slider = Slider(self.ax, valname, valmin, valmax,
valinit = valinit, valstep = valstep)
if callback is not None:
self.cid = self.slider.on_changed(callback)
else:
self.cid = None
@property
def valmin(self):
return self.slider.valmin
@valmin.setter
def valmin(self, v):
self.slider.valmin = v
self.ax.set_xlim(left = v)
@property
def valmax(self):
return self.slider.valmax
@valmax.setter
def valmax(self, v):
self.slider.valmax = v
self.ax.set_xlim(right = v)
@property
def valstep(self):
return self.slider.valstep
@valstep.setter
def valstep(self, v):
self.slider.valstep = v
@property
def value(self):
return self.slider.val
@value.setter
def value(self, v):
self.slider.set_val(v)
class Ilmfit:
def __init__(self, model, params = None, data = None, bounds = None, x =
None, y = None):
self.fig, self.ax = plt.subplots(1, 1)
self.ax.set_xlabel('X')
self.ax.set_ylabel('Y')
self.fig.set_size_inches(8, 5)
plt.tight_layout()
self.fig.subplots_adjust(right = 0.55)
self.bounds = bounds
self.data = data
if data is not None:
self.x, self.y = data.T
self.data_line = self.ax.plot(self.x, self.y, label = 'Data',
linestyle = '--', marker = '.')
elif x is not None:
self.x = x
if y is not None:
self.y = y
self.data_line = self.ax.plot(self.x, self.y, label = 'Data',
linestyle = '--', marker = '.')
else:
self.y = None
self.data_line = None
else:
self.x = np.linspace(-5, 5, 100)
self.y = None
self.data_line = None
self.model = model
self.model_lines = []
if len(self.model.components) > 1:
self.model_lines += [self.ax.plot([], [], label = c.name)[0] for c in
self.model.components]
self.model_lines.append(self.ax.plot([], [], label = 'Total')[0])
if params is not None:
self.params = params
else:
self.params = model.make_params()
self.sliders = self.make_sliders()
self.ax.legend()
self.autoscale = True
self.update()
def make_sliders(self):
params = [v for v in self.params.values() if not v.expr]
sliders = {}
ys = np.linspace(0.9, 0.1, len(params) + 1)
for p, y in zip(params, ys):
ax = self.fig.add_axes([0.65, y, 0.20, 0.03])
try:
valmin, valmax = self.bounds.get(p.name)
except:
valmin = p.min if ~np.isinf(p.min) else -1
valmax = p.max if ~np.isinf(p.max) else 1
valinit = p.value if ~np.isinf(p.value) else (valmin + valmax)/2
sliders[p.name] = VarSlider(ax, p.name, valmin, valmax,
valinit = valinit, callback = self.update)
button_ax = self.fig.add_axes([0.65, 0.1, 0.20, 0.09])
self.button = Button(button_ax, 'Fit')
self.button.on_clicked(self.fit)
return sliders
@property
def independent_var(self):
return {self.model.independent_vars[0] : self.x}
def update(self, *args, **kwargs):
for k, p in self.sliders.items():
self.params[k].set(value = p.value)
if len(self.model.components) > 1:
for c, l in zip(self.model.components, self.model_lines):
l.set_data(self.x, c.eval(params = self.params,
**self.independent_var))
self.model_lines[-1].set_data(self.x, self.model.eval(params =
self.params, **self.independent_var))
# autoscale
if self.autoscale:
self.ax.relim()
self.ax.autoscale_view()
pass
def fit(self, *args, **kwargs):
self.res = self.model.fit(self.y, params = self.params,
**self.independent_var, **kwargs)
for p, v in self.res.best_values.items():
self.sliders[p].value = v
self.update()
def plot_res(res):
# component plotting for composite models
fig, (ax, ax2) = plt.subplots(2, 1, gridspec_kw = {'height_ratios': [3, 1]})
x = res.userkws[res.model.independent_vars[0]]
y = res.data
ax.plot(x, y, linestyle = '', marker = '.', label = 'Raw')
for m in res.components:
my = m.eval(params = res.params, **res.userkws)
ax.plot(x, my, label = m.name)
if len(res.components) > 1:
ax.plot(x, res.eval(**res.userkws), linestyle = '--', label = 'Total Fit')
ax2.plot(x, res.eval(**res.userkws) - y, marker = '.', linestyle = '', label = 'Residual')
ax.legend()
ax2.legend()
plt.tight_layout() | ilmfit.py |
import lmfit
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import numpy as np
class VarSlider:
def __init__(self, ax, valname, valmin, valmax,
valinit = None, valstep = None, callback = None):
self.ax = ax
self.valname = valname
if valinit is None:
valinit = (valmax + valmin)/2
self.slider = Slider(self.ax, valname, valmin, valmax,
valinit = valinit, valstep = valstep)
if callback is not None:
self.cid = self.slider.on_changed(callback)
else:
self.cid = None
@property
def valmin(self):
return self.slider.valmin
@valmin.setter
def valmin(self, v):
self.slider.valmin = v
self.ax.set_xlim(left = v)
@property
def valmax(self):
return self.slider.valmax
@valmax.setter
def valmax(self, v):
self.slider.valmax = v
self.ax.set_xlim(right = v)
@property
def valstep(self):
return self.slider.valstep
@valstep.setter
def valstep(self, v):
self.slider.valstep = v
@property
def value(self):
return self.slider.val
@value.setter
def value(self, v):
self.slider.set_val(v)
class Ilmfit:
def __init__(self, model, params = None, data = None, bounds = None, x =
None, y = None):
self.fig, self.ax = plt.subplots(1, 1)
self.ax.set_xlabel('X')
self.ax.set_ylabel('Y')
self.fig.set_size_inches(8, 5)
plt.tight_layout()
self.fig.subplots_adjust(right = 0.55)
self.bounds = bounds
self.data = data
if data is not None:
self.x, self.y = data.T
self.data_line = self.ax.plot(self.x, self.y, label = 'Data',
linestyle = '--', marker = '.')
elif x is not None:
self.x = x
if y is not None:
self.y = y
self.data_line = self.ax.plot(self.x, self.y, label = 'Data',
linestyle = '--', marker = '.')
else:
self.y = None
self.data_line = None
else:
self.x = np.linspace(-5, 5, 100)
self.y = None
self.data_line = None
self.model = model
self.model_lines = []
if len(self.model.components) > 1:
self.model_lines += [self.ax.plot([], [], label = c.name)[0] for c in
self.model.components]
self.model_lines.append(self.ax.plot([], [], label = 'Total')[0])
if params is not None:
self.params = params
else:
self.params = model.make_params()
self.sliders = self.make_sliders()
self.ax.legend()
self.autoscale = True
self.update()
def make_sliders(self):
params = [v for v in self.params.values() if not v.expr]
sliders = {}
ys = np.linspace(0.9, 0.1, len(params) + 1)
for p, y in zip(params, ys):
ax = self.fig.add_axes([0.65, y, 0.20, 0.03])
try:
valmin, valmax = self.bounds.get(p.name)
except:
valmin = p.min if ~np.isinf(p.min) else -1
valmax = p.max if ~np.isinf(p.max) else 1
valinit = p.value if ~np.isinf(p.value) else (valmin + valmax)/2
sliders[p.name] = VarSlider(ax, p.name, valmin, valmax,
valinit = valinit, callback = self.update)
button_ax = self.fig.add_axes([0.65, 0.1, 0.20, 0.09])
self.button = Button(button_ax, 'Fit')
self.button.on_clicked(self.fit)
return sliders
@property
def independent_var(self):
return {self.model.independent_vars[0] : self.x}
def update(self, *args, **kwargs):
for k, p in self.sliders.items():
self.params[k].set(value = p.value)
if len(self.model.components) > 1:
for c, l in zip(self.model.components, self.model_lines):
l.set_data(self.x, c.eval(params = self.params,
**self.independent_var))
self.model_lines[-1].set_data(self.x, self.model.eval(params =
self.params, **self.independent_var))
# autoscale
if self.autoscale:
self.ax.relim()
self.ax.autoscale_view()
pass
def fit(self, *args, **kwargs):
self.res = self.model.fit(self.y, params = self.params,
**self.independent_var, **kwargs)
for p, v in self.res.best_values.items():
self.sliders[p].value = v
self.update()
def plot_res(res):
# component plotting for composite models
fig, (ax, ax2) = plt.subplots(2, 1, gridspec_kw = {'height_ratios': [3, 1]})
x = res.userkws[res.model.independent_vars[0]]
y = res.data
ax.plot(x, y, linestyle = '', marker = '.', label = 'Raw')
for m in res.components:
my = m.eval(params = res.params, **res.userkws)
ax.plot(x, my, label = m.name)
if len(res.components) > 1:
ax.plot(x, res.eval(**res.userkws), linestyle = '--', label = 'Total Fit')
ax2.plot(x, res.eval(**res.userkws) - y, marker = '.', linestyle = '', label = 'Residual')
ax.legend()
ax2.legend()
plt.tight_layout() | 0.673836 | 0.272996 |
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class VGG(nn.Module):
"""A PyTorch Module implementing a VGG-based neural network.
The model contains 4 convolutional blocks (see :class:`ConvBlock`),
so 8 convolutional layers in total. After the convolutional layers,
the feature maps are average-pooled in the spatial dimensions. The
final fully-connected layer follows.
Args:
n_classes (int): Number of target classes.
"""
def __init__(self, n_classes):
super(VGG, self).__init__()
self.conv_layers = nn.Sequential(
ConvBlock(in_channels=1, out_channels=64),
ConvBlock(in_channels=64, out_channels=128),
ConvBlock(in_channels=128, out_channels=256),
ConvBlock(in_channels=256, out_channels=512)
)
self.classifier = nn.Linear(in_features=512, out_features=n_classes)
def forward(self, x):
"""Apply this module's forward pass."""
x = self.conv_layers(x)
x = F.adaptive_avg_pool2d(x, (1, 1)).view(x.shape[:2])
return self.classifier(x)
class ConvBlock(nn.Module):
"""A PyTorch Module implementing a convolutional block.
The block is comprised of two convolutional layers followed by batch
normalization, a ReLU non-linearity, and then an optional
max-pooling operation.
Args:
in_channels (int): Number of input channels (feature maps).
out_channels (int): Number of output channels (feature maps).
pool_size (tuple): Size of the max pooling kernel. A value of
``(1, 1)`` disables pooling.
kernel_size (int): Size of the convolving kernel.
**args: Keyword arguments to pass to :func:`torch.nn.Conv2d`.
"""
def __init__(self, in_channels, out_channels,
pool_size=(2, 2), kernel_size=3, **args):
super(ConvBlock, self).__init__()
padding = kernel_size // 2
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size,
padding=padding, bias=False, **args)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size,
padding=padding, bias=False, **args)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.pool_size = pool_size
def forward(self, x):
"""Apply this module's forward pass."""
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
if self.pool_size != (1, 1):
x = F.max_pool2d(x, self.pool_size)
return x
def create_model(model_type, n_classes):
"""Instantiate the specified PyTorch model.
Args:
model_type (str): Name of the model. Either ``'vgg'`` or
``'densenet'`` (case-insensitive).
n_classes (int): Number of target classes.
Returns:
nn.Module: An instance of the specified PyTorch model.
"""
if model_type.lower() == 'vgg':
model = VGG(n_classes)
elif model_type.lower() == 'densenet':
model = models.densenet201(pretrained=True)
model.classifier = nn.Linear(model.classifier.in_features, n_classes)
else:
raise ValueError(f'Unrecognized model type: {model_type}')
# Save the arguments that were passed to create the model
model.creation_args = (model_type, n_classes)
return model | ood_audio/pytorch/models.py | import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class VGG(nn.Module):
"""A PyTorch Module implementing a VGG-based neural network.
The model contains 4 convolutional blocks (see :class:`ConvBlock`),
so 8 convolutional layers in total. After the convolutional layers,
the feature maps are average-pooled in the spatial dimensions. The
final fully-connected layer follows.
Args:
n_classes (int): Number of target classes.
"""
def __init__(self, n_classes):
super(VGG, self).__init__()
self.conv_layers = nn.Sequential(
ConvBlock(in_channels=1, out_channels=64),
ConvBlock(in_channels=64, out_channels=128),
ConvBlock(in_channels=128, out_channels=256),
ConvBlock(in_channels=256, out_channels=512)
)
self.classifier = nn.Linear(in_features=512, out_features=n_classes)
def forward(self, x):
"""Apply this module's forward pass."""
x = self.conv_layers(x)
x = F.adaptive_avg_pool2d(x, (1, 1)).view(x.shape[:2])
return self.classifier(x)
class ConvBlock(nn.Module):
"""A PyTorch Module implementing a convolutional block.
The block is comprised of two convolutional layers followed by batch
normalization, a ReLU non-linearity, and then an optional
max-pooling operation.
Args:
in_channels (int): Number of input channels (feature maps).
out_channels (int): Number of output channels (feature maps).
pool_size (tuple): Size of the max pooling kernel. A value of
``(1, 1)`` disables pooling.
kernel_size (int): Size of the convolving kernel.
**args: Keyword arguments to pass to :func:`torch.nn.Conv2d`.
"""
def __init__(self, in_channels, out_channels,
pool_size=(2, 2), kernel_size=3, **args):
super(ConvBlock, self).__init__()
padding = kernel_size // 2
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size,
padding=padding, bias=False, **args)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size,
padding=padding, bias=False, **args)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.pool_size = pool_size
def forward(self, x):
"""Apply this module's forward pass."""
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
if self.pool_size != (1, 1):
x = F.max_pool2d(x, self.pool_size)
return x
def create_model(model_type, n_classes):
"""Instantiate the specified PyTorch model.
Args:
model_type (str): Name of the model. Either ``'vgg'`` or
``'densenet'`` (case-insensitive).
n_classes (int): Number of target classes.
Returns:
nn.Module: An instance of the specified PyTorch model.
"""
if model_type.lower() == 'vgg':
model = VGG(n_classes)
elif model_type.lower() == 'densenet':
model = models.densenet201(pretrained=True)
model.classifier = nn.Linear(model.classifier.in_features, n_classes)
else:
raise ValueError(f'Unrecognized model type: {model_type}')
# Save the arguments that were passed to create the model
model.creation_args = (model_type, n_classes)
return model | 0.973919 | 0.757705 |
from app.core import base
from fastapi import Request
from pydantic import BaseModel
import jwt
class JWTConfig(BaseModel):
"""Configuration class for configuring JWT authentication
Attributes:
algorithms: A comma separated list of approved algorithms to use
audience: The API audience
jwks: The URL to a JWKS endpoint for fetching keys
issuer: The token issuer
"""
algorithms: str = "RS256"
audience: str = ""
jwks: str = ""
issuer: str = ""
class JWTAuth(base.BaseAuth):
"""Provides an interface for authenticating requests with JWT tokens."""
def authenticate(self, request: Request) -> bool:
assert self.settings.jwt is not None
header: str = request.headers.get("Authorization", None)
if not header:
return False
try:
token: str = header.split("Bearer ")[1]
except IndexError:
return False
signing_key = self.client().get_signing_key_from_jwt(token).key
try:
jwt.decode(
token,
signing_key,
algorithms=self.settings.jwt.algorithms.split(", "),
audience=self.settings.jwt.audience,
issuer=self.settings.jwt.issuer,
)
except jwt.exceptions.DecodeError:
return False
return True
def client(self) -> jwt.PyJWKClient:
"""Creates a new `PyJWKClient` instance.
Returns:
A configured `PyJWKClient` instance.
"""
assert self.settings.jwt is not None
return jwt.PyJWKClient(self.settings.jwt.jwks)
@staticmethod
def validate(settings) -> None:
if settings.jwt is None:
raise base.ValidationError(
"Must set environment variables for JWT"
)
elif not settings.jwt.audience:
raise base.ValidationError(
"Must set the JWT audience environment variable"
)
elif not settings.jwt.jwks:
raise base.ValidationError(
"Must set the JWT JWKS environment variable"
)
elif not settings.jwt.issuer:
raise base.ValidationError(
"Must set the issuer environment variable"
) | app/core/auth/jwt.py | from app.core import base
from fastapi import Request
from pydantic import BaseModel
import jwt
class JWTConfig(BaseModel):
"""Configuration class for configuring JWT authentication
Attributes:
algorithms: A comma separated list of approved algorithms to use
audience: The API audience
jwks: The URL to a JWKS endpoint for fetching keys
issuer: The token issuer
"""
algorithms: str = "RS256"
audience: str = ""
jwks: str = ""
issuer: str = ""
class JWTAuth(base.BaseAuth):
"""Provides an interface for authenticating requests with JWT tokens."""
def authenticate(self, request: Request) -> bool:
assert self.settings.jwt is not None
header: str = request.headers.get("Authorization", None)
if not header:
return False
try:
token: str = header.split("Bearer ")[1]
except IndexError:
return False
signing_key = self.client().get_signing_key_from_jwt(token).key
try:
jwt.decode(
token,
signing_key,
algorithms=self.settings.jwt.algorithms.split(", "),
audience=self.settings.jwt.audience,
issuer=self.settings.jwt.issuer,
)
except jwt.exceptions.DecodeError:
return False
return True
def client(self) -> jwt.PyJWKClient:
"""Creates a new `PyJWKClient` instance.
Returns:
A configured `PyJWKClient` instance.
"""
assert self.settings.jwt is not None
return jwt.PyJWKClient(self.settings.jwt.jwks)
@staticmethod
def validate(settings) -> None:
if settings.jwt is None:
raise base.ValidationError(
"Must set environment variables for JWT"
)
elif not settings.jwt.audience:
raise base.ValidationError(
"Must set the JWT audience environment variable"
)
elif not settings.jwt.jwks:
raise base.ValidationError(
"Must set the JWT JWKS environment variable"
)
elif not settings.jwt.issuer:
raise base.ValidationError(
"Must set the issuer environment variable"
) | 0.877968 | 0.221435 |
from typing import Any, Type, Optional, Set
def _name(type_: Type) -> str:
return type_.__name__ if hasattr(type_, "__name__") else str(type_)
class DaciteError(Exception):
pass
class DaciteFieldError(DaciteError):
def __init__(self, field_path: Optional[str] = None):
super().__init__()
self.field_path = field_path
def update_path(self, parent_field_path: str) -> None:
if self.field_path:
self.field_path = f"{parent_field_path}.{self.field_path}"
else:
self.field_path = parent_field_path
class WrongTypeError(DaciteFieldError):
def __init__(self, field_type: Type, value: Any, field_path: Optional[str] = None) -> None:
super().__init__(field_path=field_path)
self.field_type = field_type
self.value = value
def __str__(self) -> str:
return (
f'wrong value type for field "{self.field_path}" - should be "{_name(self.field_type)}" '
f'instead of value "{self.value}" of type "{_name(type(self.value))}"'
)
class MissingValueError(DaciteFieldError):
def __init__(self, field_path: Optional[str] = None):
super().__init__(field_path=field_path)
def __str__(self) -> str:
return f'missing value for field "{self.field_path}"'
class UnionMatchError(WrongTypeError):
def __str__(self) -> str:
return (
f'can not match type "{_name(type(self.value))}" to any type '
f'of "{self.field_path}" union: {_name(self.field_type)}'
)
class ForwardReferenceError(DaciteError):
def __init__(self, message: str) -> None:
super().__init__()
self.message = message
def __str__(self) -> str:
return f"can not resolve forward reference: {self.message}"
class UnexpectedDataError(DaciteError):
def __init__(self, keys: Set[str]) -> None:
super().__init__()
self.keys = keys
def __str__(self) -> str:
formatted_keys = ", ".join(f'"{key}"' for key in self.keys)
return f"can not match {formatted_keys} to any data class field" | dacite/exceptions.py | from typing import Any, Type, Optional, Set
def _name(type_: Type) -> str:
return type_.__name__ if hasattr(type_, "__name__") else str(type_)
class DaciteError(Exception):
pass
class DaciteFieldError(DaciteError):
def __init__(self, field_path: Optional[str] = None):
super().__init__()
self.field_path = field_path
def update_path(self, parent_field_path: str) -> None:
if self.field_path:
self.field_path = f"{parent_field_path}.{self.field_path}"
else:
self.field_path = parent_field_path
class WrongTypeError(DaciteFieldError):
def __init__(self, field_type: Type, value: Any, field_path: Optional[str] = None) -> None:
super().__init__(field_path=field_path)
self.field_type = field_type
self.value = value
def __str__(self) -> str:
return (
f'wrong value type for field "{self.field_path}" - should be "{_name(self.field_type)}" '
f'instead of value "{self.value}" of type "{_name(type(self.value))}"'
)
class MissingValueError(DaciteFieldError):
def __init__(self, field_path: Optional[str] = None):
super().__init__(field_path=field_path)
def __str__(self) -> str:
return f'missing value for field "{self.field_path}"'
class UnionMatchError(WrongTypeError):
def __str__(self) -> str:
return (
f'can not match type "{_name(type(self.value))}" to any type '
f'of "{self.field_path}" union: {_name(self.field_type)}'
)
class ForwardReferenceError(DaciteError):
def __init__(self, message: str) -> None:
super().__init__()
self.message = message
def __str__(self) -> str:
return f"can not resolve forward reference: {self.message}"
class UnexpectedDataError(DaciteError):
def __init__(self, keys: Set[str]) -> None:
super().__init__()
self.keys = keys
def __str__(self) -> str:
formatted_keys = ", ".join(f'"{key}"' for key in self.keys)
return f"can not match {formatted_keys} to any data class field" | 0.925112 | 0.146606 |
import sys
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
import pythoncom
from win32com.server import util, connect
import win32com.server.policy
class MySite(axsite.AXSite):
def OnScriptError(self, error):
exc = error.GetExceptionInfo()
context, line, char = error.GetSourcePosition()
print " >Exception:", exc[1]
try:
st = error.GetSourceLineText()
except pythoncom.com_error:
st = None
if st is None: st = ""
text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
for line in text.splitlines():
print " >" + line
class MyCollection(util.Collection):
def _NewEnum(self):
print "Making new Enumerator"
return util.Collection._NewEnum(self)
class Test:
_public_methods_ = [ 'echo' ]
_public_attrs_ = ['collection', 'verbose']
def __init__(self):
self.verbose = 0
self.collection = util.wrap( MyCollection( [1,'Two',3] ))
self.last = ""
# self._connect_server_ = TestConnectServer(self)
def echo(self, *args):
self.last = ''.join(map(str, args))
if self.verbose:
for arg in args:
print arg,
print
# self._connect_server_.Broadcast(last)
#### Connections currently wont work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
_connect_interfaces_ = [IID_ITestEvents]
# The single public method that the client can call on us
# (ie, as a normal COM server, this exposes just this single method.
def __init__(self, object):
self.object = object
def Broadcast(self,arg):
# Simply broadcast a notification.
self._BroadcastNotify(self.NotifyDoneIt, (arg,))
def NotifyDoneIt(self, interface, arg):
interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.echo arg1
end sub
sub testcollection
test.verbose = 1
for each item in test.collection
test.echo "Collection item is", item
next
end sub
"""
PyScript = """\
print "PyScript is being parsed..."
prop = "Property Value"
def hello(arg1):
test.echo(arg1)
pass
def testcollection():
test.verbose = 1
# test.collection[1] = "New one"
for item in test.collection:
test.echo("Collection item is", item)
pass
"""
ErrScript = """\
bad code for everyone!
"""
def TestEngine(engineName, code, bShouldWork = 1):
echoer = Test()
model = {
'test' : util.wrap(echoer),
}
site = MySite(model)
engine = site._AddEngine(engineName)
engine.AddCode(code, axscript.SCRIPTTEXT_ISPERSISTENT)
try:
engine.Start()
finally:
if not bShouldWork:
engine.Close()
return
doTestEngine(engine, echoer)
# re-transition the engine back to the UNINITIALIZED state, a-la ASP.
engine.eScript.SetScriptState(axscript.SCRIPTSTATE_UNINITIALIZED)
engine.eScript.SetScriptSite(util.wrap(site))
print "restarting"
engine.Start()
# all done!
engine.Close()
def doTestEngine(engine, echoer):
# Now call into the scripts IDispatch
from win32com.client.dynamic import Dispatch
ob = Dispatch(engine.GetScriptDispatch())
try:
ob.hello("Goober")
except pythoncom.com_error, exc:
print "***** Calling 'hello' failed", exc
return
if echoer.last != "Goober":
print "***** Function call didnt set value correctly", repr(echoer.last)
if str(ob.prop) != "Property Value":
print "***** Property Value not correct - ", repr(ob.prop)
ob.testcollection()
# Now make sure my engines can evaluate stuff.
result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
if result != 2:
print "Engine could not evaluate '1+1' - said the result was", result
def dotestall():
for i in xrange(10):
TestEngine("Python", PyScript)
print sys.gettotalrefcount()
## print "Testing Exceptions"
## try:
## TestEngine("Python", ErrScript, 0)
## except pythoncom.com_error:
## pass
def testall():
dotestall()
pythoncom.CoUninitialize()
print "AXScript Host worked correctly - %d/%d COM objects left alive." % (pythoncom._GetInterfaceCount(), pythoncom._GetGatewayCount())
if __name__ == '__main__':
testall() | Src/StdLib/Lib/site-packages/win32comext/axscript/test/leakTest.py | import sys
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
import pythoncom
from win32com.server import util, connect
import win32com.server.policy
class MySite(axsite.AXSite):
def OnScriptError(self, error):
exc = error.GetExceptionInfo()
context, line, char = error.GetSourcePosition()
print " >Exception:", exc[1]
try:
st = error.GetSourceLineText()
except pythoncom.com_error:
st = None
if st is None: st = ""
text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
for line in text.splitlines():
print " >" + line
class MyCollection(util.Collection):
def _NewEnum(self):
print "Making new Enumerator"
return util.Collection._NewEnum(self)
class Test:
_public_methods_ = [ 'echo' ]
_public_attrs_ = ['collection', 'verbose']
def __init__(self):
self.verbose = 0
self.collection = util.wrap( MyCollection( [1,'Two',3] ))
self.last = ""
# self._connect_server_ = TestConnectServer(self)
def echo(self, *args):
self.last = ''.join(map(str, args))
if self.verbose:
for arg in args:
print arg,
print
# self._connect_server_.Broadcast(last)
#### Connections currently wont work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
_connect_interfaces_ = [IID_ITestEvents]
# The single public method that the client can call on us
# (ie, as a normal COM server, this exposes just this single method.
def __init__(self, object):
self.object = object
def Broadcast(self,arg):
# Simply broadcast a notification.
self._BroadcastNotify(self.NotifyDoneIt, (arg,))
def NotifyDoneIt(self, interface, arg):
interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.echo arg1
end sub
sub testcollection
test.verbose = 1
for each item in test.collection
test.echo "Collection item is", item
next
end sub
"""
PyScript = """\
print "PyScript is being parsed..."
prop = "Property Value"
def hello(arg1):
test.echo(arg1)
pass
def testcollection():
test.verbose = 1
# test.collection[1] = "New one"
for item in test.collection:
test.echo("Collection item is", item)
pass
"""
ErrScript = """\
bad code for everyone!
"""
def TestEngine(engineName, code, bShouldWork = 1):
echoer = Test()
model = {
'test' : util.wrap(echoer),
}
site = MySite(model)
engine = site._AddEngine(engineName)
engine.AddCode(code, axscript.SCRIPTTEXT_ISPERSISTENT)
try:
engine.Start()
finally:
if not bShouldWork:
engine.Close()
return
doTestEngine(engine, echoer)
# re-transition the engine back to the UNINITIALIZED state, a-la ASP.
engine.eScript.SetScriptState(axscript.SCRIPTSTATE_UNINITIALIZED)
engine.eScript.SetScriptSite(util.wrap(site))
print "restarting"
engine.Start()
# all done!
engine.Close()
def doTestEngine(engine, echoer):
# Now call into the scripts IDispatch
from win32com.client.dynamic import Dispatch
ob = Dispatch(engine.GetScriptDispatch())
try:
ob.hello("Goober")
except pythoncom.com_error, exc:
print "***** Calling 'hello' failed", exc
return
if echoer.last != "Goober":
print "***** Function call didnt set value correctly", repr(echoer.last)
if str(ob.prop) != "Property Value":
print "***** Property Value not correct - ", repr(ob.prop)
ob.testcollection()
# Now make sure my engines can evaluate stuff.
result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
if result != 2:
print "Engine could not evaluate '1+1' - said the result was", result
def dotestall():
for i in xrange(10):
TestEngine("Python", PyScript)
print sys.gettotalrefcount()
## print "Testing Exceptions"
## try:
## TestEngine("Python", ErrScript, 0)
## except pythoncom.com_error:
## pass
def testall():
dotestall()
pythoncom.CoUninitialize()
print "AXScript Host worked correctly - %d/%d COM objects left alive." % (pythoncom._GetInterfaceCount(), pythoncom._GetGatewayCount())
if __name__ == '__main__':
testall() | 0.196672 | 0.111386 |
import hashlib
import logging
import os
import shutil
import sys
if sys.version_info < (3, 6):
pass
logger = logging.getLogger(__name__)
def _file_reader_iter(path: str, block_size=2 ** 20):
with open(path, "rb") as f:
block = f.read(block_size)
while len(block) > 0:
yield block
block = f.read(block_size)
def calculate_sha3_384(path: str) -> str:
"""Calculate sha3 384 hash, reading the file in 1MB chunks."""
return calculate_hash(path, algorithm="sha3_384")
def calculate_hash(path: str, *, algorithm: str) -> str:
"""Calculate the hash for path with algorithm."""
# This will raise an AttributeError if algorithm is unsupported
hasher = getattr(hashlib, algorithm)()
for block in _file_reader_iter(path):
hasher.update(block)
return hasher.hexdigest()
def is_dumb_terminal():
"""Return True if on a dumb terminal."""
is_stdout_tty = os.isatty(1)
is_term_dumb = os.environ.get("TERM", "") == "dumb"
return not is_stdout_tty or is_term_dumb
def get_kubectl_directory() -> str:
"""
Get the correct directory to install kubectl into,
we can then call this when running `microk8s kubectl`
without interfering with any systemwide install.
:return: String
"""
if sys.platform == "win32":
if getattr(sys, "frozen", None):
d = os.path.dirname(sys.executable)
else:
d = os.path.dirname(os.path.abspath(__file__))
return os.path.join(d, "kubectl")
else:
full_path = shutil.which("kubectl")
return os.path.dirname(full_path)
def get_kubeconfig_path():
"""Return a MicroK8s specific kubeconfig path."""
if sys.platform == "win32":
return os.path.join(os.environ.get("LocalAppData"), "MicroK8s", "config")
else:
return os.path.join(os.path.expanduser("~"), ".microk8s", "config")
def clear_kubeconfig():
"""Clean kubeconfig file."""
if os.path.isdir(get_kubeconfig_path()):
shutil.rmtree(os.path.dirname(get_kubeconfig_path())) | installer/common/file_utils.py |
import hashlib
import logging
import os
import shutil
import sys
if sys.version_info < (3, 6):
pass
logger = logging.getLogger(__name__)
def _file_reader_iter(path: str, block_size=2 ** 20):
with open(path, "rb") as f:
block = f.read(block_size)
while len(block) > 0:
yield block
block = f.read(block_size)
def calculate_sha3_384(path: str) -> str:
"""Calculate sha3 384 hash, reading the file in 1MB chunks."""
return calculate_hash(path, algorithm="sha3_384")
def calculate_hash(path: str, *, algorithm: str) -> str:
"""Calculate the hash for path with algorithm."""
# This will raise an AttributeError if algorithm is unsupported
hasher = getattr(hashlib, algorithm)()
for block in _file_reader_iter(path):
hasher.update(block)
return hasher.hexdigest()
def is_dumb_terminal():
"""Return True if on a dumb terminal."""
is_stdout_tty = os.isatty(1)
is_term_dumb = os.environ.get("TERM", "") == "dumb"
return not is_stdout_tty or is_term_dumb
def get_kubectl_directory() -> str:
"""
Get the correct directory to install kubectl into,
we can then call this when running `microk8s kubectl`
without interfering with any systemwide install.
:return: String
"""
if sys.platform == "win32":
if getattr(sys, "frozen", None):
d = os.path.dirname(sys.executable)
else:
d = os.path.dirname(os.path.abspath(__file__))
return os.path.join(d, "kubectl")
else:
full_path = shutil.which("kubectl")
return os.path.dirname(full_path)
def get_kubeconfig_path():
"""Return a MicroK8s specific kubeconfig path."""
if sys.platform == "win32":
return os.path.join(os.environ.get("LocalAppData"), "MicroK8s", "config")
else:
return os.path.join(os.path.expanduser("~"), ".microk8s", "config")
def clear_kubeconfig():
"""Clean kubeconfig file."""
if os.path.isdir(get_kubeconfig_path()):
shutil.rmtree(os.path.dirname(get_kubeconfig_path())) | 0.469277 | 0.234735 |
# -----------------------------------------------------------------------------
# Module: dpa.restful
# Author: <NAME> (jtomlin)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports:
# -----------------------------------------------------------------------------
import copy
from .client import RestfulClientError
from .mixins import ListMixin, GetMixin
# -----------------------------------------------------------------------------
# Public Classes
# -----------------------------------------------------------------------------
class RestfulObject(object):
exception_class = None
# -------------------------------------------------------------------------
# Special methods:
# -------------------------------------------------------------------------
def __init__(self, data):
self._data = _RestfulData(data)
# -------------------------------------------------------------------------
def __getattr__(self, attr):
# look up the attribute in the _data
try:
return self._data.get(attr)
except _RestfulDataError:
raise AttributeError(
'{cls} instance has no attribute "{attr}"'.format(
cls=self.__class__.__name__,
attr=attr,
)
)
# -----------------------------------------------------------------------------
class RestfulObjectError(RestfulClientError):
pass
RestfulObject.exception_class = RestfulObjectError
# -----------------------------------------------------------------------------
class ReadOnlyRestfulObject(ListMixin, GetMixin, RestfulObject):
pass
# -----------------------------------------------------------------------------
# Private Classes:
# -----------------------------------------------------------------------------
class _RestfulData(object):
# -------------------------------------------------------------------------
# Special methods:
# -------------------------------------------------------------------------
def __init__(self, data_dict):
"""Constructor."""
super(_RestfulData, self).__init__()
self._data = data_dict
# -------------------------------------------------------------------------
# Instance methods:
# -------------------------------------------------------------------------
def get(self, attr):
try:
return self._data[attr]
except KeyError:
raise _RestfulDataError(
"No attribute '{a}' in data object.".format(a=attr))
# -------------------------------------------------------------------------
def set(self, attr, value):
if not attr in self._data.keys():
raise _RestfulDataError(
"No attribute '{a}' in data object.".format(a=attr))
self._data[attr] = value
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
@property
def data_dict(self):
return self._data
# -------------------------------------------------------------------------
@data_dict.setter
def data_dict(self, data):
self._data = data
# -----------------------------------------------------------------------------
# Public exception classes:
# -----------------------------------------------------------------------------
class _RestfulDataError(Exception):
pass | dpa/restful/__init__.py | # -----------------------------------------------------------------------------
# Module: dpa.restful
# Author: <NAME> (jtomlin)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports:
# -----------------------------------------------------------------------------
import copy
from .client import RestfulClientError
from .mixins import ListMixin, GetMixin
# -----------------------------------------------------------------------------
# Public Classes
# -----------------------------------------------------------------------------
class RestfulObject(object):
exception_class = None
# -------------------------------------------------------------------------
# Special methods:
# -------------------------------------------------------------------------
def __init__(self, data):
self._data = _RestfulData(data)
# -------------------------------------------------------------------------
def __getattr__(self, attr):
# look up the attribute in the _data
try:
return self._data.get(attr)
except _RestfulDataError:
raise AttributeError(
'{cls} instance has no attribute "{attr}"'.format(
cls=self.__class__.__name__,
attr=attr,
)
)
# -----------------------------------------------------------------------------
class RestfulObjectError(RestfulClientError):
pass
RestfulObject.exception_class = RestfulObjectError
# -----------------------------------------------------------------------------
class ReadOnlyRestfulObject(ListMixin, GetMixin, RestfulObject):
pass
# -----------------------------------------------------------------------------
# Private Classes:
# -----------------------------------------------------------------------------
class _RestfulData(object):
# -------------------------------------------------------------------------
# Special methods:
# -------------------------------------------------------------------------
def __init__(self, data_dict):
"""Constructor."""
super(_RestfulData, self).__init__()
self._data = data_dict
# -------------------------------------------------------------------------
# Instance methods:
# -------------------------------------------------------------------------
def get(self, attr):
try:
return self._data[attr]
except KeyError:
raise _RestfulDataError(
"No attribute '{a}' in data object.".format(a=attr))
# -------------------------------------------------------------------------
def set(self, attr, value):
if not attr in self._data.keys():
raise _RestfulDataError(
"No attribute '{a}' in data object.".format(a=attr))
self._data[attr] = value
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
@property
def data_dict(self):
return self._data
# -------------------------------------------------------------------------
@data_dict.setter
def data_dict(self, data):
self._data = data
# -----------------------------------------------------------------------------
# Public exception classes:
# -----------------------------------------------------------------------------
class _RestfulDataError(Exception):
pass | 0.528777 | 0.090454 |
import os
import soundfile
import librosa
import tensorflow as tf
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
slim = tf.contrib.slim
def read_audio(path, target_fs=None):
(audio, fs) = soundfile.read(path)
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
if target_fs is not None and fs != target_fs:
audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
fs = target_fs
return audio, fs
### Feature extraction.
def extract_audioset_embedding():
"""Extract log mel spectrogram features.
"""
# Arguments & parameters
mel_bins = vggish_params.NUM_BANDS
sample_rate = vggish_params.SAMPLE_RATE
input_len = vggish_params.NUM_FRAMES
embedding_size = vggish_params.EMBEDDING_SIZE
'''You may modify the EXAMPLE_HOP_SECONDS in vggish_params.py to change the
hop size. '''
# Paths
cats_and_dogs_wavs = '/Users/<EMAIL>/Documents/ScalableMachineLearning/project/audio-cats-and-dogs/cats_dogs/cat_3.wav'
audio_path = cats_and_dogs_wavs
# audio_path = 'appendixes/01.wav'
checkpoint_path = os.path.join('vggish_model.ckpt')
pcm_params_path = os.path.join('vggish_pca_params.npz')
if not os.path.isfile(checkpoint_path):
raise Exception('Please download vggish_model.ckpt from '
'https://storage.googleapis.com/audioset/vggish_model.ckpt '
'and put it in the root of this codebase. ')
if not os.path.isfile(pcm_params_path):
raise Exception('Please download pcm_params_path from '
'https://storage.googleapis.com/audioset/vggish_pca_params.npz '
'and put it in the root of this codebase. ')
# Load model
sess = tf.Session()
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
pproc = vggish_postprocess.Postprocessor(pcm_params_path)
# Read audio
(audio, _) = read_audio(audio_path, target_fs=sample_rate)
# Extract log mel feature
logmel = vggish_input.waveform_to_examples(audio, sample_rate)
# Extract embedding feature
[embedding_batch] = sess.run([embedding_tensor], feed_dict={features_tensor: logmel})
# PCA
postprocessed_batch = pproc.postprocess(embedding_batch)
print('Audio length: {}'.format(len(audio)))
print('Log mel shape: {}'.format(logmel.shape))
print('Embedding feature shape: {}'.format(postprocessed_batch.shape))
print('Type: {}'.format(type(postprocessed_batch)))
print('Fetaures: {}'.format(postprocessed_batch))
if __name__ == '__main__':
extract_audioset_embedding() | extract_audioset_embedding/extract_embeddings.py | import os
import soundfile
import librosa
import tensorflow as tf
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
slim = tf.contrib.slim
def read_audio(path, target_fs=None):
(audio, fs) = soundfile.read(path)
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
if target_fs is not None and fs != target_fs:
audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
fs = target_fs
return audio, fs
### Feature extraction.
def extract_audioset_embedding():
"""Extract log mel spectrogram features.
"""
# Arguments & parameters
mel_bins = vggish_params.NUM_BANDS
sample_rate = vggish_params.SAMPLE_RATE
input_len = vggish_params.NUM_FRAMES
embedding_size = vggish_params.EMBEDDING_SIZE
'''You may modify the EXAMPLE_HOP_SECONDS in vggish_params.py to change the
hop size. '''
# Paths
cats_and_dogs_wavs = '/Users/<EMAIL>/Documents/ScalableMachineLearning/project/audio-cats-and-dogs/cats_dogs/cat_3.wav'
audio_path = cats_and_dogs_wavs
# audio_path = 'appendixes/01.wav'
checkpoint_path = os.path.join('vggish_model.ckpt')
pcm_params_path = os.path.join('vggish_pca_params.npz')
if not os.path.isfile(checkpoint_path):
raise Exception('Please download vggish_model.ckpt from '
'https://storage.googleapis.com/audioset/vggish_model.ckpt '
'and put it in the root of this codebase. ')
if not os.path.isfile(pcm_params_path):
raise Exception('Please download pcm_params_path from '
'https://storage.googleapis.com/audioset/vggish_pca_params.npz '
'and put it in the root of this codebase. ')
# Load model
sess = tf.Session()
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
pproc = vggish_postprocess.Postprocessor(pcm_params_path)
# Read audio
(audio, _) = read_audio(audio_path, target_fs=sample_rate)
# Extract log mel feature
logmel = vggish_input.waveform_to_examples(audio, sample_rate)
# Extract embedding feature
[embedding_batch] = sess.run([embedding_tensor], feed_dict={features_tensor: logmel})
# PCA
postprocessed_batch = pproc.postprocess(embedding_batch)
print('Audio length: {}'.format(len(audio)))
print('Log mel shape: {}'.format(logmel.shape))
print('Embedding feature shape: {}'.format(postprocessed_batch.shape))
print('Type: {}'.format(type(postprocessed_batch)))
print('Fetaures: {}'.format(postprocessed_batch))
if __name__ == '__main__':
extract_audioset_embedding() | 0.533884 | 0.166947 |
import re
import shutil
SHA_256_FILE = "sha-256_ALICE.txt"
PBKDF_N_FILE_PREFIX = "pbkdf2-sha256-"
PBKDF_N_FILE_SUFFIX = ".txt"
IN_LEN = 512
OUT_LEN = 256
def mod_first():
PBKDF_OUT_FILE = PBKDF_N_FILE_PREFIX + "1e0" + PBKDF_N_FILE_SUFFIX
with open(SHA_256_FILE, "r") as in_f:
gates = in_f.readlines()
if not gates or len(gates) == 0:
print("Failed to read input file")
exit(1)
# Parse first two lines of bristol format
m = re.match(r"\s*(\d+)\s+(\d+)\s*\n", gates[0])
numgates = int(m.group(1))
numwires = int(m.group(2))
m = re.match(r"\s*(\d+)\s+(\d+)\s+(\d+)\s*\n", gates[1])
bobinputs = int(m.group(1))
aliceinputs = int(m.group(2))
outputs = int(m.group(3))
gates = gates[3:] # skip first three lines
if gates[-1] == "\n":
gates = gates[:-1]
#print(f"numgates: {numgates}\nnumwires: {numwires}\nbobinputs: {bobinputs}\naliceinputs: {aliceinputs}\noutputs: {outputs}")
# Changes to first SHA256 iteration:
# 1. Set wires 0-255 by XORing Alice's inputs with themselves to get 0 (prepend 256 gates)
# 2. Rather than outputting directly, XOR with these zero-wires to get same result
# (append 256 gates and wires)
# 3. Modify initial line to have numgates += 512, numwires += 256
# 4. No need to change internal wire/gate indices this time
new_numgates = numgates + 512
new_numwires = numwires + 256
with open(PBKDF_OUT_FILE, "w") as out_f:
out_f.writelines([
str(new_numgates)+" "+str(new_numwires)+"\n",
str(bobinputs)+" "+str(aliceinputs)+" "+str(outputs)+"\n",
"\n"
])
out_f.writelines(get_prependgates_first())
out_f.writelines(gates)
out_f.writelines(get_appendgates_first(numwires))
def get_prependgates_first():
return ["2 1 "+str(i+256)+" "+str(i+256)+" "+str(i)+" XOR\n" for i in range(256)]
def overwrite_startin_with_prevout(line, start):
# must call AFTER incrementing wires to start at start
if line[0] == "1":
m = re.match(r"\s*1\s+1\s+(\d+)\s+(\d+)\s+INV\s*\n", line)
inwire = int(m.group(1))
outwire = int(m.group(2))
if inwire < start + 256:
inwire = start - 512 + inwire
elif start+256 <= inwire < start+512:
inwire = inwire - start
if outwire < start + 256:
outwire = start - 512 + outwire
elif start+256 <= outwire < start+512:
outwire = outwire - start
return "1 1 "+str(inwire+wirenum)+" "+str(outwire+wirenum)+" INV\n"
elif line[0] == "2":
m = re.match(r"\s*2\s+1\s+(\d+)\s+(\d+)\s+(\d+)\s+(XOR|AND)\s*\n", line)
inwire1 = int(m.group(1))
inwire2 = int(m.group(2))
outwire = int(m.group(3))
opcode = int(m.group(4))
if inwire1 < start + 256:
inwire1 = start - 512 + inwire1
elif start+256 <= inwire1 < start+512:
inwire1 = inwire1 - start
if inwire1 < start + 256:
inwire2 = start - 512 + inwire2
elif start+256 <= inwire2 < start+512:
inwire2 = inwire2 - start
if outwire < start + 256:
outwire = start - 512 + outwire
elif start+256 <= outwire < start+512:
outwire = outwire - start
return "2 1 "+str(inwire1+wirenum)+" "+str(inwire2+wirenum)+" "+str(outwire+wirenum)+" "+opcode+"\n"
else:
print(f"Error: Gates must have 1 or 2 inputs, was given {line[0]}")
exit(1)
def get_appendgates_first(old_numwires):
return ["2 1 "+str(i)+" "+str(old_numwires-256+i)+" "+str(old_numwires+i)+" XOR\n" for i in range(256)]
def get_appendgates(start, next_old_numwires):
return ["2 1 "+str(start-256+i)+" "+str(next_old_numwires-256+i)+" "+str(next_old_numwires+i)+" XOR\n" for i in range(256)]
def process_line(line, start):
"""
i between 0 and 255: start - 512 + i (prev output)
i between 256 and 511: actual i (A's original input)
i between 512 and end: start + i - 512 (normal wires, but shifted by start-512)
does not handle appendgates
"""
#print("Processing line: ", line)
if line[0] == "1":
m = re.match(r"\s*1\s+1\s+(\d+)\s+(\d+)\s+INV\s*\n", line)
inwire = int(m.group(1))
if inwire >= 512 or inwire < 256:
inwire = start - 512 + inwire
outwire = int(m.group(2))
if outwire >= 512 or outwire < 256:
outwire = start - 512 + outwire
return "1 1 "+str(inwire)+" "+str(outwire)+" INV\n"
elif line[0] == "2":
m = re.match(r"\s*2\s+1\s+(\d+)\s+(\d+)\s+(\d+)\s+(XOR|AND)\s*\n", line)
inwire1 = int(m.group(1))
if inwire1 >= 512 or inwire1 < 256:
inwire1 = start - 512 + inwire1
inwire2 = int(m.group(2))
if inwire2 >= 512 or inwire2 < 256:
inwire2 = start - 512 + inwire2
outwire = int(m.group(3))
if outwire >= 512 or outwire < 256:
outwire = start - 512 + outwire
opcode = m.group(4)
return "2 1 "+str(inwire1)+" "+str(inwire2)+" "+str(outwire)+" "+opcode+"\n"
else:
print(f"Error: Gates must have 1 or 2 inputs, was given {line[0]}")
exit(1)
def add_n_iters(startfile, endfile, n):
with open(SHA_256_FILE, "r") as in_f:
sha256_gates = in_f.readlines()
# Parse first two lines of bristol format
m = re.match(r"\s*(\d+)\s+(\d+)\s*\n", sha256_gates[0])
orig_numgates = int(m.group(1))
orig_numwires = int(m.group(2))
m = re.match(r"\s*(\d+)\s+(\d+)\s+(\d+)\s*\n", sha256_gates[1])
orig_bobinputs = int(m.group(1))
orig_aliceinputs = int(m.group(2))
orig_outputs = int(m.group(3))
sha256_gates = sha256_gates[3:] # skip first three lines
if sha256_gates[-1] == "\n":
sha256_gates = sha256_gates[:-1]
prev_file = "tmp1.txt"
next_file = "tmp2.txt"
shutil.copyfile(startfile, prev_file)
# To add next iteration to modded first:
# 1. 2nd-last 256 of previous should be 1st 256 of next
# 2. ORIGINAL 256-511 should be 2nd 256 of next
# 3. Append 256 additional wires: output XORed with last 256 of previous
for i in range(n):
print(f"Iteration {i}")
with open(prev_file, "r") as in_f:
m = re.match(r"\s*(\d+)\s+(\d+)\s*\n", in_f.readline())
prev_numgates = int(m.group(1))
prev_numwires = int(m.group(2))
in_f.readline()
in_f.readline() # get to beginning of fourth line
new_numgates = prev_numgates + orig_numgates + 256
new_numwires = prev_numwires + orig_numwires - 256 # prev + (new-512) + 256
with open(next_file, "w") as out_f:
out_f.writelines([str(new_numgates)+" "+str(new_numwires)+"\n",
str(orig_bobinputs)+" "+str(orig_aliceinputs)+" "+str(orig_outputs)+"\n",
"\n"
])
shutil.copyfileobj(in_f, out_f)
with open(next_file, "a") as out_f:
for line in sha256_gates:
out_f.write(process_line(line, prev_numwires))
out_f.writelines(get_appendgates(prev_numwires, new_numwires-256))
shutil.copyfile(next_file, prev_file)
shutil.copyfile(next_file, endfile)
def pbkdf_file(nen):
return PBKDF_N_FILE_PREFIX + nen + PBKDF_N_FILE_SUFFIX
#add_n_iters(pbkdf_file("1e0"), pbkdf_file("1e2"), 99)
#add_n_iters(pbkdf_file("1e2"), pbkdf_file("1e3"), 900)
#add_n_iters(pbkdf_file("1e3"), pbkdf_file("5e3"), 4000)
add_n_iters(pbkdf_file("1e0"), pbkdf_file("1e1"), 9) | pbkdf2-sha256/create_pbkdf2_n.py |
import re
import shutil
SHA_256_FILE = "sha-256_ALICE.txt"
PBKDF_N_FILE_PREFIX = "pbkdf2-sha256-"
PBKDF_N_FILE_SUFFIX = ".txt"
IN_LEN = 512
OUT_LEN = 256
def mod_first():
PBKDF_OUT_FILE = PBKDF_N_FILE_PREFIX + "1e0" + PBKDF_N_FILE_SUFFIX
with open(SHA_256_FILE, "r") as in_f:
gates = in_f.readlines()
if not gates or len(gates) == 0:
print("Failed to read input file")
exit(1)
# Parse first two lines of bristol format
m = re.match(r"\s*(\d+)\s+(\d+)\s*\n", gates[0])
numgates = int(m.group(1))
numwires = int(m.group(2))
m = re.match(r"\s*(\d+)\s+(\d+)\s+(\d+)\s*\n", gates[1])
bobinputs = int(m.group(1))
aliceinputs = int(m.group(2))
outputs = int(m.group(3))
gates = gates[3:] # skip first three lines
if gates[-1] == "\n":
gates = gates[:-1]
#print(f"numgates: {numgates}\nnumwires: {numwires}\nbobinputs: {bobinputs}\naliceinputs: {aliceinputs}\noutputs: {outputs}")
# Changes to first SHA256 iteration:
# 1. Set wires 0-255 by XORing Alice's inputs with themselves to get 0 (prepend 256 gates)
# 2. Rather than outputting directly, XOR with these zero-wires to get same result
# (append 256 gates and wires)
# 3. Modify initial line to have numgates += 512, numwires += 256
# 4. No need to change internal wire/gate indices this time
new_numgates = numgates + 512
new_numwires = numwires + 256
with open(PBKDF_OUT_FILE, "w") as out_f:
out_f.writelines([
str(new_numgates)+" "+str(new_numwires)+"\n",
str(bobinputs)+" "+str(aliceinputs)+" "+str(outputs)+"\n",
"\n"
])
out_f.writelines(get_prependgates_first())
out_f.writelines(gates)
out_f.writelines(get_appendgates_first(numwires))
def get_prependgates_first():
return ["2 1 "+str(i+256)+" "+str(i+256)+" "+str(i)+" XOR\n" for i in range(256)]
def overwrite_startin_with_prevout(line, start):
# must call AFTER incrementing wires to start at start
if line[0] == "1":
m = re.match(r"\s*1\s+1\s+(\d+)\s+(\d+)\s+INV\s*\n", line)
inwire = int(m.group(1))
outwire = int(m.group(2))
if inwire < start + 256:
inwire = start - 512 + inwire
elif start+256 <= inwire < start+512:
inwire = inwire - start
if outwire < start + 256:
outwire = start - 512 + outwire
elif start+256 <= outwire < start+512:
outwire = outwire - start
return "1 1 "+str(inwire+wirenum)+" "+str(outwire+wirenum)+" INV\n"
elif line[0] == "2":
m = re.match(r"\s*2\s+1\s+(\d+)\s+(\d+)\s+(\d+)\s+(XOR|AND)\s*\n", line)
inwire1 = int(m.group(1))
inwire2 = int(m.group(2))
outwire = int(m.group(3))
opcode = int(m.group(4))
if inwire1 < start + 256:
inwire1 = start - 512 + inwire1
elif start+256 <= inwire1 < start+512:
inwire1 = inwire1 - start
if inwire1 < start + 256:
inwire2 = start - 512 + inwire2
elif start+256 <= inwire2 < start+512:
inwire2 = inwire2 - start
if outwire < start + 256:
outwire = start - 512 + outwire
elif start+256 <= outwire < start+512:
outwire = outwire - start
return "2 1 "+str(inwire1+wirenum)+" "+str(inwire2+wirenum)+" "+str(outwire+wirenum)+" "+opcode+"\n"
else:
print(f"Error: Gates must have 1 or 2 inputs, was given {line[0]}")
exit(1)
def get_appendgates_first(old_numwires):
return ["2 1 "+str(i)+" "+str(old_numwires-256+i)+" "+str(old_numwires+i)+" XOR\n" for i in range(256)]
def get_appendgates(start, next_old_numwires):
return ["2 1 "+str(start-256+i)+" "+str(next_old_numwires-256+i)+" "+str(next_old_numwires+i)+" XOR\n" for i in range(256)]
def process_line(line, start):
"""
i between 0 and 255: start - 512 + i (prev output)
i between 256 and 511: actual i (A's original input)
i between 512 and end: start + i - 512 (normal wires, but shifted by start-512)
does not handle appendgates
"""
#print("Processing line: ", line)
if line[0] == "1":
m = re.match(r"\s*1\s+1\s+(\d+)\s+(\d+)\s+INV\s*\n", line)
inwire = int(m.group(1))
if inwire >= 512 or inwire < 256:
inwire = start - 512 + inwire
outwire = int(m.group(2))
if outwire >= 512 or outwire < 256:
outwire = start - 512 + outwire
return "1 1 "+str(inwire)+" "+str(outwire)+" INV\n"
elif line[0] == "2":
m = re.match(r"\s*2\s+1\s+(\d+)\s+(\d+)\s+(\d+)\s+(XOR|AND)\s*\n", line)
inwire1 = int(m.group(1))
if inwire1 >= 512 or inwire1 < 256:
inwire1 = start - 512 + inwire1
inwire2 = int(m.group(2))
if inwire2 >= 512 or inwire2 < 256:
inwire2 = start - 512 + inwire2
outwire = int(m.group(3))
if outwire >= 512 or outwire < 256:
outwire = start - 512 + outwire
opcode = m.group(4)
return "2 1 "+str(inwire1)+" "+str(inwire2)+" "+str(outwire)+" "+opcode+"\n"
else:
print(f"Error: Gates must have 1 or 2 inputs, was given {line[0]}")
exit(1)
def add_n_iters(startfile, endfile, n):
with open(SHA_256_FILE, "r") as in_f:
sha256_gates = in_f.readlines()
# Parse first two lines of bristol format
m = re.match(r"\s*(\d+)\s+(\d+)\s*\n", sha256_gates[0])
orig_numgates = int(m.group(1))
orig_numwires = int(m.group(2))
m = re.match(r"\s*(\d+)\s+(\d+)\s+(\d+)\s*\n", sha256_gates[1])
orig_bobinputs = int(m.group(1))
orig_aliceinputs = int(m.group(2))
orig_outputs = int(m.group(3))
sha256_gates = sha256_gates[3:] # skip first three lines
if sha256_gates[-1] == "\n":
sha256_gates = sha256_gates[:-1]
prev_file = "tmp1.txt"
next_file = "tmp2.txt"
shutil.copyfile(startfile, prev_file)
# To add next iteration to modded first:
# 1. 2nd-last 256 of previous should be 1st 256 of next
# 2. ORIGINAL 256-511 should be 2nd 256 of next
# 3. Append 256 additional wires: output XORed with last 256 of previous
for i in range(n):
print(f"Iteration {i}")
with open(prev_file, "r") as in_f:
m = re.match(r"\s*(\d+)\s+(\d+)\s*\n", in_f.readline())
prev_numgates = int(m.group(1))
prev_numwires = int(m.group(2))
in_f.readline()
in_f.readline() # get to beginning of fourth line
new_numgates = prev_numgates + orig_numgates + 256
new_numwires = prev_numwires + orig_numwires - 256 # prev + (new-512) + 256
with open(next_file, "w") as out_f:
out_f.writelines([str(new_numgates)+" "+str(new_numwires)+"\n",
str(orig_bobinputs)+" "+str(orig_aliceinputs)+" "+str(orig_outputs)+"\n",
"\n"
])
shutil.copyfileobj(in_f, out_f)
with open(next_file, "a") as out_f:
for line in sha256_gates:
out_f.write(process_line(line, prev_numwires))
out_f.writelines(get_appendgates(prev_numwires, new_numwires-256))
shutil.copyfile(next_file, prev_file)
shutil.copyfile(next_file, endfile)
def pbkdf_file(nen):
return PBKDF_N_FILE_PREFIX + nen + PBKDF_N_FILE_SUFFIX
#add_n_iters(pbkdf_file("1e0"), pbkdf_file("1e2"), 99)
#add_n_iters(pbkdf_file("1e2"), pbkdf_file("1e3"), 900)
#add_n_iters(pbkdf_file("1e3"), pbkdf_file("5e3"), 4000)
add_n_iters(pbkdf_file("1e0"), pbkdf_file("1e1"), 9) | 0.20343 | 0.35942 |
from vortexasdk.endpoints.asset_tanks import AssetTanks
from vortexasdk.endpoints.attributes import Attributes
from vortexasdk.endpoints.cargo_movements import CargoMovements
from vortexasdk.endpoints.cargo_timeseries import CargoTimeSeries
from vortexasdk.endpoints.vessel_movements import VesselMovements
from vortexasdk.endpoints.corporations import Corporations
from vortexasdk.endpoints.geographies import Geographies
from vortexasdk.endpoints.products import Products
from vortexasdk.endpoints.vessels import Vessels
from vortexasdk.endpoints.storage_terminals import StorageTerminals
from vortexasdk.endpoints.tonne_miles_breakdown import TonneMilesBreakdown
from vortexasdk.endpoints.vessel_availability_breakdown import VesselAvailabilityBreakdown
from vortexasdk.endpoints.vessel_availability_timeseries import VesselAvailabilityTimeseries
from vortexasdk.endpoints.vessel_availability_search import VesselAvailabilitySearch
from vortexasdk.endpoints.fleet_utilisation_avg_distance_timeseries import FleetUtilisationAvgDistanceTimeseries
from vortexasdk.endpoints.fleet_utilisation_capacity_timeseries import FleetUtilisationCapacityTimeseries
from vortexasdk.endpoints.fleet_utilisation_quantity_timeseries import FleetUtilisationQuantityTimeseries
from vortexasdk.endpoints.fleet_utilisation_speed_breakdown import FleetUtilisationSpeedBreakdown
from vortexasdk.endpoints.fleet_utilisation_origin_breakdown import FleetUtilisationOriginBreakdown
from vortexasdk.endpoints.fleet_utilisation_destination_breakdown import FleetUtilisationDestinationBreakdown
from vortexasdk.endpoints.fleet_utilisation_timeseries import FleetUtilisationTimeseries
from vortexasdk.endpoints.eia_forecasts import EIAForecasts
from vortexasdk.endpoints.origin_breakdown import OriginBreakdown
from vortexasdk.endpoints.destination_breakdown import DestinationBreakdown
from vortexasdk.endpoints.onshore_inventories_timeseries import OnshoreInventoriesTimeseries
from vortexasdk.endpoints.onshore_inventories_search import OnshoreInventoriesSearch
from vortexasdk.endpoints.freight_pricing_search import FreightPricingSearch
from vortexasdk.endpoints.freight_pricing_timeseries import FreightPricingTimeseries
from vortexasdk.endpoints.voyages_timeseries import VoyagesTimeseries
from vortexasdk.endpoints.voyages_congestion_breakdown import VoyagesCongestionBreakdown
from vortexasdk.endpoints.voyages_top_hits import VoyagesTopHits
from vortexasdk.endpoints.voyages_search_enriched import VoyagesSearchEnriched | vortexasdk/endpoints/__init__.py | from vortexasdk.endpoints.asset_tanks import AssetTanks
from vortexasdk.endpoints.attributes import Attributes
from vortexasdk.endpoints.cargo_movements import CargoMovements
from vortexasdk.endpoints.cargo_timeseries import CargoTimeSeries
from vortexasdk.endpoints.vessel_movements import VesselMovements
from vortexasdk.endpoints.corporations import Corporations
from vortexasdk.endpoints.geographies import Geographies
from vortexasdk.endpoints.products import Products
from vortexasdk.endpoints.vessels import Vessels
from vortexasdk.endpoints.storage_terminals import StorageTerminals
from vortexasdk.endpoints.tonne_miles_breakdown import TonneMilesBreakdown
from vortexasdk.endpoints.vessel_availability_breakdown import VesselAvailabilityBreakdown
from vortexasdk.endpoints.vessel_availability_timeseries import VesselAvailabilityTimeseries
from vortexasdk.endpoints.vessel_availability_search import VesselAvailabilitySearch
from vortexasdk.endpoints.fleet_utilisation_avg_distance_timeseries import FleetUtilisationAvgDistanceTimeseries
from vortexasdk.endpoints.fleet_utilisation_capacity_timeseries import FleetUtilisationCapacityTimeseries
from vortexasdk.endpoints.fleet_utilisation_quantity_timeseries import FleetUtilisationQuantityTimeseries
from vortexasdk.endpoints.fleet_utilisation_speed_breakdown import FleetUtilisationSpeedBreakdown
from vortexasdk.endpoints.fleet_utilisation_origin_breakdown import FleetUtilisationOriginBreakdown
from vortexasdk.endpoints.fleet_utilisation_destination_breakdown import FleetUtilisationDestinationBreakdown
from vortexasdk.endpoints.fleet_utilisation_timeseries import FleetUtilisationTimeseries
from vortexasdk.endpoints.eia_forecasts import EIAForecasts
from vortexasdk.endpoints.origin_breakdown import OriginBreakdown
from vortexasdk.endpoints.destination_breakdown import DestinationBreakdown
from vortexasdk.endpoints.onshore_inventories_timeseries import OnshoreInventoriesTimeseries
from vortexasdk.endpoints.onshore_inventories_search import OnshoreInventoriesSearch
from vortexasdk.endpoints.freight_pricing_search import FreightPricingSearch
from vortexasdk.endpoints.freight_pricing_timeseries import FreightPricingTimeseries
from vortexasdk.endpoints.voyages_timeseries import VoyagesTimeseries
from vortexasdk.endpoints.voyages_congestion_breakdown import VoyagesCongestionBreakdown
from vortexasdk.endpoints.voyages_top_hits import VoyagesTopHits
from vortexasdk.endpoints.voyages_search_enriched import VoyagesSearchEnriched | 0.649912 | 0.138113 |
__author__ = ["<NAME>"]
__email__ = ["<EMAIL>"]
__status__ = "Prototype"
""" Ignite trainer for a Bimodal GAN architecture. """
from abc import ABC
from typing import Callable
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from vscvs.decorators import kwargs_parameter_dict
from vscvs.loss_functions import ContrastiveLoss
from vscvs.metrics import AverageDistancesMultimodalSiamesePairs
from vscvs.metrics import LossBimodalSiamesePairs
from vscvs.metrics import LossMultimodalGAN
from vscvs.models import InterModalDiscriminator
from vscvs.models import MultimodalEncoder
from vscvs.models import ResNextNormalized
from ..engines.gan import create_multimodal_gan_evaluator
from ..engines.gan import create_multimodal_gan_siamese_evaluator
from ..engines.gan import create_multimodal_gan_siamese_trainer
from ..engines.gan import create_multimodal_gan_trainer
from ..engines.gan import prepare_bimodal_batch_variables
from .gan import AbstractGANTrainer
class AbstractBiModalGANTrainer(AbstractGANTrainer, ABC):
    """Abstract class for creating Trainer classes with the common options needed for a bi-modal GAN architecture."""
    def __init__(self, *args, mode_embedding_networks=None, loss_weight=None, **kwargs):
        """
        :param args: AbstractGANTrainer arguments
        :type: Tuple
        :param mode_embedding_networks: the embedding networks for each mode. They will be used as generators for the
        generative adversarial formulation. Required: a `ValueError` is raised if omitted.
        :type: List[torch.nn.Module]
        :param loss_weight: manual rescaling weight given to the loss of each batch element. If given, has to be a
        Tensor of size `batch_size`.
        :type: torch.Tensor
        :param kwargs: AbstractGANTrainer keyword arguments
        :type: Dict
        """
        if mode_embedding_networks is None:
            # Fail early with a clear message instead of an opaque `TypeError` when `None` is unpacked below.
            raise ValueError("`mode_embedding_networks` must be provided for a bi-modal GAN trainer.")
        self.loss_weight = loss_weight
        # The per-mode embedding networks are wrapped in a single multimodal encoder acting as the GAN generator.
        super().__init__(*args, generator_network=MultimodalEncoder(*mode_embedding_networks), **kwargs)
    @property
    @overrides
    def loss(self):
        """Adversarial loss: binary cross-entropy with logits, shared by generator and discriminator."""
        return BCEWithLogitsLoss(reduction=self.loss_reduction, weight=self.loss_weight)
    @overrides
    def _create_evaluator_engine(self):
        """Create an Ignite evaluator engine reporting generator and discriminator losses."""
        loss = LossMultimodalGAN(self.loss)
        return create_multimodal_gan_evaluator(
            *self.model,
            device=self.device,
            metrics={"generator_loss": loss[0], "discriminator_loss": loss[1]},
            prepare_batch_variables=prepare_bimodal_batch_variables
        )
    @overrides
    def _create_trainer_engine(self):
        """Create an Ignite trainer engine for the adversarial training loop."""
        return create_multimodal_gan_trainer(
            *self.model,
            *self.optimizer,
            self.loss,
            device=self.device,
            prepare_batch_variables=prepare_bimodal_batch_variables
        )
class AbstractBiModalGANSiameseTrainer(AbstractBiModalGANTrainer, ABC):
    """Abstract base for bi-modal GAN trainers whose loss adds a contrastive (siamese) term to the
    adversarial formulation.
    Args:
    Returns:
    """
    def __init__(self, *args, margin=0.2, **kwargs):
        """
        :param args: AbstractBiModalGANTrainer arguments
        :type: Tuple
        :param margin: contrastive-loss margin: the distance threshold beyond which two embeddings are
        treated as dissimilar. Dissimilar pairs closer than the margin are pushed apart; similar
        sketch–image pairs are pulled together in the feature space.
        :type: float
        :param kwargs: AbstractBiModalGANTrainer keyword arguments
        :type: Dict
        """
        self.margin = margin
        super().__init__(*args, **kwargs)
    @property
    @overrides
    def loss(self):
        """Pair of loss modules: the adversarial BCE term and the contrastive siamese term."""
        adversarial_term = BCEWithLogitsLoss(reduction=self.loss_reduction, weight=self.loss_weight)
        contrastive_term = ContrastiveLoss(margin=self.margin, reduction=self.loss_reduction)
        return adversarial_term, contrastive_term
    @overrides
    def _create_evaluator_engine(self):
        """Create an Ignite evaluator engine reporting pair distances and both GAN losses."""
        distances = AverageDistancesMultimodalSiamesePairs()
        losses = LossBimodalSiamesePairs(self.loss)
        evaluation_metrics = {
            "Average Distance/positive": distances[0],
            "Average Distance/negative": distances[1],
            "Loss/generator": losses[0],
            "Loss/discriminator": losses[1],
        }
        return create_multimodal_gan_siamese_evaluator(
            *self.model,
            device=self.device,
            prepare_batch_variables=prepare_bimodal_batch_variables,
            metrics=evaluation_metrics
        )
    @overrides
    def _create_trainer_engine(self):
        """Create an Ignite trainer engine for the adversarial + contrastive training loop."""
        return create_multimodal_gan_siamese_trainer(
            *self.model,
            *self.optimizer,
            *self.loss,
            device=self.device,
            prepare_batch_variables=prepare_bimodal_batch_variables
        )
@kwargs_parameter_dict
def train_gan_bimodal(*args, optimizer_mixin=None, **kwargs):
    """Train a bimodal GAN.
    Args:
        args: BiModalGANTrainer arguments
        optimizer_mixin: Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s
            `optimizer` property with a specific optimizer. (Default value = None)
        kwargs: BiModalGANTrainer keyword arguments
    Returns:
    """
    class BiModalGANTrainer(optimizer_mixin, AbstractBiModalGANTrainer):
        """ """
        _optimizer: Callable  # declared in `optimizer_mixin`; annotated here so IDEs can resolve it
    embedding_dimension = 250  # shared width of the mode embeddings and the discriminator input
    mode_generators = [
        ResNextNormalized(out_features=embedding_dimension, pretrained=True)
        for _ in range(2)  # one embedding network per mode (sketch and image)
    ]
    trainer = BiModalGANTrainer(
        *args,
        discriminator_network=InterModalDiscriminator(input_dimension=embedding_dimension),
        mode_embedding_networks=mode_generators,
        **kwargs
    )
    trainer.run()
@kwargs_parameter_dict
def train_gan_bimodal_siamese(*args, optimizer_mixin=None, **kwargs):
    """Train a bimodal GAN with the additional siamese (contrastive) loss term.
    Args:
        args: BiModalGANSiameseTrainer arguments
        optimizer_mixin: Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s
            `optimizer` property with a specific optimizer. (Default value = None)
        kwargs: BiModalGANSiameseTrainer keyword arguments
    Returns:
    """
    class BiModalGANSiameseTrainer(optimizer_mixin, AbstractBiModalGANSiameseTrainer):
        """ """
        _optimizer: Callable  # declared in `optimizer_mixin`; annotated here so IDEs can resolve it
    embedding_dimension = 250  # shared width of the mode embeddings and the discriminator input
    mode_generators = [
        ResNextNormalized(out_features=embedding_dimension, pretrained=True)
        for _ in range(2)  # one embedding network per mode (sketch and image)
    ]
    trainer = BiModalGANSiameseTrainer(
        *args,
        discriminator_network=InterModalDiscriminator(input_dimension=embedding_dimension),
        mode_embedding_networks=mode_generators,
        **kwargs
    )
    trainer.run()
__email__ = ["<EMAIL>"]
__status__ = "Prototype"
""" Ignite trainer for a Bimodal GAN architecture. """
from abc import ABC
from typing import Callable
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from vscvs.decorators import kwargs_parameter_dict
from vscvs.loss_functions import ContrastiveLoss
from vscvs.metrics import AverageDistancesMultimodalSiamesePairs
from vscvs.metrics import LossBimodalSiamesePairs
from vscvs.metrics import LossMultimodalGAN
from vscvs.models import InterModalDiscriminator
from vscvs.models import MultimodalEncoder
from vscvs.models import ResNextNormalized
from ..engines.gan import create_multimodal_gan_evaluator
from ..engines.gan import create_multimodal_gan_siamese_evaluator
from ..engines.gan import create_multimodal_gan_siamese_trainer
from ..engines.gan import create_multimodal_gan_trainer
from ..engines.gan import prepare_bimodal_batch_variables
from .gan import AbstractGANTrainer
class AbstractBiModalGANTrainer(AbstractGANTrainer, ABC):
"""Abstract class for creating Trainer classes with the common options needed for a bi-modal GAN architecture."""
def __init__(self, *args, mode_embedding_networks=None, loss_weight=None, **kwargs):
"""
:param args: AbstractGANTrainer arguments
:type: Tuple
:param mode_embedding_networks: the embedding networks for each mode. They will be used as generators for the
generative adversarial formulation.
:type: List[torch.nn.Module]
:param loss_weight: manual rescaling weight given to the loss of each batch element. If given, has to be a
Tensor of size `batch_size`.
:type: torch.Tensor
:param kwargs: AbstractGANTrainer keyword arguments
:type: Dict
"""
self.loss_weight = loss_weight
super().__init__(*args, generator_network=MultimodalEncoder(*mode_embedding_networks), **kwargs)
@property
@overrides
def loss(self):
""" """
return BCEWithLogitsLoss(reduction=self.loss_reduction, weight=self.loss_weight)
@overrides
def _create_evaluator_engine(self):
""" """
loss = LossMultimodalGAN(self.loss)
return create_multimodal_gan_evaluator(
*self.model,
device=self.device,
metrics={"generator_loss": loss[0], "discriminator_loss": loss[1]},
prepare_batch_variables=prepare_bimodal_batch_variables
)
@overrides
def _create_trainer_engine(self):
""" """
return create_multimodal_gan_trainer(
*self.model,
*self.optimizer,
self.loss,
device=self.device,
prepare_batch_variables=prepare_bimodal_batch_variables
)
class AbstractBiModalGANSiameseTrainer(AbstractBiModalGANTrainer, ABC):
"""Abstract class for creating Trainer classes with the common options needed for a bi-modal GAN architecture with
the addition of a contrastive term in the loss functions.
Args:
Returns:
"""
def __init__(self, *args, margin=0.2, **kwargs):
"""
:param args: AbstractBiModalGANTrainer arguments
:type: Tuple
:param margin: parameter for the contrastive loss, defining the acceptable threshold for considering the
embeddings of two examples as dissimilar. Dissimilar image pairs will be pushed apart unless their distance
is already greater than the margin. Similar sketch–image pairs will be pulled together in the feature space.
:type: float
:param kwargs: AbstractBiModalGANTrainer keyword arguments
:type: Dict
"""
self.margin = margin
super().__init__(*args, **kwargs)
@property
@overrides
def loss(self):
""" """
return (
BCEWithLogitsLoss(reduction=self.loss_reduction, weight=self.loss_weight),
ContrastiveLoss(margin=self.margin, reduction=self.loss_reduction),
)
@overrides
def _create_evaluator_engine(self):
""" """
average_distances = AverageDistancesMultimodalSiamesePairs()
loss = LossBimodalSiamesePairs(self.loss)
return create_multimodal_gan_siamese_evaluator(
*self.model,
device=self.device,
prepare_batch_variables=prepare_bimodal_batch_variables,
metrics={
"Average Distance/positive": average_distances[0],
"Average Distance/negative": average_distances[1],
"Loss/generator": loss[0],
"Loss/discriminator": loss[1],
}
)
@overrides
def _create_trainer_engine(self):
""" """
return create_multimodal_gan_siamese_trainer(
*self.model,
*self.optimizer,
*self.loss,
device=self.device,
prepare_batch_variables=prepare_bimodal_batch_variables
)
@kwargs_parameter_dict
def train_gan_bimodal(*args, optimizer_mixin=None, **kwargs):
"""Train a bimodal GAN.
Args:
args: BiModalGANTrainer arguments
optimizer_mixin: Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s
`optimizer` property with a specific optimizer. (Default value = None)
kwargs: BiModalGANTrainer keyword arguments
*args:
**kwargs:
Returns:
"""
class BiModalGANTrainer(optimizer_mixin, AbstractBiModalGANTrainer):
""" """
_optimizer: Callable # type hinting `_optimizer` defined in `optimizer_mixin`, but is not recognized by PyCharm
trainer = BiModalGANTrainer(
*args,
discriminator_network=InterModalDiscriminator(input_dimension=250),
mode_embedding_networks=[
ResNextNormalized(out_features=250, pretrained=True),
ResNextNormalized(out_features=250, pretrained=True),
],
**kwargs
)
trainer.run()
@kwargs_parameter_dict
def train_gan_bimodal_siamese(*args, optimizer_mixin=None, **kwargs):
"""Train a bimodal GAN.
Args:
args: BiModalGANSiameseTrainer arguments
optimizer_mixin: Trainer mixin for creating Trainer classes that override the `AbstractTrainer`'s
`optimizer` property with a specific optimizer. (Default value = None)
kwargs: BiModalGANSiameseTrainer keyword arguments
*args:
**kwargs:
Returns:
"""
class BiModalGANSiameseTrainer(optimizer_mixin, AbstractBiModalGANSiameseTrainer):
""" """
_optimizer: Callable # type hinting `_optimizer` defined in `optimizer_mixin`, but is not recognized by PyCharm
trainer = BiModalGANSiameseTrainer(
*args,
discriminator_network=InterModalDiscriminator(input_dimension=250),
mode_embedding_networks=[
ResNextNormalized(out_features=250, pretrained=True),
ResNextNormalized(out_features=250, pretrained=True),
],
**kwargs
)
trainer.run() | 0.921675 | 0.389024 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``rawtags`` CharField to the feedzilla ``Post`` table."""

    def forwards(self, orm):
        """Apply the migration: create the new column."""
        # Adding field 'Post.rawtags'
        db.add_column(u'feedzilla_post', 'rawtags',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column again."""
        # Deleting field 'Post.rawtags'
        db.delete_column(u'feedzilla_post', 'rawtags')

    # Frozen ORM snapshot generated by South, describing the app's models at the
    # time this migration was created. Machine-generated; do not edit by hand.
    models = {
        u'feedzilla.feed': {
            'Meta': {'object_name': 'Feed'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'active_post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'etag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'feed_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'site_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'skip_filters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'feedzilla.filtertag': {
            'Meta': {'object_name': 'FilterTag'},
            'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'feedzilla.filterword': {
            'Meta': {'object_name': 'FilterWord'},
            'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'feedzilla.post': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Post'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': u"orm['feedzilla.Feed']"}),
            'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.TextField', [], {}),
            'rawtags': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'feedzilla.request': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Request'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'feed_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }
    complete_apps = ['feedzilla']
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Post.rawtags'
db.add_column(u'feedzilla_post', 'rawtags',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Post.rawtags'
db.delete_column(u'feedzilla_post', 'rawtags')
models = {
u'feedzilla.feed': {
'Meta': {'object_name': 'Feed'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'etag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'site_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'skip_filters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'feedzilla.filtertag': {
'Meta': {'object_name': 'FilterTag'},
'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'feedzilla.filterword': {
'Meta': {'object_name': 'FilterWord'},
'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'feedzilla.post': {
'Meta': {'ordering': "['-created']", 'object_name': 'Post'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': u"orm['feedzilla.Feed']"}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'rawtags': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'feedzilla.request': {
'Meta': {'ordering': "['-created']", 'object_name': 'Request'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['feedzilla'] | 0.364551 | 0.104935 |
# Script for use with Gnome NetworkManager
import requests
import bs4
import sys
import os
import urllib.parse
import re
def connect_oebb():
    """Log in to the OEBB on-board Wi-Fi captive portal.

    Fetches Firefox's captive-portal probe page; if the response is intercepted
    by a portal, parses the intercepting login form and submits it with its
    pre-filled values.
    """
    sess = requests.Session()
    portal_html = sess.get("http://detectportal.firefox.com").text
    if portal_html.startswith("success"):
        # Probe came through unmodified: no captive portal, nothing to do.
        return
    tree = bs4.BeautifulSoup(portal_html, "html.parser")
    postdata = {}
    for inp in tree.find_all("input"):
        if "name" not in inp.attrs:
            continue
        # Use `.get`: inputs (e.g. unchecked checkboxes) may carry no `value`
        # attribute, which previously raised KeyError here.
        postdata[inp["name"]] = inp.get("value", "")
    # Assumes the form's `action` is an absolute URL — TODO confirm for this portal.
    action_url = tree.find("form")["action"]
    sess.post(action_url, data=postdata)
# Pre-serialized request body for the station portal's Ajax endpoint: a
# URL-encoded JSON payload performing a "one click login" form validation
# (profiles_id=2, policy accepted, submit_login=Login).
POST_DATA = 'request=%7B%22model%22%3A%22customers%22%2C%22method%22%3A%22loginOverLoginModule%22%2C%22formName%22%3A%22login_oneclicklogin%22%2C%22formData%22%3A%7B%22profiles_id%22%3A%222%22%2C%22policy_1%22%3A1%2C%22submit_login%22%3A%22Login%22%7D%2C%22requestType%22%3A%22formValidation%22%2C%22params%22%3A%7B%22formID%22%3A%22formLoginOneClickLogin_6%22%2C%22data%22%3A%7B%22profiles_id%22%3A%222%22%2C%22policy_1%22%3A1%2C%22submit_login%22%3A%22Login%22%7D%7D%2C%22countPageImpression%22%3Atrue%7D'
# Portal Ajax service that hands out one-click login credentials.
AJAX_URL = "https://portal-wab.oebb.at/Ajax/service/"
# Endpoint the obtained credentials are finally posted to.
LOGIN_URL = "https://wab.oebb.at/login"
def connect_station():
    """Log in to the OEBB station Wi-Fi ("wab") captive portal.

    Scrapes the per-access-point portal URL out of the login page's inline
    JavaScript, requests one-click login credentials from the portal's Ajax
    service, and posts them to the login endpoint.

    :raises RuntimeError: if the portal URL cannot be found on the login page.
    """
    sess = requests.Session()
    sess.headers["User-Agent"] = "Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0"
    wab = sess.get("https://wab.oebb.at/login")
    ap_mac_url = re.search(r"https://'\+getHostname\(\)\+'(/.+)';", wab.text)
    if ap_mac_url is None:
        # Page layout changed or we are already online: fail with a clear
        # message instead of an AttributeError on `.group(1)` below.
        raise RuntimeError("Could not locate the portal URL on the wab.oebb.at login page")
    portal_url = "https://portal-wab.oebb.at" + ap_mac_url.group(1)
    # Visiting the portal establishes the session cookies; the body is unused.
    portal = sess.get(portal_url)
    login_req = sess.post(AJAX_URL, data=POST_DATA, headers={
        'Accept': '*/*',
        'Accept-Language': 'de-AT,en-US;q=0.7,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Origin': 'https://portal-wab.oebb.at',
        'Connection': 'keep-alive',
        'Referer': 'https://portal-wab.oebb.at/customer/login'
    })
    login_resp = login_req.json()
    # The Ajax response carries throw-away credentials for the real login form.
    login_data = urllib.parse.urlencode({
        "username": login_resp["result"]["loginProcess"]["username"],
        "password": login_resp["result"]["loginProcess"]["password"]})
    sess.post(LOGIN_URL, data=login_data)
# NetworkManager dispatcher entry point: argv[1] is the interface, argv[2] the
# action. Guard both the argv length (the script may be invoked manually) and
# the CONNECTION_ID env var (not set for every dispatcher event).
if len(sys.argv) > 2 and sys.argv[2] == "up":
    if os.environ.get("CONNECTION_ID") == "OEBB":
        connect_oebb()
    elif os.environ.get("CONNECTION_ID") == "OEBB-station":
        connect_station()
# Script for use with Gnome NetworkManager
import requests
import bs4
import sys
import os
import urllib.parse
import re
def connect_oebb():
sess = requests.Session()
portal_html = sess.get("http://detectportal.firefox.com").text
if portal_html.startswith("success"):
return
tree = bs4.BeautifulSoup(portal_html, "html.parser")
postdata = {}
for inp in tree.find_all("input"):
if not "name" in inp.attrs:
continue
postdata[inp["name"]] = inp["value"]
action_url = tree.find("form")["action"]
sess.post(action_url, data=postdata)
POST_DATA = 'request=%7B%22model%22%3A%22customers%22%2C%22method%22%3A%22loginOverLoginModule%22%2C%22formName%22%3A%22login_oneclicklogin%22%2C%22formData%22%3A%7B%22profiles_id%22%3A%222%22%2C%22policy_1%22%3A1%2C%22submit_login%22%3A%22Login%22%7D%2C%22requestType%22%3A%22formValidation%22%2C%22params%22%3A%7B%22formID%22%3A%22formLoginOneClickLogin_6%22%2C%22data%22%3A%7B%22profiles_id%22%3A%222%22%2C%22policy_1%22%3A1%2C%22submit_login%22%3A%22Login%22%7D%7D%2C%22countPageImpression%22%3Atrue%7D'
AJAX_URL = "https://portal-wab.oebb.at/Ajax/service/"
LOGIN_URL = "https://wab.oebb.at/login"
def connect_station():
sess = requests.Session()
sess.headers["User-Agent"] = "Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0"
wab = sess.get("https://wab.oebb.at/login")
ap_mac_url = re.search(r"https://'\+getHostname\(\)\+'(/.+)';", wab.text)
portal_url = "https://portal-wab.oebb.at" + ap_mac_url.group(1)
portal = sess.get(portal_url)
login_req = sess.post(AJAX_URL, data=POST_DATA, headers={
'Accept': '*/*',
'Accept-Language': 'de-AT,en-US;q=0.7,en;q=0.3',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'https://portal-wab.oebb.at',
'Connection': 'keep-alive',
'Referer': 'https://portal-wab.oebb.at/customer/login'
})
login_resp = login_req.json()
login_data = urllib.parse.urlencode({
"username": login_resp["result"]["loginProcess"]["username"],
"password": login_resp["result"]["loginProcess"]["password"]})
sess.post(LOGIN_URL, data=login_data)
if sys.argv[2] == "up":
if os.environ["CONNECTION_ID"] == "OEBB":
connect_oebb()
elif os.environ["CONNECTION_ID"] == "OEBB-station":
connect_station() | 0.142351 | 0.114864 |
from typing import Optional
from ldap3 import Connection, Server, AUTO_BIND_TLS_BEFORE_BIND, SUBTREE, AUTO_BIND_NO_TLS, AUTO_BIND_NONE
from .. import APP
from ..logging import AUTH_LOGGER
# Shared LDAP server handle, configured once at import time from the app config.
server: Server = Server(APP.config["LDAP_URI"], port=APP.config["LDAP_PORT"], use_ssl=APP.config["LDAP_SSL"])
def get_auto_bind() -> str:
    """Return the ldap3 auto-bind mode matching the configured STARTTLS setting."""
    if APP.config["LDAP_START_TLS"]:
        return AUTO_BIND_TLS_BEFORE_BIND
    return AUTO_BIND_NO_TLS
def find_user(username: str) -> Optional[str]:
    """
    Find the user identified by the given username.
    Returns the distinguished name (dn) of the entry or None if none was found.
    """
    # Escaping prevents LDAP filter injection (RFC 4515): the username is
    # user-supplied and is substituted into the search filter below.
    from ldap3.utils.conv import escape_filter_chars
    user = None
    password = None
    if not APP.config["LDAP_ANONYMOUS_BIND"]:
        # Bind with the configured system account when anonymous search is disabled.
        user = APP.config["LDAP_SYSTEM_USER_DN"]
        password = APP.config["LDAP_SYSTEM_USER_PW"]
    user_filter = APP.config["LOGIN_SEARCH_USER_FILTER"]
    user_filter = user_filter.replace("%", escape_filter_chars(username))
    with Connection(server, auto_bind=get_auto_bind(), read_only=True, user=user, password=password) as conn:
        if not conn.search(APP.config["LDAP_USER_SEARCH_BASE"], user_filter, search_scope=SUBTREE):
            AUTH_LOGGER.info("Cannot find a user matching the name %s.", username)
            return None
        # Take the (last) matching entry's dn; the filter is expected to be unique.
        return conn.response.pop()["dn"]
def generate_user_dn(username: str) -> str:
    """
    Generate the user dn based on the configured pattern.
    Returns the distinguished name (dn) of the entry.
    """
    # NOTE(review): the username is substituted verbatim into the DN pattern;
    # confirm upstream validation if usernames can contain DN special characters.
    dn_pattern = APP.config["LOGIN_USER_DN_PATTERN"]
    return dn_pattern.replace("%", username)
def verify_bind(username: str, password: str) -> Optional[str]:
    """
    Verify that the given user can bind to the ldap server with the given password.
    Therefore this can be used to check the password of a user.
    If bind is successful returns the user dn, otherwise None
    """
    if not password:
        # An empty password triggers an *unauthenticated* bind on many LDAP
        # servers (RFC 4513 §5.1.2), which would succeed without checking any
        # credentials — i.e. a login bypass. Reject it outright.
        AUTH_LOGGER.info("Rejected empty password for user: %s.", username)
        return None
    user_dn: str
    if APP.config["LOGIN_SEARCH_USER"]:
        result = find_user(username)
        if not result:
            return None
        user_dn = result
    else:
        user_dn = generate_user_dn(username)
    # Bind manually (AUTO_BIND_NONE) so STARTTLS can be issued first if configured.
    with Connection(server, auto_bind=AUTO_BIND_NONE, read_only=True, user=user_dn, password=password) as conn:
        if APP.config["LDAP_START_TLS"]:
            conn.start_tls()
        if not conn.bind():
            AUTH_LOGGER.info("User failed to log in: %s.", username)
            return None
        return user_dn
def check_user_matches_filter(user_dn: str, password: str, ldap_filter: str) -> bool:
    """
    Check whether a user matches a ldap filter.
    Pass the user dn, the password for the user and the ldap_filter as parameters.
    Returns True if the given user matches the filter
    """
    with Connection(server, auto_bind=get_auto_bind(), read_only=True, user=user_dn, password=password) as conn:
        found = conn.search(APP.config["LDAP_USER_SEARCH_BASE"], ldap_filter, search_scope=SUBTREE)
        if not found:
            return False
        # Match if any entry returned by the filter is the user's own dn.
        return any(entry["dn"] == user_dn for entry in conn.response)
from ldap3 import Connection, Server, AUTO_BIND_TLS_BEFORE_BIND, SUBTREE, AUTO_BIND_NO_TLS, AUTO_BIND_NONE
from .. import APP
from ..logging import AUTH_LOGGER
server: Server = Server(APP.config["LDAP_URI"], port=APP.config["LDAP_PORT"], use_ssl=APP.config["LDAP_SSL"])
def get_auto_bind() -> str:
auto_bind = AUTO_BIND_NO_TLS
if APP.config["LDAP_START_TLS"]:
auto_bind = AUTO_BIND_TLS_BEFORE_BIND
return auto_bind
def find_user(username: str) -> Optional[str]:
"""
Find the user identified by the given username.
Returns the distinguished name (dn) of the entry or None if none was found.
"""
user = None
password = None
if not APP.config["LDAP_ANONYMOUS_BIND"]:
user = APP.config["LDAP_SYSTEM_USER_DN"]
password = APP.config["LDAP_SYSTEM_USER_PW"]
user_filter = APP.config["LOGIN_SEARCH_USER_FILTER"]
user_filter = user_filter.replace("%", username)
with Connection(server, auto_bind=get_auto_bind(), read_only=True, user=user, password=password) as conn:
if not conn.search(APP.config["LDAP_USER_SEARCH_BASE"], user_filter, search_scope=SUBTREE):
AUTH_LOGGER.info("Cannot find a user matching the name %s.", username)
return None
return conn.response.pop()["dn"]
def generate_user_dn(username: str) -> str:
"""
Generate the user dn based on the configured pattern.
Returns the distinguished name (dn) of the entry.
"""
return APP.config["LOGIN_USER_DN_PATTERN"].replace("%", username)
def verify_bind(username: str, password: str) -> Optional[str]:
    """
    Verify that the given user can bind to the ldap server with the given password.
    Therefore this can be used to check the password of a user.
    If bind is successful returns the user dn, otherwise None.
    """
    user_dn: str
    # Resolve the dn either by searching the directory or by substituting the
    # username into the configured dn pattern, depending on configuration.
    if APP.config["LOGIN_SEARCH_USER"]:
        result = find_user(username)
        if not result:
            return None
        user_dn = result
    else:
        user_dn = generate_user_dn(username)
    # Bind manually (auto_bind disabled) so START_TLS can be negotiated first.
    with Connection(server, auto_bind=AUTO_BIND_NONE, read_only=True, user=user_dn, password=password) as conn:
        if APP.config["LDAP_START_TLS"]:
            conn.start_tls()
        if not conn.bind():
            AUTH_LOGGER.info("User failed to log in: %s.", username)
            return None
        return user_dn
def check_user_matches_filter(user_dn: str, password: str, ldap_filter: str) -> bool:
    """
    Check whether a user matches a ldap filter.
    Pass the user dn, the password for the user and the ldap_filter as parameters.
    Returns True if the given user matches the filter
    """
    with Connection(server, auto_bind=get_auto_bind(), read_only=True, user=user_dn, password=password) as conn:
        found = conn.search(APP.config["LDAP_USER_SEARCH_BASE"], ldap_filter, search_scope=SUBTREE)
        if not found:
            return False
        matching_dns = [entry["dn"] for entry in conn.response]
        return user_dn in matching_dns
from typing import Optional
# Material-for-MkDocs icon shortcodes used in the generated download links.
windows_icon = ":fontawesome-brands-windows:"
macos_icon = ":fontawesome-brands-apple:"
linux_icon = ":fontawesome-brands-linux:"
def download_link(repo: str, artifact: str, classifier: str, packaging: str, version="LATEST", group="org.metaborg"):
    """Build a Nexus Maven redirect URL for the artifact with the given coordinates in *repo*."""
    base = "https://artifacts.metaborg.org/service/local/artifact/maven/redirect"
    query = f"r={repo}&g={group}&a={artifact}&c={classifier}&p={packaging}&v={version}"
    return f"{base}?{query}"
def eclipse_lwb_download_link(repo: str, variant: str, version: str):
    """Download link for the Spoofax 3 Eclipse LWB repository zip for one platform *variant*."""
    classifier = f"spoofax3-{variant}"
    return download_link(repo, "spoofax.lwb.eclipse.repository", classifier, "zip", version)
def eclipse_lwb_download(icon: str, name: str, repo: str, variant: str, version: str):
    """Markdown link labelled with *icon* and *name* pointing at the LWB download for *variant*."""
    link = eclipse_lwb_download_link(repo, variant, version)
    return f"{icon} [{name}]({link})"
def fill_env_with_release(env, env_version: str, version: str, download_version: str, date: Optional[str]):
    """
    Register one release under env.variables.release[env_version].

    The entry carries the release date and version plus Markdown download
    links (with and without an embedded JVM, per platform) and the URL of the
    unzipped Eclipse LWB repository zip.
    """
    # Snapshot builds are served from the "snapshots" Nexus repository.
    repo = "snapshots" if "SNAPSHOT" in version else "releases"
    env.variables.release[env_version] = {
        "date": date,
        "version": version,
        "lwb": {"eclipse": {
            "install": {
                # Archives bundling a JVM, one per supported platform.
                "jvm": {
                    "windows": eclipse_lwb_download(windows_icon, "Windows 64-bit with embedded JVM", repo,
                                                    "win32-x86_64-jvm", download_version),
                    "macos": eclipse_lwb_download(macos_icon, "macOS 64-bit with embedded JVM", repo,
                                                  "macosx-x86_64-jvm",
                                                  download_version),
                    "linux": eclipse_lwb_download(linux_icon, "Linux 64-bit with embedded JVM", repo,
                                                  "linux-x86_64-jvm",
                                                  download_version),
                },
                # Archives that rely on a locally installed JVM.
                "windows": eclipse_lwb_download(windows_icon, "Windows 64-bit", repo, "win32-x86_64", download_version),
                "macos": eclipse_lwb_download(macos_icon, "macOS 64-bit", repo, "macosx-x86_64", download_version),
                "linux": eclipse_lwb_download(linux_icon, "Linux 64-bit", repo, "linux-x86_64", download_version),
            },
            "repository": f"https://artifacts.metaborg.org/content/unzip/releases-unzipped/org/metaborg/spoofax.lwb.eclipse.repository/{version}/spoofax.lwb.eclipse.repository-{version}.zip-unzip/"
        }}
    }
# Release history, newest first: version -> release date (DD-MM-YYYY).
# Insertion order matters: define_env takes the first entry as the latest.
release_versions = {
    "0.14.0": "11-10-2021",
    "0.13.0": "01-10-2021",
    "0.12.1": "24-09-2021",
    "0.12.0": "22-09-2021",
    "0.11.13": "22-09-2021",
    "0.11.12": "20-09-2021",
    "0.11.11": "17-09-2021",
    "0.11.10": "15-09-2021",
    "0.11.9": "13-09-2021",
    "0.11.8": "13-09-2021",
    "0.11.7": "08-09-2021",
    "0.11.6": "07-09-2021",
    "0.11.5": "06-09-2021",
    "0.11.4": "03-09-2021",
    "0.11.3": "03-09-2021",
    "0.11.2": "03-09-2021",
    "0.11.1": "02-09-2021",
    "0.11.0": "31-08-2021",
    "0.10.0": "25-08-2021",
    "0.9.0": "14-07-2021",
    "0.8.0": "28-05-2021",
}
# Maven version string of the rolling development build.
development_version = "develop-SNAPSHOT"
def define_env(env):
    """mkdocs-macros entry point: expose OS labels and per-release download data to the docs."""
    env.variables.os = {
        "windows": f"{windows_icon} Windows",
        "linux": f"{linux_icon} Linux",
        "macos": f"{macos_icon} macOS",
    }
    env.variables.release = {}
    for version, date in release_versions.items():
        fill_env_with_release(env, version, version, version, date)
    # "rel" aliases the newest release; dicts preserve insertion order, so the
    # first release_versions entry is the latest one.
    latest_rel_version, latest_rel_date = next(iter(release_versions.items()))
    fill_env_with_release(env, "rel", latest_rel_version, latest_rel_version, latest_rel_date)
    # "dev" points at the rolling snapshot, downloaded via Nexus "LATEST".
    # (Fixed: dataset-separator text was fused onto this line; also use the
    # development_version constant instead of repeating the literal.)
    fill_env_with_release(env, "dev", development_version, "LATEST", None)
# Material-for-MkDocs icon shortcodes used in the generated download links.
windows_icon = ":fontawesome-brands-windows:"
macos_icon = ":fontawesome-brands-apple:"
linux_icon = ":fontawesome-brands-linux:"
def download_link(repo: str, artifact: str, classifier: str, packaging: str, version="LATEST", group="org.metaborg"):
    """Return the Nexus Maven redirect URL for the given artifact coordinates."""
    params = (("r", repo), ("g", group), ("a", artifact), ("c", classifier), ("p", packaging), ("v", version))
    query = "&".join(f"{key}={value}" for key, value in params)
    return "https://artifacts.metaborg.org/service/local/artifact/maven/redirect?" + query
def eclipse_lwb_download_link(repo: str, variant: str, version: str):
    """URL of the Spoofax 3 Eclipse LWB repository zip for the given platform *variant*."""
    return download_link(repo, "spoofax.lwb.eclipse.repository", f"spoofax3-{variant}", "zip", version=version)
def eclipse_lwb_download(icon: str, name: str, repo: str, variant: str, version: str):
    """Render a Markdown download link for one platform variant, prefixed with its OS icon."""
    url = eclipse_lwb_download_link(repo, variant, version)
    label = f"[{name}]({url})"
    return f"{icon} {label}"
def fill_env_with_release(env, env_version: str, version: str, download_version: str, date: Optional[str]):
    """
    Register one release under env.variables.release[env_version].

    The entry carries the release date and version plus Markdown download
    links (with and without an embedded JVM, per platform) and the URL of the
    unzipped Eclipse LWB repository zip.
    """
    # Snapshot builds are served from the "snapshots" Nexus repository.
    repo = "snapshots" if "SNAPSHOT" in version else "releases"
    env.variables.release[env_version] = {
        "date": date,
        "version": version,
        "lwb": {"eclipse": {
            "install": {
                # Archives bundling a JVM, one per supported platform.
                "jvm": {
                    "windows": eclipse_lwb_download(windows_icon, "Windows 64-bit with embedded JVM", repo,
                                                    "win32-x86_64-jvm", download_version),
                    "macos": eclipse_lwb_download(macos_icon, "macOS 64-bit with embedded JVM", repo,
                                                  "macosx-x86_64-jvm",
                                                  download_version),
                    "linux": eclipse_lwb_download(linux_icon, "Linux 64-bit with embedded JVM", repo,
                                                  "linux-x86_64-jvm",
                                                  download_version),
                },
                # Archives that rely on a locally installed JVM.
                "windows": eclipse_lwb_download(windows_icon, "Windows 64-bit", repo, "win32-x86_64", download_version),
                "macos": eclipse_lwb_download(macos_icon, "macOS 64-bit", repo, "macosx-x86_64", download_version),
                "linux": eclipse_lwb_download(linux_icon, "Linux 64-bit", repo, "linux-x86_64", download_version),
            },
            "repository": f"https://artifacts.metaborg.org/content/unzip/releases-unzipped/org/metaborg/spoofax.lwb.eclipse.repository/{version}/spoofax.lwb.eclipse.repository-{version}.zip-unzip/"
        }}
    }
# Release history, newest first: version -> release date (DD-MM-YYYY).
# Insertion order matters: define_env takes the first entry as the latest.
release_versions = {
    "0.14.0": "11-10-2021",
    "0.13.0": "01-10-2021",
    "0.12.1": "24-09-2021",
    "0.12.0": "22-09-2021",
    "0.11.13": "22-09-2021",
    "0.11.12": "20-09-2021",
    "0.11.11": "17-09-2021",
    "0.11.10": "15-09-2021",
    "0.11.9": "13-09-2021",
    "0.11.8": "13-09-2021",
    "0.11.7": "08-09-2021",
    "0.11.6": "07-09-2021",
    "0.11.5": "06-09-2021",
    "0.11.4": "03-09-2021",
    "0.11.3": "03-09-2021",
    "0.11.2": "03-09-2021",
    "0.11.1": "02-09-2021",
    "0.11.0": "31-08-2021",
    "0.10.0": "25-08-2021",
    "0.9.0": "14-07-2021",
    "0.8.0": "28-05-2021",
}
# Maven version string of the rolling development build.
development_version = "develop-SNAPSHOT"
def define_env(env):
    """mkdocs-macros entry point: expose OS labels and per-release download data to the docs."""
    env.variables.os = {
        "windows": f"{windows_icon} Windows",
        "linux": f"{linux_icon} Linux",
        "macos": f"{macos_icon} macOS",
    }
    env.variables.release = {}
    for version, date in release_versions.items():
        fill_env_with_release(env, version, version, version, date)
    # "rel" aliases the newest release; dicts preserve insertion order, so the
    # first release_versions entry is the latest one.
    latest_rel_version, latest_rel_date = next(iter(release_versions.items()))
    fill_env_with_release(env, "rel", latest_rel_version, latest_rel_version, latest_rel_date)
    # "dev" points at the rolling snapshot, downloaded via Nexus "LATEST".
    # (Fixed: dataset-separator text was fused onto this line; also use the
    # development_version constant instead of repeating the literal.)
    fill_env_with_release(env, "dev", development_version, "LATEST", None)
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
NewType,
Sequence,
Tuple,
Type,
TypeVar,
TYPE_CHECKING,
Union,
)
from eth_typing import (
Address,
BlockNumber,
Hash32,
HexStr,
)
from mypy_extensions import (
TypedDict,
)
if TYPE_CHECKING:
from eth.rlp.transactions import ( # noqa: F401
BaseTransaction
)
from eth._utils.spoof import ( # noqa: F401
SpoofTransaction
)
from eth.vm.base import ( # noqa: F401
BaseVM
)
# TODO: Move into eth_typing

# Decoded account fields (integers / raw bytes).
AccountDetails = TypedDict('AccountDetails',
                           {'balance': int,
                            'nonce': int,
                            'code': bytes,
                            'storage': Dict[int, int]
                            })
# Full account mapping keyed by address.
AccountState = Dict[Address, AccountDetails]
# Rows of (address, field, value, value) describing per-account differences.
AccountDiff = Iterable[Tuple[Address, str, Union[int, bytes], Union[int, bytes]]]
BaseOrSpoofTransaction = Union['BaseTransaction', 'SpoofTransaction']
# Account state either as a mapping or as (address, fields) pairs.
GeneralState = Union[
    AccountState,
    List[Tuple[Address, Dict[str, Union[int, bytes, Dict[int, int]]]]]
]
GenesisDict = Dict[str, Union[int, BlockNumber, bytes, Hash32]]
BytesOrView = Union[bytes, memoryview]
# Converts an arbitrary-keyed dict into canonical string-keyed form.
Normalizer = Callable[[Dict[Any, Any]], Dict[str, Any]]
# Hex-string encoded account fields, before normalization to AccountDetails.
RawAccountDetails = TypedDict('RawAccountDetails',
                              {'balance': HexStr,
                               'nonce': HexStr,
                               'code': HexStr,
                               'storage': Dict[HexStr, HexStr]
                               })
TransactionDict = TypedDict('TransactionDict',
                            {'nonce': int,
                             'gasLimit': int,
                             'gasPrice': int,
                             'to': Address,
                             'value': int,
                             'data': bytes,
                             'secretKey': bytes,
                             })
TransactionNormalizer = Callable[[TransactionDict], TransactionDict]
# A fork: the block number it activates at and the VM class to use from then on.
VMFork = Tuple[BlockNumber, Type['BaseVM']]
VMConfiguration = Sequence[VMFork]
# Signature triple (v, r, s).
VRS = NewType("VRS", Tuple[int, int, int])
IntConvertible = Union[int, bytes, HexStr, str]
TFunc = TypeVar('TFunc')


class StaticMethod(Generic[TFunc]):
    """
    A property class purely to convince mypy to let us assign a function to an
    instance variable. See more at: https://github.com/python/mypy/issues/708#issuecomment-405812141
    """
    # NOTE: the callable is stored on the descriptor object itself, so it is
    # shared by every instance of the owning class.
    # (Fixed: dataset-separator text was fused onto the last line.)

    def __get__(self, oself: Any, owner: Any) -> TFunc:
        return self._func

    def __set__(self, oself: Any, value: TFunc) -> None:
        self._func = value
Any,
Callable,
Dict,
Generic,
Iterable,
List,
NewType,
Sequence,
Tuple,
Type,
TypeVar,
TYPE_CHECKING,
Union,
)
from eth_typing import (
Address,
BlockNumber,
Hash32,
HexStr,
)
from mypy_extensions import (
TypedDict,
)
if TYPE_CHECKING:
from eth.rlp.transactions import ( # noqa: F401
BaseTransaction
)
from eth._utils.spoof import ( # noqa: F401
SpoofTransaction
)
from eth.vm.base import ( # noqa: F401
BaseVM
)
# TODO: Move into eth_typing

# Decoded account fields (integers / raw bytes).
AccountDetails = TypedDict('AccountDetails',
                           {'balance': int,
                            'nonce': int,
                            'code': bytes,
                            'storage': Dict[int, int]
                            })
# Full account mapping keyed by address.
AccountState = Dict[Address, AccountDetails]
# Rows of (address, field, value, value) describing per-account differences.
AccountDiff = Iterable[Tuple[Address, str, Union[int, bytes], Union[int, bytes]]]
BaseOrSpoofTransaction = Union['BaseTransaction', 'SpoofTransaction']
# Account state either as a mapping or as (address, fields) pairs.
GeneralState = Union[
    AccountState,
    List[Tuple[Address, Dict[str, Union[int, bytes, Dict[int, int]]]]]
]
GenesisDict = Dict[str, Union[int, BlockNumber, bytes, Hash32]]
BytesOrView = Union[bytes, memoryview]
# Converts an arbitrary-keyed dict into canonical string-keyed form.
Normalizer = Callable[[Dict[Any, Any]], Dict[str, Any]]
# Hex-string encoded account fields, before normalization to AccountDetails.
RawAccountDetails = TypedDict('RawAccountDetails',
                              {'balance': HexStr,
                               'nonce': HexStr,
                               'code': HexStr,
                               'storage': Dict[HexStr, HexStr]
                               })
TransactionDict = TypedDict('TransactionDict',
                            {'nonce': int,
                             'gasLimit': int,
                             'gasPrice': int,
                             'to': Address,
                             'value': int,
                             'data': bytes,
                             'secretKey': bytes,
                             })
TransactionNormalizer = Callable[[TransactionDict], TransactionDict]
# A fork: the block number it activates at and the VM class to use from then on.
VMFork = Tuple[BlockNumber, Type['BaseVM']]
VMConfiguration = Sequence[VMFork]
# Signature triple (v, r, s).
VRS = NewType("VRS", Tuple[int, int, int])
IntConvertible = Union[int, bytes, HexStr, str]
TFunc = TypeVar('TFunc')


class StaticMethod(Generic[TFunc]):
    """
    A property class purely to convince mypy to let us assign a function to an
    instance variable. See more at: https://github.com/python/mypy/issues/708#issuecomment-405812141
    """
    # NOTE: the callable is stored on the descriptor object itself, so it is
    # shared by every instance of the owning class.
    # (Fixed: dataset-separator text was fused onto the last line.)

    def __get__(self, oself: Any, owner: Any) -> TFunc:
        return self._func

    def __set__(self, oself: Any, value: TFunc) -> None:
        self._func = value
import asyncio
import csv
import os
import shlex
import sys
import tempfile
from datetime import datetime, timezone
from subprocess import CalledProcessError
from typing import Dict, List, NamedTuple, Optional
import boto3
from mypy_boto3_ec2.service_resource import Instance
from mypy_boto3_ec2.type_defs import (
InstanceNetworkInterfaceSpecificationTypeDef,
InstanceTypeDef,
RunInstancesRequestRequestTypeDef,
)
from prettytable import PrettyTable
from materialize import git, spawn, ssh, ui
# Progress narrator; messages are prefixed with "scratch> ".
SPEAKER = ui.speaker("scratch> ")
# Materialize repository root (the environment variable is required).
ROOT = os.environ["MZ_ROOT"]
def tags(i: Instance) -> Dict[str, str]:
    """Return the instance's tags as a plain Key -> Value dict (empty when untagged)."""
    raw = i.tags
    return {tag["Key"]: tag["Value"] for tag in raw} if raw else {}
def name(tags: Dict[str, str]) -> Optional[str]:
    """Return the instance's display name from its tag dict, or None if unnamed."""
    display_name = tags.get("Name")
    return display_name
def launched_by(tags: Dict[str, str]) -> Optional[str]:
    """Return the "LaunchedBy" tag value, or None if the tag is absent."""
    owner = tags.get("LaunchedBy")
    return owner
def delete_after(tags: Dict[str, str]) -> Optional[datetime]:
    """Parse the "scratch-delete-after" tag (unix seconds) into a datetime, or None if absent/empty."""
    raw = tags.get("scratch-delete-after")
    if raw:
        return datetime.fromtimestamp(int(float(raw)))
    return None
def print_instances(ists: List[Instance], format: str) -> None:
    """Print the given instances to stdout in either "table" or "csv" format."""
    field_names = [
        "Name",
        "Instance ID",
        "Public IP Address",
        "Private IP Address",
        "Launched By",
        "Delete After",
        "State",
    ]
    # One row per instance; tag-derived columns use the helper functions above.
    rows = [
        [
            name(tags),
            i.instance_id,
            i.public_ip_address,
            i.private_ip_address,
            launched_by(tags),
            delete_after(tags),
            i.state["Name"],
        ]
        for (i, tags) in [(i, tags(i)) for i in ists]
    ]
    if format == "table":
        pt = PrettyTable()
        pt.field_names = field_names
        pt.add_rows(rows)
        print(pt)
    elif format == "csv":
        w = csv.writer(sys.stdout)
        w.writerow(field_names)
        w.writerows(rows)
    else:
        raise RuntimeError("Unknown format passed to print_instances")
def launch(
    *,
    key_name: Optional[str],
    instance_type: str,
    ami: str,
    tags: Dict[str, str],
    display_name: Optional[str] = None,
    subnet_id: Optional[str] = None,
    size_gb: int,
    security_group_id: str,
    instance_profile: Optional[str],
    nonce: str,
    delete_after: int,
    git_rev: str,
) -> Instance:
    """Launch and configure an ec2 instance with the given properties."""
    # Record ownership and lifetime metadata in the instance tags.
    # NOTE(review): git_rev is unused here — the recorded ref is git.describe();
    # confirm whether that is intended.
    if display_name:
        tags["Name"] = display_name
    tags["scratch-delete-after"] = str(delete_after)
    tags["nonce"] = nonce
    tags["git_ref"] = git.describe()
    network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {
        "AssociatePublicIpAddress": True,
        "DeviceIndex": 0,
        "Groups": [security_group_id],
    }
    if subnet_id:
        network_interface["SubnetId"] = subnet_id
    SPEAKER(f"launching instance {display_name or '(unnamed)'}")
    # The provisioning script is passed as EC2 user data and runs on first boot.
    with open(ROOT + "/misc/load-tests/provision.bash") as f:
        provisioning_script = f.read()
    kwargs: RunInstancesRequestRequestTypeDef = {
        "MinCount": 1,
        "MaxCount": 1,
        "ImageId": ami,
        "InstanceType": instance_type,  # type: ignore
        "UserData": provisioning_script,
        "TagSpecifications": [
            {
                "ResourceType": "instance",
                "Tags": [{"Key": k, "Value": v} for (k, v) in tags.items()],
            }
        ],
        "NetworkInterfaces": [network_interface],
        "BlockDeviceMappings": [
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {
                    "VolumeSize": size_gb,
                    "VolumeType": "gp3",
                },
            }
        ],
    }
    if key_name:
        kwargs["KeyName"] = key_name
    if instance_profile:
        kwargs["IamInstanceProfile"] = {"Name": instance_profile}
    i = boto3.resource("ec2").create_instances(**kwargs)[0]
    print(i.tags)
    return i
class CommandResult(NamedTuple):
    """Outcome of an SSM RunShellScript invocation."""
    status: str  # SSM invocation status (anything but "InProgress" is terminal in run_ssm)
    stdout: str  # captured standard output
    stderr: str  # captured standard error
async def run_ssm(i: Instance, commands: List[str], timeout: int = 60) -> CommandResult:
    """
    Run shell commands on instance *i* via AWS SSM and wait for the result.

    Polls every 5 seconds until the invocation leaves "InProgress" or *timeout*
    seconds elapse; raises RuntimeError on timeout.
    """
    id = boto3.client("ssm").send_command(
        InstanceIds=[i.instance_id],
        DocumentName="AWS-RunShellScript",
        Parameters={"commands": commands},
    )["Command"]["CommandId"]
    async for remaining in ui.async_timeout_loop(timeout, 5):
        invocation_dne = boto3.client("ssm").exceptions.InvocationDoesNotExist
        SPEAKER(f"Waiting for commands to finish running: {remaining}s remaining")
        try:
            result = boto3.client("ssm").get_command_invocation(
                CommandId=id, InstanceId=i.instance_id
            )
        except invocation_dne:
            # The invocation can take a moment to become visible; keep polling.
            continue
        if result["Status"] != "InProgress":
            return CommandResult(
                status=result["Status"],
                stdout=result["StandardOutputContent"],
                stderr=result["StandardErrorContent"],
            )
    raise RuntimeError(
        f"Command {commands} on instance {i} did not run in a reasonable amount of time"
    )
async def setup(
    i: Instance,
    subnet_id: Optional[str],
    local_pub_key: str,
    identity_file: str,
    git_rev: str,
) -> None:
    """
    Wait for *i* to come up, authorize our SSH key via SSM, wait for the /DONE
    marker to appear, then push this repository to the instance (mkrepo).

    NOTE(review): subnet_id is accepted but unused here — confirm.
    """
    def is_ready(i: Instance) -> bool:
        return bool(
            i.public_ip_address and i.state and i.state.get("Name") == "running"
        )
    done = False
    async for remaining in ui.async_timeout_loop(60, 5):
        SPEAKER(f"Waiting for instance to become ready: {remaining}s remaining")
        i.reload()
        if is_ready(i):
            done = True
            break
    if not done:
        raise RuntimeError(
            f"Instance {i} did not become ready in a reasonable amount of time"
        )
    done = False
    invalid_instance = boto3.client("ssm").exceptions.InvalidInstanceId
    # Authorize our temporary key for the ubuntu user.
    commands = [
        "mkdir -p ~ubuntu/.ssh",
        f"echo {local_pub_key} >> ~ubuntu/.ssh/authorized_keys",
    ]
    # NOTE(review): debug output left in place; consider removing.
    import pprint
    print("Running commands:")
    pprint.pprint(commands)
    async for remaining in ui.async_timeout_loop(180, 5):
        try:
            await run_ssm(i, commands, 180)
            done = True
            break
        except invalid_instance:
            # Instance not yet registered with SSM; keep retrying.
            pass
    if not done:
        raise RuntimeError(f"Failed to run SSM commands on instance {i}")
    done = False
    # Wait for /DONE to exist — presumably written by the user-data
    # provisioning script when first-boot setup finishes (confirm).
    async for remaining in ui.async_timeout_loop(180, 5):
        try:
            ssh.runv(
                ["[", "-f", "/DONE", "]"],
                "ubuntu",
                i.public_ip_address,
                identity_file=identity_file,
            )
            done = True
            break
        except CalledProcessError:
            continue
    if not done:
        raise RuntimeError(
            "Instance did not finish setup in a reasonable amount of time"
        )
    mkrepo(i, identity_file, git_rev)
def mkrepo(i: Instance, identity_file: str, rev: str) -> None:
    """Create a Materialize repository on the remote ec2 instance and push the present repository to it."""
    # Initialize bare first; flipped to non-bare after the push so the
    # worktree can be checked out.
    ssh.runv(
        ["git", "init", "--bare", "/home/ubuntu/materialize/.git"],
        "ubuntu",
        i.public_ip_address,
        identity_file=identity_file,
    )
    os.chdir(ROOT)
    os.environ["GIT_SSH_COMMAND"] = f"ssh -i {identity_file}"
    head_rev = git.rev_parse(rev)
    # Fixed: the push target had been mangled to "i.<EMAIL>" (redaction
    # artifact); use the instance's public IP, as every other remote call does.
    git.push(
        f"ubuntu@{i.public_ip_address}:~/materialize/.git",
        f"refs/heads/scratch_{head_rev}",
    )
    ssh.runv(
        ["git", "-C", "/home/ubuntu/materialize", "config", "core.bare", "false"],
        "ubuntu",
        i.public_ip_address,
        identity_file=identity_file,
    )
    ssh.runv(
        ["git", "-C", "/home/ubuntu/materialize", "checkout", head_rev],
        "ubuntu",
        i.public_ip_address,
        identity_file=identity_file,
    )
class MachineDesc(NamedTuple):
    """Description of one machine to launch as part of a cluster."""
    name: str  # hostname within the cluster (written to every /etc/hosts)
    launch_script: Optional[str]  # shell command to start in the background after setup, if any
    instance_type: str  # EC2 instance type
    ami: str  # AMI id to boot
    tags: Dict[str, str]  # extra instance tags
    size_gb: int  # root volume size
async def setup_all(
    instances: List[Instance],
    subnet_id: str,
    local_pub_key: str,
    identity_file: str,
    git_rev: str,
) -> None:
    """Run setup() for every instance concurrently."""
    await asyncio.gather(
        *(setup(i, subnet_id, local_pub_key, identity_file, git_rev) for i in instances)
    )
def launch_cluster(
    descs: List[MachineDesc],
    nonce: str,
    subnet_id: str,
    key_name: Optional[str],
    security_group_id: str,
    instance_profile: Optional[str],
    extra_tags: Dict[str, str],
    delete_after: int,  # Unix timestamp.
    git_rev: str,
) -> List[Instance]:
    """Launch a cluster of instances with a given nonce"""
    instances = [
        launch(
            key_name=key_name,
            instance_type=d.instance_type,
            ami=d.ami,
            tags={**d.tags, **extra_tags},
            display_name=f"{nonce}-{d.name}",
            size_gb=d.size_gb,
            subnet_id=subnet_id,
            security_group_id=security_group_id,
            instance_profile=instance_profile,
            nonce=nonce,
            git_rev=git_rev,
            delete_after=delete_after,
        )
        for d in descs
    ]
    # Generate temporary ssh key for running commands remotely
    tmpdir = tempfile.TemporaryDirectory()
    identity_file = f"{tmpdir.name}/id_rsa"
    spawn.runv(["ssh-keygen", "-t", "rsa", "-N", "", "-f", identity_file])
    with open(f"{tmpdir.name}/id_rsa.pub") as pk:
        local_pub_key = pk.read().strip()
    # Configure all machines concurrently (key install, provisioning, repo push).
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        setup_all(instances, subnet_id, local_pub_key, identity_file, git_rev)
    )
    loop.close()
    # Let the machines resolve each other by cluster name via /etc/hosts.
    hosts_str = "".join(
        (f"{i.private_ip_address}\t{d.name}\n" for (i, d) in zip(instances, descs))
    )
    for i in instances:
        ssh.runv(
            [f"echo {shlex.quote(hosts_str)} | sudo tee -a /etc/hosts"],
            "ubuntu",
            i.public_ip_address,
            identity_file=identity_file,
        )
    # Start each machine's launch script detached in the background, with
    # output redirected to mzscratch-startup.{out,err}.
    for (i, d) in zip(instances, descs):
        if d.launch_script:
            ssh.runv(
                [
                    "cd",
                    "~/materialize",
                    ";",
                    "nohup",
                    "bash",
                    "-c",
                    shlex.quote(d.launch_script),
                    ">~/mzscratch-startup.out",
                    "2>~/mzscratch-startup.err",
                    "&",
                ],
                "ubuntu",
                i.public_ip_address,
                identity_file=identity_file,
            )
    tmpdir.cleanup()
    return instances
def whoami() -> str:
    """Return the current IAM user id (second colon-separated component of the STS caller identity)."""
    identity = boto3.client("sts").get_caller_identity()
    return identity["UserId"].split(":")[1]
def get_old_instances() -> List[InstanceTypeDef]:
    """Return all running EC2 instances whose "scratch-delete-after" tag is in the past."""
    def is_running(i: InstanceTypeDef) -> bool:
        return i["State"]["Name"] == "running"

    def is_old(i: InstanceTypeDef) -> bool:
        tags_dict = {tag["Key"]: tag["Value"] for tag in i.get("Tags", [])}
        # Renamed from "delete_after", which shadowed the module-level helper.
        expiry = tags_dict.get("scratch-delete-after")
        if expiry is None:
            # Untagged instances are never considered old.
            return False
        return datetime.now(timezone.utc).timestamp() > float(expiry)

    # (Fixed: dataset-separator text was fused onto the closing bracket line.)
    return [
        i
        for r in boto3.client("ec2").describe_instances()["Reservations"]
        for i in r["Instances"]
        if is_running(i) and is_old(i)
    ]
import csv
import os
import shlex
import sys
import tempfile
from datetime import datetime, timezone
from subprocess import CalledProcessError
from typing import Dict, List, NamedTuple, Optional
import boto3
from mypy_boto3_ec2.service_resource import Instance
from mypy_boto3_ec2.type_defs import (
InstanceNetworkInterfaceSpecificationTypeDef,
InstanceTypeDef,
RunInstancesRequestRequestTypeDef,
)
from prettytable import PrettyTable
from materialize import git, spawn, ssh, ui
# Progress narrator; messages are prefixed with "scratch> ".
SPEAKER = ui.speaker("scratch> ")
# Materialize repository root (the environment variable is required).
ROOT = os.environ["MZ_ROOT"]
def tags(i: Instance) -> Dict[str, str]:
    """Return the instance's tags as a plain Key -> Value dict (empty when untagged)."""
    if not i.tags:
        return {}
    return {t["Key"]: t["Value"] for t in i.tags}
def name(tags: Dict[str, str]) -> Optional[str]:
    """Return the instance's display name from its tag dict, or None if unnamed."""
    return tags.get("Name")
def launched_by(tags: Dict[str, str]) -> Optional[str]:
    """Return the "LaunchedBy" tag value, or None if the tag is absent."""
    return tags.get("LaunchedBy")
def delete_after(tags: Dict[str, str]) -> Optional[datetime]:
    """Parse the "scratch-delete-after" tag (unix seconds) into a datetime, or None if absent/empty."""
    unix = tags.get("scratch-delete-after")
    if not unix:
        return None
    # Tag values may carry a fractional part; truncate to whole seconds.
    unix = int(float(unix))
    return datetime.fromtimestamp(unix)
def print_instances(ists: List[Instance], format: str) -> None:
    """Print the given instances to stdout in either "table" or "csv" format."""
    field_names = [
        "Name",
        "Instance ID",
        "Public IP Address",
        "Private IP Address",
        "Launched By",
        "Delete After",
        "State",
    ]
    # One row per instance; tag-derived columns use the helper functions above.
    rows = [
        [
            name(tags),
            i.instance_id,
            i.public_ip_address,
            i.private_ip_address,
            launched_by(tags),
            delete_after(tags),
            i.state["Name"],
        ]
        for (i, tags) in [(i, tags(i)) for i in ists]
    ]
    if format == "table":
        pt = PrettyTable()
        pt.field_names = field_names
        pt.add_rows(rows)
        print(pt)
    elif format == "csv":
        w = csv.writer(sys.stdout)
        w.writerow(field_names)
        w.writerows(rows)
    else:
        raise RuntimeError("Unknown format passed to print_instances")
def launch(
    *,
    key_name: Optional[str],
    instance_type: str,
    ami: str,
    tags: Dict[str, str],
    display_name: Optional[str] = None,
    subnet_id: Optional[str] = None,
    size_gb: int,
    security_group_id: str,
    instance_profile: Optional[str],
    nonce: str,
    delete_after: int,
    git_rev: str,
) -> Instance:
    """Launch and configure an ec2 instance with the given properties."""
    # Record ownership and lifetime metadata in the instance tags.
    # NOTE(review): git_rev is unused here — the recorded ref is git.describe();
    # confirm whether that is intended.
    if display_name:
        tags["Name"] = display_name
    tags["scratch-delete-after"] = str(delete_after)
    tags["nonce"] = nonce
    tags["git_ref"] = git.describe()
    network_interface: InstanceNetworkInterfaceSpecificationTypeDef = {
        "AssociatePublicIpAddress": True,
        "DeviceIndex": 0,
        "Groups": [security_group_id],
    }
    if subnet_id:
        network_interface["SubnetId"] = subnet_id
    SPEAKER(f"launching instance {display_name or '(unnamed)'}")
    # The provisioning script is passed as EC2 user data and runs on first boot.
    with open(ROOT + "/misc/load-tests/provision.bash") as f:
        provisioning_script = f.read()
    kwargs: RunInstancesRequestRequestTypeDef = {
        "MinCount": 1,
        "MaxCount": 1,
        "ImageId": ami,
        "InstanceType": instance_type,  # type: ignore
        "UserData": provisioning_script,
        "TagSpecifications": [
            {
                "ResourceType": "instance",
                "Tags": [{"Key": k, "Value": v} for (k, v) in tags.items()],
            }
        ],
        "NetworkInterfaces": [network_interface],
        "BlockDeviceMappings": [
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {
                    "VolumeSize": size_gb,
                    "VolumeType": "gp3",
                },
            }
        ],
    }
    if key_name:
        kwargs["KeyName"] = key_name
    if instance_profile:
        kwargs["IamInstanceProfile"] = {"Name": instance_profile}
    i = boto3.resource("ec2").create_instances(**kwargs)[0]
    print(i.tags)
    return i
class CommandResult(NamedTuple):
    """Outcome of an SSM RunShellScript invocation."""
    status: str  # SSM invocation status (anything but "InProgress" is terminal in run_ssm)
    stdout: str  # captured standard output
    stderr: str  # captured standard error
async def run_ssm(i: Instance, commands: List[str], timeout: int = 60) -> CommandResult:
    """
    Run shell commands on instance *i* via AWS SSM and wait for the result.

    Polls every 5 seconds until the invocation leaves "InProgress" or *timeout*
    seconds elapse; raises RuntimeError on timeout.
    """
    id = boto3.client("ssm").send_command(
        InstanceIds=[i.instance_id],
        DocumentName="AWS-RunShellScript",
        Parameters={"commands": commands},
    )["Command"]["CommandId"]
    async for remaining in ui.async_timeout_loop(timeout, 5):
        invocation_dne = boto3.client("ssm").exceptions.InvocationDoesNotExist
        SPEAKER(f"Waiting for commands to finish running: {remaining}s remaining")
        try:
            result = boto3.client("ssm").get_command_invocation(
                CommandId=id, InstanceId=i.instance_id
            )
        except invocation_dne:
            # The invocation can take a moment to become visible; keep polling.
            continue
        if result["Status"] != "InProgress":
            return CommandResult(
                status=result["Status"],
                stdout=result["StandardOutputContent"],
                stderr=result["StandardErrorContent"],
            )
    raise RuntimeError(
        f"Command {commands} on instance {i} did not run in a reasonable amount of time"
    )
async def setup(
    i: Instance,
    subnet_id: Optional[str],
    local_pub_key: str,
    identity_file: str,
    git_rev: str,
) -> None:
    """
    Wait for *i* to come up, authorize our SSH key via SSM, wait for the /DONE
    marker to appear, then push this repository to the instance (mkrepo).

    NOTE(review): subnet_id is accepted but unused here — confirm.
    """
    def is_ready(i: Instance) -> bool:
        return bool(
            i.public_ip_address and i.state and i.state.get("Name") == "running"
        )
    done = False
    async for remaining in ui.async_timeout_loop(60, 5):
        SPEAKER(f"Waiting for instance to become ready: {remaining}s remaining")
        i.reload()
        if is_ready(i):
            done = True
            break
    if not done:
        raise RuntimeError(
            f"Instance {i} did not become ready in a reasonable amount of time"
        )
    done = False
    invalid_instance = boto3.client("ssm").exceptions.InvalidInstanceId
    # Authorize our temporary key for the ubuntu user.
    commands = [
        "mkdir -p ~ubuntu/.ssh",
        f"echo {local_pub_key} >> ~ubuntu/.ssh/authorized_keys",
    ]
    # NOTE(review): debug output left in place; consider removing.
    import pprint
    print("Running commands:")
    pprint.pprint(commands)
    async for remaining in ui.async_timeout_loop(180, 5):
        try:
            await run_ssm(i, commands, 180)
            done = True
            break
        except invalid_instance:
            # Instance not yet registered with SSM; keep retrying.
            pass
    if not done:
        raise RuntimeError(f"Failed to run SSM commands on instance {i}")
    done = False
    # Wait for /DONE to exist — presumably written by the user-data
    # provisioning script when first-boot setup finishes (confirm).
    async for remaining in ui.async_timeout_loop(180, 5):
        try:
            ssh.runv(
                ["[", "-f", "/DONE", "]"],
                "ubuntu",
                i.public_ip_address,
                identity_file=identity_file,
            )
            done = True
            break
        except CalledProcessError:
            continue
    if not done:
        raise RuntimeError(
            "Instance did not finish setup in a reasonable amount of time"
        )
    mkrepo(i, identity_file, git_rev)
def mkrepo(i: Instance, identity_file: str, rev: str) -> None:
    """Create a Materialize repository on the remote ec2 instance and push the present repository to it."""
    # Initialize bare first; flipped to non-bare after the push so the
    # worktree can be checked out.
    ssh.runv(
        ["git", "init", "--bare", "/home/ubuntu/materialize/.git"],
        "ubuntu",
        i.public_ip_address,
        identity_file=identity_file,
    )
    os.chdir(ROOT)
    os.environ["GIT_SSH_COMMAND"] = f"ssh -i {identity_file}"
    head_rev = git.rev_parse(rev)
    # Fixed: the push target had been mangled to "i.<EMAIL>" (redaction
    # artifact); use the instance's public IP, as every other remote call does.
    git.push(
        f"ubuntu@{i.public_ip_address}:~/materialize/.git",
        f"refs/heads/scratch_{head_rev}",
    )
    ssh.runv(
        ["git", "-C", "/home/ubuntu/materialize", "config", "core.bare", "false"],
        "ubuntu",
        i.public_ip_address,
        identity_file=identity_file,
    )
    ssh.runv(
        ["git", "-C", "/home/ubuntu/materialize", "checkout", head_rev],
        "ubuntu",
        i.public_ip_address,
        identity_file=identity_file,
    )
class MachineDesc(NamedTuple):
    """Description of one machine to launch as part of a cluster."""
    name: str  # hostname within the cluster (written to every /etc/hosts)
    launch_script: Optional[str]  # shell command to start in the background after setup, if any
    instance_type: str  # EC2 instance type
    ami: str  # AMI id to boot
    tags: Dict[str, str]  # extra instance tags
    size_gb: int  # root volume size
async def setup_all(
    instances: List[Instance],
    subnet_id: str,
    local_pub_key: str,
    identity_file: str,
    git_rev: str,
) -> None:
    """Run setup() for every instance concurrently."""
    await asyncio.gather(
        *(setup(i, subnet_id, local_pub_key, identity_file, git_rev) for i in instances)
    )
def launch_cluster(
    descs: List[MachineDesc],
    nonce: str,
    subnet_id: str,
    key_name: Optional[str],
    security_group_id: str,
    instance_profile: Optional[str],
    extra_tags: Dict[str, str],
    delete_after: int,  # Unix timestamp.
    git_rev: str,
) -> List[Instance]:
    """Launch a cluster of instances with a given nonce"""
    instances = [
        launch(
            key_name=key_name,
            instance_type=d.instance_type,
            ami=d.ami,
            tags={**d.tags, **extra_tags},
            display_name=f"{nonce}-{d.name}",
            size_gb=d.size_gb,
            subnet_id=subnet_id,
            security_group_id=security_group_id,
            instance_profile=instance_profile,
            nonce=nonce,
            git_rev=git_rev,
            delete_after=delete_after,
        )
        for d in descs
    ]
    # Generate temporary ssh key for running commands remotely
    tmpdir = tempfile.TemporaryDirectory()
    identity_file = f"{tmpdir.name}/id_rsa"
    spawn.runv(["ssh-keygen", "-t", "rsa", "-N", "", "-f", identity_file])
    with open(f"{tmpdir.name}/id_rsa.pub") as pk:
        local_pub_key = pk.read().strip()
    # Configure all machines concurrently (key install, provisioning, repo push).
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        setup_all(instances, subnet_id, local_pub_key, identity_file, git_rev)
    )
    loop.close()
    # Let the machines resolve each other by cluster name via /etc/hosts.
    hosts_str = "".join(
        (f"{i.private_ip_address}\t{d.name}\n" for (i, d) in zip(instances, descs))
    )
    for i in instances:
        ssh.runv(
            [f"echo {shlex.quote(hosts_str)} | sudo tee -a /etc/hosts"],
            "ubuntu",
            i.public_ip_address,
            identity_file=identity_file,
        )
    # Start each machine's launch script detached in the background, with
    # output redirected to mzscratch-startup.{out,err}.
    for (i, d) in zip(instances, descs):
        if d.launch_script:
            ssh.runv(
                [
                    "cd",
                    "~/materialize",
                    ";",
                    "nohup",
                    "bash",
                    "-c",
                    shlex.quote(d.launch_script),
                    ">~/mzscratch-startup.out",
                    "2>~/mzscratch-startup.err",
                    "&",
                ],
                "ubuntu",
                i.public_ip_address,
                identity_file=identity_file,
            )
    tmpdir.cleanup()
    return instances
def whoami() -> str:
    """Return the current IAM user id (second colon-separated component of the STS caller identity)."""
    user_id = boto3.client("sts").get_caller_identity()["UserId"]
    _account, user = user_id.split(":", 1)[0], user_id.split(":")[1]
    return user
def get_old_instances() -> List[InstanceTypeDef]:
    """Return all running EC2 instances whose "scratch-delete-after" tag is in the past."""
    def is_running(i: InstanceTypeDef) -> bool:
        return i["State"]["Name"] == "running"

    def is_old(i: InstanceTypeDef) -> bool:
        tags_dict = {tag["Key"]: tag["Value"] for tag in i.get("Tags", [])}
        # Renamed from "delete_after", which shadowed the module-level helper.
        expiry = tags_dict.get("scratch-delete-after")
        if expiry is None:
            # Untagged instances are never considered old.
            return False
        return datetime.now(timezone.utc).timestamp() > float(expiry)

    # (Fixed: dataset-separator text was fused onto the closing bracket line.)
    return [
        i
        for r in boto3.client("ec2").describe_instances()["Reservations"]
        for i in r["Instances"]
        if is_running(i) and is_old(i)
    ]
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Public API of this (generated) module.
__all__ = [
    'GetAdminRecommendedFolderResult',
    'AwaitableGetAdminRecommendedFolderResult',
    'get_admin_recommended_folder',
    'get_admin_recommended_folder_output',
]
@pulumi.output_type
class GetAdminRecommendedFolderResult:
    """
    A collection of values returned by getAdminRecommendedFolder.
    """
    def __init__(__self__, description=None, id=None, name=None):
        # Validate then stash each field via pulumi.set so the @pulumi.getter
        # properties below can retrieve them.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def description(self) -> str:
        """The description of the Admin Recommended Folder."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """The ID of the Admin Recommended Folder."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the Admin Recommended Folder."""
        return pulumi.get(self, "name")
class AwaitableGetAdminRecommendedFolderResult(GetAdminRecommendedFolderResult):
    """Variant of the result that can also be awaited; resolves immediately."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes __await__ a generator without ever
        # actually suspending.
        if False:
            yield self
        return GetAdminRecommendedFolderResult(
            description=self.description,
            id=self.id,
            name=self.name)
def get_admin_recommended_folder(description: Optional[str] = None,
                                 id: Optional[str] = None,
                                 name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAdminRecommendedFolderResult:
    """
    Provides an easy way to retrieve the Admin Recommended Folder.

    In order to use the Admin Recommended Folder, you should configure the provider to run in admin mode.
    Please refer to the Example Usage section below for more details.
    ## Attributes reference

    The following attributes are exported:

    - `id` - The ID of the Admin Recommended Folder.
    - `name` - The name of the Admin Recommended Folder.
    - `description` - The description of the Admin Recommended Folder.
    """
    __args__ = dict()
    __args__['description'] = description
    __args__['id'] = id
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's plugin version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('sumologic:index/getAdminRecommendedFolder:getAdminRecommendedFolder', __args__, opts=opts, typ=GetAdminRecommendedFolderResult).value
    return AwaitableGetAdminRecommendedFolderResult(
        description=__ret__.description,
        id=__ret__.id,
        name=__ret__.name)
@_utilities.lift_output_func(get_admin_recommended_folder)
def get_admin_recommended_folder_output(description: Optional[pulumi.Input[Optional[str]]] = None,
                                        id: Optional[pulumi.Input[Optional[str]]] = None,
                                        name: Optional[pulumi.Input[Optional[str]]] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAdminRecommendedFolderResult]:
    """
    Provides an easy way to retrieve the Admin Recommended Folder.

    In order to use the Admin Recommended Folder, you should configure the provider to run in admin mode.
    Please refer to the Example Usage section below for more details.
    ## Attributes reference

    The following attributes are exported:

    - `id` - The ID of the Admin Recommended Folder.
    - `name` - The name of the Admin Recommended Folder.
    - `description` - The description of the Admin Recommended Folder.
    """
    # Implementation is supplied by lift_output_func wrapping the plain
    # function above. (Fixed: dataset-separator text was fused onto the
    # trailing "..." line.)
    ...
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetAdminRecommendedFolderResult',
'AwaitableGetAdminRecommendedFolderResult',
'get_admin_recommended_folder',
'get_admin_recommended_folder_output',
]
@pulumi.output_type
class GetAdminRecommendedFolderResult:
"""
A collection of values returned by getAdminRecommendedFolder.
"""
def __init__(__self__, description=None, id=None, name=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
class AwaitableGetAdminRecommendedFolderResult(GetAdminRecommendedFolderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAdminRecommendedFolderResult(
description=self.description,
id=self.id,
name=self.name)
def get_admin_recommended_folder(description: Optional[str] = None,
id: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAdminRecommendedFolderResult:
"""
Provides an easy way to retrieve the Admin Recommended Folder.
In order to use the Admin Recommended Folder, you should configure the provider to run in admin mode.
Please refer to the Example Usage section below for more details.
## Attributes reference
The following attributes are exported:
- `id` - The ID of the Admin Recommended Folder.
- `name` - The name of the Admin Recommended Folder.
- `description` - The description of the Admin Recommended Folder.
"""
__args__ = dict()
__args__['description'] = description
__args__['id'] = id
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('sumologic:index/getAdminRecommendedFolder:getAdminRecommendedFolder', __args__, opts=opts, typ=GetAdminRecommendedFolderResult).value
return AwaitableGetAdminRecommendedFolderResult(
description=__ret__.description,
id=__ret__.id,
name=__ret__.name)
@_utilities.lift_output_func(get_admin_recommended_folder)
def get_admin_recommended_folder_output(description: Optional[pulumi.Input[Optional[str]]] = None,
id: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAdminRecommendedFolderResult]:
"""
Provides an easy way to retrieve the Admin Recommended Folder.
In order to use the Admin Recommended Folder, you should configure the provider to run in admin mode.
Please refer to the Example Usage section below for more details.
## Attributes reference
The following attributes are exported:
- `id` - The ID of the Admin Recommended Folder.
- `name` - The name of the Admin Recommended Folder.
- `description` - The description of the Admin Recommended Folder.
"""
... | 0.788746 | 0.079068 |
from conans import ConanFile, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment, tools
from conans.errors import ConanInvalidConfiguration
from contextlib import contextmanager
import os
required_conan_version = ">=1.33.0"
class CalcephConan(ConanFile):
name = "calceph"
description = "C Library designed to access the binary planetary ephemeris " \
"files, such INPOPxx, JPL DExxx and SPICE ephemeris files."
license = ["CECILL-C", "CECILL-B", "CECILL-2.1"]
topics = ("conan", "calceph", "ephemeris", "astronomy", "space", "planet")
homepage = "https://www.imcce.fr/inpop/calceph"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"threadsafe": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"threadsafe": False,
}
_autotools= None
_nmake_args = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.cppstd
del self.settings.compiler.libcxx
if self.settings.compiler == "Visual Studio":
del self.options.threadsafe
def validate(self):
if self.settings.compiler == "Visual Studio" and self.options.shared:
raise ConanInvalidConfiguration("calceph doesn't support shared builds with Visual Studio yet")
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self._settings_build.os == "Windows" and self.settings.compiler != "Visual Studio" and \
not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
@contextmanager
def _msvc_build_environment(self):
with tools.vcvars(self):
with tools.environment_append(VisualStudioBuildEnvironment(self).vars):
yield
def build(self):
if self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "Makefile.vc"),
"CFLAGS = /O2 /GR- /MD /nologo /EHs",
"CFLAGS = /nologo /EHs")
with tools.chdir(self._source_subfolder):
with self._msvc_build_environment():
self.run("nmake -f Makefile.vc {}".format(" ".join(self._get_nmake_args())))
else:
autotools = self._configure_autotools()
autotools.make()
def _get_nmake_args(self):
if self._nmake_args:
return self._nmake_args
self._nmake_args = []
self._nmake_args.append("DESTDIR=\"{}\"".format(self.package_folder))
self._nmake_args.extend(["ENABLEF2003=0", "ENABLEF77=0"])
return self._nmake_args
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
args = [
"--disable-static" if self.options.shared else "--enable-static",
"--enable-shared" if self.options.shared else "--disable-shared",
"--enable-thread" if self.options.threadsafe else "--disable-thread",
"--disable-fortran",
"--disable-python",
"--disable-python-package-system",
"--disable-python-package-user",
"--disable-mex-octave",
]
self._autotools.configure(args=args, configure_dir=self._source_subfolder)
return self._autotools
def package(self):
self.copy(pattern="COPYING*", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
with tools.chdir(self._source_subfolder):
with self._msvc_build_environment():
self.run("nmake -f Makefile.vc install {}".format(" ".join(self._get_nmake_args())))
tools.rmdir(os.path.join(self.package_folder, "doc"))
else:
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la")
tools.rmdir(os.path.join(self.package_folder, "libexec"))
def package_info(self):
prefix = "lib" if self.settings.compiler == "Visual Studio" else ""
self.cpp_info.libs = ["{}calceph".format(prefix)]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
if self.options.threadsafe:
self.cpp_info.system_libs.append("pthread")
if self.settings.compiler != "Visual Studio":
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path) | recipes/calceph/all/conanfile.py | from conans import ConanFile, AutoToolsBuildEnvironment, VisualStudioBuildEnvironment, tools
from conans.errors import ConanInvalidConfiguration
from contextlib import contextmanager
import os
required_conan_version = ">=1.33.0"
class CalcephConan(ConanFile):
name = "calceph"
description = "C Library designed to access the binary planetary ephemeris " \
"files, such INPOPxx, JPL DExxx and SPICE ephemeris files."
license = ["CECILL-C", "CECILL-B", "CECILL-2.1"]
topics = ("conan", "calceph", "ephemeris", "astronomy", "space", "planet")
homepage = "https://www.imcce.fr/inpop/calceph"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"threadsafe": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"threadsafe": False,
}
_autotools= None
_nmake_args = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.cppstd
del self.settings.compiler.libcxx
if self.settings.compiler == "Visual Studio":
del self.options.threadsafe
def validate(self):
if self.settings.compiler == "Visual Studio" and self.options.shared:
raise ConanInvalidConfiguration("calceph doesn't support shared builds with Visual Studio yet")
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self._settings_build.os == "Windows" and self.settings.compiler != "Visual Studio" and \
not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
@contextmanager
def _msvc_build_environment(self):
with tools.vcvars(self):
with tools.environment_append(VisualStudioBuildEnvironment(self).vars):
yield
def build(self):
if self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "Makefile.vc"),
"CFLAGS = /O2 /GR- /MD /nologo /EHs",
"CFLAGS = /nologo /EHs")
with tools.chdir(self._source_subfolder):
with self._msvc_build_environment():
self.run("nmake -f Makefile.vc {}".format(" ".join(self._get_nmake_args())))
else:
autotools = self._configure_autotools()
autotools.make()
def _get_nmake_args(self):
if self._nmake_args:
return self._nmake_args
self._nmake_args = []
self._nmake_args.append("DESTDIR=\"{}\"".format(self.package_folder))
self._nmake_args.extend(["ENABLEF2003=0", "ENABLEF77=0"])
return self._nmake_args
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
args = [
"--disable-static" if self.options.shared else "--enable-static",
"--enable-shared" if self.options.shared else "--disable-shared",
"--enable-thread" if self.options.threadsafe else "--disable-thread",
"--disable-fortran",
"--disable-python",
"--disable-python-package-system",
"--disable-python-package-user",
"--disable-mex-octave",
]
self._autotools.configure(args=args, configure_dir=self._source_subfolder)
return self._autotools
def package(self):
self.copy(pattern="COPYING*", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
with tools.chdir(self._source_subfolder):
with self._msvc_build_environment():
self.run("nmake -f Makefile.vc install {}".format(" ".join(self._get_nmake_args())))
tools.rmdir(os.path.join(self.package_folder, "doc"))
else:
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la")
tools.rmdir(os.path.join(self.package_folder, "libexec"))
def package_info(self):
prefix = "lib" if self.settings.compiler == "Visual Studio" else ""
self.cpp_info.libs = ["{}calceph".format(prefix)]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
if self.options.threadsafe:
self.cpp_info.system_libs.append("pthread")
if self.settings.compiler != "Visual Studio":
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path) | 0.540439 | 0.181173 |
import unittest
try:
from unittest import mock
except ImportError:
import mock
from vsi.tools.python import (Try, is_string_like, BasicDecorator, static,
WarningDecorator, args_to_kwargs,
args_to_kwargs_unbound, args_to_kwargs_easy,
args_to_kwargs_unbound_easy,
ARGS, KWARGS, is_class_method, is_static_method,
ArgvContext, nested_update, nested_in_dict)
import sys
if sys.version_info.major > 2:
from vsi.test.py3_python import *
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class PythonTest(unittest.TestCase):
def test_try(self):
with Try():
5/0
with Try(ValueError):
raise ValueError
with Try(KeyError, ZeroDivisionError, ValueError):
raise ValueError
with self.assertRaises(TypeError):
with Try(KeyError, ZeroDivisionError, ValueError):
raise TypeError
with self.assertRaises(TypeError):
with Try():
pass
raise TypeError
def test_is_string_like(self):
self.assertTrue(is_string_like("ok"))
if sys.version_info[0] == 2:
self.assertTrue(is_string_like(b"ok"))
else:
self.assertFalse(is_string_like(b"ok"))
self.assertTrue(is_string_like(u"ok"))
def test_static(self):
@static(count=0)
def x(y, z):
x.count+=1
return y+z+x.count
self.assertEqual(x(1,1), 3)
self.assertEqual(x(1,1), 4)
self.assertEqual(x(1,1), 5)
self.assertEqual(x.count, 3)
def test_basic_decorator(self):
class MyDecor(BasicDecorator):
''' MD '''
def __init__(self, extra=1):
self.extra = extra
def __inner_call__(self, first_arg, *args, **kwargs):
result = self.fun(first_arg, *args, **kwargs)
return result + self.extra
class MyDecor2(MyDecor):
''' MD2 '''
@MyDecor
def test1(x, y):
''' Ok... '''
return x+y
@MyDecor2(3)
def test2(x, y):
''' T2 '''
return x+y
self.assertEqual(MyDecor.__doc__, ' MD ')
self.assertEqual(MyDecor2.__doc__, ' MD2 ')
self.assertEqual(test1.__doc__, ' Ok... ')
self.assertEqual(test2.__doc__, ' T2 ')
self.assertEqual(test1(11, 22), 34)
self.assertEqual(test2(11, 22), 36)
# @mock.patch('sys.stderr', StringIO())
def test_warning_decorator(self):
out = StringIO('')
@WarningDecorator(output_stream=out)
def fun():
return 16
self.assertEqual(fun(), 16)
self.assertEqual(out.getvalue(), 'Warning\n')
# Have to reload vsi.tools.python since sys.stderr is used as a default
# value
with mock.patch('sys.stderr', StringIO()):
try:
from importlib import reload
except ImportError:
from imp import reload
import vsi.tools.python
reload(vsi.tools.python)
@vsi.tools.python.WarningDecorator("This is a warning")
def fun():
return 15
self.assertEqual(fun(), 15)
self.assertEqual(sys.stderr.getvalue(), 'This is a warning\n')
reload(vsi.tools.python)
def test_is_static_methods(self):
def fun(a):
pass
class A(object):
def b(self):
pass
@staticmethod
def c(d):
pass
@classmethod
def d(cls):
pass
class B(A):
def __new__(cls):
return cls
@staticmethod
def b(a):
pass
def c(self):
pass
def d(self):
pass
A.fun = fun
B.fun = staticmethod(fun)
B.fun2 = classmethod(fun)
self.assertFalse(is_static_method(A, 'fun'))
self.assertTrue(is_static_method(A, '__new__'))
self.assertFalse(is_static_method(A, '__init__'))
self.assertFalse(is_static_method(A, '__subclasshook__'))
self.assertFalse(is_static_method(A, 'b'))
self.assertTrue(is_static_method(A, 'c'))
self.assertFalse(is_static_method(A, 'd'))
a=A()
self.assertFalse(is_static_method(a, 'fun'))
self.assertTrue(is_static_method(a, '__new__'))
self.assertFalse(is_static_method(a, '__init__'))
self.assertFalse(is_static_method(a, '__subclasshook__'))
self.assertFalse(is_static_method(a, 'b'))
self.assertTrue(is_static_method(a, 'c'))
self.assertFalse(is_static_method(a, 'd'))
self.assertTrue(is_static_method(B, 'fun'))
self.assertTrue(is_static_method(B, '__new__'))
self.assertFalse(is_static_method(B, '__init__'))
self.assertFalse(is_static_method(B, '__subclasshook__'))
self.assertTrue(is_static_method(B, 'b'))
self.assertFalse(is_static_method(B, 'c'))
self.assertFalse(is_static_method(B, 'd'))
b = B()
self.assertTrue(is_static_method(b, 'fun'))
self.assertTrue(is_static_method(b, '__new__'))
self.assertFalse(is_static_method(b, '__init__'))
self.assertFalse(is_static_method(b, '__subclasshook__'))
self.assertTrue(is_static_method(b, 'b'))
self.assertFalse(is_static_method(b, 'c'))
self.assertFalse(is_static_method(b, 'd'))
def test_is_static_methods(self):
def fun(a):
pass
class A(object):
def b(self):
pass
@staticmethod
def c(d):
pass
@classmethod
def d(cls):
pass
class B(A):
def __new__(cls):
return cls
@classmethod
def b(a):
pass
def c(self):
pass
def d(self):
pass
A.fun = fun
B.fun = classmethod(fun)
self.assertFalse(is_class_method(A, 'fun'))
self.assertFalse(is_class_method(A, '__new__'))
self.assertFalse(is_class_method(A, '__init__'))
self.assertTrue(is_class_method(A, '__subclasshook__'))
self.assertFalse(is_class_method(A, 'b'))
self.assertFalse(is_class_method(A, 'c'))
self.assertTrue(is_class_method(A, 'd'))
a=A()
self.assertFalse(is_class_method(a, 'fun'))
self.assertFalse(is_class_method(a, '__new__'))
self.assertFalse(is_class_method(a, '__init__'))
self.assertTrue(is_class_method(a, '__subclasshook__'))
self.assertFalse(is_class_method(a, 'b'))
self.assertFalse(is_class_method(a, 'c'))
self.assertTrue(is_class_method(a, 'd'))
self.assertTrue(is_class_method(B, 'fun'))
self.assertFalse(is_class_method(B, '__new__'))
self.assertFalse(is_class_method(B, '__init__'))
self.assertTrue(is_class_method(B, '__subclasshook__'))
self.assertTrue(is_class_method(B, 'b'))
self.assertFalse(is_class_method(B, 'c'))
self.assertFalse(is_class_method(B, 'd'))
b = B()
self.assertTrue(is_class_method(b, 'fun'))
self.assertFalse(is_class_method(b, '__new__'))
self.assertFalse(is_class_method(b, '__init__'))
self.assertTrue(is_class_method(b, '__subclasshook__'))
self.assertTrue(is_class_method(b, 'b'))
self.assertFalse(is_class_method(b, 'c'))
self.assertFalse(is_class_method(b, 'd'))
def test_arg_to_kwargs(self):
def a(x, y, z):
pass
def f(x, y, z=18):
pass
def b(x, *args):
pass
def c(x=15, **kwargs):
pass
def d(a, y=15, *args, **kwargs):
pass
def e(a, **kwargs):
pass
def g(x=11):
pass
def h(*args):
pass
def i(**kwargs):
pass
class A(object):
def __init__(self, a, b=15, *args, **kwargs):
pass
def fun(self, a, b=151, *args, **kwargs):
pass
def __call__(self, a, b=152, *args, **kwargs):
pass
@staticmethod
def stat(a, b=153, *args, **kwargs):
pass
@classmethod
def classy(cls, a, b=157, *args, **kwargs):
pass
aa=A(1)
tests = ((a, [1,2,3], {}),
(f, [1,2,3], {}),
(f, [1,2], {}),
(f, [1,2], {'z':22}),
(b, [1], {}),
(b, [1,2,3], {}),
(c, [1], {'w':22}),
(c, [], {'x':11, 'w':22}),
(c, [], {'w':22}),
(d, [11], {}),
(d, [11, 12], {}),
(d, [11, 12, 13, 14], {}),
(d, [11], {'x':15, 'y':16}),
(d, [], {'a':10, 'x':16}),
(d, [11, 12, 13, 14], {'x':15, 'z':37}),
(e, [1], {'x':14}),
(e, [], {'a':2, 'x':14}),
(g, [], {}),
(g, [1], {}),
(h, [], {}),
(h, [100, 202, 303], {}),
(i, [], {}),
(i, [], {'a': 31, 'b':29}),
(A, [11, 22, 33], {'x':14}),
(A, [11], {}),
(aa.fun, [13, 23, 34], {'x':16}),
(aa.fun, [99], {}),
(aa, [14, 24, 35], {'x':17}),
(aa, [98], {}),
(aa.stat, [12, 33, 44], {'y':35}),
(aa.stat, [21], {}),
(aa.classy, [22, 34, 45], {'y':53}),
(aa.classy, [27], {}),
(d, [111, 222, 333], {'xx':92, 'args':28}))#This is valid python
answers = ({'y': 2, 'x': 1, 'z': 3},
{'y': 2, 'x': 1, 'z': 3},
{'y': 2, 'x': 1, 'z': 18},
{'y': 2, 'x': 1, 'z': 22},
{'x': 1, ARGS: ()},
{'x': 1, ARGS: (2, 3)},
{'x': 1, KWARGS: {'w': 22}},
{'x': 11, KWARGS: {'w': 22}},
{'x': 15, KWARGS: {'w': 22}},
{'a': 11, 'y': 15, KWARGS: {}, ARGS: ()},
{'a': 11, 'y': 12, KWARGS: {}, ARGS: ()},
{'a': 11, 'y': 12, KWARGS: {}, ARGS: (13, 14)},
{'a': 11, 'y': 16, KWARGS: {'x': 15}, ARGS: ()},
{'a': 10, 'y': 15, KWARGS: {'x': 16}, ARGS: ()},
{'a': 11, 'y': 12, KWARGS: {'x': 15, 'z': 37}, ARGS: (13, 14)},
{'a': 1, KWARGS: {'x': 14}},
{'a': 2, KWARGS: {'x': 14}},
{'x': 11},
{'x': 1},
{ARGS: ()},
{ARGS: (100, 202, 303)},
{KWARGS: {}},
{KWARGS: {'a': 31, 'b': 29}},
{'a': 11, 'b': 22, KWARGS: {'x': 14}, ARGS: (33,)},
{'a': 11, 'b': 15, KWARGS: {}, ARGS: ()},
{'a': 13, 'b': 23, KWARGS: {'x': 16}, ARGS: (34,)},
{'a': 99, 'b': 151, KWARGS: {}, ARGS: ()},
{'a': 14, 'b': 24, KWARGS: {'x': 17}, ARGS: (35,)},
{'a': 98, 'b': 152, KWARGS: {}, ARGS: ()},
{'a': 12, 'b': 33, KWARGS: {'y': 35}, ARGS: (44,)},
{'a': 21, 'b': 153, KWARGS: {}, ARGS: ()},
{'a': 22, 'b': 34, KWARGS: {'y': 53}, ARGS: (45,)},
{'a': 27, 'b': 157, KWARGS: {}, ARGS: ()},
{'a': 111, 'y':222, ARGS: (333,), KWARGS: {'xx': 92, 'args':28}})
for test, answer in zip(tests, answers):
self.assertEqual(args_to_kwargs(test[0], test[1], test[2]), answer)
self.assertEqual(args_to_kwargs_easy(test[0], *test[1], **test[2]), answer)
tests = ((A, 'fun', [10, 21, 32], {'x':15}),
(A, 'fun', [100], {}),
(A, 'stat', [12, 33, 44], {'y':35}),
(A, 'stat', [21], {}),
(A, 'classy', [22, 34, 45], {'y':53}),
(A, 'classy', [27], {}))
answers = ({'a': 10, 'b': 21, KWARGS: {'x': 15}, ARGS: (32,)},
{'a': 100, 'b': 151, KWARGS: {}, ARGS: ()},
{'a': 12, 'b': 33, KWARGS: {'y': 35}, ARGS: (44,)},
{'a': 21, 'b': 153, KWARGS: {}, ARGS: ()},
{'a': 22, 'b': 34, KWARGS: {'y': 53}, ARGS: (45,)},
{'a': 27, 'b': 157, KWARGS: {}, ARGS: ()})
for test, answer in zip(tests, answers):
self.assertEqual(args_to_kwargs_unbound(test[0], test[1], test[2], test[3]), answer)
self.assertEqual(args_to_kwargs_unbound_easy(test[0], test[1], *test[2], **test[3]), answer)
if sys.version_info.major == 2:
value = getattr(test[0], test[1])
self.assertEqual(args_to_kwargs(value, test[2], test[3]), answer)
self.assertEqual(args_to_kwargs_easy(value, *test[2], **test[3]), answer)
def test_arg_context(self):
with mock.patch('sys.argv', ['arg0', 'arg1', 'arg2']):
self.assertEqual(sys.argv, ['arg0', 'arg1', 'arg2'])
with ArgvContext('00', '11', '22'):
self.assertEqual(sys.argv, ['00', '11', '22'])
self.assertEqual(sys.argv, ['arg0', 'arg1', 'arg2'])
def test_nested_update(self):
x={"a":1, "b": {"c": 2, "d": 3}, "d": 4}
# Normal update
y={"a":11, "b":{"c":22, "e":33}}
ans={"a":11, "b": {"c": 22, "d": 3, "e":33}, "d": 4}
z=x.copy()
nested_update(z, y)
self.assertEqual(z, ans)
# Keys not there before + replace dict with int
y={"b": 5, "e":{"c":22, "e":33}, "f": 6}
ans={"a":1, "b": 5, "e": {"c": 22, "e":33}, "d": 4, "f":6}
z=x.copy()
nested_update(z, y)
self.assertEqual(z, ans)
y={"a": {"g": 15}}
z=x.copy()
with self.assertRaises(TypeError):
nested_update(z, y)
# Test for dict derived classes
def test_nested_update_dervied(self):
class FooDict(dict):
pass
foo = FooDict({'a': 15, 'b': FooDict({'c': 14, 'f': 18})})
bar = {'a': 16, 'd': {'e': 17}, 'b': {'c': 24}}
nested_update(foo, bar)
ans = {'a': 16, 'd': {'e': 17}, 'b': {'c': 24, 'f':18}}
self.assertEqual(foo, ans)
self.assertIsInstance(foo, FooDict)
self.assertIsInstance(foo['b'], FooDict)
self.assertIsInstance(foo['d'], FooDict)
def test_nested_in_dict(self):
b = {'a': 5, 'b': 6}
self.assertTrue(nested_in_dict({'a': 5}, b))
self.assertFalse(nested_in_dict({'a': 5, 'b':0}, b))
self.assertFalse(nested_in_dict({'a': 5, 'c':7}, b))
c = {'a': 5, 'b': 6, 'c': { 'd': { 'e': 1 }, 'f': 2} }
self.assertTrue(nested_in_dict({'a': 5}, c))
self.assertTrue(nested_in_dict({'c': {}}, c))
self.assertFalse(nested_in_dict({'g': {}}, c))
self.assertTrue(nested_in_dict({'c': {'d':{}}}, c))
self.assertTrue(nested_in_dict({'c': {'d':{'e':1}}}, c))
self.assertTrue(nested_in_dict(c, c)) | python/vsi/test/test_python.py | import unittest
try:
from unittest import mock
except ImportError:
import mock
from vsi.tools.python import (Try, is_string_like, BasicDecorator, static,
WarningDecorator, args_to_kwargs,
args_to_kwargs_unbound, args_to_kwargs_easy,
args_to_kwargs_unbound_easy,
ARGS, KWARGS, is_class_method, is_static_method,
ArgvContext, nested_update, nested_in_dict)
import sys
if sys.version_info.major > 2:
from vsi.test.py3_python import *
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class PythonTest(unittest.TestCase):
def test_try(self):
with Try():
5/0
with Try(ValueError):
raise ValueError
with Try(KeyError, ZeroDivisionError, ValueError):
raise ValueError
with self.assertRaises(TypeError):
with Try(KeyError, ZeroDivisionError, ValueError):
raise TypeError
with self.assertRaises(TypeError):
with Try():
pass
raise TypeError
def test_is_string_like(self):
self.assertTrue(is_string_like("ok"))
if sys.version_info[0] == 2:
self.assertTrue(is_string_like(b"ok"))
else:
self.assertFalse(is_string_like(b"ok"))
self.assertTrue(is_string_like(u"ok"))
def test_static(self):
@static(count=0)
def x(y, z):
x.count+=1
return y+z+x.count
self.assertEqual(x(1,1), 3)
self.assertEqual(x(1,1), 4)
self.assertEqual(x(1,1), 5)
self.assertEqual(x.count, 3)
def test_basic_decorator(self):
class MyDecor(BasicDecorator):
''' MD '''
def __init__(self, extra=1):
self.extra = extra
def __inner_call__(self, first_arg, *args, **kwargs):
result = self.fun(first_arg, *args, **kwargs)
return result + self.extra
class MyDecor2(MyDecor):
''' MD2 '''
@MyDecor
def test1(x, y):
''' Ok... '''
return x+y
@MyDecor2(3)
def test2(x, y):
''' T2 '''
return x+y
self.assertEqual(MyDecor.__doc__, ' MD ')
self.assertEqual(MyDecor2.__doc__, ' MD2 ')
self.assertEqual(test1.__doc__, ' Ok... ')
self.assertEqual(test2.__doc__, ' T2 ')
self.assertEqual(test1(11, 22), 34)
self.assertEqual(test2(11, 22), 36)
# @mock.patch('sys.stderr', StringIO())
def test_warning_decorator(self):
out = StringIO('')
@WarningDecorator(output_stream=out)
def fun():
return 16
self.assertEqual(fun(), 16)
self.assertEqual(out.getvalue(), 'Warning\n')
# Have to reload vsi.tools.python since sys.stderr is used as a default
# value
with mock.patch('sys.stderr', StringIO()):
try:
from importlib import reload
except ImportError:
from imp import reload
import vsi.tools.python
reload(vsi.tools.python)
@vsi.tools.python.WarningDecorator("This is a warning")
def fun():
return 15
self.assertEqual(fun(), 15)
self.assertEqual(sys.stderr.getvalue(), 'This is a warning\n')
reload(vsi.tools.python)
def test_is_static_methods(self):
def fun(a):
pass
class A(object):
def b(self):
pass
@staticmethod
def c(d):
pass
@classmethod
def d(cls):
pass
class B(A):
def __new__(cls):
return cls
@staticmethod
def b(a):
pass
def c(self):
pass
def d(self):
pass
A.fun = fun
B.fun = staticmethod(fun)
B.fun2 = classmethod(fun)
self.assertFalse(is_static_method(A, 'fun'))
self.assertTrue(is_static_method(A, '__new__'))
self.assertFalse(is_static_method(A, '__init__'))
self.assertFalse(is_static_method(A, '__subclasshook__'))
self.assertFalse(is_static_method(A, 'b'))
self.assertTrue(is_static_method(A, 'c'))
self.assertFalse(is_static_method(A, 'd'))
a=A()
self.assertFalse(is_static_method(a, 'fun'))
self.assertTrue(is_static_method(a, '__new__'))
self.assertFalse(is_static_method(a, '__init__'))
self.assertFalse(is_static_method(a, '__subclasshook__'))
self.assertFalse(is_static_method(a, 'b'))
self.assertTrue(is_static_method(a, 'c'))
self.assertFalse(is_static_method(a, 'd'))
self.assertTrue(is_static_method(B, 'fun'))
self.assertTrue(is_static_method(B, '__new__'))
self.assertFalse(is_static_method(B, '__init__'))
self.assertFalse(is_static_method(B, '__subclasshook__'))
self.assertTrue(is_static_method(B, 'b'))
self.assertFalse(is_static_method(B, 'c'))
self.assertFalse(is_static_method(B, 'd'))
b = B()
self.assertTrue(is_static_method(b, 'fun'))
self.assertTrue(is_static_method(b, '__new__'))
self.assertFalse(is_static_method(b, '__init__'))
self.assertFalse(is_static_method(b, '__subclasshook__'))
self.assertTrue(is_static_method(b, 'b'))
self.assertFalse(is_static_method(b, 'c'))
self.assertFalse(is_static_method(b, 'd'))
def test_is_static_methods(self):
def fun(a):
pass
class A(object):
def b(self):
pass
@staticmethod
def c(d):
pass
@classmethod
def d(cls):
pass
class B(A):
def __new__(cls):
return cls
@classmethod
def b(a):
pass
def c(self):
pass
def d(self):
pass
A.fun = fun
B.fun = classmethod(fun)
self.assertFalse(is_class_method(A, 'fun'))
self.assertFalse(is_class_method(A, '__new__'))
self.assertFalse(is_class_method(A, '__init__'))
self.assertTrue(is_class_method(A, '__subclasshook__'))
self.assertFalse(is_class_method(A, 'b'))
self.assertFalse(is_class_method(A, 'c'))
self.assertTrue(is_class_method(A, 'd'))
a=A()
self.assertFalse(is_class_method(a, 'fun'))
self.assertFalse(is_class_method(a, '__new__'))
self.assertFalse(is_class_method(a, '__init__'))
self.assertTrue(is_class_method(a, '__subclasshook__'))
self.assertFalse(is_class_method(a, 'b'))
self.assertFalse(is_class_method(a, 'c'))
self.assertTrue(is_class_method(a, 'd'))
self.assertTrue(is_class_method(B, 'fun'))
self.assertFalse(is_class_method(B, '__new__'))
self.assertFalse(is_class_method(B, '__init__'))
self.assertTrue(is_class_method(B, '__subclasshook__'))
self.assertTrue(is_class_method(B, 'b'))
self.assertFalse(is_class_method(B, 'c'))
self.assertFalse(is_class_method(B, 'd'))
b = B()
self.assertTrue(is_class_method(b, 'fun'))
self.assertFalse(is_class_method(b, '__new__'))
self.assertFalse(is_class_method(b, '__init__'))
self.assertTrue(is_class_method(b, '__subclasshook__'))
self.assertTrue(is_class_method(b, 'b'))
self.assertFalse(is_class_method(b, 'c'))
self.assertFalse(is_class_method(b, 'd'))
def test_arg_to_kwargs(self):
def a(x, y, z):
pass
def f(x, y, z=18):
pass
def b(x, *args):
pass
def c(x=15, **kwargs):
pass
def d(a, y=15, *args, **kwargs):
pass
def e(a, **kwargs):
pass
def g(x=11):
pass
def h(*args):
pass
def i(**kwargs):
pass
class A(object):
def __init__(self, a, b=15, *args, **kwargs):
pass
def fun(self, a, b=151, *args, **kwargs):
pass
def __call__(self, a, b=152, *args, **kwargs):
pass
@staticmethod
def stat(a, b=153, *args, **kwargs):
pass
@classmethod
def classy(cls, a, b=157, *args, **kwargs):
pass
aa=A(1)
tests = ((a, [1,2,3], {}),
(f, [1,2,3], {}),
(f, [1,2], {}),
(f, [1,2], {'z':22}),
(b, [1], {}),
(b, [1,2,3], {}),
(c, [1], {'w':22}),
(c, [], {'x':11, 'w':22}),
(c, [], {'w':22}),
(d, [11], {}),
(d, [11, 12], {}),
(d, [11, 12, 13, 14], {}),
(d, [11], {'x':15, 'y':16}),
(d, [], {'a':10, 'x':16}),
(d, [11, 12, 13, 14], {'x':15, 'z':37}),
(e, [1], {'x':14}),
(e, [], {'a':2, 'x':14}),
(g, [], {}),
(g, [1], {}),
(h, [], {}),
(h, [100, 202, 303], {}),
(i, [], {}),
(i, [], {'a': 31, 'b':29}),
(A, [11, 22, 33], {'x':14}),
(A, [11], {}),
(aa.fun, [13, 23, 34], {'x':16}),
(aa.fun, [99], {}),
(aa, [14, 24, 35], {'x':17}),
(aa, [98], {}),
(aa.stat, [12, 33, 44], {'y':35}),
(aa.stat, [21], {}),
(aa.classy, [22, 34, 45], {'y':53}),
(aa.classy, [27], {}),
(d, [111, 222, 333], {'xx':92, 'args':28}))#This is valid python
answers = ({'y': 2, 'x': 1, 'z': 3},
{'y': 2, 'x': 1, 'z': 3},
{'y': 2, 'x': 1, 'z': 18},
{'y': 2, 'x': 1, 'z': 22},
{'x': 1, ARGS: ()},
{'x': 1, ARGS: (2, 3)},
{'x': 1, KWARGS: {'w': 22}},
{'x': 11, KWARGS: {'w': 22}},
{'x': 15, KWARGS: {'w': 22}},
{'a': 11, 'y': 15, KWARGS: {}, ARGS: ()},
{'a': 11, 'y': 12, KWARGS: {}, ARGS: ()},
{'a': 11, 'y': 12, KWARGS: {}, ARGS: (13, 14)},
{'a': 11, 'y': 16, KWARGS: {'x': 15}, ARGS: ()},
{'a': 10, 'y': 15, KWARGS: {'x': 16}, ARGS: ()},
{'a': 11, 'y': 12, KWARGS: {'x': 15, 'z': 37}, ARGS: (13, 14)},
{'a': 1, KWARGS: {'x': 14}},
{'a': 2, KWARGS: {'x': 14}},
{'x': 11},
{'x': 1},
{ARGS: ()},
{ARGS: (100, 202, 303)},
{KWARGS: {}},
{KWARGS: {'a': 31, 'b': 29}},
{'a': 11, 'b': 22, KWARGS: {'x': 14}, ARGS: (33,)},
{'a': 11, 'b': 15, KWARGS: {}, ARGS: ()},
{'a': 13, 'b': 23, KWARGS: {'x': 16}, ARGS: (34,)},
{'a': 99, 'b': 151, KWARGS: {}, ARGS: ()},
{'a': 14, 'b': 24, KWARGS: {'x': 17}, ARGS: (35,)},
{'a': 98, 'b': 152, KWARGS: {}, ARGS: ()},
{'a': 12, 'b': 33, KWARGS: {'y': 35}, ARGS: (44,)},
{'a': 21, 'b': 153, KWARGS: {}, ARGS: ()},
{'a': 22, 'b': 34, KWARGS: {'y': 53}, ARGS: (45,)},
{'a': 27, 'b': 157, KWARGS: {}, ARGS: ()},
{'a': 111, 'y':222, ARGS: (333,), KWARGS: {'xx': 92, 'args':28}})
for test, answer in zip(tests, answers):
self.assertEqual(args_to_kwargs(test[0], test[1], test[2]), answer)
self.assertEqual(args_to_kwargs_easy(test[0], *test[1], **test[2]), answer)
tests = ((A, 'fun', [10, 21, 32], {'x':15}),
(A, 'fun', [100], {}),
(A, 'stat', [12, 33, 44], {'y':35}),
(A, 'stat', [21], {}),
(A, 'classy', [22, 34, 45], {'y':53}),
(A, 'classy', [27], {}))
answers = ({'a': 10, 'b': 21, KWARGS: {'x': 15}, ARGS: (32,)},
{'a': 100, 'b': 151, KWARGS: {}, ARGS: ()},
{'a': 12, 'b': 33, KWARGS: {'y': 35}, ARGS: (44,)},
{'a': 21, 'b': 153, KWARGS: {}, ARGS: ()},
{'a': 22, 'b': 34, KWARGS: {'y': 53}, ARGS: (45,)},
{'a': 27, 'b': 157, KWARGS: {}, ARGS: ()})
for test, answer in zip(tests, answers):
self.assertEqual(args_to_kwargs_unbound(test[0], test[1], test[2], test[3]), answer)
self.assertEqual(args_to_kwargs_unbound_easy(test[0], test[1], *test[2], **test[3]), answer)
if sys.version_info.major == 2:
value = getattr(test[0], test[1])
self.assertEqual(args_to_kwargs(value, test[2], test[3]), answer)
self.assertEqual(args_to_kwargs_easy(value, *test[2], **test[3]), answer)
def test_arg_context(self):
with mock.patch('sys.argv', ['arg0', 'arg1', 'arg2']):
self.assertEqual(sys.argv, ['arg0', 'arg1', 'arg2'])
with ArgvContext('00', '11', '22'):
self.assertEqual(sys.argv, ['00', '11', '22'])
self.assertEqual(sys.argv, ['arg0', 'arg1', 'arg2'])
def test_nested_update(self):
x={"a":1, "b": {"c": 2, "d": 3}, "d": 4}
# Normal update
y={"a":11, "b":{"c":22, "e":33}}
ans={"a":11, "b": {"c": 22, "d": 3, "e":33}, "d": 4}
z=x.copy()
nested_update(z, y)
self.assertEqual(z, ans)
# Keys not there before + replace dict with int
y={"b": 5, "e":{"c":22, "e":33}, "f": 6}
ans={"a":1, "b": 5, "e": {"c": 22, "e":33}, "d": 4, "f":6}
z=x.copy()
nested_update(z, y)
self.assertEqual(z, ans)
y={"a": {"g": 15}}
z=x.copy()
with self.assertRaises(TypeError):
nested_update(z, y)
# Test for dict derived classes
    def test_nested_update_dervied(self):  # NOTE(review): "dervied" is a typo for "derived"
        """nested_update on a dict subclass must preserve the subclass type,
        even for nested dicts that the merge creates from plain-dict input."""
        class FooDict(dict):
            pass
        foo = FooDict({'a': 15, 'b': FooDict({'c': 14, 'f': 18})})
        bar = {'a': 16, 'd': {'e': 17}, 'b': {'c': 24}}
        nested_update(foo, bar)
        ans = {'a': 16, 'd': {'e': 17}, 'b': {'c': 24, 'f':18}}
        self.assertEqual(foo, ans)
        self.assertIsInstance(foo, FooDict)
        self.assertIsInstance(foo['b'], FooDict)
        # 'd' did not exist in foo, so nested_update must have created it
        # as a FooDict rather than a plain dict.
        self.assertIsInstance(foo['d'], FooDict)
    def test_nested_in_dict(self):
        """nested_in_dict(needle, haystack): recursive subset check.

        An empty dict value matches any dict at that key; every key/value in
        the needle must appear (recursively) in the haystack.
        """
        b = {'a': 5, 'b': 6}
        self.assertTrue(nested_in_dict({'a': 5}, b))
        self.assertFalse(nested_in_dict({'a': 5, 'b':0}, b))   # wrong value
        self.assertFalse(nested_in_dict({'a': 5, 'c':7}, b))   # missing key
        c = {'a': 5, 'b': 6, 'c': { 'd': { 'e': 1 }, 'f': 2} }
        self.assertTrue(nested_in_dict({'a': 5}, c))
        self.assertTrue(nested_in_dict({'c': {}}, c))          # empty dict matches any dict
        self.assertFalse(nested_in_dict({'g': {}}, c))
        self.assertTrue(nested_in_dict({'c': {'d':{}}}, c))
        self.assertTrue(nested_in_dict({'c': {'d':{'e':1}}}, c))
        # A dict is always a nested subset of itself.
        self.assertTrue(nested_in_dict(c, c)) | 0.358241 | 0.511107 |
class Node:
    """A singly linked list cell: a payload plus a reference to its successor."""

    def __init__(self, data, next=None):
        # Payload first, then the (possibly None) successor link.
        self.data = data
        self.next = next

    def getValue(self):
        """Return the payload stored in this node."""
        return self.data

    def setValue(self, value):
        """Replace the payload stored in this node."""
        self.data = value

    def getNext(self):
        """Return the successor node, or None at the end of the list."""
        return self.next

    def setNext(self, next):
        """Re-link this node to a new successor (None detaches the tail)."""
        self.next = next
class List:
    """A singly linked list with O(1) insertion at the head."""

    def __init__(self):
        self.head = None   # first Node, or None when empty
        self.size = 0      # number of nodes currently in the list

    def add(self, value):
        '''
        Insert Value at Head
        :param value: payload to store in a new head node
        :return: None
        '''
        # The new node points at the old head, then becomes the head.
        self.head = Node(value, self.head)
        self.size += 1

    def remove(self, value):
        '''
        Remove the first node holding ``value``; no-op when absent.
        :param value: payload to search for
        :return: None
        '''
        # Single traversal replaces the original's separate head-case plus
        # duplicated value check.
        previous = None
        current = self.head
        while current is not None and current.getValue() != value:
            previous = current
            current = current.getNext()
        if current is None:
            return  # value not present; nothing to do
        if previous is None:
            # Removing the head node.
            self.head = current.getNext()
        else:
            previous.setNext(current.getNext())
        self.size -= 1

    def reverse(self):
        '''
        Reverse the Linked List in place.
        :return: None
        '''
        # Renamed the cursor from `next` to avoid shadowing the builtin.
        previous = None
        current = self.head
        while current:
            successor = current.getNext()
            current.setNext(previous)
            previous = current
            current = successor
        self.head = previous

    def __str__(self):
        # Render as the list of payloads, head first, e.g. "[3, 2, 1]".
        values = []
        node = self.head
        while node:
            values.append(node.getValue())
            node = node.getNext()
        return str(values)
if __name__ == '__main__':
    # Smoke-test the list: build, reverse, then remove — including removing a
    # value that is no longer present, which must be a silent no-op.
    # `print(l)` with parentheses is valid in both Python 2 and 3 for a
    # single argument, unlike the original `print l` statements.
    l = List()
    print(l)
    l.add(1)
    l.add(2)
    l.add(3)
    print(l)
    l.reverse()
    print(l)
    l.remove(3)
    print(l)
    l.remove(3)
    print(l)
    l.remove(2)
print l | List.py | class Node:
def __init__(self,data,next=None):
self.next = next
self.data = data
def getValue(self):
return self.data
def setValue(self,value):
self.data = value
def getNext(self):
return self.next
def setNext(self,next):
self.next = next
class List:
def __init__(self):
self.head = None
self.size = 0
def add(self,value):
'''
Insert Value at Head
:param value:
:return:
'''
node = Node(value,self.head)
self.head = node
self.size += 1
def remove(self,value):
# No item to remove
if self.head == None:
return
# Value is the first item, remove from list
if self.head.getValue() == value:
self.head = self.head.getNext()
self.size -= 1
return
currentNode = self.head.getNext()
previousNode = self.head
while currentNode != None and currentNode.getValue() != value:
previousNode = currentNode
currentNode = currentNode.getNext()
if currentNode and currentNode.getValue() == value:
previousNode.setNext(currentNode.getNext())
self.size -= 1
def reverse(self):
'''
Reverse the Linked List
:return:
'''
if self.size == 0 or self.size == 1:
return
previous = None
next = self.head
while next:
temp = next.getNext()
next.setNext(previous)
previous = next
next = temp
self.head = previous
def __str__(self):
values = []
node = self.head
while node:
values.append(node.getValue())
node = node.getNext()
return str(values)
if __name__ == '__main__':
l = List()
print l
l.add(1)
l.add(2)
l.add(3)
print l
l.reverse()
print l
l.remove(3)
print l
l.remove(3)
print l
l.remove(2)
print l | 0.667039 | 0.29972 |
from turtle import Turtle, Screen
import random
def start(turtle_instance, x, y, color):
    """Place a racer on the start line.

    Colors the turtle, lifts the pen so the move leaves no trace, switches to
    the turtle shape, and teleports it to the (x, y) starting position.
    """
    racer = turtle_instance
    racer.color(color)
    racer.penup()
    racer.shape('turtle')
    racer.goto(x, y)
# Set up the race window and ask the player to bet on a color.
screen = Screen()
screen.setup(width=1320, height=600)
user_input = screen.textinput("Turtle Game | Bet", "Which color of turtle will win? Please enter a color")
# textinput() returns None when the dialog is cancelled; the original chained
# .lower() directly and crashed with AttributeError in that case.
user_input = (user_input or '').lower()
print(user_input)
def max_x_coordinate(color_list, color_dict, count_f):
    """Return (x, color_name) for the turtle currently furthest to the right.

    :param color_list: the racer Turtle objects
    :param color_dict: Turtle -> color-name mapping
    :param count_f: race-loop iteration counter; 0 means "first call"

    NOTE(review): on every call after the first (count_f > 0) this reads the
    module-level globals ``max_x`` and ``max_x_color`` as the previous best;
    it would be cleaner to pass the previous maximum in explicitly.
    """
    if count_f == 0:
        # First lap: no previous best yet; the track's left wall is x = -600.
        max_coordinate = -600
        max_color = ''
    else:
        # Previous best, taken from the module-level race loop (globals).
        max_coordinate = max_x
        max_color = max_x_color
    interim_max = max_coordinate
    interim_max_color = max_color
    # Scan all racers for the right-most x position.
    for color in color_list:
        if color.pos()[0] > max_coordinate:
            interim_max = color.pos()[0]
            interim_max_color = color_dict[color].title()
    if interim_max_color == max_color:
        print(
            f'{interim_max_color} is still winning...\
 Old: {max_color}:{max_coordinate} New: {interim_max_color}:{interim_max} ')
    else:
        # NOTE(review): this bare coordinate print looks like leftover debugging.
        print(interim_max)
        print(
            f'{interim_max_color} is winning. \
 Old: {max_color}:{max_coordinate} New: {interim_max_color}:{interim_max}')
    return interim_max, interim_max_color
# Create one racer per lane and map each Turtle to its color name.
red = Turtle()
green = Turtle()
yellow = Turtle()
blue = Turtle()
purple = Turtle()
participant_list = [red, green, yellow, blue, purple]
participant_color_dict = {red: 'red', green: 'green', yellow: 'yellow', blue: 'blue', purple: 'purple'}
# Line everyone up on the left edge, 100 px apart vertically.
start(red, -590, 200, 'red')
start(green, -590, 100, 'green')
start(yellow, -590, 0, 'yellow')
start(blue, -590, -100, 'blue')
start(purple, -590, -200, 'purple')
# NOTE(review): leftover debug print of the red turtle's x position.
print(red.pos()[0])
max_x = -600       # right-most x seen so far (read by max_x_coordinate as a global)
count = 0          # race-loop iteration counter
max_jump = 10      # maximum pixels a turtle may advance per turn
# Each turn every turtle advances a random amount; the race ends once the
# leader crosses x = 580 (near the right edge of the 1320-px-wide window).
while max_x <= 580:
    red.fd(random.randint(0, max_jump))
    green.fd(random.randint(0, max_jump))
    yellow.fd(random.randint(0, max_jump))
    blue.fd(random.randint(0, max_jump))
    purple.fd(random.randint(0, max_jump))
    max_x, max_x_color = max_x_coordinate(participant_list, participant_color_dict, count)
    count += 1
print('Max X-coordinate:', max_x, 'Max Color:', max_x_color)
# check winner
if user_input.title() == max_x_color:
    print(f'Congratulations, {user_input} won!!!')
else:
    print(f'Sorry, {user_input} lost!!!, and {max_x_color} won!!!')
# print({key: rank for rank, key in enumerate(sorted(participant_color_dict, key=participant_color_dict.get,
#       reverse=True), 1)})
# Final standings: every racer's finishing x coordinate.
for participant in participant_list:
    print(f'{participant_color_dict[participant].title()}: {participant.pos()[0]}')
screen.exitonclick() | Day 19 - Turtle Races/turtle_races.py | from turtle import Turtle, Screen
import random
def start(turtle_instance, x, y, color):
turtle_instance.color(color)
turtle_instance.penup()
turtle_instance.shape('turtle')
turtle_instance.goto(x, y)
screen = Screen()
screen.setup(width=1320, height=600)
user_input = screen.textinput("Turtle Game | Bet", "Which color of turtle will win? Please enter a color").lower()
print(user_input)
def max_x_coordinate(color_list, color_dict, count_f):
if count_f == 0:
max_coordinate = -600
max_color = ''
else:
max_coordinate = max_x
max_color = max_x_color
interim_max = max_coordinate
interim_max_color = max_color
for color in color_list:
if color.pos()[0] > max_coordinate:
interim_max = color.pos()[0]
interim_max_color = color_dict[color].title()
if interim_max_color == max_color:
print(
f'{interim_max_color} is still winning...\
Old: {max_color}:{max_coordinate} New: {interim_max_color}:{interim_max} ')
else:
print(interim_max)
print(
f'{interim_max_color} is winning. \
Old: {max_color}:{max_coordinate} New: {interim_max_color}:{interim_max}')
return interim_max, interim_max_color
red = Turtle()
green = Turtle()
yellow = Turtle()
blue = Turtle()
purple = Turtle()
participant_list = [red, green, yellow, blue, purple]
participant_color_dict = {red: 'red', green: 'green', yellow: 'yellow', blue: 'blue', purple: 'purple'}
start(red, -590, 200, 'red')
start(green, -590, 100, 'green')
start(yellow, -590, 0, 'yellow')
start(blue, -590, -100, 'blue')
start(purple, -590, -200, 'purple')
print(red.pos()[0])
max_x = -600
count = 0
max_jump = 10
while max_x <= 580:
red.fd(random.randint(0, max_jump))
green.fd(random.randint(0, max_jump))
yellow.fd(random.randint(0, max_jump))
blue.fd(random.randint(0, max_jump))
purple.fd(random.randint(0, max_jump))
max_x, max_x_color = max_x_coordinate(participant_list, participant_color_dict, count)
count += 1
print('Max X-coordinate:', max_x, 'Max Color:', max_x_color)
# check winner
if user_input.title() == max_x_color:
print(f'Congratulations, {user_input} won!!!')
else:
print(f'Sorry, {user_input} lost!!!, and {max_x_color} won!!!')
# print({key: rank for rank, key in enumerate(sorted(participant_color_dict, key=participant_color_dict.get,
# reverse=True), 1)})
for participant in participant_list:
print(f'{participant_color_dict[participant].title()}: {participant.pos()[0]}')
screen.exitonclick() | 0.16654 | 0.177347 |
from __future__ import absolute_import
import mock
from oauth2client.client import AccessTokenCredentials
import unittest
import google.cloud.monitoring
import datalab.context
import datalab.stackdriver.monitoring as gcm
# Test fixtures shared by all TestCases methods below.
PROJECT = 'my-project'  # explicit project id used by the "maximal" constructor test
RESOURCE_TYPES = ['gce_instance', 'aws_ec2_instance']        # fixture resource types
DISPLAY_NAMES = ['GCE VM Instance', 'Amazon EC2 Instance']   # parallel to RESOURCE_TYPES
# Label descriptors attached to every fixture resource descriptor.
LABELS = [dict(key='instance_id', value_type='STRING',
               description='VM instance ID'),
          dict(key='project_id', value_type='STRING',
               description='Project ID')]
FILTER_STRING = 'resource.type = ends_with("instance")'  # sample API filter string
class TestCases(unittest.TestCase):
    """Unit tests for gcm.ResourceDescriptors (datalab Stackdriver wrapper).

    No test talks to a real service: the monitoring API entry points are
    mocked per-test and credentials are dummies.
    """

    def setUp(self):
        # Fresh context + wrapper object for every test.
        self.context = self._create_context()
        self.descriptors = gcm.ResourceDescriptors(context=self.context)

    @mock.patch('datalab.context._context.Context.default')
    def test_constructor_minimal(self, mock_context_default):
        # No-arg constructor picks up the default context and leaves the
        # filter and the descriptor cache empty.
        mock_context_default.return_value = self.context
        descriptors = gcm.ResourceDescriptors()
        expected_client = gcm._utils.make_client(context=self.context)
        self.assertEqual(descriptors._client.project, expected_client.project)
        self.assertEqual(descriptors._client.connection.credentials,
                         expected_client.connection.credentials)
        self.assertIsNone(descriptors._filter_string)
        self.assertIsNone(descriptors._descriptors)

    def test_constructor_maximal(self):
        # All-args constructor honors the explicit project, filter and context.
        context = self._create_context(PROJECT)
        descriptors = gcm.ResourceDescriptors(
            filter_string=FILTER_STRING, project_id=PROJECT, context=context)
        expected_client = gcm._utils.make_client(
            context=context, project_id=PROJECT)
        self.assertEqual(descriptors._client.project, expected_client.project)
        self.assertEqual(descriptors._client.connection.credentials,
                         expected_client.connection.credentials)
        self.assertEqual(descriptors._filter_string, FILTER_STRING)
        self.assertIsNone(descriptors._descriptors)

    @mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
    def test_list(self, mock_api_list_descriptors):
        # list() returns every descriptor and forwards filter_string=None.
        mock_api_list_descriptors.return_value = self._list_resources_get_result()
        resource_descriptor_list = self.descriptors.list()
        mock_api_list_descriptors.assert_called_once_with(filter_string=None)
        self.assertEqual(len(resource_descriptor_list), 2)
        self.assertEqual(resource_descriptor_list[0].type, RESOURCE_TYPES[0])
        self.assertEqual(resource_descriptor_list[1].type, RESOURCE_TYPES[1])

    @mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
    def test_list_w_api_filter(self, mock_api_list_descriptors):
        # A filter_string given at construction is forwarded to the API call.
        mock_api_list_descriptors.return_value = self._list_resources_get_result()
        descriptors = gcm.ResourceDescriptors(
            filter_string=FILTER_STRING, context=self.context)
        resource_descriptor_list = descriptors.list()
        mock_api_list_descriptors.assert_called_once_with(
            filter_string=FILTER_STRING)
        self.assertEqual(len(resource_descriptor_list), 2)
        self.assertEqual(resource_descriptor_list[0].type, RESOURCE_TYPES[0])
        self.assertEqual(resource_descriptor_list[1].type, RESOURCE_TYPES[1])

    @mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
    def test_list_w_pattern_match(self, mock_api_list_descriptors):
        # The glob pattern is applied client-side, after an unfiltered API call.
        mock_api_list_descriptors.return_value = self._list_resources_get_result()
        resource_descriptor_list = self.descriptors.list(pattern='*ec2*')
        mock_api_list_descriptors.assert_called_once_with(filter_string=None)
        self.assertEqual(len(resource_descriptor_list), 1)
        self.assertEqual(resource_descriptor_list[0].type, RESOURCE_TYPES[1])

    @mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
    def test_list_caching(self, mock_gcloud_list_descriptors):
        # A second list() call is served from cache: the API is hit exactly once.
        mock_gcloud_list_descriptors.return_value = (
            self._list_resources_get_result())
        actual_list1 = self.descriptors.list()
        actual_list2 = self.descriptors.list()
        mock_gcloud_list_descriptors.assert_called_once_with(filter_string=None)
        self.assertEqual(actual_list1, actual_list2)

    @mock.patch('datalab.stackdriver.monitoring.ResourceDescriptors.list')
    def test_as_dataframe(self, mock_datalab_list_descriptors):
        # as_dataframe() builds one row per descriptor with the display headers.
        mock_datalab_list_descriptors.return_value = (
            self._list_resources_get_result())
        dataframe = self.descriptors.as_dataframe()
        mock_datalab_list_descriptors.assert_called_once_with('*')
        expected_headers = list(gcm.ResourceDescriptors._DISPLAY_HEADERS)
        self.assertEqual(dataframe.columns.tolist(), expected_headers)
        self.assertEqual(dataframe.columns.names, [None])
        self.assertEqual(dataframe.index.tolist(), list(range(len(RESOURCE_TYPES))))
        self.assertEqual(dataframe.index.names, [None])
        # Labels are rendered as a single comma-separated cell.
        expected_labels = 'instance_id, project_id'
        expected_values = [
            [resource_type, display_name, expected_labels]
            for resource_type, display_name in zip(RESOURCE_TYPES, DISPLAY_NAMES)]
        self.assertEqual(dataframe.values.tolist(), expected_values)

    @mock.patch('datalab.stackdriver.monitoring.ResourceDescriptors.list')
    def test_as_dataframe_w_all_args(self, mock_datalab_list_descriptors):
        # pattern is forwarded to list(); max_rows truncates the frame.
        mock_datalab_list_descriptors.return_value = (
            self._list_resources_get_result())
        dataframe = self.descriptors.as_dataframe(pattern='*instance*', max_rows=1)
        mock_datalab_list_descriptors.assert_called_once_with('*instance*')
        expected_headers = list(gcm.ResourceDescriptors._DISPLAY_HEADERS)
        self.assertEqual(dataframe.columns.tolist(), expected_headers)
        self.assertEqual(dataframe.index.tolist(), [0])
        self.assertEqual(dataframe.iloc[0, 0], RESOURCE_TYPES[0])

    @staticmethod
    def _create_context(project_id='test'):
        # Dummy credentials: nothing here ever authenticates for real.
        creds = AccessTokenCredentials('test_token', 'test_ua')
        return datalab.context.Context(project_id, creds)

    @staticmethod
    def _list_resources_get_result():
        # Canned API response: two descriptors sharing the same label set.
        all_labels = [google.cloud.monitoring.LabelDescriptor(**labels)
                      for labels in LABELS]
        descriptors = [
            google.cloud.monitoring.ResourceDescriptor(
                name=None, type_=resource_type, display_name=display_name,
                description=None, labels=all_labels,
            )
            for resource_type, display_name in zip(RESOURCE_TYPES, DISPLAY_NAMES)]
        return descriptors | legacy_tests/stackdriver/monitoring/resource_tests.py |
from __future__ import absolute_import
import mock
from oauth2client.client import AccessTokenCredentials
import unittest
import google.cloud.monitoring
import datalab.context
import datalab.stackdriver.monitoring as gcm
PROJECT = 'my-project'
RESOURCE_TYPES = ['gce_instance', 'aws_ec2_instance']
DISPLAY_NAMES = ['GCE VM Instance', 'Amazon EC2 Instance']
LABELS = [dict(key='instance_id', value_type='STRING',
description='VM instance ID'),
dict(key='project_id', value_type='STRING',
description='Project ID')]
FILTER_STRING = 'resource.type = ends_with("instance")'
class TestCases(unittest.TestCase):
def setUp(self):
self.context = self._create_context()
self.descriptors = gcm.ResourceDescriptors(context=self.context)
@mock.patch('datalab.context._context.Context.default')
def test_constructor_minimal(self, mock_context_default):
mock_context_default.return_value = self.context
descriptors = gcm.ResourceDescriptors()
expected_client = gcm._utils.make_client(context=self.context)
self.assertEqual(descriptors._client.project, expected_client.project)
self.assertEqual(descriptors._client.connection.credentials,
expected_client.connection.credentials)
self.assertIsNone(descriptors._filter_string)
self.assertIsNone(descriptors._descriptors)
def test_constructor_maximal(self):
context = self._create_context(PROJECT)
descriptors = gcm.ResourceDescriptors(
filter_string=FILTER_STRING, project_id=PROJECT, context=context)
expected_client = gcm._utils.make_client(
context=context, project_id=PROJECT)
self.assertEqual(descriptors._client.project, expected_client.project)
self.assertEqual(descriptors._client.connection.credentials,
expected_client.connection.credentials)
self.assertEqual(descriptors._filter_string, FILTER_STRING)
self.assertIsNone(descriptors._descriptors)
@mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
def test_list(self, mock_api_list_descriptors):
mock_api_list_descriptors.return_value = self._list_resources_get_result()
resource_descriptor_list = self.descriptors.list()
mock_api_list_descriptors.assert_called_once_with(filter_string=None)
self.assertEqual(len(resource_descriptor_list), 2)
self.assertEqual(resource_descriptor_list[0].type, RESOURCE_TYPES[0])
self.assertEqual(resource_descriptor_list[1].type, RESOURCE_TYPES[1])
@mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
def test_list_w_api_filter(self, mock_api_list_descriptors):
mock_api_list_descriptors.return_value = self._list_resources_get_result()
descriptors = gcm.ResourceDescriptors(
filter_string=FILTER_STRING, context=self.context)
resource_descriptor_list = descriptors.list()
mock_api_list_descriptors.assert_called_once_with(
filter_string=FILTER_STRING)
self.assertEqual(len(resource_descriptor_list), 2)
self.assertEqual(resource_descriptor_list[0].type, RESOURCE_TYPES[0])
self.assertEqual(resource_descriptor_list[1].type, RESOURCE_TYPES[1])
@mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
def test_list_w_pattern_match(self, mock_api_list_descriptors):
mock_api_list_descriptors.return_value = self._list_resources_get_result()
resource_descriptor_list = self.descriptors.list(pattern='*ec2*')
mock_api_list_descriptors.assert_called_once_with(filter_string=None)
self.assertEqual(len(resource_descriptor_list), 1)
self.assertEqual(resource_descriptor_list[0].type, RESOURCE_TYPES[1])
@mock.patch('google.cloud.monitoring.Client.list_resource_descriptors')
def test_list_caching(self, mock_gcloud_list_descriptors):
mock_gcloud_list_descriptors.return_value = (
self._list_resources_get_result())
actual_list1 = self.descriptors.list()
actual_list2 = self.descriptors.list()
mock_gcloud_list_descriptors.assert_called_once_with(filter_string=None)
self.assertEqual(actual_list1, actual_list2)
@mock.patch('datalab.stackdriver.monitoring.ResourceDescriptors.list')
def test_as_dataframe(self, mock_datalab_list_descriptors):
mock_datalab_list_descriptors.return_value = (
self._list_resources_get_result())
dataframe = self.descriptors.as_dataframe()
mock_datalab_list_descriptors.assert_called_once_with('*')
expected_headers = list(gcm.ResourceDescriptors._DISPLAY_HEADERS)
self.assertEqual(dataframe.columns.tolist(), expected_headers)
self.assertEqual(dataframe.columns.names, [None])
self.assertEqual(dataframe.index.tolist(), list(range(len(RESOURCE_TYPES))))
self.assertEqual(dataframe.index.names, [None])
expected_labels = 'instance_id, project_id'
expected_values = [
[resource_type, display_name, expected_labels]
for resource_type, display_name in zip(RESOURCE_TYPES, DISPLAY_NAMES)]
self.assertEqual(dataframe.values.tolist(), expected_values)
@mock.patch('datalab.stackdriver.monitoring.ResourceDescriptors.list')
def test_as_dataframe_w_all_args(self, mock_datalab_list_descriptors):
mock_datalab_list_descriptors.return_value = (
self._list_resources_get_result())
dataframe = self.descriptors.as_dataframe(pattern='*instance*', max_rows=1)
mock_datalab_list_descriptors.assert_called_once_with('*instance*')
expected_headers = list(gcm.ResourceDescriptors._DISPLAY_HEADERS)
self.assertEqual(dataframe.columns.tolist(), expected_headers)
self.assertEqual(dataframe.index.tolist(), [0])
self.assertEqual(dataframe.iloc[0, 0], RESOURCE_TYPES[0])
@staticmethod
def _create_context(project_id='test'):
creds = AccessTokenCredentials('test_token', 'test_ua')
return datalab.context.Context(project_id, creds)
@staticmethod
def _list_resources_get_result():
all_labels = [google.cloud.monitoring.LabelDescriptor(**labels)
for labels in LABELS]
descriptors = [
google.cloud.monitoring.ResourceDescriptor(
name=None, type_=resource_type, display_name=display_name,
description=None, labels=all_labels,
)
for resource_type, display_name in zip(RESOURCE_TYPES, DISPLAY_NAMES)]
return descriptors | 0.566618 | 0.145025 |
from web3.auto import w3
from eth_utils import to_checksum_address
import ipfsApi
import argparse
import logging
#logging.getLogger("SensorSourceEvents").setLevel('DEBUG')
from sensorSource import (SensorSource, SensorSourceEvents, signWithPassword, encodeMultihash, MultiHash)
def sensor(password, sensorKeyFile, sensor, contractAddressFile):
    """Run this process as a sensor: wait for on-chain subscriptions and
    publish a batch of readings for each pending request.

    :param password: password unlocking the sensor's keystore file
    :param sensorKeyFile: path to the sensor's Ethereum keystore file
    :param sensor: the sensor's identity address (hex string)
    :param contractAddressFile: file holding the SensorSource contract address
    """
    with open(contractAddressFile, 'r') as f:
        contractAddress = f.read()
    sensorId = to_checksum_address(sensor)
    events = SensorSourceEvents(w3, contractAddress)
    def handleSubscription(metaDataHashFunction, metaDataHashLength, metaDataHash, requestCount, sensorId):
        # NOTE: this `sensorId` parameter shadows the outer variable; it is the
        # address carried by the Subscribed event, compared against our own.
        if to_checksum_address(sensor) == sensorId:
            source = SensorSource(w3, ipfsApi.Client(), contractAddress)
            sourceOnwerSigningCallback = signWithPassword(sensorKeyFile, password)
            # Fetch the request list (one reading-count per line) from IPFS.
            requests = ipfsApi.Client().cat(encodeMultihash(MultiHash(metaDataHashFunction, metaDataHashLength, metaDataHash)))
            publicationId = requestCount
            for pendingRequest in requests.splitlines():
                #values = [input('Publication #{} reading #{}: '.format(publicationId, i)) for i in range(int(pendingRequest))]
                values = ['Sensor {}, publication #{}, reading #{}'.format(sensorId, publicationId, i) for i in range(int(pendingRequest))]
                result = source.publish(
                    sensorId,
                    publicationId,
                    '\n'.join(values),
                    sourceOnwerSigningCallback
                )
                if result['success']:
                    print('Sensor values published as id {}'.format(publicationId))
                    # Publication ids count down from requestCount — presumably
                    # one id per pending request; TODO confirm against contract.
                    publicationId -= 1
                else:
                    # BUGFIX: the original passed the error as a second print()
                    # argument instead of formatting it into the message.
                    print('Registration failed: {}'.format(result['error']))
    print('Waiting for subscription to sensor @ {} on sensorSource @ {}'.format(sensorId, contractAddress))
    events.listen(handlers=dict(Subscribed = handleSubscription))
if __name__ == "__main__":
    # CLI entry point: every option has a development-friendly default so the
    # script runs against the bundled testnet keystore with no arguments.
    parser = argparse.ArgumentParser(description='Run as a sensor')
    parser.add_argument(
        '--password',
        metavar='<sensor address password>',
        help='The sensors password',
        default=''
    )
    parser.add_argument(
        '--sensorKeyFile',
        metavar='<sensor-key-file>',
        default='../testnet/keystore/UTC--2019-01-17T09-23-30.699759000Z--b91df2b07643a88c323b7fcbad226b377a3fb857'
    )
    parser.add_argument(
        '--sensor',
        metavar='<sensor address>',
        type=str,
        help="The sensors' identity address",
        default='0xb91df2b07643a88c323b7fcbad226b377a3fb857'
    )
    parser.add_argument(
        '--contractAddressFile',
        default='sensorSource.address'
    )
    args = parser.parse_args()
    # Option names intentionally match sensor()'s parameter names, so the
    # parsed namespace can be splatted straight into the call.
    sensor(**args.__dict__)
from eth_utils import to_checksum_address
import ipfsApi
import argparse
import logging
#logging.getLogger("SensorSourceEvents").setLevel('DEBUG')
from sensorSource import (SensorSource, SensorSourceEvents, signWithPassword, encodeMultihash, MultiHash)
def sensor(password, sensorKeyFile, sensor, contractAddressFile):
with open(contractAddressFile, 'r') as f:
contractAddress = f.read()
sensorId = to_checksum_address(sensor)
events = SensorSourceEvents(w3, contractAddress)
def handleSubscription(metaDataHashFunction, metaDataHashLength, metaDataHash, requestCount, sensorId):
if to_checksum_address(sensor) == sensorId:
source = SensorSource(w3, ipfsApi.Client(), contractAddress)
sourceOnwerSigningCallback = signWithPassword(sensorKeyFile, password)
requests = ipfsApi.Client().cat(encodeMultihash(MultiHash(metaDataHashFunction, metaDataHashLength, metaDataHash)))
publicationId = requestCount
for pendingRequest in requests.splitlines():
#values = [input('Publication #{} reading #{}: '.format(publicationId, i)) for i in range(int(pendingRequest))]
values = ['Sensor {}, publication #{}, reading #{}'.format(sensorId, publicationId, i) for i in range(int(pendingRequest))]
result = source.publish(
sensorId,
publicationId,
'\n'.join(values),
sourceOnwerSigningCallback
)
if result['success']:
print('Sensor values published as id {}'.format(publicationId))
publicationId -= 1
else:
print('Registration failed: {}', result['error'])
print('Waiting for subscription to sensor @ {} on sensorSource @ {}'.format(sensorId, contractAddress))
events.listen(handlers=dict(Subscribed = handleSubscription))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run as a sensor')
parser.add_argument(
'--password',
metavar='<sensor address password>',
help='The sensors password',
default=''
)
parser.add_argument(
'--sensorKeyFile',
metavar='<sensor-key-file>',
default='../testnet/keystore/UTC--2019-01-17T09-23-30.699759000Z--b91df2b07643a88c323b7fcbad226b377a3fb857'
)
parser.add_argument(
'--sensor',
metavar='<sensor address>',
type=str,
help="The sensors' identity address",
default='0xb91df2b07643a88c323b7fcbad226b377a3fb857'
)
parser.add_argument(
'--contractAddressFile',
default='sensorSource.address'
)
args = parser.parse_args()
sensor(**args.__dict__) | 0.338514 | 0.102036 |
import json
import os
import random
import unittest
from datetime import datetime
try:
from unittest import mock
except ImportError:
import mock
from six import PY2
import pandas as pd
import re
import great_expectations as ge
from great_expectations.dataset import PandasDataset, MetaPandasDataset
from great_expectations.data_asset.data_asset import (
_calc_validation_statistics,
ValidationStatistics,
)
from .test_utils import assertDeepAlmostEqual
def isprime(n):
    """Return True when abs(int(n)) is prime.

    The magnitude is tested, so negative inputs behave like their absolute
    value (isprime(-7) is True), and floats are truncated first — this
    mirrors the original behavior.
    """
    # make sure n is a positive integer
    candidate = abs(int(n))
    # 0 and 1 are not primes
    if candidate < 2:
        return False
    if candidate % 2 == 0:
        # 2 is the only even prime number
        return candidate == 2
    # Trial-divide by odd numbers up to sqrt(candidate). Using the exact
    # integer test `divisor * divisor <= candidate` instead of the original
    # `range(3, int(n**0.5) + 1, 2)` avoids float-sqrt under-rounding, which
    # could misclassify very large perfect squares (n >= 2**52) as prime.
    divisor = 3
    while divisor * divisor <= candidate:
        if candidate % divisor == 0:
            return False
        divisor += 2
    return True
class CustomPandasDataset(PandasDataset):
    """PandasDataset subclass defining two custom expectations for the tests."""

    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_prime(self, column):
        # Element-wise primality check; the decorator converts the boolean
        # Series into a standard expectation result.
        return column.map(isprime)

    @MetaPandasDataset.expectation(["column", "mostly"])
    def expect_column_values_to_equal_1(self, column, mostly=None):
        """Hand-rolled expectation: succeed when the column's non-null values
        equal 1, optionally tolerating up to (1 - mostly) failures."""
        not_null = self[column].notnull()
        result = self[column][not_null] == 1
        # `result == False` (not `not result`) is deliberate: pandas needs the
        # element-wise comparison to build the boolean mask.
        unexpected_values = list(self[column][not_null][result == False])
        if mostly:
            # Prevent division-by-zero errors
            if len(not_null) == 0:
                return {
                    'success': True,
                    'result': {
                        'unexpected_list': unexpected_values,
                        'unexpected_index_list': self.index[result],
                    }
                }
            percent_equaling_1 = float(sum(result))/len(not_null)
            return {
                "success": percent_equaling_1 >= mostly,
                'result': {
                    "unexpected_list": unexpected_values[:20],
                    "unexpected_index_list": list(self.index[result == False])[:20],
                }
            }
        else:
            # No mostly threshold: every non-null value must equal 1.
            return {
                "success": len(unexpected_values) == 0,
                'result': {
                    "unexpected_list": unexpected_values[:20],
                    "unexpected_index_list": list(self.index[result == False])[:20],
                }
            }
class TestCustomClass(unittest.TestCase):
def test_custom_class(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_csv(
script_path+'/test_sets/Titanic.csv',
dataset_class=CustomPandasDataset
)
df.set_default_expectation_argument("result_format", "COMPLETE")
self.assertEqual(
df.expect_column_values_to_be_prime(
'Age')['result']['unexpected_list'],
[30.0, 25.0, 0.92000000000000004, 63.0, 39.0, 58.0, 50.0, 24.0, 36.0, 26.0, 25.0, 25.0, 28.0, 45.0, 39.0,
30.0, 58.0, 45.0, 22.0, 48.0, 44.0, 60.0, 45.0, 58.0, 36.0, 33.0, 36.0, 36.0, 14.0, 49.0, 36.0, 46.0, 27.0,
27.0, 26.0, 64.0, 39.0, 55.0, 70.0, 69.0, 36.0, 39.0, 38.0, 27.0, 27.0, 4.0, 27.0, 50.0, 48.0, 49.0, 48.0,
39.0, 36.0, 30.0, 24.0, 28.0, 64.0, 60.0, 49.0, 44.0, 22.0, 60.0, 48.0, 35.0, 22.0, 45.0, 49.0, 54.0, 38.0,
58.0, 45.0, 46.0, 25.0, 21.0, 48.0, 49.0, 45.0, 36.0, 55.0, 52.0, 24.0, 16.0, 44.0, 51.0, 42.0, 35.0, 35.0,
38.0, 35.0, 50.0, 49.0, 46.0, 58.0, 42.0, 40.0, 42.0, 55.0, 50.0, 16.0, 21.0, 30.0, 15.0, 30.0, 46.0, 54.0,
36.0, 28.0, 65.0, 33.0, 44.0, 55.0, 36.0, 58.0, 64.0, 64.0, 22.0, 28.0, 22.0, 18.0, 52.0, 46.0, 56.0, 33.0,
27.0, 55.0, 54.0, 48.0, 18.0, 21.0, 34.0, 40.0, 36.0, 50.0, 39.0, 56.0, 28.0, 56.0, 56.0, 24.0, 18.0, 24.0,
45.0, 40.0, 6.0, 57.0, 32.0, 62.0, 54.0, 52.0, 62.0, 63.0, 46.0, 52.0, 39.0, 18.0, 48.0, 49.0, 39.0, 46.0,
64.0, 60.0, 60.0, 55.0, 54.0, 21.0, 57.0, 45.0, 50.0, 50.0, 27.0, 20.0, 51.0, 21.0, 36.0, 40.0, 32.0, 33.0,
30.0, 28.0, 18.0, 34.0, 32.0, 57.0, 18.0, 36.0, 28.0, 51.0, 32.0, 28.0, 36.0, 4.0, 1.0, 12.0, 34.0, 26.0,
27.0, 15.0, 45.0, 40.0, 20.0, 25.0, 36.0, 25.0, 42.0, 26.0, 26.0, 0.82999999999999996, 54.0, 44.0, 52.0,
30.0, 30.0, 27.0, 24.0, 35.0, 8.0, 22.0, 30.0, 20.0, 21.0, 49.0, 8.0, 28.0, 18.0, 28.0, 22.0, 25.0, 18.0,
32.0, 18.0, 42.0, 34.0, 8.0, 21.0, 38.0, 38.0, 35.0, 35.0, 38.0, 24.0, 16.0, 26.0, 45.0, 24.0, 21.0, 22.0,
34.0, 30.0, 50.0, 30.0, 1.0, 44.0, 28.0, 6.0, 30.0, 45.0, 24.0, 24.0, 49.0, 48.0, 34.0, 32.0, 21.0, 18.0,
21.0, 52.0, 42.0, 36.0, 21.0, 33.0, 34.0, 22.0, 45.0, 30.0, 26.0, 34.0, 26.0, 22.0, 1.0, 25.0, 48.0, 57.0,
27.0, 30.0, 20.0, 45.0, 46.0, 30.0, 48.0, 54.0, 64.0, 32.0, 18.0, 32.0, 26.0, 20.0, 39.0, 22.0, 24.0, 28.0,
50.0, 20.0, 40.0, 42.0, 21.0, 32.0, 34.0, 33.0, 8.0, 36.0, 34.0, 30.0, 28.0, 0.80000000000000004, 25.0,
50.0, 21.0, 25.0, 18.0, 20.0, 30.0, 30.0, 35.0, 22.0, 25.0, 25.0, 14.0, 50.0, 22.0, 27.0, 27.0, 30.0, 22.0,
35.0, 30.0, 28.0, 12.0, 40.0, 36.0, 28.0, 32.0, 4.0, 36.0, 33.0, 32.0, 26.0, 30.0, 24.0, 18.0, 42.0, 16.0,
35.0, 16.0, 25.0, 18.0, 20.0, 30.0, 26.0, 40.0, 24.0, 18.0, 0.82999999999999996, 20.0, 25.0, 35.0, 32.0,
20.0, 39.0, 39.0, 6.0, 38.0, 9.0, 26.0, 4.0, 20.0, 26.0, 25.0, 18.0, 24.0, 35.0, 40.0, 38.0, 9.0, 45.0,
27.0, 20.0, 32.0, 33.0, 18.0, 40.0, 26.0, 15.0, 45.0, 18.0, 27.0, 22.0, 26.0, 22.0, 20.0, 32.0, 21.0, 18.0,
26.0, 6.0, 9.0, 40.0, 32.0, 26.0, 18.0, 20.0, 22.0, 22.0, 35.0, 21.0, 20.0, 18.0, 18.0, 38.0, 30.0, 21.0,
21.0, 21.0, 24.0, 33.0, 33.0, 28.0, 16.0, 28.0, 24.0, 21.0, 32.0, 26.0, 18.0, 20.0, 24.0, 24.0, 36.0, 30.0,
22.0, 35.0, 27.0, 30.0, 36.0, 9.0, 44.0, 45.0, 22.0, 30.0, 34.0, 28.0, 0.33000000000000002, 27.0, 25.0,
24.0, 22.0, 21.0, 26.0, 33.0, 1.0, 0.17000000000000001, 25.0, 36.0, 36.0, 30.0, 26.0, 65.0, 42.0, 32.0,
30.0, 24.0, 24.0, 24.0, 22.0, 18.0, 16.0, 45.0, 21.0, 18.0, 9.0, 48.0, 16.0, 25.0, 38.0, 22.0, 16.0, 33.0,
9.0, 38.0, 40.0, 14.0, 16.0, 9.0, 10.0, 6.0, 40.0, 32.0, 20.0, 28.0, 24.0, 28.0, 24.0, 20.0, 45.0, 26.0,
21.0, 27.0, 18.0, 26.0, 22.0, 28.0, 22.0, 27.0, 42.0, 27.0, 25.0, 27.0, 20.0, 48.0, 34.0, 22.0, 33.0, 32.0,
26.0, 49.0, 1.0, 33.0, 4.0, 24.0, 32.0, 27.0, 21.0, 32.0, 20.0, 21.0, 30.0, 21.0, 22.0, 4.0, 39.0, 20.0,
21.0, 44.0, 42.0, 21.0, 24.0, 25.0, 22.0, 22.0, 39.0, 26.0, 4.0, 22.0, 26.0, 1.5, 36.0, 18.0, 25.0, 22.0,
20.0, 26.0, 22.0, 32.0, 21.0, 21.0, 36.0, 39.0, 25.0, 45.0, 36.0, 30.0, 20.0, 21.0, 1.5, 25.0, 18.0, 63.0,
18.0, 15.0, 28.0, 36.0, 28.0, 10.0, 36.0, 30.0, 22.0, 14.0, 22.0, 51.0, 18.0, 45.0, 28.0, 21.0, 27.0, 36.0,
27.0, 15.0, 27.0, 26.0, 22.0, 24.0]
)
primes = [3, 5, 7, 11, 13, 17, 23, 31]
df["primes"] = df.Age.map(lambda x: random.choice(primes))
self.assertEqual(
df.expect_column_values_to_be_prime(
"primes")['result']['unexpected_list'],
[]
)
def test_custom_expectation(self):
df = CustomPandasDataset({'x': [1, 1, 1, 1, 2]})
df.set_default_expectation_argument("result_format", "COMPLETE")
out = df.expect_column_values_to_be_prime('x')
t = {'out': {'unexpected_list': [1, 1, 1, 1], 'unexpected_index_list': [
0, 1, 2, 3], 'success': False}}
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'],
out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'],
out['result']['unexpected_list'])
out = df.expect_column_values_to_equal_1('x', mostly=.8)
print(out)
t = {'out': {'unexpected_list': [
2], 'unexpected_index_list': [4], 'success': True}}
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'],
out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'],
out['result']['unexpected_list'])
# Ensure that Custom Data Set classes can properly call non-overridden methods from their parent class
def test_base_class_expectation(self):
df = CustomPandasDataset({
"aaa": [1, 2, 3, 4, 5],
"bbb": [10, 20, 30, 40, 50],
"ccc": [9, 10, 11, 12, 13],
})
self.assertEqual(
df.expect_column_values_to_be_between(
"aaa", min_value=1, max_value=5)['success'],
True
)
class TestValidation(unittest.TestCase):
def test_validate(self):
with open("./tests/test_sets/titanic_expectations.json") as f:
my_expectation_suite = json.load(f)
my_df = ge.read_csv(
"./tests/test_sets/Titanic.csv",
expectation_suite=my_expectation_suite
)
my_df.set_default_expectation_argument("result_format", "COMPLETE")
with mock.patch("datetime.datetime") as mock_datetime:
mock_datetime.utcnow.return_value = datetime(1955, 11, 5)
results = my_df.validate(catch_exceptions=False)
with open('./tests/test_sets/titanic_expected_data_asset_validate_results.json') as f:
expected_results = json.load(f)
del results["meta"]["great_expectations.__version__"]
self.maxDiff = None
# order is not guaranteed (or important in this case) but sorting is possible in PY2
if PY2:
results["results"] = sorted(results["results"])
expected_results["results"] = sorted(expected_results["results"])
assertDeepAlmostEqual(
results,
expected_results
)
# Now, change the results and ensure they are no longer equal
results[0] = {}
self.assertNotEqual(results,
expected_results
)
# Finally, confirm that only_return_failures works
# and does not affect the "statistics" field.
with mock.patch("datetime.datetime") as mock_datetime:
mock_datetime.utcnow.return_value = datetime(1955, 11, 5)
validation_results = my_df.validate(only_return_failures=True)
del validation_results["meta"]["great_expectations.__version__"]
assertDeepAlmostEqual(
validation_results,
{
"meta": {
"data_asset_name": None,
"expectation_suite_name": "default",
"run_id": "1955-11-05T000000Z"
},
"results": [
{"expectation_config": {
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "PClass", "value_set": ["1st", "2nd", "3rd"], "result_format": "COMPLETE"}
},
"success": False,
"exception_info": {"exception_message": None,
"exception_traceback": None,
"raised_exception": False},
"result": {"partial_unexpected_index_list": [456], "unexpected_count": 1, "unexpected_list": ["*"],
"unexpected_percent": 0.0007616146230007616, "element_count": 1313,
"missing_percent": 0.0, "partial_unexpected_counts": [{"count": 1, "value": "*"}],
"partial_unexpected_list": ["*"],
"unexpected_percent_nonmissing": 0.0007616146230007616, "missing_count": 0,
"unexpected_index_list": [456]}}
],
"success": expected_results["success"], # unaffected
"statistics": expected_results["statistics"], # unaffected
}
)
def test_validate_catch_non_existent_expectation(self):
df = ge.dataset.PandasDataset({
"x": [1, 2, 3, 4, 5]
})
validation_config_non_existent_expectation = {
"data_asset_name": None,
"expectation_suite_name": "default",
"meta": {
"great_expectations.__version__": ge.__version__
},
"expectations": [{
"expectation_type": "non_existent_expectation",
"kwargs": {
"column": "x"
}
}]
}
results = df.validate(
expectation_suite=validation_config_non_existent_expectation)['results']
self.assertIn(
"object has no attribute 'non_existent_expectation'",
results[0]['exception_info']['exception_message']
)
def test_validate_catch_invalid_parameter(self):
df = ge.dataset.PandasDataset({
"x": [1, 2, 3, 4, 5]
})
validation_config_invalid_parameter = {
"data_asset_name": None,
"expectation_suite_name": "default",
"meta": {
"great_expectations.__version__": ge.__version__
},
"expectations": [{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"column": "x",
"min_value": 6,
"max_value": 5
}
}]
}
results = df.validate(expectation_suite=validation_config_invalid_parameter)[
'results']
print(results[0]['exception_info'])
self.assertIn(
"min_value cannot be greater than max_value",
results[0]['exception_info']['exception_message']
)
class TestValidationStatisticsCalculation(unittest.TestCase):
def test_no_expectations(self):
expectation_results = []
actual = _calc_validation_statistics(expectation_results)
# pay attention to these two
self.assertEqual(actual.success_percent, None)
self.assertEqual(actual.success, True)
# the rest is boring
self.assertEqual(actual.successful_expectations, 0)
self.assertEqual(actual.evaluated_expectations, 0)
self.assertEqual(actual.unsuccessful_expectations, 0)
def test_no_succesful_expectations(self):
expectation_results = [
{"success": False},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(1, 0, 1, 0., False)
assertDeepAlmostEqual(actual, expected)
expectation_results = [
{"success": False},
{"success": False},
{"success": False},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(3, 0, 3, 0., False)
assertDeepAlmostEqual(actual, expected)
def test_all_succesful_expectations(self):
expectation_results = [
{"success": True},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(1, 1, 0, 100.0, True)
assertDeepAlmostEqual(actual, expected)
expectation_results = [
{"success": True},
{"success": True},
{"success": True},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(3, 3, 0, 100.0, True)
assertDeepAlmostEqual(actual, expected)
def test_mixed_expectations(self):
expectation_results = [
{"success": False},
{"success": True},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(2, 1, 1, 50.0, False)
assertDeepAlmostEqual(actual, expected)
class TestRepeatedAppendExpectation(unittest.TestCase):
def test_validate(self):
with open("./tests/test_sets/titanic_expectations.json") as f:
my_expectation_suite = json.load(f)
my_df = ge.read_csv("./tests/test_sets/Titanic.csv",
profiler=ge.profile.ColumnsExistProfiler)
self.assertEqual(
len(my_df.get_expectation_suite()['expectations']),
7
)
# For column_expectations, _append_expectation should only replace expectations where the expetation_type AND the column match
my_df.expect_column_to_exist("PClass")
self.assertEqual(
len(my_df.get_expectation_suite()['expectations']),
7
)
class TestIO(unittest.TestCase):
def test_read_csv(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_csv(
script_path+'/test_sets/Titanic.csv',
)
def test_read_json(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_json(
script_path+'/test_sets/test_json_data_file.json',
)
df = ge.read_json(
script_path+'/test_sets/nested_test_json_data_file.json',
accessor_func=lambda x: x["data"]
)
def test_read_excel(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_excel(
script_path+'/test_sets/Titanic_multi_sheet.xlsx',
)
assert df['Name'][0] == 'Allen, <NAME>'
assert isinstance(df, PandasDataset)
# Note that pandas changed the parameter name from sheetname to sheet_name.
# We will test with both options to ensure that the versions are correct.
pandas_version = pd.__version__
if re.match('0\.2[012]\.', pandas_version) is not None:
dfs_dict = ge.read_excel(
script_path+'/test_sets/Titanic_multi_sheet.xlsx',
sheetname=None
)
else:
dfs_dict = ge.read_excel(
script_path+'/test_sets/Titanic_multi_sheet.xlsx',
sheet_name=None
)
assert isinstance(dfs_dict, dict)
assert list(dfs_dict.keys()) == ['Titanic_1', 'Titanic_2', 'Titanic_3']
assert isinstance(dfs_dict['Titanic_1'], PandasDataset)
assert dfs_dict['Titanic_1']['Name'][0] == '<NAME>'
def test_read_table(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_table(
script_path+'/test_sets/Titanic.csv',
sep=','
)
assert df['Name'][0] == 'Allen, <NAME>'
assert isinstance(df, PandasDataset)
def test_read_parquet(self):
"""
This test is unusual, because on travis (but only on travis), we have observed problems importing pyarrow,
which breaks this test (since it requires pyarrow available).
The issue seems to be related to a binary compatibility issue with the installed/available version of numpy:
pyarrow 0.10 requires numpy >= 1.14.
Since pyarrow is not in our actual requirements, we are not going to adjust up the required numpy version.
"""
# Pass this test if the available version of pandas is less than 0.21.0, because prior
# versions of pandas did not include the read_parquet function.
pandas_version = re.match('0\.(.*)\..*', pd.__version__)
if pandas_version is None:
raise ValueError("Unrecognized pandas version!")
else:
pandas_version = int(pandas_version.group(1))
if pandas_version < 21:
return
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_parquet(
script_path+'/test_sets/Titanic.parquet'
)
assert df['Name'][1] == 'Allen, <NAME>'
assert isinstance(df, PandasDataset)
if __name__ == "__main__":
unittest.main() | tests/test_great_expectations.py | import json
import os
import random
import unittest
from datetime import datetime
try:
from unittest import mock
except ImportError:
import mock
from six import PY2
import pandas as pd
import re
import great_expectations as ge
from great_expectations.dataset import PandasDataset, MetaPandasDataset
from great_expectations.data_asset.data_asset import (
_calc_validation_statistics,
ValidationStatistics,
)
from .test_utils import assertDeepAlmostEqual
def isprime(n):
# https://stackoverflow.com/questions/18833759/python-prime-number-checker
'''check if integer n is a prime'''
# make sure n is a positive integer
n = abs(int(n))
# 0 and 1 are not primes
if n < 2:
return False
# 2 is the only even prime number
if n == 2:
return True
# all other even numbers are not primes
if not n & 1:
return False
# range starts with 3 and only needs to go up
# the square root of n for all odd numbers
for x in range(3, int(n**0.5) + 1, 2):
if n % x == 0:
return False
return True
class CustomPandasDataset(PandasDataset):
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_prime(self, column):
return column.map(isprime)
@MetaPandasDataset.expectation(["column", "mostly"])
def expect_column_values_to_equal_1(self, column, mostly=None):
not_null = self[column].notnull()
result = self[column][not_null] == 1
unexpected_values = list(self[column][not_null][result == False])
if mostly:
# Prevent division-by-zero errors
if len(not_null) == 0:
return {
'success': True,
'result': {
'unexpected_list': unexpected_values,
'unexpected_index_list': self.index[result],
}
}
percent_equaling_1 = float(sum(result))/len(not_null)
return {
"success": percent_equaling_1 >= mostly,
'result': {
"unexpected_list": unexpected_values[:20],
"unexpected_index_list": list(self.index[result == False])[:20],
}
}
else:
return {
"success": len(unexpected_values) == 0,
'result': {
"unexpected_list": unexpected_values[:20],
"unexpected_index_list": list(self.index[result == False])[:20],
}
}
class TestCustomClass(unittest.TestCase):
def test_custom_class(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_csv(
script_path+'/test_sets/Titanic.csv',
dataset_class=CustomPandasDataset
)
df.set_default_expectation_argument("result_format", "COMPLETE")
self.assertEqual(
df.expect_column_values_to_be_prime(
'Age')['result']['unexpected_list'],
[30.0, 25.0, 0.92000000000000004, 63.0, 39.0, 58.0, 50.0, 24.0, 36.0, 26.0, 25.0, 25.0, 28.0, 45.0, 39.0,
30.0, 58.0, 45.0, 22.0, 48.0, 44.0, 60.0, 45.0, 58.0, 36.0, 33.0, 36.0, 36.0, 14.0, 49.0, 36.0, 46.0, 27.0,
27.0, 26.0, 64.0, 39.0, 55.0, 70.0, 69.0, 36.0, 39.0, 38.0, 27.0, 27.0, 4.0, 27.0, 50.0, 48.0, 49.0, 48.0,
39.0, 36.0, 30.0, 24.0, 28.0, 64.0, 60.0, 49.0, 44.0, 22.0, 60.0, 48.0, 35.0, 22.0, 45.0, 49.0, 54.0, 38.0,
58.0, 45.0, 46.0, 25.0, 21.0, 48.0, 49.0, 45.0, 36.0, 55.0, 52.0, 24.0, 16.0, 44.0, 51.0, 42.0, 35.0, 35.0,
38.0, 35.0, 50.0, 49.0, 46.0, 58.0, 42.0, 40.0, 42.0, 55.0, 50.0, 16.0, 21.0, 30.0, 15.0, 30.0, 46.0, 54.0,
36.0, 28.0, 65.0, 33.0, 44.0, 55.0, 36.0, 58.0, 64.0, 64.0, 22.0, 28.0, 22.0, 18.0, 52.0, 46.0, 56.0, 33.0,
27.0, 55.0, 54.0, 48.0, 18.0, 21.0, 34.0, 40.0, 36.0, 50.0, 39.0, 56.0, 28.0, 56.0, 56.0, 24.0, 18.0, 24.0,
45.0, 40.0, 6.0, 57.0, 32.0, 62.0, 54.0, 52.0, 62.0, 63.0, 46.0, 52.0, 39.0, 18.0, 48.0, 49.0, 39.0, 46.0,
64.0, 60.0, 60.0, 55.0, 54.0, 21.0, 57.0, 45.0, 50.0, 50.0, 27.0, 20.0, 51.0, 21.0, 36.0, 40.0, 32.0, 33.0,
30.0, 28.0, 18.0, 34.0, 32.0, 57.0, 18.0, 36.0, 28.0, 51.0, 32.0, 28.0, 36.0, 4.0, 1.0, 12.0, 34.0, 26.0,
27.0, 15.0, 45.0, 40.0, 20.0, 25.0, 36.0, 25.0, 42.0, 26.0, 26.0, 0.82999999999999996, 54.0, 44.0, 52.0,
30.0, 30.0, 27.0, 24.0, 35.0, 8.0, 22.0, 30.0, 20.0, 21.0, 49.0, 8.0, 28.0, 18.0, 28.0, 22.0, 25.0, 18.0,
32.0, 18.0, 42.0, 34.0, 8.0, 21.0, 38.0, 38.0, 35.0, 35.0, 38.0, 24.0, 16.0, 26.0, 45.0, 24.0, 21.0, 22.0,
34.0, 30.0, 50.0, 30.0, 1.0, 44.0, 28.0, 6.0, 30.0, 45.0, 24.0, 24.0, 49.0, 48.0, 34.0, 32.0, 21.0, 18.0,
21.0, 52.0, 42.0, 36.0, 21.0, 33.0, 34.0, 22.0, 45.0, 30.0, 26.0, 34.0, 26.0, 22.0, 1.0, 25.0, 48.0, 57.0,
27.0, 30.0, 20.0, 45.0, 46.0, 30.0, 48.0, 54.0, 64.0, 32.0, 18.0, 32.0, 26.0, 20.0, 39.0, 22.0, 24.0, 28.0,
50.0, 20.0, 40.0, 42.0, 21.0, 32.0, 34.0, 33.0, 8.0, 36.0, 34.0, 30.0, 28.0, 0.80000000000000004, 25.0,
50.0, 21.0, 25.0, 18.0, 20.0, 30.0, 30.0, 35.0, 22.0, 25.0, 25.0, 14.0, 50.0, 22.0, 27.0, 27.0, 30.0, 22.0,
35.0, 30.0, 28.0, 12.0, 40.0, 36.0, 28.0, 32.0, 4.0, 36.0, 33.0, 32.0, 26.0, 30.0, 24.0, 18.0, 42.0, 16.0,
35.0, 16.0, 25.0, 18.0, 20.0, 30.0, 26.0, 40.0, 24.0, 18.0, 0.82999999999999996, 20.0, 25.0, 35.0, 32.0,
20.0, 39.0, 39.0, 6.0, 38.0, 9.0, 26.0, 4.0, 20.0, 26.0, 25.0, 18.0, 24.0, 35.0, 40.0, 38.0, 9.0, 45.0,
27.0, 20.0, 32.0, 33.0, 18.0, 40.0, 26.0, 15.0, 45.0, 18.0, 27.0, 22.0, 26.0, 22.0, 20.0, 32.0, 21.0, 18.0,
26.0, 6.0, 9.0, 40.0, 32.0, 26.0, 18.0, 20.0, 22.0, 22.0, 35.0, 21.0, 20.0, 18.0, 18.0, 38.0, 30.0, 21.0,
21.0, 21.0, 24.0, 33.0, 33.0, 28.0, 16.0, 28.0, 24.0, 21.0, 32.0, 26.0, 18.0, 20.0, 24.0, 24.0, 36.0, 30.0,
22.0, 35.0, 27.0, 30.0, 36.0, 9.0, 44.0, 45.0, 22.0, 30.0, 34.0, 28.0, 0.33000000000000002, 27.0, 25.0,
24.0, 22.0, 21.0, 26.0, 33.0, 1.0, 0.17000000000000001, 25.0, 36.0, 36.0, 30.0, 26.0, 65.0, 42.0, 32.0,
30.0, 24.0, 24.0, 24.0, 22.0, 18.0, 16.0, 45.0, 21.0, 18.0, 9.0, 48.0, 16.0, 25.0, 38.0, 22.0, 16.0, 33.0,
9.0, 38.0, 40.0, 14.0, 16.0, 9.0, 10.0, 6.0, 40.0, 32.0, 20.0, 28.0, 24.0, 28.0, 24.0, 20.0, 45.0, 26.0,
21.0, 27.0, 18.0, 26.0, 22.0, 28.0, 22.0, 27.0, 42.0, 27.0, 25.0, 27.0, 20.0, 48.0, 34.0, 22.0, 33.0, 32.0,
26.0, 49.0, 1.0, 33.0, 4.0, 24.0, 32.0, 27.0, 21.0, 32.0, 20.0, 21.0, 30.0, 21.0, 22.0, 4.0, 39.0, 20.0,
21.0, 44.0, 42.0, 21.0, 24.0, 25.0, 22.0, 22.0, 39.0, 26.0, 4.0, 22.0, 26.0, 1.5, 36.0, 18.0, 25.0, 22.0,
20.0, 26.0, 22.0, 32.0, 21.0, 21.0, 36.0, 39.0, 25.0, 45.0, 36.0, 30.0, 20.0, 21.0, 1.5, 25.0, 18.0, 63.0,
18.0, 15.0, 28.0, 36.0, 28.0, 10.0, 36.0, 30.0, 22.0, 14.0, 22.0, 51.0, 18.0, 45.0, 28.0, 21.0, 27.0, 36.0,
27.0, 15.0, 27.0, 26.0, 22.0, 24.0]
)
primes = [3, 5, 7, 11, 13, 17, 23, 31]
df["primes"] = df.Age.map(lambda x: random.choice(primes))
self.assertEqual(
df.expect_column_values_to_be_prime(
"primes")['result']['unexpected_list'],
[]
)
def test_custom_expectation(self):
df = CustomPandasDataset({'x': [1, 1, 1, 1, 2]})
df.set_default_expectation_argument("result_format", "COMPLETE")
out = df.expect_column_values_to_be_prime('x')
t = {'out': {'unexpected_list': [1, 1, 1, 1], 'unexpected_index_list': [
0, 1, 2, 3], 'success': False}}
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'],
out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'],
out['result']['unexpected_list'])
out = df.expect_column_values_to_equal_1('x', mostly=.8)
print(out)
t = {'out': {'unexpected_list': [
2], 'unexpected_index_list': [4], 'success': True}}
self.assertEqual(t['out']['success'], out['success'])
if 'unexpected_index_list' in t['out']:
self.assertEqual(t['out']['unexpected_index_list'],
out['result']['unexpected_index_list'])
if 'unexpected_list' in t['out']:
self.assertEqual(t['out']['unexpected_list'],
out['result']['unexpected_list'])
# Ensure that Custom Data Set classes can properly call non-overridden methods from their parent class
def test_base_class_expectation(self):
df = CustomPandasDataset({
"aaa": [1, 2, 3, 4, 5],
"bbb": [10, 20, 30, 40, 50],
"ccc": [9, 10, 11, 12, 13],
})
self.assertEqual(
df.expect_column_values_to_be_between(
"aaa", min_value=1, max_value=5)['success'],
True
)
class TestValidation(unittest.TestCase):
def test_validate(self):
with open("./tests/test_sets/titanic_expectations.json") as f:
my_expectation_suite = json.load(f)
my_df = ge.read_csv(
"./tests/test_sets/Titanic.csv",
expectation_suite=my_expectation_suite
)
my_df.set_default_expectation_argument("result_format", "COMPLETE")
with mock.patch("datetime.datetime") as mock_datetime:
mock_datetime.utcnow.return_value = datetime(1955, 11, 5)
results = my_df.validate(catch_exceptions=False)
with open('./tests/test_sets/titanic_expected_data_asset_validate_results.json') as f:
expected_results = json.load(f)
del results["meta"]["great_expectations.__version__"]
self.maxDiff = None
# order is not guaranteed (or important in this case) but sorting is possible in PY2
if PY2:
results["results"] = sorted(results["results"])
expected_results["results"] = sorted(expected_results["results"])
assertDeepAlmostEqual(
results,
expected_results
)
# Now, change the results and ensure they are no longer equal
results[0] = {}
self.assertNotEqual(results,
expected_results
)
# Finally, confirm that only_return_failures works
# and does not affect the "statistics" field.
with mock.patch("datetime.datetime") as mock_datetime:
mock_datetime.utcnow.return_value = datetime(1955, 11, 5)
validation_results = my_df.validate(only_return_failures=True)
del validation_results["meta"]["great_expectations.__version__"]
assertDeepAlmostEqual(
validation_results,
{
"meta": {
"data_asset_name": None,
"expectation_suite_name": "default",
"run_id": "1955-11-05T000000Z"
},
"results": [
{"expectation_config": {
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {"column": "PClass", "value_set": ["1st", "2nd", "3rd"], "result_format": "COMPLETE"}
},
"success": False,
"exception_info": {"exception_message": None,
"exception_traceback": None,
"raised_exception": False},
"result": {"partial_unexpected_index_list": [456], "unexpected_count": 1, "unexpected_list": ["*"],
"unexpected_percent": 0.0007616146230007616, "element_count": 1313,
"missing_percent": 0.0, "partial_unexpected_counts": [{"count": 1, "value": "*"}],
"partial_unexpected_list": ["*"],
"unexpected_percent_nonmissing": 0.0007616146230007616, "missing_count": 0,
"unexpected_index_list": [456]}}
],
"success": expected_results["success"], # unaffected
"statistics": expected_results["statistics"], # unaffected
}
)
def test_validate_catch_non_existent_expectation(self):
df = ge.dataset.PandasDataset({
"x": [1, 2, 3, 4, 5]
})
validation_config_non_existent_expectation = {
"data_asset_name": None,
"expectation_suite_name": "default",
"meta": {
"great_expectations.__version__": ge.__version__
},
"expectations": [{
"expectation_type": "non_existent_expectation",
"kwargs": {
"column": "x"
}
}]
}
results = df.validate(
expectation_suite=validation_config_non_existent_expectation)['results']
self.assertIn(
"object has no attribute 'non_existent_expectation'",
results[0]['exception_info']['exception_message']
)
def test_validate_catch_invalid_parameter(self):
df = ge.dataset.PandasDataset({
"x": [1, 2, 3, 4, 5]
})
validation_config_invalid_parameter = {
"data_asset_name": None,
"expectation_suite_name": "default",
"meta": {
"great_expectations.__version__": ge.__version__
},
"expectations": [{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"column": "x",
"min_value": 6,
"max_value": 5
}
}]
}
results = df.validate(expectation_suite=validation_config_invalid_parameter)[
'results']
print(results[0]['exception_info'])
self.assertIn(
"min_value cannot be greater than max_value",
results[0]['exception_info']['exception_message']
)
class TestValidationStatisticsCalculation(unittest.TestCase):
def test_no_expectations(self):
expectation_results = []
actual = _calc_validation_statistics(expectation_results)
# pay attention to these two
self.assertEqual(actual.success_percent, None)
self.assertEqual(actual.success, True)
# the rest is boring
self.assertEqual(actual.successful_expectations, 0)
self.assertEqual(actual.evaluated_expectations, 0)
self.assertEqual(actual.unsuccessful_expectations, 0)
def test_no_succesful_expectations(self):
expectation_results = [
{"success": False},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(1, 0, 1, 0., False)
assertDeepAlmostEqual(actual, expected)
expectation_results = [
{"success": False},
{"success": False},
{"success": False},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(3, 0, 3, 0., False)
assertDeepAlmostEqual(actual, expected)
def test_all_succesful_expectations(self):
expectation_results = [
{"success": True},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(1, 1, 0, 100.0, True)
assertDeepAlmostEqual(actual, expected)
expectation_results = [
{"success": True},
{"success": True},
{"success": True},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(3, 3, 0, 100.0, True)
assertDeepAlmostEqual(actual, expected)
def test_mixed_expectations(self):
expectation_results = [
{"success": False},
{"success": True},
]
actual = _calc_validation_statistics(expectation_results)
expected = ValidationStatistics(2, 1, 1, 50.0, False)
assertDeepAlmostEqual(actual, expected)
class TestRepeatedAppendExpectation(unittest.TestCase):
def test_validate(self):
with open("./tests/test_sets/titanic_expectations.json") as f:
my_expectation_suite = json.load(f)
my_df = ge.read_csv("./tests/test_sets/Titanic.csv",
profiler=ge.profile.ColumnsExistProfiler)
self.assertEqual(
len(my_df.get_expectation_suite()['expectations']),
7
)
# For column_expectations, _append_expectation should only replace expectations where the expetation_type AND the column match
my_df.expect_column_to_exist("PClass")
self.assertEqual(
len(my_df.get_expectation_suite()['expectations']),
7
)
class TestIO(unittest.TestCase):
def test_read_csv(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_csv(
script_path+'/test_sets/Titanic.csv',
)
def test_read_json(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_json(
script_path+'/test_sets/test_json_data_file.json',
)
df = ge.read_json(
script_path+'/test_sets/nested_test_json_data_file.json',
accessor_func=lambda x: x["data"]
)
def test_read_excel(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_excel(
script_path+'/test_sets/Titanic_multi_sheet.xlsx',
)
assert df['Name'][0] == 'Allen, <NAME>'
assert isinstance(df, PandasDataset)
# Note that pandas changed the parameter name from sheetname to sheet_name.
# We will test with both options to ensure that the versions are correct.
pandas_version = pd.__version__
if re.match('0\.2[012]\.', pandas_version) is not None:
dfs_dict = ge.read_excel(
script_path+'/test_sets/Titanic_multi_sheet.xlsx',
sheetname=None
)
else:
dfs_dict = ge.read_excel(
script_path+'/test_sets/Titanic_multi_sheet.xlsx',
sheet_name=None
)
assert isinstance(dfs_dict, dict)
assert list(dfs_dict.keys()) == ['Titanic_1', 'Titanic_2', 'Titanic_3']
assert isinstance(dfs_dict['Titanic_1'], PandasDataset)
assert dfs_dict['Titanic_1']['Name'][0] == '<NAME>'
def test_read_table(self):
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_table(
script_path+'/test_sets/Titanic.csv',
sep=','
)
assert df['Name'][0] == 'Allen, <NAME>'
assert isinstance(df, PandasDataset)
def test_read_parquet(self):
"""
This test is unusual, because on travis (but only on travis), we have observed problems importing pyarrow,
which breaks this test (since it requires pyarrow available).
The issue seems to be related to a binary compatibility issue with the installed/available version of numpy:
pyarrow 0.10 requires numpy >= 1.14.
Since pyarrow is not in our actual requirements, we are not going to adjust up the required numpy version.
"""
# Pass this test if the available version of pandas is less than 0.21.0, because prior
# versions of pandas did not include the read_parquet function.
pandas_version = re.match('0\.(.*)\..*', pd.__version__)
if pandas_version is None:
raise ValueError("Unrecognized pandas version!")
else:
pandas_version = int(pandas_version.group(1))
if pandas_version < 21:
return
script_path = os.path.dirname(os.path.realpath(__file__))
df = ge.read_parquet(
script_path+'/test_sets/Titanic.parquet'
)
assert df['Name'][1] == 'Allen, <NAME>'
assert isinstance(df, PandasDataset)
if __name__ == "__main__":
unittest.main() | 0.589244 | 0.53279 |
import networkx as nx
import os.path
import time
from copy import deepcopy
import random
class LSWLCommunityDiscovery(object):
minimum_improvement = 0.000001
def __init__(self, graph, strength_type, timeout):
# initializes the object
self.graph = graph
self.strength_type = strength_type
self.starting_node = None
self.community = []
self.shell = set()
self.remove_self_loops()
self.dict_common_neighbors = {}
self.max_common_neighbors = {}
self.strength_assigned_nodes = set()
self.timer_timeout = timeout
def reset(self):
self.community.clear()
self.shell.clear()
def remove_self_loops(self):
for node in self.graph.nodes():
if self.graph.has_edge(node, node):
self.graph.remove_edge(node, node)
def set_start_node(self, start_node):
if start_node in self.graph.nodes():
self.starting_node = start_node
self.community.append(start_node)
self.shell = set(self.graph.neighbors(start_node))
else:
print('Invalid starting node! Try with another one.')
exit(-1)
def update_sets_when_node_joins(self, node):
self.community.append(node)
self.update_shell_when_node_joins(node)
def update_shell_when_node_joins(self, new_node):
self.shell.update(self.graph.neighbors(new_node))
for node in self.community:
self.shell.discard(node)
def update_dicts_of_common_neighbors_info(self, node):
if (node in self.dict_common_neighbors) is False:
self.dict_common_neighbors[node] = {}
self.max_common_neighbors[node] = -1
for neighbor in self.graph.neighbors(node):
if (neighbor in self.dict_common_neighbors[node]) is False:
if (neighbor in self.dict_common_neighbors) is False:
self.dict_common_neighbors[neighbor] = {}
self.max_common_neighbors[neighbor] = -1
number_common_neighbors = sum(1 for _ in nx.common_neighbors(self.graph, node, neighbor))
self.dict_common_neighbors[node][neighbor] = number_common_neighbors
self.dict_common_neighbors[neighbor][node] = number_common_neighbors
if number_common_neighbors > self.max_common_neighbors[node]:
self.max_common_neighbors[node] = number_common_neighbors
if number_common_neighbors > self.max_common_neighbors[neighbor]:
self.max_common_neighbors[neighbor] = number_common_neighbors
def assign_local_strength(self, node):
if node in self.strength_assigned_nodes:
return
self.update_dicts_of_common_neighbors_info(node)
max_mutual_node = self.max_common_neighbors.get(node)
for neighbor in self.graph.neighbors(node):
max_mutual_neighbor = self.max_common_neighbors.get(neighbor)
strength = self.dict_common_neighbors.get(node).get(neighbor)
try:
s1 = strength / max_mutual_node
except ZeroDivisionError:
s1 = 0.0
try:
s2 = strength / max_mutual_neighbor
except ZeroDivisionError:
s2 = 0.0
strength = s1 + s2 - 1.0 if self.strength_type == 1 else (s1 + s2) / 2.0
self.graph.add_edge(node, neighbor, strength=strength)
self.strength_assigned_nodes.add(node)
def find_best_next_node(self, improvements):
new_node = self.community[-1]
for node in self.shell:
if (node in improvements) is False:
improvements[node] = self.graph[node][new_node].get('strength', 0.0)
elif self.graph.has_edge(node, new_node):
improvements[node] += self.graph[node][new_node].get('strength', 0.0)
if new_node in improvements:
del improvements[new_node]
best_candidate = None
best_improvement = -float('inf')
for candidate in self.shell:
if improvements[candidate] > best_improvement:
best_candidate = candidate
best_improvement = improvements[candidate]
return best_candidate, best_improvement
def merge_dangling_nodes(self):
neighborhood = set()
for node in self.community:
for neighbor in self.graph.neighbors(node):
neighborhood.add(neighbor)
dangling_neighbors = [node for node in neighborhood if self.graph.degree[node] == 1]
self.community = list(set(self.community + dangling_neighbors))
def amend_small_communities(self):
if len(self.community) < 3:
if len(self.shell) > 0:
start_node_for_amend = max(self.shell, key=self.graph.degree)
next_community_searcher = LSWLCommunityDiscovery(self.graph, self.strength_type, self.timer_timeout)
new_members = next_community_searcher.community_search(start_node_for_amend, amend=False)
for new_member in new_members:
if (new_member in self.community) is False:
self.community.append(new_member)
def community_search(self, start_node, amend=True):
start_timer = time.time()
self.set_start_node(start_node)
self.assign_local_strength(self.starting_node)
improvements = {}
while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
if time.time() > start_timer + self.timer_timeout:
print('Timeout!')
return []
for node in self.shell:
self.assign_local_strength(node)
new_node, improvement = self.find_best_next_node(improvements)
if self.strength_type == 1 and improvement < LSWLCommunityDiscovery.minimum_improvement:
break
if self.strength_type == 2:
if len(self.community) > 3 and improvement < 1.0 + LSWLCommunityDiscovery.minimum_improvement:
break
elif len(self.community) < 3 and improvement < LSWLCommunityDiscovery.minimum_improvement:
break
self.update_sets_when_node_joins(new_node)
if amend:
self.amend_small_communities()
self.merge_dangling_nodes()
return sorted(self.community) # sort is only for a better representation, can be ignored to boost performance.
class LSWLCommunityDiscovery_offline(object):
    """Offline variant of the LSWL (Local Strength of Weak Links) local
    community search.

    Grows a community greedily from a seed node using per-edge 'strength'
    values derived from common-neighbor counts.  The graph passed in is
    mutated: self-loops are removed and 'strength' edge attributes are
    written onto it.

    NOTE(review): this class is currently identical to
    LSWLCommunityDiscovery (including the timeout handling); presumably the
    offline variant was meant to differ -- confirm against upstream.
    """
    # Smallest strength gain that still counts as an improvement.
    minimum_improvement = 0.000001
    def __init__(self, graph, strength_type, timeout):
        # initializes the object
        self.graph = graph  # stored by reference and mutated during the search
        self.strength_type = strength_type  # 1 or 2; selects the strength formula in assign_local_strength
        self.starting_node = None
        self.community = []  # nodes accepted so far
        self.shell = set()  # frontier: neighbors of the community not yet in it
        self.remove_self_loops()
        self.dict_common_neighbors = {}  # node -> {neighbor: #common neighbors}
        self.max_common_neighbors = {}  # node -> max common-neighbor count with any neighbor
        self.strength_assigned_nodes = set()  # nodes whose incident edges already carry 'strength'
        self.timer_timeout = timeout  # seconds allowed for one community_search call
    def reset(self):
        """Clear per-search state so the instance can be reused."""
        self.community.clear()
        self.shell.clear()
    def remove_self_loops(self):
        """Remove every self-loop edge from the graph (in place)."""
        for node in self.graph.nodes():
            if self.graph.has_edge(node, node):
                self.graph.remove_edge(node, node)
    def set_start_node(self, start_node):
        """Seed the community with start_node and initialize the shell.

        NOTE(review): an unknown node terminates the whole process via
        exit(-1); raising ValueError would be friendlier for library use.
        """
        if start_node in self.graph.nodes():
            self.starting_node = start_node
            self.community.append(start_node)
            self.shell = set(self.graph.neighbors(start_node))
        else:
            print('Invalid starting node! Try with another one.')
            exit(-1)
    def update_sets_when_node_joins(self, node):
        """Add node to the community and refresh the shell."""
        self.community.append(node)
        self.update_shell_when_node_joins(node)
    def update_shell_when_node_joins(self, new_node):
        """Extend the shell with new_node's neighbors, minus community members."""
        self.shell.update(self.graph.neighbors(new_node))
        for node in self.community:
            self.shell.discard(node)
    def update_dicts_of_common_neighbors_info(self, node):
        """Lazily cache common-neighbor counts between node and each
        neighbor, tracking the per-node maximum for later normalization."""
        if (node in self.dict_common_neighbors) is False:
            self.dict_common_neighbors[node] = {}
            self.max_common_neighbors[node] = -1
        for neighbor in self.graph.neighbors(node):
            if (neighbor in self.dict_common_neighbors[node]) is False:
                if (neighbor in self.dict_common_neighbors) is False:
                    self.dict_common_neighbors[neighbor] = {}
                    self.max_common_neighbors[neighbor] = -1
                number_common_neighbors = sum(1 for _ in nx.common_neighbors(self.graph, node, neighbor))
                # The count is symmetric, so record both directions at once.
                self.dict_common_neighbors[node][neighbor] = number_common_neighbors
                self.dict_common_neighbors[neighbor][node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[node]:
                    self.max_common_neighbors[node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[neighbor]:
                    self.max_common_neighbors[neighbor] = number_common_neighbors
    def assign_local_strength(self, node):
        """Write a 'strength' attribute on each edge incident to node.

        Each endpoint normalizes the common-neighbor count by its own
        maximum; strength_type 1 uses s1 + s2 - 1 (range [-1, 1]),
        strength_type 2 uses the mean (s1 + s2) / 2.
        """
        if node in self.strength_assigned_nodes:
            return
        self.update_dicts_of_common_neighbors_info(node)
        max_mutual_node = self.max_common_neighbors.get(node)
        for neighbor in self.graph.neighbors(node):
            max_mutual_neighbor = self.max_common_neighbors.get(neighbor)
            strength = self.dict_common_neighbors.get(node).get(neighbor)
            try:
                s1 = strength / max_mutual_node
            except ZeroDivisionError:
                # no common neighbors anywhere -> zero contribution
                s1 = 0.0
            try:
                s2 = strength / max_mutual_neighbor
            except ZeroDivisionError:
                s2 = 0.0
            strength = s1 + s2 - 1.0 if self.strength_type == 1 else (s1 + s2) / 2.0
            self.graph.add_edge(node, neighbor, strength=strength)
        self.strength_assigned_nodes.add(node)
    def find_best_next_node(self, improvements):
        """Return (node, gain) for the shell node whose accumulated edge
        strength toward the community is highest.

        `improvements` is carried across iterations by the caller; only the
        contribution of the most recently joined node is added per call.
        """
        new_node = self.community[-1]
        for node in self.shell:
            if (node in improvements) is False:
                # Nodes enter the shell as neighbors of new_node, so the edge exists.
                improvements[node] = self.graph[node][new_node].get('strength', 0.0)
            elif self.graph.has_edge(node, new_node):
                improvements[node] += self.graph[node][new_node].get('strength', 0.0)
        if new_node in improvements:
            del improvements[new_node]
        best_candidate = None
        best_improvement = -float('inf')
        for candidate in self.shell:
            if improvements[candidate] > best_improvement:
                best_candidate = candidate
                best_improvement = improvements[candidate]
        return best_candidate, best_improvement
    def merge_dangling_nodes(self):
        """Absorb degree-1 neighbors of the community into it."""
        neighborhood = set()
        for node in self.community:
            for neighbor in self.graph.neighbors(node):
                neighborhood.add(neighbor)
        dangling_neighbors = [node for node in neighborhood if self.graph.degree[node] == 1]
        self.community = list(set(self.community + dangling_neighbors))
    def amend_small_communities(self):
        """If fewer than 3 nodes were found, merge in the community grown
        from the highest-degree shell node.

        NOTE(review): spawns LSWLCommunityDiscovery (the online class)
        rather than this offline class -- confirm that is intended.
        """
        if len(self.community) < 3:
            if len(self.shell) > 0:
                start_node_for_amend = max(self.shell, key=self.graph.degree)
                next_community_searcher = LSWLCommunityDiscovery(self.graph, self.strength_type, self.timer_timeout)
                new_members = next_community_searcher.community_search(start_node_for_amend, amend=False)
                for new_member in new_members:
                    if (new_member in self.community) is False:
                        self.community.append(new_member)
    def community_search(self, start_node, amend=True):
        """Grow and return (sorted) the community around start_node.

        Returns [] if the search exceeds self.timer_timeout seconds.
        NOTE(review): the cutoffs reference
        LSWLCommunityDiscovery.minimum_improvement (same value as this
        class's own attribute), and a community of exactly 3 nodes skips
        both strength_type-2 cutoffs -- confirm this is intended.
        """
        start_timer = time.time()
        self.set_start_node(start_node)
        self.assign_local_strength(self.starting_node)
        improvements = {}
        while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
            if time.time() > start_timer + self.timer_timeout:
                print('Timeout!')
                return []
            for node in self.shell:
                self.assign_local_strength(node)
            new_node, improvement = self.find_best_next_node(improvements)
            if self.strength_type == 1 and improvement < LSWLCommunityDiscovery.minimum_improvement:
                break
            if self.strength_type == 2:
                if len(self.community) > 3 and improvement < 1.0 + LSWLCommunityDiscovery.minimum_improvement:
                    break
                elif len(self.community) < 3 and improvement < LSWLCommunityDiscovery.minimum_improvement:
                    break
            self.update_sets_when_node_joins(new_node)
        if amend:
            self.amend_small_communities()
        self.merge_dangling_nodes()
        return sorted(self.community)  # sort is only for a better representation, can be ignored to boost performance.
class LSWLPlusCommunityDetection():
    """LSWL+: whole-graph community detection built on the LSWL local search.

    Repeatedly grows a community from a random not-yet-processed node until
    every node is covered; results accumulate in ``self.partition``.
    ``merge_outliers`` additionally absorbs degree-1 neighbors and re-homes
    1-2 node communities; when ``detect_overlap`` is False, nodes already
    assigned are ignored by later searches.  ``self.graph`` is mutated
    (self-loops removed, 'strength' edge attributes written); a pristine
    deep copy is kept in ``self.graph_copy`` for the amend step.
    """
    # Smallest strength gain that still counts as an improvement.
    minimum_improvement = 0.000001
    def __init__(self, graph, strength_type, merge_outliers, detect_overlap, nodes_to_ignore=set()):
        # NOTE(review): mutable default argument -- every instance built
        # without nodes_to_ignore shares the same set object, and this class
        # mutates it (add/clear).  Prefer `nodes_to_ignore=None` + fallback.
        self.graph = graph
        self.graph_copy = deepcopy(self.graph)  # untouched copy for amend_partition
        self.strength_type = strength_type  # 1 or 2; selects the strength formula
        self.merge_outliers = merge_outliers
        self.detect_overlap = detect_overlap
        self.starting_node = None
        self.community = []  # community currently being grown
        self.shell = set()  # frontier of the current community
        self.nodes_to_ignore = nodes_to_ignore
        self.partition = []  # list of finished (sorted) communities
        self.remove_self_loops()
        self.dict_common_neighbors = {}  # node -> {neighbor: #common neighbors}
        self.max_common_neighbors = {}  # node -> max common-neighbor count
        self.strength_assigned_nodes = set()  # nodes whose edges carry 'strength'
        self.proccessed_nodes = set()  # (sic) nodes already placed in some community
    def reset(self):
        """Clear per-community state before growing the next community."""
        self.community.clear()
        self.shell.clear()
    def remove_self_loops(self):
        """Remove every self-loop edge from the graph (in place)."""
        for node in self.graph.nodes():
            if self.graph.has_edge(node, node):
                self.graph.remove_edge(node, node)
    def set_start_node(self, start_node):
        """Seed the community; ignored nodes never enter the shell."""
        self.starting_node = start_node
        self.community.append(start_node)
        self.shell = set(self.graph.neighbors(start_node))
        for node in self.nodes_to_ignore:
            self.shell.discard(node)
    def update_sets_when_node_joins(self, node):
        """Add node to the community and refresh the shell."""
        self.community.append(node)
        self.update_shell_when_node_joins(node)
    def update_shell_when_node_joins(self, new_node):
        """Extend the shell with new_node's neighbors, excluding community
        members and ignored nodes."""
        self.shell.update(self.graph.neighbors(new_node))
        for node in self.community:
            self.shell.discard(node)
        for node in self.nodes_to_ignore:
            self.shell.discard(node)
    def update_dicts_of_common_neighbors_info(self, node):
        """Lazily cache common-neighbor counts between node and each
        neighbor, tracking per-node maxima for normalization."""
        if (node in self.dict_common_neighbors) is False:
            self.dict_common_neighbors[node] = {}
            self.max_common_neighbors[node] = -1
        for neighbor in self.graph.neighbors(node):
            if (neighbor in self.dict_common_neighbors[node]) is False:
                if (neighbor in self.dict_common_neighbors) is False:
                    self.dict_common_neighbors[neighbor] = {}
                    self.max_common_neighbors[neighbor] = -1
                number_common_neighbors = sum(1 for _ in nx.common_neighbors(self.graph, node, neighbor))
                # The count is symmetric, so record both directions at once.
                self.dict_common_neighbors[node][neighbor] = number_common_neighbors
                self.dict_common_neighbors[neighbor][node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[node]:
                    self.max_common_neighbors[node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[neighbor]:
                    self.max_common_neighbors[neighbor] = number_common_neighbors
    def assign_local_strength(self, node):
        """Write a 'strength' attribute on each edge incident to node.
        Type 1: s1 + s2 - 1 (range [-1, 1]); type 2: mean of s1 and s2."""
        if node in self.strength_assigned_nodes:
            return
        self.update_dicts_of_common_neighbors_info(node)
        max_mutual_node = self.max_common_neighbors.get(node)
        for neighbor in self.graph.neighbors(node):
            max_mutual_neighbor = self.max_common_neighbors.get(neighbor)
            strength = self.dict_common_neighbors.get(node).get(neighbor)
            try:
                s1 = strength / max_mutual_node
            except ZeroDivisionError:
                s1 = 0.0
            try:
                s2 = strength / max_mutual_neighbor
            except ZeroDivisionError:
                s2 = 0.0
            strength = s1 + s2 - 1.0 if self.strength_type == 1 else (s1 + s2) / 2.0
            self.graph.add_edge(node, neighbor, strength=strength)
        self.strength_assigned_nodes.add(node)
    def find_best_next_node(self, improvements):
        """Return (node, gain) for the shell node with the highest
        accumulated strength toward the community; `improvements` is
        carried across iterations by the caller."""
        new_node = self.community[-1]
        for node in self.shell:
            if (node in improvements) is False:
                # Nodes enter the shell as neighbors of new_node, so the edge exists.
                improvements[node] = self.graph[node][new_node].get('strength', 0.0)
            elif self.graph.has_edge(node, new_node):
                improvements[node] += self.graph[node][new_node].get('strength', 0.0)
        if new_node in improvements:
            del improvements[new_node]
        best_candidate = None
        best_improvement = -float('inf')
        for candidate in self.shell:
            if improvements[candidate] > best_improvement:
                best_candidate = candidate
                best_improvement = improvements[candidate]
        return best_candidate, best_improvement
    def merge_dangling_nodes(self):
        """Absorb degree-1 neighbors (not ignored) of the community."""
        neighborhood = set()
        for node in self.community:
            for neighbor in self.graph.neighbors(node):
                if (neighbor in self.nodes_to_ignore) is False:
                    neighborhood.add(neighbor)
        dangling_neighbors = [node for node in neighborhood if self.graph.degree[node] == 1]
        self.community = list(set(self.community + dangling_neighbors))
    def find_community(self, start_node=None):
        """Grow one community (random unprocessed seed when none is given)
        and append it, sorted, to self.partition; then reset."""
        if start_node == None:
            remaining_nodes = set(self.graph.nodes() - self.proccessed_nodes)
            start_node = random.choice(list(remaining_nodes))
        self.set_start_node(start_node)
        self.assign_local_strength(self.starting_node)
        improvements = {}
        while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
            for node in self.shell:
                self.assign_local_strength(node)
            new_node, improvement = self.find_best_next_node(improvements)
            # Stop growing once the gain drops below the type-specific cutoff.
            if self.strength_type == 1 and improvement < LSWLPlusCommunityDetection.minimum_improvement:
                break
            if self.strength_type == 2:
                if len(self.community) > 3 and improvement < 1.0 + LSWLPlusCommunityDetection.minimum_improvement:
                    break
                elif len(self.community) < 3 and improvement < LSWLPlusCommunityDetection.minimum_improvement:
                    break
            self.update_sets_when_node_joins(new_node)
        if self.merge_outliers == True:
            self.merge_dangling_nodes()
        for node in self.community:
            self.proccessed_nodes.add(node)
        if self.detect_overlap == False:
            # Non-overlapping mode: later searches may not reuse these nodes.
            for node in self.community:
                self.nodes_to_ignore.add(node)
        self.partition.append(
            sorted(self.community))  # sort is only for a better representation, can be ignored to boost performance.
        self.reset()
    def community_detection(self):
        """Cover the whole graph with communities; return the sorted partition."""
        while len(self.proccessed_nodes) < self.graph_copy.number_of_nodes():
            self.find_community()
        self.nodes_to_ignore.clear()
        if self.merge_outliers == True:
            self.amend_partition()
        return sorted(self.partition)
    def amend_partition(self):
        """Drop 1- and 2-node communities and merge each into the
        neighboring community it connects to most strongly (by 'weight')."""
        communities = [community for community in self.partition if len(community) in [1, 2]]
        for community in communities:
            self.partition.remove(community)
        self.amend_partition_helper(communities)
    def amend_partition_helper2(self, community, strength_dict):
        """Merge `community` into the partition entry with the top score.

        NOTE(review): raises IndexError when strength_dict is empty (a tiny
        community with no neighbor inside any remaining partition entry) --
        confirm the inputs rule that out.
        """
        index_best_community_to_merge_into = list(strength_dict.keys())[0]
        for index_community in strength_dict:
            if strength_dict[index_community] > strength_dict[index_best_community_to_merge_into]:
                index_best_community_to_merge_into = index_community
        for node in community:
            if (node in self.partition[index_best_community_to_merge_into]) is False:
                self.partition[index_best_community_to_merge_into].append(node)
        self.partition[index_best_community_to_merge_into].sort()
    def amend_partition_helper(self, communities):
        """Score each small community against neighboring partition entries
        by summed edge 'weight' (0.0 when the attribute is absent)."""
        for community in communities:
            neighbors = set()
            for node in community:
                neighbors.update(self.graph_copy.neighbors(node))
            strength_dict = {}
            for neighbor in neighbors:
                for i in range(len(self.partition)):
                    if neighbor in self.partition[i]:
                        for node_in_com in community:
                            if self.graph_copy.has_edge(node_in_com, neighbor):
                                strength_dict[i] = strength_dict.get(i, 0.0) + self.graph_copy[node_in_com][
                                    neighbor].get('weight', 0.0)
                        break  # a node was found in partition[i]; stop scanning entries
            # NOTE(review): the trailing "| ... |" text on the next line is
            # dataset-export residue, not Python source.
            self.amend_partition_helper2(community, strength_dict) | cdlib/algorithms/internal/LSWL.py | import networkx as nx
import os.path
import time
from copy import deepcopy
import random
class LSWLCommunityDiscovery(object):
    """Local community discovery driven by the LSWL edge-strength measure.

    Starting from a seed node, the community grows greedily: on every step
    the shell node with the largest accumulated edge 'strength' toward the
    community joins, until the gain falls below ``minimum_improvement``.
    The graph is mutated in place (self-loops removed, 'strength' edge
    attributes written).
    """
    # Smallest gain still treated as an improvement.
    minimum_improvement = 0.000001
    def __init__(self, graph, strength_type, timeout):
        """Bind the (mutable) graph and the search parameters."""
        self.graph = graph
        self.strength_type = strength_type
        self.timer_timeout = timeout
        self.starting_node = None
        self.community = []
        self.shell = set()
        self.dict_common_neighbors = {}
        self.max_common_neighbors = {}
        self.strength_assigned_nodes = set()
        self.remove_self_loops()
    def reset(self):
        """Forget the current community and shell so the object can be reused."""
        self.community.clear()
        self.shell.clear()
    def remove_self_loops(self):
        """Strip every self-loop edge from the graph."""
        for vertex in self.graph.nodes():
            if self.graph.has_edge(vertex, vertex):
                self.graph.remove_edge(vertex, vertex)
    def set_start_node(self, start_node):
        """Seed the community; terminates the process on an unknown node."""
        if start_node not in self.graph.nodes():
            print('Invalid starting node! Try with another one.')
            exit(-1)
        self.starting_node = start_node
        self.community.append(start_node)
        self.shell = set(self.graph.neighbors(start_node))
    def update_sets_when_node_joins(self, node):
        """Accept *node* into the community and refresh the frontier."""
        self.community.append(node)
        self.update_shell_when_node_joins(node)
    def update_shell_when_node_joins(self, new_node):
        """Grow the shell by new_node's neighbors, dropping community members."""
        self.shell.update(self.graph.neighbors(new_node))
        self.shell.difference_update(self.community)
    def update_dicts_of_common_neighbors_info(self, node):
        """Cache the number of common neighbors between *node* and each of
        its neighbors, tracking per-node maxima for normalization."""
        if node not in self.dict_common_neighbors:
            self.dict_common_neighbors[node] = {}
            self.max_common_neighbors[node] = -1
        for neighbor in self.graph.neighbors(node):
            if neighbor in self.dict_common_neighbors[node]:
                continue  # this pair is already cached
            if neighbor not in self.dict_common_neighbors:
                self.dict_common_neighbors[neighbor] = {}
                self.max_common_neighbors[neighbor] = -1
            shared = sum(1 for _ in nx.common_neighbors(self.graph, node, neighbor))
            # The count is symmetric: record it for both endpoints.
            self.dict_common_neighbors[node][neighbor] = shared
            self.dict_common_neighbors[neighbor][node] = shared
            if shared > self.max_common_neighbors[node]:
                self.max_common_neighbors[node] = shared
            if shared > self.max_common_neighbors[neighbor]:
                self.max_common_neighbors[neighbor] = shared
    def assign_local_strength(self, node):
        """Stamp a 'strength' attribute on every edge incident to *node*."""
        if node in self.strength_assigned_nodes:
            return
        self.update_dicts_of_common_neighbors_info(node)
        own_max = self.max_common_neighbors.get(node)
        for neighbor in self.graph.neighbors(node):
            their_max = self.max_common_neighbors.get(neighbor)
            shared = self.dict_common_neighbors.get(node).get(neighbor)
            try:
                s1 = shared / own_max
            except ZeroDivisionError:
                s1 = 0.0
            try:
                s2 = shared / their_max
            except ZeroDivisionError:
                s2 = 0.0
            if self.strength_type == 1:
                edge_strength = s1 + s2 - 1.0  # shifted into [-1, 1]
            else:
                edge_strength = (s1 + s2) / 2.0  # plain average
            self.graph.add_edge(node, neighbor, strength=edge_strength)
        self.strength_assigned_nodes.add(node)
    def find_best_next_node(self, improvements):
        """Pick the shell node with the highest accumulated strength gain.

        Only the contribution of the most recently joined member is folded
        into *improvements*, which the caller carries between iterations.
        """
        last_joined = self.community[-1]
        for node in self.shell:
            if node not in improvements:
                improvements[node] = self.graph[node][last_joined].get('strength', 0.0)
            elif self.graph.has_edge(node, last_joined):
                improvements[node] += self.graph[node][last_joined].get('strength', 0.0)
        improvements.pop(last_joined, None)
        best_candidate, best_gain = None, -float('inf')
        for candidate in self.shell:
            if improvements[candidate] > best_gain:
                best_candidate, best_gain = candidate, improvements[candidate]
        return best_candidate, best_gain
    def merge_dangling_nodes(self):
        """Fold degree-1 neighbors of the community into it."""
        neighborhood = set()
        for member in self.community:
            neighborhood.update(self.graph.neighbors(member))
        danglers = [v for v in neighborhood if self.graph.degree[v] == 1]
        self.community = list(set(self.community + danglers))
    def amend_small_communities(self):
        """Merge a too-small result (< 3 nodes) with the community grown
        from the highest-degree shell node."""
        if len(self.community) >= 3 or not self.shell:
            return
        seed = max(self.shell, key=self.graph.degree)
        helper = LSWLCommunityDiscovery(self.graph, self.strength_type, self.timer_timeout)
        for member in helper.community_search(seed, amend=False):
            if member not in self.community:
                self.community.append(member)
    def community_search(self, start_node, amend=True):
        """Grow a community around *start_node*; return it sorted, or []
        if the search runs past ``timer_timeout`` seconds."""
        deadline = time.time() + self.timer_timeout
        self.set_start_node(start_node)
        self.assign_local_strength(self.starting_node)
        improvements = {}
        node_total = self.graph.number_of_nodes()
        while len(self.community) < node_total and self.shell:
            if time.time() > deadline:
                print('Timeout!')
                return []
            for member in self.shell:
                self.assign_local_strength(member)
            candidate, gain = self.find_best_next_node(improvements)
            if self.strength_type == 1 and gain < LSWLCommunityDiscovery.minimum_improvement:
                break
            if self.strength_type == 2:
                size = len(self.community)
                if size > 3 and gain < 1.0 + LSWLCommunityDiscovery.minimum_improvement:
                    break
                elif size < 3 and gain < LSWLCommunityDiscovery.minimum_improvement:
                    break
            self.update_sets_when_node_joins(candidate)
        if amend:
            self.amend_small_communities()
        self.merge_dangling_nodes()
        # Sorting is cosmetic only; skip it for speed if needed.
        return sorted(self.community)
class LSWLCommunityDiscovery_offline(object):
    """Offline variant of the LSWL (Local Strength of Weak Links) local
    community search.

    Grows a community greedily from a seed node using per-edge 'strength'
    values derived from common-neighbor counts.  The graph passed in is
    mutated: self-loops are removed and 'strength' edge attributes are
    written onto it.

    NOTE(review): this class is currently identical to
    LSWLCommunityDiscovery (including the timeout handling); presumably the
    offline variant was meant to differ -- confirm against upstream.
    """
    # Smallest strength gain that still counts as an improvement.
    minimum_improvement = 0.000001
    def __init__(self, graph, strength_type, timeout):
        # initializes the object
        self.graph = graph  # stored by reference and mutated during the search
        self.strength_type = strength_type  # 1 or 2; selects the strength formula in assign_local_strength
        self.starting_node = None
        self.community = []  # nodes accepted so far
        self.shell = set()  # frontier: neighbors of the community not yet in it
        self.remove_self_loops()
        self.dict_common_neighbors = {}  # node -> {neighbor: #common neighbors}
        self.max_common_neighbors = {}  # node -> max common-neighbor count with any neighbor
        self.strength_assigned_nodes = set()  # nodes whose incident edges already carry 'strength'
        self.timer_timeout = timeout  # seconds allowed for one community_search call
    def reset(self):
        """Clear per-search state so the instance can be reused."""
        self.community.clear()
        self.shell.clear()
    def remove_self_loops(self):
        """Remove every self-loop edge from the graph (in place)."""
        for node in self.graph.nodes():
            if self.graph.has_edge(node, node):
                self.graph.remove_edge(node, node)
    def set_start_node(self, start_node):
        """Seed the community with start_node and initialize the shell.

        NOTE(review): an unknown node terminates the whole process via
        exit(-1); raising ValueError would be friendlier for library use.
        """
        if start_node in self.graph.nodes():
            self.starting_node = start_node
            self.community.append(start_node)
            self.shell = set(self.graph.neighbors(start_node))
        else:
            print('Invalid starting node! Try with another one.')
            exit(-1)
    def update_sets_when_node_joins(self, node):
        """Add node to the community and refresh the shell."""
        self.community.append(node)
        self.update_shell_when_node_joins(node)
    def update_shell_when_node_joins(self, new_node):
        """Extend the shell with new_node's neighbors, minus community members."""
        self.shell.update(self.graph.neighbors(new_node))
        for node in self.community:
            self.shell.discard(node)
    def update_dicts_of_common_neighbors_info(self, node):
        """Lazily cache common-neighbor counts between node and each
        neighbor, tracking the per-node maximum for later normalization."""
        if (node in self.dict_common_neighbors) is False:
            self.dict_common_neighbors[node] = {}
            self.max_common_neighbors[node] = -1
        for neighbor in self.graph.neighbors(node):
            if (neighbor in self.dict_common_neighbors[node]) is False:
                if (neighbor in self.dict_common_neighbors) is False:
                    self.dict_common_neighbors[neighbor] = {}
                    self.max_common_neighbors[neighbor] = -1
                number_common_neighbors = sum(1 for _ in nx.common_neighbors(self.graph, node, neighbor))
                # The count is symmetric, so record both directions at once.
                self.dict_common_neighbors[node][neighbor] = number_common_neighbors
                self.dict_common_neighbors[neighbor][node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[node]:
                    self.max_common_neighbors[node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[neighbor]:
                    self.max_common_neighbors[neighbor] = number_common_neighbors
    def assign_local_strength(self, node):
        """Write a 'strength' attribute on each edge incident to node.

        Each endpoint normalizes the common-neighbor count by its own
        maximum; strength_type 1 uses s1 + s2 - 1 (range [-1, 1]),
        strength_type 2 uses the mean (s1 + s2) / 2.
        """
        if node in self.strength_assigned_nodes:
            return
        self.update_dicts_of_common_neighbors_info(node)
        max_mutual_node = self.max_common_neighbors.get(node)
        for neighbor in self.graph.neighbors(node):
            max_mutual_neighbor = self.max_common_neighbors.get(neighbor)
            strength = self.dict_common_neighbors.get(node).get(neighbor)
            try:
                s1 = strength / max_mutual_node
            except ZeroDivisionError:
                # no common neighbors anywhere -> zero contribution
                s1 = 0.0
            try:
                s2 = strength / max_mutual_neighbor
            except ZeroDivisionError:
                s2 = 0.0
            strength = s1 + s2 - 1.0 if self.strength_type == 1 else (s1 + s2) / 2.0
            self.graph.add_edge(node, neighbor, strength=strength)
        self.strength_assigned_nodes.add(node)
    def find_best_next_node(self, improvements):
        """Return (node, gain) for the shell node whose accumulated edge
        strength toward the community is highest.

        `improvements` is carried across iterations by the caller; only the
        contribution of the most recently joined node is added per call.
        """
        new_node = self.community[-1]
        for node in self.shell:
            if (node in improvements) is False:
                # Nodes enter the shell as neighbors of new_node, so the edge exists.
                improvements[node] = self.graph[node][new_node].get('strength', 0.0)
            elif self.graph.has_edge(node, new_node):
                improvements[node] += self.graph[node][new_node].get('strength', 0.0)
        if new_node in improvements:
            del improvements[new_node]
        best_candidate = None
        best_improvement = -float('inf')
        for candidate in self.shell:
            if improvements[candidate] > best_improvement:
                best_candidate = candidate
                best_improvement = improvements[candidate]
        return best_candidate, best_improvement
    def merge_dangling_nodes(self):
        """Absorb degree-1 neighbors of the community into it."""
        neighborhood = set()
        for node in self.community:
            for neighbor in self.graph.neighbors(node):
                neighborhood.add(neighbor)
        dangling_neighbors = [node for node in neighborhood if self.graph.degree[node] == 1]
        self.community = list(set(self.community + dangling_neighbors))
    def amend_small_communities(self):
        """If fewer than 3 nodes were found, merge in the community grown
        from the highest-degree shell node.

        NOTE(review): spawns LSWLCommunityDiscovery (the online class)
        rather than this offline class -- confirm that is intended.
        """
        if len(self.community) < 3:
            if len(self.shell) > 0:
                start_node_for_amend = max(self.shell, key=self.graph.degree)
                next_community_searcher = LSWLCommunityDiscovery(self.graph, self.strength_type, self.timer_timeout)
                new_members = next_community_searcher.community_search(start_node_for_amend, amend=False)
                for new_member in new_members:
                    if (new_member in self.community) is False:
                        self.community.append(new_member)
    def community_search(self, start_node, amend=True):
        """Grow and return (sorted) the community around start_node.

        Returns [] if the search exceeds self.timer_timeout seconds.
        NOTE(review): the cutoffs reference
        LSWLCommunityDiscovery.minimum_improvement (same value as this
        class's own attribute), and a community of exactly 3 nodes skips
        both strength_type-2 cutoffs -- confirm this is intended.
        """
        start_timer = time.time()
        self.set_start_node(start_node)
        self.assign_local_strength(self.starting_node)
        improvements = {}
        while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
            if time.time() > start_timer + self.timer_timeout:
                print('Timeout!')
                return []
            for node in self.shell:
                self.assign_local_strength(node)
            new_node, improvement = self.find_best_next_node(improvements)
            if self.strength_type == 1 and improvement < LSWLCommunityDiscovery.minimum_improvement:
                break
            if self.strength_type == 2:
                if len(self.community) > 3 and improvement < 1.0 + LSWLCommunityDiscovery.minimum_improvement:
                    break
                elif len(self.community) < 3 and improvement < LSWLCommunityDiscovery.minimum_improvement:
                    break
            self.update_sets_when_node_joins(new_node)
        if amend:
            self.amend_small_communities()
        self.merge_dangling_nodes()
        return sorted(self.community)  # sort is only for a better representation, can be ignored to boost performance.
class LSWLPlusCommunityDetection():
    """LSWL+: whole-graph community detection built on the LSWL local search.

    Repeatedly grows a community from a random not-yet-processed node until
    every node is covered; results accumulate in ``self.partition``.
    ``merge_outliers`` additionally absorbs degree-1 neighbors and re-homes
    1-2 node communities; when ``detect_overlap`` is False, nodes already
    assigned are ignored by later searches.  ``self.graph`` is mutated
    (self-loops removed, 'strength' edge attributes written); a pristine
    deep copy is kept in ``self.graph_copy`` for the amend step.
    """
    # Smallest strength gain that still counts as an improvement.
    minimum_improvement = 0.000001
    def __init__(self, graph, strength_type, merge_outliers, detect_overlap, nodes_to_ignore=set()):
        # NOTE(review): mutable default argument -- every instance built
        # without nodes_to_ignore shares the same set object, and this class
        # mutates it (add/clear).  Prefer `nodes_to_ignore=None` + fallback.
        self.graph = graph
        self.graph_copy = deepcopy(self.graph)  # untouched copy for amend_partition
        self.strength_type = strength_type  # 1 or 2; selects the strength formula
        self.merge_outliers = merge_outliers
        self.detect_overlap = detect_overlap
        self.starting_node = None
        self.community = []  # community currently being grown
        self.shell = set()  # frontier of the current community
        self.nodes_to_ignore = nodes_to_ignore
        self.partition = []  # list of finished (sorted) communities
        self.remove_self_loops()
        self.dict_common_neighbors = {}  # node -> {neighbor: #common neighbors}
        self.max_common_neighbors = {}  # node -> max common-neighbor count
        self.strength_assigned_nodes = set()  # nodes whose edges carry 'strength'
        self.proccessed_nodes = set()  # (sic) nodes already placed in some community
    def reset(self):
        """Clear per-community state before growing the next community."""
        self.community.clear()
        self.shell.clear()
    def remove_self_loops(self):
        """Remove every self-loop edge from the graph (in place)."""
        for node in self.graph.nodes():
            if self.graph.has_edge(node, node):
                self.graph.remove_edge(node, node)
    def set_start_node(self, start_node):
        """Seed the community; ignored nodes never enter the shell."""
        self.starting_node = start_node
        self.community.append(start_node)
        self.shell = set(self.graph.neighbors(start_node))
        for node in self.nodes_to_ignore:
            self.shell.discard(node)
    def update_sets_when_node_joins(self, node):
        """Add node to the community and refresh the shell."""
        self.community.append(node)
        self.update_shell_when_node_joins(node)
    def update_shell_when_node_joins(self, new_node):
        """Extend the shell with new_node's neighbors, excluding community
        members and ignored nodes."""
        self.shell.update(self.graph.neighbors(new_node))
        for node in self.community:
            self.shell.discard(node)
        for node in self.nodes_to_ignore:
            self.shell.discard(node)
    def update_dicts_of_common_neighbors_info(self, node):
        """Lazily cache common-neighbor counts between node and each
        neighbor, tracking per-node maxima for normalization."""
        if (node in self.dict_common_neighbors) is False:
            self.dict_common_neighbors[node] = {}
            self.max_common_neighbors[node] = -1
        for neighbor in self.graph.neighbors(node):
            if (neighbor in self.dict_common_neighbors[node]) is False:
                if (neighbor in self.dict_common_neighbors) is False:
                    self.dict_common_neighbors[neighbor] = {}
                    self.max_common_neighbors[neighbor] = -1
                number_common_neighbors = sum(1 for _ in nx.common_neighbors(self.graph, node, neighbor))
                # The count is symmetric, so record both directions at once.
                self.dict_common_neighbors[node][neighbor] = number_common_neighbors
                self.dict_common_neighbors[neighbor][node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[node]:
                    self.max_common_neighbors[node] = number_common_neighbors
                if number_common_neighbors > self.max_common_neighbors[neighbor]:
                    self.max_common_neighbors[neighbor] = number_common_neighbors
    def assign_local_strength(self, node):
        """Write a 'strength' attribute on each edge incident to node.
        Type 1: s1 + s2 - 1 (range [-1, 1]); type 2: mean of s1 and s2."""
        if node in self.strength_assigned_nodes:
            return
        self.update_dicts_of_common_neighbors_info(node)
        max_mutual_node = self.max_common_neighbors.get(node)
        for neighbor in self.graph.neighbors(node):
            max_mutual_neighbor = self.max_common_neighbors.get(neighbor)
            strength = self.dict_common_neighbors.get(node).get(neighbor)
            try:
                s1 = strength / max_mutual_node
            except ZeroDivisionError:
                s1 = 0.0
            try:
                s2 = strength / max_mutual_neighbor
            except ZeroDivisionError:
                s2 = 0.0
            strength = s1 + s2 - 1.0 if self.strength_type == 1 else (s1 + s2) / 2.0
            self.graph.add_edge(node, neighbor, strength=strength)
        self.strength_assigned_nodes.add(node)
    def find_best_next_node(self, improvements):
        """Return (node, gain) for the shell node with the highest
        accumulated strength toward the community; `improvements` is
        carried across iterations by the caller."""
        new_node = self.community[-1]
        for node in self.shell:
            if (node in improvements) is False:
                # Nodes enter the shell as neighbors of new_node, so the edge exists.
                improvements[node] = self.graph[node][new_node].get('strength', 0.0)
            elif self.graph.has_edge(node, new_node):
                improvements[node] += self.graph[node][new_node].get('strength', 0.0)
        if new_node in improvements:
            del improvements[new_node]
        best_candidate = None
        best_improvement = -float('inf')
        for candidate in self.shell:
            if improvements[candidate] > best_improvement:
                best_candidate = candidate
                best_improvement = improvements[candidate]
        return best_candidate, best_improvement
    def merge_dangling_nodes(self):
        """Absorb degree-1 neighbors (not ignored) of the community."""
        neighborhood = set()
        for node in self.community:
            for neighbor in self.graph.neighbors(node):
                if (neighbor in self.nodes_to_ignore) is False:
                    neighborhood.add(neighbor)
        dangling_neighbors = [node for node in neighborhood if self.graph.degree[node] == 1]
        self.community = list(set(self.community + dangling_neighbors))
    def find_community(self, start_node=None):
        """Grow one community (random unprocessed seed when none is given)
        and append it, sorted, to self.partition; then reset."""
        if start_node == None:
            remaining_nodes = set(self.graph.nodes() - self.proccessed_nodes)
            start_node = random.choice(list(remaining_nodes))
        self.set_start_node(start_node)
        self.assign_local_strength(self.starting_node)
        improvements = {}
        while len(self.community) < self.graph.number_of_nodes() and len(self.shell) > 0:
            for node in self.shell:
                self.assign_local_strength(node)
            new_node, improvement = self.find_best_next_node(improvements)
            # Stop growing once the gain drops below the type-specific cutoff.
            if self.strength_type == 1 and improvement < LSWLPlusCommunityDetection.minimum_improvement:
                break
            if self.strength_type == 2:
                if len(self.community) > 3 and improvement < 1.0 + LSWLPlusCommunityDetection.minimum_improvement:
                    break
                elif len(self.community) < 3 and improvement < LSWLPlusCommunityDetection.minimum_improvement:
                    break
            self.update_sets_when_node_joins(new_node)
        if self.merge_outliers == True:
            self.merge_dangling_nodes()
        for node in self.community:
            self.proccessed_nodes.add(node)
        if self.detect_overlap == False:
            # Non-overlapping mode: later searches may not reuse these nodes.
            for node in self.community:
                self.nodes_to_ignore.add(node)
        self.partition.append(
            sorted(self.community))  # sort is only for a better representation, can be ignored to boost performance.
        self.reset()
    def community_detection(self):
        """Cover the whole graph with communities; return the sorted partition."""
        while len(self.proccessed_nodes) < self.graph_copy.number_of_nodes():
            self.find_community()
        self.nodes_to_ignore.clear()
        if self.merge_outliers == True:
            self.amend_partition()
        return sorted(self.partition)
    def amend_partition(self):
        """Drop 1- and 2-node communities and merge each into the
        neighboring community it connects to most strongly (by 'weight')."""
        communities = [community for community in self.partition if len(community) in [1, 2]]
        for community in communities:
            self.partition.remove(community)
        self.amend_partition_helper(communities)
    def amend_partition_helper2(self, community, strength_dict):
        """Merge `community` into the partition entry with the top score.

        NOTE(review): raises IndexError when strength_dict is empty (a tiny
        community with no neighbor inside any remaining partition entry) --
        confirm the inputs rule that out.
        """
        index_best_community_to_merge_into = list(strength_dict.keys())[0]
        for index_community in strength_dict:
            if strength_dict[index_community] > strength_dict[index_best_community_to_merge_into]:
                index_best_community_to_merge_into = index_community
        for node in community:
            if (node in self.partition[index_best_community_to_merge_into]) is False:
                self.partition[index_best_community_to_merge_into].append(node)
        self.partition[index_best_community_to_merge_into].sort()
    def amend_partition_helper(self, communities):
        """Score each small community against neighboring partition entries
        by summed edge 'weight' (0.0 when the attribute is absent)."""
        for community in communities:
            neighbors = set()
            for node in community:
                neighbors.update(self.graph_copy.neighbors(node))
            strength_dict = {}
            for neighbor in neighbors:
                for i in range(len(self.partition)):
                    if neighbor in self.partition[i]:
                        for node_in_com in community:
                            if self.graph_copy.has_edge(node_in_com, neighbor):
                                strength_dict[i] = strength_dict.get(i, 0.0) + self.graph_copy[node_in_com][
                                    neighbor].get('weight', 0.0)
                        break  # a node was found in partition[i]; stop scanning entries
            # NOTE(review): the trailing "| ... |" numbers on the next line
            # are dataset-export residue, not Python source.
            self.amend_partition_helper2(community, strength_dict) | 0.383295 | 0.267647 |
import datetime
import pickle
import time
import numpy as np
import tensorflow as tf
def stack_data(data, num_shifts, len_time):
    """Stack data from a 2D array into a 3D array.

    Arguments:
        data -- 2D data array to be reshaped (a 1D array is treated as a
            single-column trajectory; array-likes are converted via np.asarray)
        num_shifts -- number of shifts (time steps) that losses will use (maximum is len_time - 1)
        len_time -- number of time steps in each trajectory in data

    Returns:
        data_tensor -- data reshaped into 3D array, shape: num_shifts + 1, num_traj * (len_time - num_shifts), n

    Side effects:
        None
    """
    data = np.asarray(data)  # accept lists/tuples as well as ndarrays
    if data.ndim > 1:
        n = data.shape[1]
    else:
        # Promote a 1-D trajectory to a single column; replaces the
        # deprecated np.asmatrix(data).getT() idiom.
        data = data.reshape(-1, 1)
        n = 1
    # Integer division avoids float round-off from int(a / b).
    num_traj = data.shape[0] // len_time
    new_len_time = len_time - num_shifts
    data_tensor = np.zeros([num_shifts + 1, num_traj * new_len_time, n])
    # Layer j holds every trajectory shifted forward by j steps.
    for j in np.arange(num_shifts + 1):
        for count in np.arange(num_traj):
            data_tensor_range = np.arange(count * new_len_time, new_len_time + count * new_len_time)
            data_tensor[j, data_tensor_range, :] = data[count * len_time + j: count * len_time + j + new_len_time, :]
    return data_tensor
def choose_optimizer(params, regularized_loss, trainable_var):
    """Choose which optimizer to use for the network training.

    Arguments:
        params -- dictionary of parameters for experiment
        regularized_loss -- loss, including regularization
        trainable_var -- list of trainable TensorFlow variables

    Returns:
        optimizer -- a TF1 training op minimizing regularized_loss

    Side effects:
        None

    Raises ValueError if params['opt_alg'] is not 'adam', 'adadelta', 'adagrad', 'adagradDA', 'ftrl', 'proximalGD',
    'proximalAdagrad', or 'RMS'
    """
    alg = params['opt_alg']
    lr = params['learning_rate']

    def minimize(opt):
        # Every branch finishes the same way: minimize the regularized
        # loss over the caller-supplied variable list.
        return opt.minimize(regularized_loss, var_list=trainable_var)

    if alg == 'adam':
        return minimize(tf.train.AdamOptimizer(lr))
    if alg == 'adadelta':
        if params['decay_rate'] > 0:
            return minimize(tf.train.AdadeltaOptimizer(lr, params['decay_rate']))
        # defaults 0.001, 0.95
        return minimize(tf.train.AdadeltaOptimizer(lr))
    if alg == 'adagrad':
        # also has initial_accumulator_value parameter
        return minimize(tf.train.AdagradOptimizer(lr))
    if alg == 'adagradDA':
        # Be careful when using AdagradDA for deep networks as it will require careful initialization of the
        # gradient accumulators for it to train.
        return minimize(tf.train.AdagradDAOptimizer(lr, tf.get_global_step()))
    if alg == 'ftrl':
        # lots of hyperparameters: learning_rate_power, initial_accumulator_value,
        # l1_regularization_strength, l2_regularization_strength
        return minimize(tf.train.FtrlOptimizer(lr))
    if alg == 'proximalGD':
        # can have built-in reg.
        return minimize(tf.train.ProximalGradientDescentOptimizer(lr))
    if alg == 'proximalAdagrad':
        # initial_accumulator_value, reg.
        return minimize(tf.train.ProximalAdagradOptimizer(lr))
    if alg == 'RMS':
        # momentum, epsilon, centered (False/True)
        if params['decay_rate'] > 0:
            return minimize(tf.train.RMSPropOptimizer(lr, params['decay_rate']))
        # default decay_rate 0.9
        return minimize(tf.train.RMSPropOptimizer(lr))
    raise ValueError("chose invalid opt_alg %s in params dict" % params['opt_alg'])
def check_progress(start, best_error, params):
    """Check on the progress of the network training and decide if it's time to stop.

    Arguments:
        start -- time that experiment started
        best_error -- best error so far in training
        params -- dictionary of parameters for experiment

    Returns:
        finished -- 0 if should continue training, 1 if should stop training
        save_now -- 0 if don't need to save results, 1 if should save results

    Side effects:
        May update params dict: stop_condition, been5min, been20min, been40min, been1hr, been2hr, been3hr, been4hr,
        beenHalf
    """
    finished = 0
    save_now = 0
    elapsed = time.time() - start

    # Each milestone is checked at most once: the tracker key starts falsy and is
    # set to the current best_error the first time the milestone is passed.
    # Fields: (tracker key, error-threshold key, elapsed-time limit in seconds,
    #          failure print format, stop_condition message, success print format,
    #          whether passing this milestone should trigger a save)
    milestones = [
        ('been5min', 'min_5min', 5 * 60,
         "too slowly improving in first five minutes: err %.15f",
         'too slowly improving in first 5 min',
         "been 5 minutes, err = %.15f < %.15f", 0),
        ('been20min', 'min_20min', 20 * 60,
         "too slowly improving in first 20 minutes: err %.15f",
         'too slowly improving in first 20 min',
         "been 20 minutes, err = %.15f < %.15f", 0),
        ('been40min', 'min_40min', 40 * 60,
         "too slowly improving in first 40 minutes: err %.15f",
         'too slowly improving in first 40 min',
         "been 40 minutes, err = %.15f < %.15f", 0),
        ('been1hr', 'min_1hr', 60 * 60,
         "too slowly improving in first hour: err %.15f",
         'too slowly improving in first hour',
         "been 1 hour, err = %.15f < %.15f", 1),
        ('been2hr', 'min_2hr', 2 * 60 * 60,
         "too slowly improving in first two hours: err %.15f",
         'too slowly improving in first two hours',
         "been 2 hours, err = %.15f < %.15f", 1),
        ('been3hr', 'min_3hr', 3 * 60 * 60,
         "too slowly improving in first three hours: err %.15f",
         'too slowly improving in first three hours',
         "been 3 hours, err = %.15f < %.15f", 1),
        ('been4hr', 'min_4hr', 4 * 60 * 60,
         "too slowly improving in first four hours: err %.15f",
         'too slowly improving in first four hours',
         "been 4 hours, err = %.15f < %.15f", 1),
        ('beenHalf', 'min_halfway', params['max_time'] / 2,
         "too slowly improving 1/2 of way in: val err %.15f",
         'too slowly improving halfway in',
         "Halfway through time, err = %.15f < %.15f", 0),
    ]

    for tracker, threshold_key, limit, fail_fmt, stop_msg, ok_fmt, save_flag in milestones:
        if not params[tracker] and elapsed > limit:
            if best_error > params[threshold_key]:
                # not improving fast enough: record why and stop training
                print(fail_fmt % best_error)
                params['stop_condition'] = stop_msg
                finished = 1
                return finished, save_now
            print(ok_fmt % (best_error, params[threshold_key]))
            if save_flag:
                save_now = 1
            params[tracker] = best_error

    if elapsed > params['max_time']:
        params['stop_condition'] = 'past max time'
        finished = 1
        return finished, save_now

    return finished, save_now
def save_files(sess, csv_path, train_val_error, params, weights, biases):
    """Write the error table, current weights/biases, and params dict to disk.

    Arguments:
        sess -- TensorFlow session
        csv_path -- string for path to save error file as csv
        train_val_error -- table of training and validation errors
        params -- dictionary of parameters for experiment
        weights -- dictionary of weights for all networks
        biases -- dictionary of biases for all networks

    Returns:
        None (but side effect of saving files and updating params dict.)

    Side effects:
        Saves train_val_error, each weight W, each bias b, and params dict to file.
        Update params dict: minTrain, minTest, minRegTrain, minRegTest
    """
    np.savetxt(csv_path, train_val_error, delimiter=',')
    # each variable goes to its own csv named after its dictionary key
    for var_name, var in list(weights.items()) + list(biases.items()):
        np.savetxt(csv_path.replace('error', var_name), np.asarray(sess.run(var)), delimiter=',')
    # record the best (column-wise minimum) errors seen so far
    params['minTrain'] = np.min(train_val_error[:, 0])
    params['minTest'] = np.min(train_val_error[:, 1])
    params['minRegTrain'] = np.min(train_val_error[:, 2])
    params['minRegTest'] = np.min(train_val_error[:, 3])
    print("min train: %.12f, min val: %.12f, min reg. train: %.12f, min reg. val: %.12f" % (
        params['minTrain'], params['minTest'], params['minRegTrain'], params['minRegTest']))
    save_params(params)
def save_params(params):
    """Pickle the experiment parameter dictionary next to the model checkpoint.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        None

    Side effects:
        Saves params dict to pkl file
    """
    # the pkl file lives alongside the ckpt file, sharing its base name
    pkl_path = params['model_path'].replace('ckpt', 'pkl')
    with open(pkl_path, 'wb') as handle:
        pickle.dump(params, handle, pickle.HIGHEST_PROTOCOL)
def set_defaults(params):
    """Set defaults and make some checks in parameters dictionary.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        None (but side effect of updating params dict)

    Side effects:
        May update params dict

    Raises KeyError if params is missing data_name, len_time, data_train_len, delta_t, widths, hidden_widths_omega,
        num_evals, num_real, or num_complex_pairs
    Raises ValueError if num_evals != 2 * num_complex_pairs + num_real
    """
    # defaults related to dataset
    if 'data_name' not in params:
        raise KeyError("Error: must give data_name as input to main")
    if 'len_time' not in params:
        raise KeyError("Error, must give len_time as input to main")
    if 'data_train_len' not in params:
        raise KeyError("Error, must give data_train_len as input to main")
    if 'delta_t' not in params:
        raise KeyError("Error, must give delta_t as input to main")
    # defaults related to saving results
    if 'folder_name' not in params:
        print("setting default: using folder named 'results'")
        params['folder_name'] = 'results'
    if 'exp_suffix' not in params:
        print("setting default name of experiment")
        params['exp_suffix'] = '_' + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
    if 'model_path' not in params:
        print("setting default path for model")
        exp_name = params['data_name'] + params['exp_suffix']
        params['model_path'] = "./%s/%s_model.ckpt" % (params['folder_name'], exp_name)
    # defaults related to network architecture
    if 'widths' not in params:
        raise KeyError("Error, must give widths as input to main")
    print(params['widths'])
    if 'hidden_widths_omega' not in params:
        raise KeyError("Error, must give hidden_widths for omega net")
    # omega nets take a scalar input; complex-pair nets output 2 values, real nets 1
    params['widths_omega_complex'] = [1, ] + params['hidden_widths_omega'] + [2, ]
    params['widths_omega_real'] = [1, ] + params['hidden_widths_omega'] + [1, ]
    print(params['widths_omega_complex'])
    print(params['widths_omega_real'])
    if 'act_type' not in params:
        print("setting default: activation function is ReLU")
        params['act_type'] = 'relu'
    if 'num_evals' not in params:
        raise KeyError("Error, must give number of evals: num_evals")
    if 'num_real' not in params:
        raise KeyError("Error, must give number of real eigenvalues: num_real")
    if 'num_complex_pairs' not in params:
        raise KeyError("Error, must give number of pairs of complex eigenvalues: num_complex_pairs")
    if params['num_evals'] != (2 * params['num_complex_pairs'] + params['num_real']):
        # fixed typo in message: was "num_compex_pairs"
        raise ValueError("Error, num_evals must equal 2*num_complex_pairs + num_real")
    params['d'] = len(params['widths'])  # d must be calculated like this
    # defaults related to initialization of parameters
    if 'seed' not in params:
        random_seed = np.random.randint(2 ** 30)
        print("setting default: choosing random seed of %d and saving to params" % random_seed)
        params['seed'] = random_seed
    if 'dist_weights' not in params:
        print("setting default: distribution for weights on main net is tn (truncated normal)")
        params['dist_weights'] = 'tn'
    if 'dist_weights_omega' not in params:
        print("setting default: distribution for weights on auxiliary net is tn (truncated normal)")
        params['dist_weights_omega'] = 'tn'
    if 'dist_biases' not in params:
        print("setting default: biases in main net will be init. to default number")
        params['dist_biases'] = 0
    if 'dist_biases_omega' not in params:
        print("setting default: biases in auxiliary net will be init. to default number")
        params['dist_biases_omega'] = 0
    if 'scale' not in params:
        print("setting default: scale for weights in main net is 0.1 (applies to tn distribution)")
        params['scale'] = 0.1
    if 'scale_omega' not in params:
        print("setting default: scale for weights in omega net is 0.1 (applies to tn distribution)")
        params['scale_omega'] = 0.1
    # expand scalar distribution specs into one entry per layer
    if isinstance(params['dist_weights'], str):
        params['dist_weights'] = [params['dist_weights']] * (len(params['widths']) - 1)
    if isinstance(params['dist_biases'], int):
        params['dist_biases'] = [params['dist_biases']] * (len(params['widths']) - 1)
    if isinstance(params['dist_weights_omega'], str):
        params['dist_weights_omega'] = [params['dist_weights_omega']] * (len(params['widths_omega_real']) - 1)
    if isinstance(params['dist_biases_omega'], int):
        params['dist_biases_omega'] = [params['dist_biases_omega']] * (len(params['widths_omega_real']) - 1)
    # defaults related to loss function
    if 'auto_first' not in params:
        params['auto_first'] = 0
    if 'relative_loss' not in params:
        print("setting default: loss is not relative")
        params['relative_loss'] = 0
    # NOTE: these defaults require num_shifts / num_shifts_middle to already be
    # in params when shifts / shifts_middle are not given
    if 'shifts' not in params:
        print("setting default: penalty on all shifts from 1 to num_shifts")
        params['shifts'] = np.arange(params['num_shifts']) + 1
    if 'shifts_middle' not in params:
        print("setting default: penalty on all middle shifts from 1 to num_shifts_middle")
        params['shifts_middle'] = np.arange(params['num_shifts_middle']) + 1
    params['num_shifts'] = len(params['shifts'])  # must be calculated like this
    params['num_shifts_middle'] = len(params['shifts_middle'])  # must be calculated like this
    if 'recon_lam' not in params:
        print("setting default: weight on reconstruction is 1.0")
        params['recon_lam'] = 1.0
    if 'mid_shift_lam' not in params:
        print("setting default: weight on loss3 is 1.0")
        params['mid_shift_lam'] = 1.0
    if 'L1_lam' not in params:
        print("setting default: L1_lam is .00001")
        params['L1_lam'] = .00001
    if 'L2_lam' not in params:
        print("setting default: no L2 regularization")
        params['L2_lam'] = 0.0
    if 'Linf_lam' not in params:
        print("setting default: no L_inf penalty")
        params['Linf_lam'] = 0.0
    # defaults related to training
    if 'num_passes_per_file' not in params:
        print("setting default: 1000 passes per training file")
        params['num_passes_per_file'] = 1000
    if 'num_steps_per_batch' not in params:
        print("setting default: 1 step per batch before moving to next training file")
        params['num_steps_per_batch'] = 1
    if 'num_steps_per_file_pass' not in params:
        print("setting default: up to 1000000 steps per training file before moving to next one")
        params['num_steps_per_file_pass'] = 1000000
    if 'learning_rate' not in params:
        print("setting default learning rate")
        params['learning_rate'] = .003
    if 'opt_alg' not in params:
        print("setting default: use Adam optimizer")
        params['opt_alg'] = 'adam'
    if 'decay_rate' not in params:
        print("setting default: decay_rate is 0 (applies to some optimizer algorithms)")
        params['decay_rate'] = 0
    if 'batch_size' not in params:
        print("setting default: no batches (use whole training file at once)")
        params['batch_size'] = 0
    # setting defaults related to keeping track of training time and progress
    if 'max_time' not in params:
        print("setting default: run up to 6 hours")
        params['max_time'] = 6 * 60 * 60  # 6 hours
    if 'min_5min' not in params:
        params['min_5min'] = 10 ** (-2)
        print("setting default: must reach %f in 5 minutes" % params['min_5min'])
    if 'min_20min' not in params:
        params['min_20min'] = 10 ** (-3)
        print("setting default: must reach %f in 20 minutes" % params['min_20min'])
    if 'min_40min' not in params:
        params['min_40min'] = 10 ** (-4)
        print("setting default: must reach %f in 40 minutes" % params['min_40min'])
    if 'min_1hr' not in params:
        params['min_1hr'] = 10 ** (-5)
        print("setting default: must reach %f in 1 hour" % params['min_1hr'])
    if 'min_2hr' not in params:
        params['min_2hr'] = 10 ** (-5.25)
        print("setting default: must reach %f in 2 hours" % params['min_2hr'])
    if 'min_3hr' not in params:
        params['min_3hr'] = 10 ** (-5.5)
        print("setting default: must reach %f in 3 hours" % params['min_3hr'])
    if 'min_4hr' not in params:
        params['min_4hr'] = 10 ** (-5.75)
        print("setting default: must reach %f in 4 hours" % params['min_4hr'])
    if 'min_halfway' not in params:
        params['min_halfway'] = 10 ** (-4)
        print("setting default: must reach %f in first half of time allotted" % params['min_halfway'])
    # initializing trackers for how long the training has run
    params['been5min'] = 0
    params['been20min'] = 0
    params['been40min'] = 0
    params['been1hr'] = 0
    params['been2hr'] = 0
    params['been3hr'] = 0
    params['been4hr'] = 0
    params['beenHalf'] = 0
def num_shifts_in_stack(params):
    """Calculate how many time points (shifts) will be used in loss functions.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        max_shifts_to_stack -- max number of shifts to use in loss functions

    Side effects:
        None
    """
    max_shifts_to_stack = 1
    if params['num_shifts']:
        max_shifts_to_stack = max(max_shifts_to_stack, max(params['shifts']))
    if params['num_shifts_middle']:
        max_shifts_to_stack = max(max_shifts_to_stack, max(params['shifts_middle']))
    # dataset-concatenation residue ("| helperfns.py |") removed from this line;
    # the import below belongs to the following copy of the file
    return max_shifts_to_stack


import datetime
import pickle
import time
import numpy as np
import tensorflow as tf
def stack_data(data, num_shifts, len_time):
    """Stack data from a 2D array into a 3D array.

    Arguments:
        data -- 2D data array to be reshaped (a 1D array is treated as one column)
        num_shifts -- number of shifts (time steps) that losses will use (maximum is len_time - 1)
        len_time -- number of time steps in each trajectory in data

    Returns:
        data_tensor -- data reshaped into 3D array, shape: num_shifts + 1, num_traj * (len_time - num_shifts), n

    Side effects:
        None
    """
    if data.ndim > 1:
        n = data.shape[1]
    else:
        # promote a 1D series to a single-column 2D array
        # (replaces np.asmatrix(...).getT(), which was removed in NumPy 2.0)
        data = np.reshape(data, (-1, 1))
        n = 1
    num_traj = int(data.shape[0] / len_time)
    new_len_time = len_time - num_shifts
    data_tensor = np.zeros([num_shifts + 1, num_traj * new_len_time, n])
    # slice each trajectory at every shift j, so data_tensor[j] holds the
    # trajectories advanced by j time steps
    for j in np.arange(num_shifts + 1):
        for count in np.arange(num_traj):
            data_tensor_range = np.arange(count * new_len_time, new_len_time + count * new_len_time)
            data_tensor[j, data_tensor_range, :] = data[count * len_time + j: count * len_time + j + new_len_time, :]
    return data_tensor
def choose_optimizer(params, regularized_loss, trainable_var):
    """Choose which optimizer to use for the network training.

    Arguments:
        params -- dictionary of parameters for experiment
        regularized_loss -- loss, including regularization
        trainable_var -- list of trainable TensorFlow variables

    Returns:
        optimizer -- optimizer from TensorFlow Class optimizer

    Side effects:
        None

    Raises ValueError if params['opt_alg'] is not 'adam', 'adadelta', 'adagrad', 'adagradDA', 'ftrl', 'proximalGD',
        'proximalAdagrad', or 'RMS'
    """
    alg = params['opt_alg']
    rate = params['learning_rate']
    if alg == 'adam':
        opt = tf.train.AdamOptimizer(rate)
    elif alg == 'adadelta':
        # TF defaults are 0.001, 0.95; only override decay when requested
        if params['decay_rate'] > 0:
            opt = tf.train.AdadeltaOptimizer(rate, params['decay_rate'])
        else:
            opt = tf.train.AdadeltaOptimizer(rate)
    elif alg == 'adagrad':
        # also has an initial_accumulator_value parameter
        opt = tf.train.AdagradOptimizer(rate)
    elif alg == 'adagradDA':
        # AdagradDA needs the global step; deep networks require careful
        # initialization of the gradient accumulators to train
        opt = tf.train.AdagradDAOptimizer(rate, tf.get_global_step())
    elif alg == 'ftrl':
        # many more hyperparameters exist: learning_rate_power,
        # initial_accumulator_value, l1/l2_regularization_strength
        opt = tf.train.FtrlOptimizer(rate)
    elif alg == 'proximalGD':
        # supports built-in regularization
        opt = tf.train.ProximalGradientDescentOptimizer(rate)
    elif alg == 'proximalAdagrad':
        # supports initial_accumulator_value and regularization
        opt = tf.train.ProximalAdagradOptimizer(rate)
    elif alg == 'RMS':
        # momentum, epsilon, centered also available; default decay_rate 0.9
        if params['decay_rate'] > 0:
            opt = tf.train.RMSPropOptimizer(rate, params['decay_rate'])
        else:
            opt = tf.train.RMSPropOptimizer(rate)
    else:
        raise ValueError("chose invalid opt_alg %s in params dict" % params['opt_alg'])
    return opt.minimize(regularized_loss, var_list=trainable_var)
def check_progress(start, best_error, params):
    """Check on the progress of the network training and decide if it's time to stop.

    Arguments:
        start -- time that experiment started
        best_error -- best error so far in training
        params -- dictionary of parameters for experiment

    Returns:
        finished -- 0 if should continue training, 1 if should stop training
        save_now -- 0 if don't need to save results, 1 if should save results

    Side effects:
        May update params dict: stop_condition, been5min, been20min, been40min, been1hr, been2hr, been3hr, been4hr,
        beenHalf
    """
    finished = 0
    save_now = 0
    elapsed = time.time() - start

    # Each milestone is checked at most once: the tracker key starts falsy and is
    # set to the current best_error the first time the milestone is passed.
    # Fields: (tracker key, error-threshold key, elapsed-time limit in seconds,
    #          failure print format, stop_condition message, success print format,
    #          whether passing this milestone should trigger a save)
    milestones = [
        ('been5min', 'min_5min', 5 * 60,
         "too slowly improving in first five minutes: err %.15f",
         'too slowly improving in first 5 min',
         "been 5 minutes, err = %.15f < %.15f", 0),
        ('been20min', 'min_20min', 20 * 60,
         "too slowly improving in first 20 minutes: err %.15f",
         'too slowly improving in first 20 min',
         "been 20 minutes, err = %.15f < %.15f", 0),
        ('been40min', 'min_40min', 40 * 60,
         "too slowly improving in first 40 minutes: err %.15f",
         'too slowly improving in first 40 min',
         "been 40 minutes, err = %.15f < %.15f", 0),
        ('been1hr', 'min_1hr', 60 * 60,
         "too slowly improving in first hour: err %.15f",
         'too slowly improving in first hour',
         "been 1 hour, err = %.15f < %.15f", 1),
        ('been2hr', 'min_2hr', 2 * 60 * 60,
         "too slowly improving in first two hours: err %.15f",
         'too slowly improving in first two hours',
         "been 2 hours, err = %.15f < %.15f", 1),
        ('been3hr', 'min_3hr', 3 * 60 * 60,
         "too slowly improving in first three hours: err %.15f",
         'too slowly improving in first three hours',
         "been 3 hours, err = %.15f < %.15f", 1),
        ('been4hr', 'min_4hr', 4 * 60 * 60,
         "too slowly improving in first four hours: err %.15f",
         'too slowly improving in first four hours',
         "been 4 hours, err = %.15f < %.15f", 1),
        ('beenHalf', 'min_halfway', params['max_time'] / 2,
         "too slowly improving 1/2 of way in: val err %.15f",
         'too slowly improving halfway in',
         "Halfway through time, err = %.15f < %.15f", 0),
    ]

    for tracker, threshold_key, limit, fail_fmt, stop_msg, ok_fmt, save_flag in milestones:
        if not params[tracker] and elapsed > limit:
            if best_error > params[threshold_key]:
                # not improving fast enough: record why and stop training
                print(fail_fmt % best_error)
                params['stop_condition'] = stop_msg
                finished = 1
                return finished, save_now
            print(ok_fmt % (best_error, params[threshold_key]))
            if save_flag:
                save_now = 1
            params[tracker] = best_error

    if elapsed > params['max_time']:
        params['stop_condition'] = 'past max time'
        finished = 1
        return finished, save_now

    return finished, save_now
def save_files(sess, csv_path, train_val_error, params, weights, biases):
    """Write the error table, current weights/biases, and params dict to disk.

    Arguments:
        sess -- TensorFlow session
        csv_path -- string for path to save error file as csv
        train_val_error -- table of training and validation errors
        params -- dictionary of parameters for experiment
        weights -- dictionary of weights for all networks
        biases -- dictionary of biases for all networks

    Returns:
        None (but side effect of saving files and updating params dict.)

    Side effects:
        Saves train_val_error, each weight W, each bias b, and params dict to file.
        Update params dict: minTrain, minTest, minRegTrain, minRegTest
    """
    np.savetxt(csv_path, train_val_error, delimiter=',')
    # each variable goes to its own csv named after its dictionary key
    for var_name, var in list(weights.items()) + list(biases.items()):
        np.savetxt(csv_path.replace('error', var_name), np.asarray(sess.run(var)), delimiter=',')
    # record the best (column-wise minimum) errors seen so far
    params['minTrain'] = np.min(train_val_error[:, 0])
    params['minTest'] = np.min(train_val_error[:, 1])
    params['minRegTrain'] = np.min(train_val_error[:, 2])
    params['minRegTest'] = np.min(train_val_error[:, 3])
    print("min train: %.12f, min val: %.12f, min reg. train: %.12f, min reg. val: %.12f" % (
        params['minTrain'], params['minTest'], params['minRegTrain'], params['minRegTest']))
    save_params(params)
def save_params(params):
    """Pickle the experiment parameter dictionary next to the model checkpoint.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        None

    Side effects:
        Saves params dict to pkl file
    """
    # the pkl file lives alongside the ckpt file, sharing its base name
    pkl_path = params['model_path'].replace('ckpt', 'pkl')
    with open(pkl_path, 'wb') as handle:
        pickle.dump(params, handle, pickle.HIGHEST_PROTOCOL)
def set_defaults(params):
    """Set defaults and make some checks in parameters dictionary.

    Arguments:
        params -- dictionary of parameters for experiment

    Returns:
        None (but side effect of updating params dict)

    Side effects:
        May update params dict

    Raises KeyError if params is missing data_name, len_time, data_train_len, delta_t, widths, hidden_widths_omega,
        num_evals, num_real, or num_complex_pairs
    Raises ValueError if num_evals != 2 * num_complex_pairs + num_real
    """
    # defaults related to dataset
    if 'data_name' not in params:
        raise KeyError("Error: must give data_name as input to main")
    if 'len_time' not in params:
        raise KeyError("Error, must give len_time as input to main")
    if 'data_train_len' not in params:
        raise KeyError("Error, must give data_train_len as input to main")
    if 'delta_t' not in params:
        raise KeyError("Error, must give delta_t as input to main")
    # defaults related to saving results
    if 'folder_name' not in params:
        print("setting default: using folder named 'results'")
        params['folder_name'] = 'results'
    if 'exp_suffix' not in params:
        print("setting default name of experiment")
        params['exp_suffix'] = '_' + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
    if 'model_path' not in params:
        print("setting default path for model")
        exp_name = params['data_name'] + params['exp_suffix']
        params['model_path'] = "./%s/%s_model.ckpt" % (params['folder_name'], exp_name)
    # defaults related to network architecture
    if 'widths' not in params:
        raise KeyError("Error, must give widths as input to main")
    print(params['widths'])
    if 'hidden_widths_omega' not in params:
        raise KeyError("Error, must give hidden_widths for omega net")
    # omega nets take a scalar input; complex-pair nets output 2 values, real nets 1
    params['widths_omega_complex'] = [1, ] + params['hidden_widths_omega'] + [2, ]
    params['widths_omega_real'] = [1, ] + params['hidden_widths_omega'] + [1, ]
    print(params['widths_omega_complex'])
    print(params['widths_omega_real'])
    if 'act_type' not in params:
        print("setting default: activation function is ReLU")
        params['act_type'] = 'relu'
    if 'num_evals' not in params:
        raise KeyError("Error, must give number of evals: num_evals")
    if 'num_real' not in params:
        raise KeyError("Error, must give number of real eigenvalues: num_real")
    if 'num_complex_pairs' not in params:
        raise KeyError("Error, must give number of pairs of complex eigenvalues: num_complex_pairs")
    if params['num_evals'] != (2 * params['num_complex_pairs'] + params['num_real']):
        # fixed typo in message: was "num_compex_pairs"
        raise ValueError("Error, num_evals must equal 2*num_complex_pairs + num_real")
    params['d'] = len(params['widths'])  # d must be calculated like this
    # defaults related to initialization of parameters
    if 'seed' not in params:
        random_seed = np.random.randint(2 ** 30)
        print("setting default: choosing random seed of %d and saving to params" % random_seed)
        params['seed'] = random_seed
    if 'dist_weights' not in params:
        print("setting default: distribution for weights on main net is tn (truncated normal)")
        params['dist_weights'] = 'tn'
    if 'dist_weights_omega' not in params:
        print("setting default: distribution for weights on auxiliary net is tn (truncated normal)")
        params['dist_weights_omega'] = 'tn'
    if 'dist_biases' not in params:
        print("setting default: biases in main net will be init. to default number")
        params['dist_biases'] = 0
    if 'dist_biases_omega' not in params:
        print("setting default: biases in auxiliary net will be init. to default number")
        params['dist_biases_omega'] = 0
    if 'scale' not in params:
        print("setting default: scale for weights in main net is 0.1 (applies to tn distribution)")
        params['scale'] = 0.1
    if 'scale_omega' not in params:
        print("setting default: scale for weights in omega net is 0.1 (applies to tn distribution)")
        params['scale_omega'] = 0.1
    # expand scalar distribution specs into one entry per layer
    if isinstance(params['dist_weights'], str):
        params['dist_weights'] = [params['dist_weights']] * (len(params['widths']) - 1)
    if isinstance(params['dist_biases'], int):
        params['dist_biases'] = [params['dist_biases']] * (len(params['widths']) - 1)
    if isinstance(params['dist_weights_omega'], str):
        params['dist_weights_omega'] = [params['dist_weights_omega']] * (len(params['widths_omega_real']) - 1)
    if isinstance(params['dist_biases_omega'], int):
        params['dist_biases_omega'] = [params['dist_biases_omega']] * (len(params['widths_omega_real']) - 1)
    # defaults related to loss function
    if 'auto_first' not in params:
        params['auto_first'] = 0
    if 'relative_loss' not in params:
        print("setting default: loss is not relative")
        params['relative_loss'] = 0
    # NOTE: these defaults require num_shifts / num_shifts_middle to already be
    # in params when shifts / shifts_middle are not given
    if 'shifts' not in params:
        print("setting default: penalty on all shifts from 1 to num_shifts")
        params['shifts'] = np.arange(params['num_shifts']) + 1
    if 'shifts_middle' not in params:
        print("setting default: penalty on all middle shifts from 1 to num_shifts_middle")
        params['shifts_middle'] = np.arange(params['num_shifts_middle']) + 1
    params['num_shifts'] = len(params['shifts'])  # must be calculated like this
    params['num_shifts_middle'] = len(params['shifts_middle'])  # must be calculated like this
    if 'recon_lam' not in params:
        print("setting default: weight on reconstruction is 1.0")
        params['recon_lam'] = 1.0
    if 'mid_shift_lam' not in params:
        print("setting default: weight on loss3 is 1.0")
        params['mid_shift_lam'] = 1.0
    if 'L1_lam' not in params:
        print("setting default: L1_lam is .00001")
        params['L1_lam'] = .00001
    if 'L2_lam' not in params:
        print("setting default: no L2 regularization")
        params['L2_lam'] = 0.0
    if 'Linf_lam' not in params:
        print("setting default: no L_inf penalty")
        params['Linf_lam'] = 0.0
    # defaults related to training
    if 'num_passes_per_file' not in params:
        print("setting default: 1000 passes per training file")
        params['num_passes_per_file'] = 1000
    if 'num_steps_per_batch' not in params:
        print("setting default: 1 step per batch before moving to next training file")
        params['num_steps_per_batch'] = 1
    if 'num_steps_per_file_pass' not in params:
        print("setting default: up to 1000000 steps per training file before moving to next one")
        params['num_steps_per_file_pass'] = 1000000
    if 'learning_rate' not in params:
        print("setting default learning rate")
        params['learning_rate'] = .003
    if 'opt_alg' not in params:
        print("setting default: use Adam optimizer")
        params['opt_alg'] = 'adam'
    if 'decay_rate' not in params:
        print("setting default: decay_rate is 0 (applies to some optimizer algorithms)")
        params['decay_rate'] = 0
    if 'batch_size' not in params:
        print("setting default: no batches (use whole training file at once)")
        params['batch_size'] = 0
    # setting defaults related to keeping track of training time and progress
    if 'max_time' not in params:
        print("setting default: run up to 6 hours")
        params['max_time'] = 6 * 60 * 60  # 6 hours
    if 'min_5min' not in params:
        params['min_5min'] = 10 ** (-2)
        print("setting default: must reach %f in 5 minutes" % params['min_5min'])
    if 'min_20min' not in params:
        params['min_20min'] = 10 ** (-3)
        print("setting default: must reach %f in 20 minutes" % params['min_20min'])
    if 'min_40min' not in params:
        params['min_40min'] = 10 ** (-4)
        print("setting default: must reach %f in 40 minutes" % params['min_40min'])
    if 'min_1hr' not in params:
        params['min_1hr'] = 10 ** (-5)
        print("setting default: must reach %f in 1 hour" % params['min_1hr'])
    if 'min_2hr' not in params:
        params['min_2hr'] = 10 ** (-5.25)
        print("setting default: must reach %f in 2 hours" % params['min_2hr'])
    if 'min_3hr' not in params:
        params['min_3hr'] = 10 ** (-5.5)
        print("setting default: must reach %f in 3 hours" % params['min_3hr'])
    if 'min_4hr' not in params:
        params['min_4hr'] = 10 ** (-5.75)
        print("setting default: must reach %f in 4 hours" % params['min_4hr'])
    if 'min_halfway' not in params:
        params['min_halfway'] = 10 ** (-4)
        print("setting default: must reach %f in first half of time allotted" % params['min_halfway'])
    # initializing trackers for how long the training has run
    params['been5min'] = 0
    params['been20min'] = 0
    params['been40min'] = 0
    params['been1hr'] = 0
    params['been2hr'] = 0
    params['been3hr'] = 0
    params['been4hr'] = 0
    params['beenHalf'] = 0
def num_shifts_in_stack(params):
    """Calculate how many time points (shifts) will be used in loss functions.

    Arguments:
        params -- dictionary of parameters for the experiment; reads the keys
            'num_shifts', 'shifts', 'num_shifts_middle', and 'shifts_middle'
            ('shifts'/'shifts_middle' only when the matching count is truthy)

    Returns:
        max_shifts_to_stack -- max number of shifts to use in loss functions
            (at least 1, so one step is always stacked)

    Side effects:
        None
    """
    max_shifts_to_stack = 1
    if params['num_shifts']:
        max_shifts_to_stack = max(max_shifts_to_stack, max(params['shifts']))
    if params['num_shifts_middle']:
        max_shifts_to_stack = max(max_shifts_to_stack, max(params['shifts_middle']))
    # NOTE: the original line carried dataset-dump residue ("| 0.649356 | ...")
    # fused after the return expression, which was a syntax error; removed.
    return max_shifts_to_stack
import logging
import os
import torch
import GRNetDetector.utils.data_loaders
import GRNetDetector.utils.helpers
from datetime import datetime
from time import time
from tensorboardX import SummaryWriter
from GRNetDetector.core.test import test_net
from GRNetDetector.extensions.chamfer_dist import ChamferDistance
from GRNetDetector.extensions.gridding_loss import GriddingLoss
from GRNetDetector.models.grnet import GRNet
from GRNetDetector.utils.average_meter import AverageMeter
from GRNetDetector.utils.metrics import Metrics
def train_net(cfg):
    """Train GRNet on the configured dataset, validating and checkpointing per epoch.

    Arguments:
        cfg -- config object with DATASET, CONST, DIR, TRAIN, TEST and NETWORK
               sections (attribute access, EasyDict-style).

    Side effects:
        Creates checkpoint/log directories under cfg.DIR.OUT_PATH, writes
        tensorboard event files and checkpoint files, mutates cfg.DIR.
    """
    # The module imports `GRNetDetector.utils.*` but the body below refers to the
    # bare name `utils`, which was never bound and raised NameError at runtime.
    # Bind it locally so those references resolve.
    import GRNetDetector.utils as utils

    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
    torch.backends.cudnn.benchmark = True
    # Set up data loaders (train shuffled/dropping last partial batch; val is
    # batch_size=1 so variable-sized point clouds need no padding).
    train_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg)
    test_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
    train_data_loader = torch.utils.data.DataLoader(dataset=train_dataset_loader.get_dataset(
        utils.data_loaders.DatasetSubset.TRAIN),
                                                    batch_size=cfg.TRAIN.BATCH_SIZE,
                                                    num_workers=cfg.CONST.NUM_WORKERS,
                                                    collate_fn=utils.data_loaders.collate_fn,
                                                    pin_memory=True,
                                                    shuffle=True,
                                                    drop_last=True)
    val_data_loader = torch.utils.data.DataLoader(dataset=test_dataset_loader.get_dataset(
        utils.data_loaders.DatasetSubset.VAL),
                                                  batch_size=1,
                                                  num_workers=cfg.CONST.NUM_WORKERS,
                                                  collate_fn=utils.data_loaders.collate_fn,
                                                  pin_memory=True,
                                                  shuffle=False)
    # Set up folders for logs and checkpoints ('%s' is filled per sub-dir below)
    output_dir = os.path.join(cfg.DIR.OUT_PATH, '%s', datetime.now().isoformat())
    cfg.DIR.CHECKPOINTS = output_dir % 'checkpoints'
    cfg.DIR.LOGS = output_dir % 'logs'
    if not os.path.exists(cfg.DIR.CHECKPOINTS):
        os.makedirs(cfg.DIR.CHECKPOINTS)
    # Create tensorboard writers
    train_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'train'))
    val_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'test'))
    # Create the networks
    grnet = GRNet(cfg)
    grnet.apply(utils.helpers.init_weights)
    logging.debug('Parameters in GRNet: %d.' % utils.helpers.count_parameters(grnet))
    # Move the network to GPU if possible
    if torch.cuda.is_available():
        grnet = torch.nn.DataParallel(grnet).cuda()
    # Create the optimizers
    grnet_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, grnet.parameters()),
                                       lr=cfg.TRAIN.LEARNING_RATE,
                                       weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                                       betas=cfg.TRAIN.BETAS)
    grnet_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(grnet_optimizer,
                                                              milestones=cfg.TRAIN.LR_MILESTONES,
                                                              gamma=cfg.TRAIN.GAMMA)
    # Set up loss functions (gridding_loss is instantiated but unused below;
    # kept to preserve the original behavior and its lgtm suppression)
    chamfer_dist = ChamferDistance()
    gridding_loss = GriddingLoss(    # lgtm [py/unused-local-variable]
        scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
        alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)
    # Load pretrained model if exists
    init_epoch = 0
    best_metrics = None
    if 'WEIGHTS' in cfg.CONST:
        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        # Resume the epoch counter too; previously it stayed at 0, so training
        # restarted from epoch 1 even though the log reported a recovery.
        init_epoch = checkpoint.get('epoch_index', 0)
        best_metrics = Metrics(cfg.TEST.METRIC_NAME, checkpoint['best_metrics'])
        grnet.load_state_dict(checkpoint['grnet'])
        logging.info('Recover complete. Current epoch = #%d; best metrics = %s.' % (init_epoch, best_metrics))
    # Training/Testing the network
    for epoch_idx in range(init_epoch + 1, cfg.TRAIN.N_EPOCHS + 1):
        epoch_start_time = time()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter(['SparseLoss', 'DenseLoss'])
        grnet.train()
        batch_end_time = time()
        n_batches = len(train_data_loader)
        for batch_idx, (taxonomy_ids, model_ids, data) in enumerate(train_data_loader):
            data_time.update(time() - batch_end_time)
            for k, v in data.items():
                data[k] = utils.helpers.var_or_cuda(v)
            sparse_ptcloud, dense_ptcloud = grnet(data)
            sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
            dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])
            _loss = sparse_loss + dense_loss
            # Losses are logged scaled by 1000 for readability.
            losses.update([sparse_loss.item() * 1000, dense_loss.item() * 1000])
            grnet.zero_grad()
            _loss.backward()
            grnet_optimizer.step()
            n_itr = (epoch_idx - 1) * n_batches + batch_idx
            train_writer.add_scalar('Loss/Batch/Sparse', sparse_loss.item() * 1000, n_itr)
            train_writer.add_scalar('Loss/Batch/Dense', dense_loss.item() * 1000, n_itr)
            batch_time.update(time() - batch_end_time)
            batch_end_time = time()
            logging.info('[Epoch %d/%d][Batch %d/%d] BatchTime = %.3f (s) DataTime = %.3f (s) Losses = %s' %
                         (epoch_idx, cfg.TRAIN.N_EPOCHS, batch_idx + 1, n_batches, batch_time.val(), data_time.val(),
                          ['%.4f' % l for l in losses.val()]))
        grnet_lr_scheduler.step()
        epoch_end_time = time()
        train_writer.add_scalar('Loss/Epoch/Sparse', losses.avg(0), epoch_idx)
        train_writer.add_scalar('Loss/Epoch/Dense', losses.avg(1), epoch_idx)
        logging.info(
            '[Epoch %d/%d] EpochTime = %.3f (s) Losses = %s' %
            (epoch_idx, cfg.TRAIN.N_EPOCHS, epoch_end_time - epoch_start_time, ['%.4f' % l for l in losses.avg()]))
        # Validate the current model
        metrics = test_net(cfg, epoch_idx, val_data_loader, val_writer, grnet)
        # Save checkpoints: 'ckpt-best.pth' is overwritten whenever metrics
        # improve; periodic epoch snapshots are kept every SAVE_FREQ epochs.
        if epoch_idx % cfg.TRAIN.SAVE_FREQ == 0 or metrics.better_than(best_metrics):
            file_name = 'ckpt-best.pth' if metrics.better_than(best_metrics) else 'ckpt-epoch-%03d.pth' % epoch_idx
            output_path = os.path.join(cfg.DIR.CHECKPOINTS, file_name)
            torch.save({
                'epoch_index': epoch_idx,
                'best_metrics': metrics.state_dict(),
                'grnet': grnet.state_dict()
            }, output_path)  # yapf: disable
            logging.info('Saved checkpoint to %s ...' % output_path)
            if metrics.better_than(best_metrics):
                best_metrics = metrics
    train_writer.close()
    val_writer.close()
import logging
import os
import torch
import GRNetDetector.utils.data_loaders
import GRNetDetector.utils.helpers
from datetime import datetime
from time import time
from tensorboardX import SummaryWriter
from GRNetDetector.core.test import test_net
from GRNetDetector.extensions.chamfer_dist import ChamferDistance
from GRNetDetector.extensions.gridding_loss import GriddingLoss
from GRNetDetector.models.grnet import GRNet
from GRNetDetector.utils.average_meter import AverageMeter
from GRNetDetector.utils.metrics import Metrics
def train_net(cfg):
    """Train GRNet on the configured dataset, validating and checkpointing per epoch.

    Arguments:
        cfg -- config object with DATASET, CONST, DIR, TRAIN, TEST and NETWORK
               sections (attribute access, EasyDict-style).

    Side effects:
        Creates checkpoint/log directories under cfg.DIR.OUT_PATH, writes
        tensorboard event files and checkpoint files, mutates cfg.DIR.
    """
    # The module imports `GRNetDetector.utils.*` but the body below refers to the
    # bare name `utils`, which was never bound and raised NameError at runtime.
    # Bind it locally so those references resolve.
    import GRNetDetector.utils as utils

    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
    torch.backends.cudnn.benchmark = True
    # Set up data loaders (train shuffled/dropping last partial batch; val is
    # batch_size=1 so variable-sized point clouds need no padding).
    train_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg)
    test_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
    train_data_loader = torch.utils.data.DataLoader(dataset=train_dataset_loader.get_dataset(
        utils.data_loaders.DatasetSubset.TRAIN),
                                                    batch_size=cfg.TRAIN.BATCH_SIZE,
                                                    num_workers=cfg.CONST.NUM_WORKERS,
                                                    collate_fn=utils.data_loaders.collate_fn,
                                                    pin_memory=True,
                                                    shuffle=True,
                                                    drop_last=True)
    val_data_loader = torch.utils.data.DataLoader(dataset=test_dataset_loader.get_dataset(
        utils.data_loaders.DatasetSubset.VAL),
                                                  batch_size=1,
                                                  num_workers=cfg.CONST.NUM_WORKERS,
                                                  collate_fn=utils.data_loaders.collate_fn,
                                                  pin_memory=True,
                                                  shuffle=False)
    # Set up folders for logs and checkpoints ('%s' is filled per sub-dir below)
    output_dir = os.path.join(cfg.DIR.OUT_PATH, '%s', datetime.now().isoformat())
    cfg.DIR.CHECKPOINTS = output_dir % 'checkpoints'
    cfg.DIR.LOGS = output_dir % 'logs'
    if not os.path.exists(cfg.DIR.CHECKPOINTS):
        os.makedirs(cfg.DIR.CHECKPOINTS)
    # Create tensorboard writers
    train_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'train'))
    val_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'test'))
    # Create the networks
    grnet = GRNet(cfg)
    grnet.apply(utils.helpers.init_weights)
    logging.debug('Parameters in GRNet: %d.' % utils.helpers.count_parameters(grnet))
    # Move the network to GPU if possible
    if torch.cuda.is_available():
        grnet = torch.nn.DataParallel(grnet).cuda()
    # Create the optimizers
    grnet_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, grnet.parameters()),
                                       lr=cfg.TRAIN.LEARNING_RATE,
                                       weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                                       betas=cfg.TRAIN.BETAS)
    grnet_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(grnet_optimizer,
                                                              milestones=cfg.TRAIN.LR_MILESTONES,
                                                              gamma=cfg.TRAIN.GAMMA)
    # Set up loss functions (gridding_loss is instantiated but unused below;
    # kept to preserve the original behavior and its lgtm suppression)
    chamfer_dist = ChamferDistance()
    gridding_loss = GriddingLoss(    # lgtm [py/unused-local-variable]
        scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
        alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)
    # Load pretrained model if exists
    init_epoch = 0
    best_metrics = None
    if 'WEIGHTS' in cfg.CONST:
        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        # Resume the epoch counter too; previously it stayed at 0, so training
        # restarted from epoch 1 even though the log reported a recovery.
        init_epoch = checkpoint.get('epoch_index', 0)
        best_metrics = Metrics(cfg.TEST.METRIC_NAME, checkpoint['best_metrics'])
        grnet.load_state_dict(checkpoint['grnet'])
        logging.info('Recover complete. Current epoch = #%d; best metrics = %s.' % (init_epoch, best_metrics))
    # Training/Testing the network
    for epoch_idx in range(init_epoch + 1, cfg.TRAIN.N_EPOCHS + 1):
        epoch_start_time = time()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter(['SparseLoss', 'DenseLoss'])
        grnet.train()
        batch_end_time = time()
        n_batches = len(train_data_loader)
        for batch_idx, (taxonomy_ids, model_ids, data) in enumerate(train_data_loader):
            data_time.update(time() - batch_end_time)
            for k, v in data.items():
                data[k] = utils.helpers.var_or_cuda(v)
            sparse_ptcloud, dense_ptcloud = grnet(data)
            sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
            dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])
            _loss = sparse_loss + dense_loss
            # Losses are logged scaled by 1000 for readability.
            losses.update([sparse_loss.item() * 1000, dense_loss.item() * 1000])
            grnet.zero_grad()
            _loss.backward()
            grnet_optimizer.step()
            n_itr = (epoch_idx - 1) * n_batches + batch_idx
            train_writer.add_scalar('Loss/Batch/Sparse', sparse_loss.item() * 1000, n_itr)
            train_writer.add_scalar('Loss/Batch/Dense', dense_loss.item() * 1000, n_itr)
            batch_time.update(time() - batch_end_time)
            batch_end_time = time()
            logging.info('[Epoch %d/%d][Batch %d/%d] BatchTime = %.3f (s) DataTime = %.3f (s) Losses = %s' %
                         (epoch_idx, cfg.TRAIN.N_EPOCHS, batch_idx + 1, n_batches, batch_time.val(), data_time.val(),
                          ['%.4f' % l for l in losses.val()]))
        grnet_lr_scheduler.step()
        epoch_end_time = time()
        train_writer.add_scalar('Loss/Epoch/Sparse', losses.avg(0), epoch_idx)
        train_writer.add_scalar('Loss/Epoch/Dense', losses.avg(1), epoch_idx)
        logging.info(
            '[Epoch %d/%d] EpochTime = %.3f (s) Losses = %s' %
            (epoch_idx, cfg.TRAIN.N_EPOCHS, epoch_end_time - epoch_start_time, ['%.4f' % l for l in losses.avg()]))
        # Validate the current model
        metrics = test_net(cfg, epoch_idx, val_data_loader, val_writer, grnet)
        # Save checkpoints: 'ckpt-best.pth' is overwritten whenever metrics
        # improve; periodic epoch snapshots are kept every SAVE_FREQ epochs.
        if epoch_idx % cfg.TRAIN.SAVE_FREQ == 0 or metrics.better_than(best_metrics):
            file_name = 'ckpt-best.pth' if metrics.better_than(best_metrics) else 'ckpt-epoch-%03d.pth' % epoch_idx
            output_path = os.path.join(cfg.DIR.CHECKPOINTS, file_name)
            torch.save({
                'epoch_index': epoch_idx,
                'best_metrics': metrics.state_dict(),
                'grnet': grnet.state_dict()
            }, output_path)  # yapf: disable
            logging.info('Saved checkpoint to %s ...' % output_path)
            if metrics.better_than(best_metrics):
                best_metrics = metrics
    train_writer.close()
    val_writer.close()
from flask import Flask, jsonify, render_template
from uuid import uuid4
from random import randint
import random
from blockchain_db import BlockchainDB
app = Flask(__name__)
blockchain_db_manager = BlockchainDB()
@app.route('/', methods=['GET'])
def hello_world():
    """Render the landing page with a welcome message.

    :return: HTML
    """
    return render_template('landing.html',
                           data={'header': 'Welcome to BlockchainDB'})
@app.route('/reset', methods=['GET'])
def reset():
    """Drop the database and recreate the genesis block.

    Run once at start-up, or whenever a fresh chain is wanted.

    :return: HTML
    """
    blockchain_db_manager.reset()
    return render_template(
        'landing.html',
        data={'header': 'Successfully generated a genesis block'})
@app.route('/mine/<int:number>', methods=['GET'])
def mine_blocks(number):
    """Mine a given number of blocks filled with randomly generated transactions.

    :param number: how many blocks to mine
    :return: HTML
    """
    # NOTE(review): the transaction count is drawn once and reused for every
    # block, so all mined blocks carry the same number of transactions.
    transactions_range = randint(1, 10)
    for _ in range(number):
        for _ in range(transactions_range):
            sender = str(uuid4()).replace('-', '')[:-10]
            recipient = str(uuid4()).replace('-', '')[:-10]
            blockchain_db_manager.add_transaction(
                sender=sender,
                recipient=recipient,
                amount=round(random.uniform(1, 10), 2))
        blockchain_db_manager.mine_for_next_block()
    return render_template(
        'landing.html',
        data={'header': 'Successfully mined {0} blocks'.format(number)})
@app.route('/view/chain', methods=['GET'])
def view_blockchain():
    """Render the entire blockchain.

    :return: HTML
    """
    data = {
        'chain': blockchain_db_manager.get_all_blocks(),
        'length': blockchain_db_manager.get_length(),
        'header': 'Full chain',
    }
    return render_template('chain.html', data=data)
@app.route('/view/last_blocks/<int:number>', methods=['GET'])
def view_last_n_block(number):
    """Render the most recently mined blocks, newest first.

    :param number: how many blocks to show
    :return: HTML
    """
    blocks = blockchain_db_manager.get_last_n_blocks(number)
    # Display order is newest-to-oldest.
    newest_first = [blocks[i] for i in range(number - 1, -1, -1)]
    data = {
        'chain': newest_first,
        'length': number,
        'header': 'Last {0} Blocks'.format(number),
    }
    return render_template('chain.html', data=data)
@app.route('/view/last_block', methods=['GET'])
def view_last_block():
    """Render only the most recently mined block.

    :return: HTML
    """
    data = {
        'chain': [blockchain_db_manager.get_last_block()],
        'length': 1,
        'header': 'Last Block',
    }
    return render_template('chain.html', data=data)
@app.route('/view/genesis_block', methods=['GET'])
def view_genesis_block():
    """Render the genesis (first) block of the chain.

    :return: HTML
    """
    data = {
        'chain': [blockchain_db_manager.get_genesis_block()],
        'length': 1,
        'header': 'Genesis Block',
    }
    return render_template('chain.html', data=data)
@app.route('/view/block/<int:number>', methods=['GET'])
def view_block(number):
    """Render one block identified by its height.

    :param number: block height
    :return: HTML
    """
    data = {
        'chain': [blockchain_db_manager.get_block(number)],
        'length': 1,
        'header': 'Block {0}'.format(number),
    }
    return render_template('chain.html', data=data)
@app.route('/view/top/<int:number>/<string:state>', methods=['GET'])
def view_top_blocks(number, state):
    """Render the top blocks ranked by the given state.

    :param number: how many blocks to show
    :param state: difficulty | elapsed_time | block_reward | hash_power | height | nonce | number_of_transaction
    :return: HTML
    """
    blocks = blockchain_db_manager.get_top_blocks(state=state, number=number)
    # Display order is reversed relative to what the manager returns.
    reversed_blocks = [blocks[i] for i in range(number - 1, -1, -1)]
    data = {
        'chain': reversed_blocks,
        'length': number,
        'header': 'Top {0} {1}'.format(number, state),
    }
    return render_template('chain.html', data=data)
# Run the development server when executed as a script.
# (The original line carried fused dataset-dump residue and a duplicated
# flask import after `app.run()`, which was a syntax error; removed.)
if __name__ == '__main__':
    app.run()
from uuid import uuid4
from random import randint
import random
from blockchain_db import BlockchainDB
app = Flask(__name__)
blockchain_db_manager = BlockchainDB()
@app.route('/', methods=['GET'])
def hello_world():
"""
Welcome to Blockchain message
:return: HTML
"""
response = {
'header': 'Welcome to BlockchainDB'
}
return render_template('landing.html', data=response)
@app.route('/reset', methods=['GET'])
def reset():
"""
Drop the database and start all over again by creating the genesis block.
Run once when start, or whenever you feel like dropping!
:return: HTML
"""
blockchain_db_manager.reset()
response = {
'header': 'Successfully generated a genesis block'
}
return render_template('landing.html', data=response)
@app.route('/mine/<int:number>', methods=['GET'])
def mine_blocks(number):
"""
Mine for a some number of blocks with random generated transactions.
:return: HTML
"""
transactions_range = randint(1, 10)
for i in range(number):
for transaction in range(transactions_range):
blockchain_db_manager.add_transaction(sender=(str(uuid4()).replace('-', '')[:-10]),
recipient=(str(uuid4()).replace('-', '')[:-10]),
amount=round(random.uniform(1, 10), 2))
blockchain_db_manager.mine_for_next_block()
response = {
'header': 'Successfully mined {0} blocks'.format(number)
}
return render_template('landing.html', data=response)
@app.route('/view/chain', methods=['GET'])
def view_blockchain():
"""
View the full BlockChain.
:return: HTML
"""
response = {
'chain': blockchain_db_manager.get_all_blocks(),
'length': blockchain_db_manager.get_length(),
'header': 'Full chain'
}
return render_template('chain.html', data=response)
@app.route('/view/last_blocks/<int:number>', methods=['GET'])
def view_last_n_block(number):
"""
View the last number of mined blocks.
:param number: Number of blocks
:return: HTML
"""
# Reverse order to display latest ones to oldest one
temp = []
blocks = blockchain_db_manager.get_last_n_blocks(number)
for i in range(number - 1, -1, -1):
temp.append(blocks[i])
response = {
'chain': temp,
'length': number,
'header': 'Last {0} Blocks'.format(number)
}
return render_template('chain.html', data=response)
@app.route('/view/last_block', methods=['GET'])
def view_last_block():
"""
View the last block.
:return: HTML
"""
response = {
'chain': [blockchain_db_manager.get_last_block()],
'length': 1,
'header': 'Last Block'
}
return render_template('chain.html', data=response)
@app.route('/view/genesis_block', methods=['GET'])
def view_genesis_block():
"""
View the genesis block.
:return: HTML
"""
response = {
'chain': [blockchain_db_manager.get_genesis_block()],
'length': 1,
'header': 'Genesis Block'
}
return render_template('chain.html', data=response)
@app.route('/view/block/<int:number>', methods=['GET'])
def view_block(number):
"""
View a specific block for a given height number.
:param number: Block height
:return: HTML
"""
response = {
'chain': [blockchain_db_manager.get_block(number)],
'length': 1,
'header': 'Block {0}'.format(number)
}
return render_template('chain.html', data=response)
@app.route('/view/top/<int:number>/<string:state>', methods=['GET'])
def view_top_blocks(number, state):
"""
View a number of top blocks for a given state.
:param number: Number of blocks
:param state: difficulty | elapsed_time | block_reward | hash_power | height | nonce | number_of_transaction
:return: HTML
"""
# Reverse order to display latest ones to oldest one
temp = []
blocks = blockchain_db_manager.get_top_blocks(state=state, number=number)
for i in range(number - 1, -1, -1):
temp.append(blocks[i])
response = {
'chain': temp,
'length': number,
'header': 'Top {0} {1}'.format(number, state)
}
return render_template('chain.html', data=response)
# Run the development server when executed as a script.
# (The original line carried fused dataset-dump residue after `app.run()`,
# which was a syntax error; removed.)
if __name__ == '__main__':
    app.run()
import heapq as heap
filename = 'e_high_bonus'
class Ride:
    """A single ride request, scored for greedy assignment to a vehicle."""

    def __init__(self, start_row, start_col, finish_row, finish_col, early_start, late_finish, bonus, i):
        self.start_row = start_row
        self.start_col = start_col
        self.finish_row = finish_row
        self.finish_col = finish_col
        self.early_start = early_start
        self.late_finish = late_finish
        self.id = i
        self.score = 0
        # Manhattan length of the ride itself.
        self.distance = abs(finish_row - start_row) + abs(finish_col - start_col)
        # Manhattan distance from a vehicle to the pickup (set by calculate_score).
        self.init_dist = 0
        self.bonus = bonus
        self.calculate_score()

    def calculate_score(self, next_start=0, row=0, col=0):
        """Re-score this ride for a vehicle at (row, col) free at step next_start."""
        self.init_dist = abs(self.start_col - col) + abs(self.start_row - row)
        pickup_time = self.init_dist + next_start
        if pickup_time <= self.early_start:
            # Early enough for the bonus: full reward.
            self.score = self.bonus + self.distance
        elif pickup_time + self.distance <= self.late_finish:
            # Feasible, but no bonus.
            self.score = self.distance
        # NOTE(review): an infeasible ride keeps its previous score rather than
        # being zeroed -- confirm that is intended.

    def __repr__(self):
        return '({0}, {1})'.format(self.id, self.score)

    def __lt__(self, other):
        # Inverted comparison so heapq acts as a max-heap on score.
        return self.score > other.score
class Vehicle:
    """A car that accumulates assigned rides; heap-ordered by next free time."""

    def __init__(self):
        self.rides = []
        self.row = 0
        self.col = 0
        self.busy = False
        self.finished_rides = 0
        self.rides_order = []
        # Simulation step at which this vehicle becomes free again.
        self.next = 0

    def update_rides(self):
        """Pop and immediately re-push the top ride (restores heap order)."""
        best = heap.heappop(self.rides)
        heap.heappush(self.rides, best)

    def move(self, row, col):
        """Jump to (row, col); return the per-axis distances travelled."""
        deltas = [abs(row - self.row), abs(col - self.col)]
        self.row = row
        self.col = col
        return deltas

    def assign_ride(self, ride):
        """Commit to a ride: record it, advance the free time, move to drop-off."""
        self.finished_rides += 1
        self.rides_order.append(ride.id)
        self.next += ride.init_dist + ride.distance
        self.row = ride.finish_row
        self.col = ride.finish_col
        return self

    def __lt__(self, other):
        # Min-heap on the time the vehicle next becomes free.
        return self.next < other.next
def start_process(ride, vehicle, grid_rows, grid_cols, bonus, sim_steps):
    # Unimplemented placeholder; its call site in main() is commented out.
    pass
def init_tasks(vehicles, ride_queue):
    """Seed every vehicle with one ride: best-scored ride to the next free car."""
    for _ in range(len(vehicles)):
        car = heap.heappop(vehicles)
        ride = heap.heappop(ride_queue)
        heap.heappush(vehicles, car.assign_ride(ride))
def simulate(vehicles, ride_queue, t):
    """Greedy simulation: repeatedly hand the next-free vehicle its best ride.

    Arguments:
        vehicles -- heap of vehicles ordered by next-free time (mutated in place)
        ride_queue -- heap of rides ordered by score, max-heap via inverted __lt__
        t -- total number of simulation steps available

    Side effects:
        Assigns rides to vehicles; consumes the ride queue. (Removed a leftover
        debug `print(time)` that ran on every loop iteration.)
    """
    time = 0
    while ride_queue and time <= t:
        next_vehicle = heap.heappop(vehicles)
        time = next_vehicle.next
        # Re-score every remaining ride for this vehicle's position and free
        # time, rebuilding the heap so its best ride is on top.
        rescored = []
        for ride in ride_queue:
            ride.calculate_score(next_vehicle.next, next_vehicle.row, next_vehicle.col)
            heap.heappush(rescored, ride)
        ride_queue = rescored  # local rebind; loop condition uses this heap
        next_ride = heap.heappop(rescored)
        heap.heappush(vehicles, next_vehicle.assign_ride(next_ride))
def main():
    """Read the input file, build rides and vehicles, run the solver, write output."""
    with open(filename + '.in') as input_file:
        input_lines = input_file.read().splitlines()
    header = [int(field) for field in input_lines[0].split(' ')]
    grid_rows, grid_cols, vehicle_no, rides_no, bonus, sim_steps = header
    ride_queue = []
    for idx in range(rides_no):
        fields = [int(field) for field in input_lines[1 + idx].split(' ')]
        heap.heappush(ride_queue, Ride(*fields, bonus, idx))
    fleet = [Vehicle() for _ in range(vehicle_no)]
    init_tasks(fleet, ride_queue)
    simulate(fleet, ride_queue, sim_steps)
    # start_process(ride, vehicle, grid_rows, grid_cols, bonus, sim_steps)
    write_file(fleet)
def write_file(vehicle):
    """Write one output line per car: ride count followed by the ride ids."""
    with open(filename + '.out', 'w') as out:
        for car in vehicle:
            ids = ' '.join(str(ride_id) for ride_id in car.rides_order)
            out.write(str(car.finished_rides) + ' ' + ids + '\n')
# Script entry point. (The original line carried fused dataset-dump residue
# and a duplicated heapq import after the call, which was a syntax error.)
main()
filename = 'e_high_bonus'
class Ride:
def __init__(self, start_row, start_col, finish_row, finish_col, early_start, late_finish, bonus, i):
self.start_row = start_row
self.start_col = start_col
self.finish_row = finish_row
self.finish_col = finish_col
self.early_start = early_start
self.late_finish = late_finish
self.id = i
self.score = 0
self.distance = abs(finish_row - start_row) + abs(finish_col - start_col)
self.init_dist = 0
self.bonus = bonus
self.calculate_score()
def calculate_score(self, next_start=0, row=0, col=0):
self.init_dist = abs(self.start_col - col) + abs(self.start_row - row)
if self.init_dist + next_start <= self.early_start:
self.score = self.bonus + self.distance
elif self.init_dist + next_start + self.distance <= self.late_finish:
self.score = self.distance
def __repr__(self):
return '(' + str(self.id) + ', ' + str(self.score) + ')'
def __lt__(self, other):
return self.score > other.score
class Vehicle:
def __init__(self):
self.rides = []
self.row = 0
self.col = 0
self.busy = False
self.finished_rides = 0
self.rides_order = []
self.next = 0
def update_rides(self):
this_ride = heap.heappop(self.rides)
heap.heappush(self.rides, this_ride)
def move(self, row, col):
row_dist = abs(row - self.row)
col_dist = abs(col - self.col)
self.row = row
self.col = col
return [row_dist, col_dist]
def assign_ride(self, ride):
self.finished_rides += 1
self.rides_order.append(ride.id)
self.next += ride.init_dist + ride.distance
self.row = ride.finish_row
self.col = ride.finish_col
return self
def __lt__(self, other):
return self.next < other.next
def start_process(ride, vehicle, grid_rows, grid_cols, bonus, sim_steps):
pass
def init_tasks(vehicles, ride_queue):
for i in range(len(vehicles)):
next_vehicle = heap.heappop(vehicles)
next_ride = heap.heappop(ride_queue)
next_vehicle = next_vehicle.assign_ride(next_ride)
heap.heappush(vehicles, next_vehicle)
def simulate(vehicles, ride_queue, t):
time = 0
while True:
if (len(ride_queue) == 0) or time > t:
break
print(time)
next_vehicle = heap.heappop(vehicles)
time = next_vehicle.next
new_scores = []
for ride in ride_queue:
ride.calculate_score(next_vehicle.next, next_vehicle.row, next_vehicle.col)
heap.heappush(new_scores, ride)
ride_queue = new_scores
next_ride = heap.heappop(new_scores)
next_vehicle = next_vehicle.assign_ride(next_ride)
heap.heappush(vehicles, next_vehicle)
def main():
with open(filename + '.in') as input_file:
input_data = input_file.read()
input_lines = input_data.splitlines()
grid_rows, grid_cols, vehicle_no, rides_no, bonus, sim_steps = [int(i) for i in input_lines[0].split(' ')]
ride_data = input_lines[1:]
ride_queue = []
for i in range(rides_no):
ride = Ride(*[int(i) for i in ride_data[i].split(' ')], bonus, i)
heap.heappush(ride_queue, ride)
vehicle = []
for i in range(vehicle_no):
vehicle.append(Vehicle())
# print(vehicle)
# print(ride_queue)
init_tasks(vehicle, ride_queue)
simulate(vehicle, ride_queue, sim_steps)
# start_process(ride, vehicle, grid_rows, grid_cols, bonus, sim_steps)
write_file(vehicle)
def write_file(vehicle):
with open(filename + '.out', 'w') as f:
for car in vehicle:
# rides = ' '.joincar.rides_order
f.write(str(car.finished_rides) + ' ' + ' '.join([str(i) for i in car.rides_order]) + '\n')
# Script entry point. (The original line carried fused dataset-dump residue
# after the call, which was a syntax error; removed.)
main()
import json
import os
import time
from typing import Any, List, Optional, Tuple
import urllib.parse
from bitcoinx import TxOutput, Script, Address, classify_output_script
import requests
from .exceptions import FileImportFailed, FileImportFailedEncrypted, Bip270Exception
from .logs import logs
from .util import bfh
# Module-level logger for this file.
logger = logs.get_logger("paymentrequest")
# HTTP headers used when fetching a BIP270-style payment request.
REQUEST_HEADERS = {
    'Accept': 'application/bitcoinsv-paymentrequest',
    'User-Agent': 'ElectrumSV'
}
# HTTP headers used when POSTing a Payment and expecting a PaymentACK.
ACK_HEADERS = {
    'Content-Type': 'application/bitcoinsv-payment',
    'Accept': 'application/bitcoinsv-paymentack',
    'User-Agent': 'ElectrumSV'
}
# Used for requests.
ca_path = requests.certs.where()
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2  # sent but not propagated
PR_PAID = 3  # sent and propagated
class Output:
    """One requested payment output: a locking script with optional amount
    and description.

    FIXME: this should either be removed in favour of TxOutput, or be a
    lighter wrapper around it.
    """

    def __init__(self, script: Script, amount: Optional[int]=None,
            description: Optional[str]=None):
        self.script = script
        # TODO: Must not have a JSON string length of 100 bytes.
        if description is not None and len(json.dumps(description)) > 100:
            raise Bip270Exception("Output description too long")
        self.description = description
        self.amount = amount

    def address(self):
        """Classify the locking script into an address-like object."""
        return classify_output_script(self.script)

    def to_tx_output(self):
        return TxOutput(self.amount, self.script)

    def to_ui_dict(self) -> dict:
        return {'amount': self.amount, 'address': self.address()}

    def get_address_string(self):
        return self.address().to_string()

    @classmethod
    def from_dict(cls, data: dict) -> 'Output':
        """Build an Output from a BIP270 JSON dictionary, validating types."""
        if 'script' not in data:
            raise Bip270Exception("Missing required 'script' field")
        amount = data.get('amount')
        if amount is not None and type(amount) is not int:
            raise Bip270Exception("Invalid 'amount' field")
        description = data.get('description')
        if description is not None and type(description) is not str:
            raise Bip270Exception("Invalid 'description' field")
        return cls(Script.from_hex(data['script']), amount, description)

    def to_dict(self) -> dict:
        """Serialize to a BIP270 JSON dictionary, omitting unset optionals."""
        data = {'script': self.script.to_hex()}
        if self.amount and type(self.amount) is int:
            data['amount'] = self.amount
        if self.description:
            data['description'] = self.description
        return data

    @classmethod
    def from_json(cls, s: str) -> 'Output':
        return cls.from_dict(json.loads(s))

    def to_json(self) -> str:
        return json.dumps(self.to_dict())
class PaymentRequest:
MAXIMUM_JSON_LENGTH = 10 * 1000 * 1000
    def __init__(self, outputs, creation_timestamp=None, expiration_timestamp=None, memo=None,
            payment_url=None, merchant_data=None):
        """Create a payment request for the given list of Output objects.

        creation_timestamp defaults to "now"; an expiration_timestamp of None
        means the request never expires.
        """
        # This is only used if there is a requestor identity (old openalias, needs rewrite).
        self.id = os.urandom(16).hex()
        # This is related to identity.
        self.requestor = None # known after verify
        # Transaction that pays this request, once one exists.
        self.tx = None
        self.outputs = outputs
        # Normalize timestamps to integer unix time.
        if creation_timestamp is not None:
            creation_timestamp = int(creation_timestamp)
        else:
            creation_timestamp = int(time.time())
        self.creation_timestamp = creation_timestamp
        if expiration_timestamp is not None:
            expiration_timestamp = int(expiration_timestamp)
        self.expiration_timestamp = expiration_timestamp
        self.memo = memo
        self.payment_url = payment_url
        self.merchant_data = merchant_data
    def __str__(self) -> str:
        # Canonical string form is the BIP270 JSON serialization.
        return self.to_json()
    @classmethod
    def from_wallet_entry(klass, data: dict) -> 'PaymentRequest':
        """Build a request from a wallet receive-request record.

        Reads 'address', 'amount', 'memo', and optionally 'time' (creation)
        plus 'exp' (lifetime in seconds relative to 'time').
        """
        address = data['address']
        amount = data['amount']
        memo = data['memo']
        creation_timestamp = data.get('time')
        expiration_timestamp = None
        expiration_seconds = data.get('exp')
        if creation_timestamp is not None and expiration_seconds is not None:
            expiration_timestamp = creation_timestamp + expiration_seconds
        outputs = [ Output(address.to_script(), amount) ]
        return klass(outputs, creation_timestamp, expiration_timestamp, memo)
    @classmethod
    def from_json(klass, s: str) -> 'PaymentRequest':
        """Parse and validate a BIP270 payment request JSON string.

        Raises Bip270Exception on oversized input, wrong network, or any
        field of the wrong type; validation order is size, network, outputs,
        creationTimestamp, then the optional fields.
        """
        if len(s) > klass.MAXIMUM_JSON_LENGTH:
            raise Bip270Exception(f"Invalid payment request, too large")
        d = json.loads(s)
        # Only mainnet 'bitcoin' requests are accepted.
        network = d.get('network')
        if network != 'bitcoin':
            raise Bip270Exception(f"Invalid json network: {network}")
        if 'outputs' not in d:
            raise Bip270Exception("Missing required json 'outputs' field")
        if type(d['outputs']) is not list:
            raise Bip270Exception("Invalid json 'outputs' field")
        outputs = []
        for ui_dict in d['outputs']:
            outputs.append(Output.from_dict(ui_dict))
        pr = klass(outputs)
        if 'creationTimestamp' not in d:
            raise Bip270Exception("Missing required json 'creationTimestamp' field")
        creation_timestamp = d['creationTimestamp']
        if type(creation_timestamp) is not int:
            raise Bip270Exception("Invalid json 'creationTimestamp' field")
        pr.creation_timestamp = creation_timestamp
        # The remaining fields are optional but type-checked when present.
        expiration_timestamp = d.get('expirationTimestamp')
        if expiration_timestamp is not None and type(expiration_timestamp) is not int:
            raise Bip270Exception("Invalid json 'expirationTimestamp' field")
        pr.expiration_timestamp = expiration_timestamp
        memo = d.get('memo')
        if memo is not None and type(memo) is not str:
            raise Bip270Exception("Invalid json 'memo' field")
        pr.memo = memo
        payment_url = d.get('paymentUrl')
        if payment_url is not None and type(payment_url) is not str:
            raise Bip270Exception("Invalid json 'paymentUrl' field")
        pr.payment_url = payment_url
        merchant_data = d.get('merchantData')
        if merchant_data is not None and type(merchant_data) is not str:
            raise Bip270Exception("Invalid json 'merchantData' field")
        pr.merchant_data = merchant_data
        return pr
def to_json(self) -> str:
d = {}
d['network'] = 'bitcoin'
d['outputs'] = [ output.to_dict() for output in self.outputs ]
d['creationTimestamp'] = self.creation_timestamp
if self.expiration_timestamp is not None:
d['expirationTimestamp'] = self.expiration_timestamp
if self.memo is not None:
d['memo'] = self.memo
if self.payment_url is not None:
d['paymentUrl'] = self.payment_url
if self.merchant_data is not None:
d['merchantData'] = self.merchant_data
return json.dumps(d)
def is_pr(self):
return self.get_amount() != 0
def verify(self, contacts) -> bool:
# the address will be dispayed as requestor
self.requestor = None
return True
def has_expired(self) -> bool:
return self.expiration_timestamp and self.expiration_timestamp < int(time.time())
    def get_expiration_date(self) -> int:
        # NOTE(review): may return None when no expiration was set, despite
        # the int annotation — confirm callers tolerate that.
        """Unix timestamp after which the request is void (None if unset)."""
        return self.expiration_timestamp

    def get_amount(self) -> int:
        """Total requested amount, summed over all outputs."""
        return sum(x.amount for x in self.outputs)

    def get_address(self) -> str:
        """Address string of the first output; used as a display fallback."""
        return self.outputs[0].get_address_string()

    def get_requestor(self) -> str:
        """Verified requestor identity, falling back to the first address."""
        return self.requestor if self.requestor else self.get_address()
def get_verify_status(self) -> str:
return self.error if self.requestor else "No Signature"
    def get_memo(self) -> str:
        """Free-form memo supplied with the request (may be None)."""
        return self.memo

    def get_id(self):
        # The random per-instance id is only meaningful with a verified
        # requestor; otherwise the receiving address doubles as the key.
        return self.id if self.requestor else self.get_address()

    def get_outputs(self) -> List[TxOutput]:
        """Convert the BIP270 outputs into bitcoinx TxOutput instances."""
        return [output.to_tx_output() for output in self.outputs]
    def send_payment(self, transaction_hex: str, refund_address: Address) -> Tuple[bool, str]:
        """POST a BIP270 Payment message to this request's payment URL.

        Returns ``(True, memo)`` on an acknowledged payment, otherwise
        ``(False, reason)``.  Note the transaction may already have been
        broadcast by the server even when the ACK cannot be parsed.
        """
        if not self.payment_url:
            return False, "no url"
        payment_memo = "Paid using ElectrumSV"
        payment = Payment(self.merchant_data, transaction_hex, [], payment_memo)
        payment.refund_outputs.append(Output(refund_address.to_script()))
        # Re-parse so an invalid payment_url fails here rather than deep
        # inside the requests library.
        parsed_url = urllib.parse.urlparse(self.payment_url)
        response = self._make_request(parsed_url.geturl(), payment.to_json())
        if response is None:
            # _make_request returns None only on SSL failure.
            return False, "Payment Message/PaymentACK Failed"
        if response.get_status_code() != 200:
            # Propagate 'Bad request' (HTTP 400) messages to the user since they
            # contain valuable information.
            if response.get_status_code() == 400:
                return False, f"{response.get_reason()}: {response.get_content().decode('UTF-8')}"
            # Some other errors might display an entire HTML document.
            # Hide those and just display the name of the error code.
            return False, response.get_reason()
        try:
            payment_ack = PaymentACK.from_json(response.get_content())
        except Exception:
            return False, ("PaymentACK could not be processed. Payment was sent; "
                "please manually verify that payment was received.")
        logger.debug("PaymentACK message received: %s", payment_ack.memo)
        return True, payment_ack.memo
    # The following method and wrapper class are abstracted out so unit
    # tests can substitute fake HTTP responses.
    def _make_request(self, url, message):
        """POST ``message`` to ``url`` with the BIP270 payment headers.

        Returns a ``_RequestsResponseWrapper``, or None on SSL failure.
        Other request exceptions deliberately propagate to the caller.
        """
        try:
            r = requests.post(url, data=message, headers=ACK_HEADERS, verify=ca_path)
        except requests.exceptions.SSLError:
            logger.exception("Payment Message/PaymentACK")
            return None
        return self._RequestsResponseWrapper(r)
class _RequestsResponseWrapper:
def __init__(self, response):
self._response = response
def get_status_code(self):
return self._response.status_code
def get_reason(self):
return self._response.reason
def get_content(self):
return self._response.content
class Payment:
    """BIP270 ``Payment`` message: the raw transaction hex together with the
    echoed merchant data, optional refund outputs and an optional memo."""

    # Reject pathological JSON before it reaches json.loads().
    MAXIMUM_JSON_LENGTH = 10 * 1000 * 1000

    def __init__(self, merchant_data: Any, transaction_hex: str, refund_outputs: List[Output],
            memo: Optional[str]=None):
        self.merchant_data = merchant_data
        self.transaction_hex = transaction_hex
        self.refund_outputs = refund_outputs
        self.memo = memo

    @classmethod
    def from_dict(cls, data: dict) -> 'Payment':
        """Deserialize from a parsed JSON dict.

        Raises Bip270Exception for any missing or badly-typed field; refund
        entries are validated individually by Output.from_dict().
        """
        if 'merchantData' not in data:
            raise Bip270Exception("Missing required json 'merchantData' field")
        merchant_data = data['merchantData']
        if 'transaction' not in data:
            raise Bip270Exception("Missing required json 'transaction' field")
        transaction_hex = data['transaction']
        if type(transaction_hex) is not str:
            raise Bip270Exception("Invalid json 'transaction' field")
        if 'refundTo' not in data:
            raise Bip270Exception("Missing required json 'refundTo' field")
        refund_entries = data['refundTo']
        if type(refund_entries) is not list:
            raise Bip270Exception("Invalid json 'refundTo' field")
        refund_outputs = [Output.from_dict(entry) for entry in refund_entries]
        memo = data.get('memo')
        if memo is not None and type(memo) is not str:
            raise Bip270Exception("Invalid json 'memo' field")
        return cls(merchant_data, transaction_hex, refund_outputs, memo)

    def to_dict(self) -> dict:
        """Serialize for JSON; the memo is included only when non-empty."""
        payload = {
            'merchantData': self.merchant_data,
            'transaction': self.transaction_hex,
            'refundTo': [refund.to_dict() for refund in self.refund_outputs],
        }
        if self.memo:
            payload['memo'] = self.memo
        return payload

    @classmethod
    def from_json(cls, s: str) -> 'Payment':
        """Parse JSON text, rejecting oversize input up front."""
        if len(s) > cls.MAXIMUM_JSON_LENGTH:
            raise Bip270Exception("Invalid payment, too large")
        return cls.from_dict(json.loads(s))

    def to_json(self) -> str:
        return json.dumps(self.to_dict())
class PaymentACK:
    """BIP270 ``PaymentACK``: echoes the Payment (as an embedded JSON
    *string*, not a nested object) plus an optional server memo."""

    MAXIMUM_JSON_LENGTH = 11 * 1000 * 1000

    def __init__(self, payment: Payment, memo: Optional[str]=None):
        self.payment = payment
        self.memo = memo

    def to_dict(self):
        payload = {
            'payment': self.payment.to_json(),
        }
        if self.memo:
            payload['memo'] = self.memo
        return payload

    @classmethod
    def from_dict(cls, data: dict) -> 'PaymentACK':
        """Deserialize, validating the memo before parsing the nested
        payment JSON string."""
        if 'payment' not in data:
            raise Bip270Exception("Missing required json 'payment' field")
        memo = data.get('memo')
        if memo is not None and type(memo) is not str:
            raise Bip270Exception("Invalid json 'memo' field")
        payment = Payment.from_json(data['payment'])
        return cls(payment, memo)

    def to_json(self) -> str:
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, s: str) -> 'PaymentACK':
        """Parse JSON text, rejecting oversize input up front."""
        if len(s) > cls.MAXIMUM_JSON_LENGTH:
            raise Bip270Exception("Invalid payment ACK, too large")
        return cls.from_dict(json.loads(s))
def get_payment_request(url: str) -> PaymentRequest:
    """Fetch and parse a BIP270 payment request from an http(s) or file URL.

    Raises Bip270Exception with a user-facing message on any failure.
    """
    error = None
    response = None
    u = urllib.parse.urlparse(url)
    if u.scheme in ['http', 'https']:
        try:
            response = requests.request('GET', url, headers=REQUEST_HEADERS)
            response.raise_for_status()
            # Guard against `bitcoin:`-URIs with invalid payment request URLs
            # NOTE(review): this demands an exact 'application/bitcoin-paymentrequest'
            # Content-Type, yet REQUEST_HEADERS requests the 'bitcoinsv' spelling,
            # and any charset suffix would also fail the equality test — confirm
            # against real servers.
            if "Content-Type" not in response.headers \
            or response.headers["Content-Type"] != "application/bitcoin-paymentrequest":
                data = None
                error = "payment URL not pointing to a bitcoinSV payment request handling server"
            else:
                data = response.content
                logger.debug('fetched payment request \'%s\' (%d)', url, len(response.content))
        except requests.exceptions.RequestException:
            # raise_for_status() lands here too; prefer the server's own body text.
            data = None
            if response is not None:
                error = response.content.decode()
            else:
                error = "payment URL not pointing to a valid server"
    elif u.scheme == 'file':
        try:
            with open(u.path, 'r', encoding='utf-8') as f:
                data = f.read()
        except IOError:
            data = None
            error = "payment URL not pointing to a valid file"
    else:
        error = f"unknown scheme {url}"
    if error:
        raise Bip270Exception(error)
    return PaymentRequest.from_json(data)
def make_unsigned_request(req: dict) -> PaymentRequest:
    """Build an unsigned PaymentRequest from a wallet receive-request dict.

    ``req`` keys: 'address' (object with .to_script()), 'amount' (may be
    None), 'memo', plus optional int 'time' and 'exp' (seconds).

    Fixes two defects in the original: a missing/malformed 'time' combined
    with a valid 'exp' raised ``TypeError`` (``None + int``), and a valid
    creation timestamp defaulted by the constructor could be clobbered back
    to ``None``.
    """
    address = req['address']
    creation_timestamp = req.get('time')
    expiration_seconds = req.get('exp')
    # Treat malformed (non-int) values as absent.
    if creation_timestamp and type(creation_timestamp) is not int:
        creation_timestamp = None
    if expiration_seconds and type(expiration_seconds) is not int:
        expiration_seconds = None
    amount = req['amount']
    if amount is None:
        amount = 0
    memo = req['memo']
    pr = PaymentRequest([ Output(address.to_script(), amount=amount) ])
    # Keep the constructor's "now" default when no valid time was supplied.
    if creation_timestamp is not None:
        pr.creation_timestamp = creation_timestamp
    if expiration_seconds is not None:
        pr.expiration_timestamp = pr.creation_timestamp + expiration_seconds
    pr.memo = memo
    return pr
class InvoiceStore(object):
    """Wallet-storage-backed collection of received payment requests.

    ``invoices`` maps invoice key -> PaymentRequest; ``paid`` maps a
    transaction id back to the invoice key it settled.
    """

    def __init__(self, storage):
        self.storage = storage
        self.invoices = {}
        self.paid = {}
        d = self.storage.get('invoices', {})
        self.load(d)

    def set_paid(self, pr, txid):
        """Mark ``pr`` as settled by ``txid`` (in memory only; not persisted)."""
        pr.tx = txid
        self.paid[txid] = pr.get_id()

    def load(self, d):
        """Rebuild in-memory state from a storage dict, skipping entries
        that fail to parse (deliberately best-effort)."""
        for k, v in d.items():
            try:
                # NOTE(review): PaymentRequest() expects an outputs list but
                # this passes raw bytes from a legacy 'hex' field — looks like
                # a leftover serialisation format that only "works" because
                # failures are skipped; confirm before relying on it.
                pr = PaymentRequest(bfh(v.get('hex')))
                pr.tx = v.get('txid')
                pr.requestor = v.get('requestor')
                self.invoices[k] = pr
                if pr.tx:
                    self.paid[pr.tx] = k
            except Exception:
                # Was a bare `except:`, which also swallowed KeyboardInterrupt
                # and SystemExit.
                continue

    def import_file(self, path):
        """Merge invoices from a JSON file into this store and persist."""
        try:
            with open(path, 'r') as f:
                d = json.loads(f.read())
                self.load(d)
        except json.decoder.JSONDecodeError:
            logger.exception("")
            raise FileImportFailedEncrypted()
        except Exception:
            logger.exception("")
            raise FileImportFailed()
        self.save()

    def save(self):
        """Persist requestor/txid for every invoice to wallet storage."""
        entries = {}
        for key, pr in self.invoices.items():
            entries[key] = {
                'requestor': pr.requestor,
                'txid': pr.tx
            }
        self.storage.put('invoices', entries)

    def get_status(self, key):
        """Return PR_PAID / PR_EXPIRED / PR_UNPAID, or None when unknown."""
        pr = self.get(key)
        if pr is None:
            logger.debug("[InvoiceStore] get_status() can't find pr for %s", key)
            return
        if pr.tx is not None:
            return PR_PAID
        if pr.has_expired():
            return PR_EXPIRED
        return PR_UNPAID

    def add(self, pr):
        """Insert (or overwrite) an invoice, persist, and return its key."""
        key = pr.get_id()
        self.invoices[key] = pr
        self.save()
        return key

    def remove(self, key):
        """Delete an invoice and any paid-tx mapping that points at it."""
        txid = next((tx for tx, invoice_key in self.paid.items()
                     if invoice_key == key), None)
        if txid is not None:
            self.paid.pop(txid)
        self.invoices.pop(key)
        self.save()

    def get(self, k):
        return self.invoices.get(k)

    def sorted_list(self):
        # TODO: not actually sorted — callers receive dict insertion order.
        return self.invoices.values()

    def unpaid_invoices(self):
        return [invoice for key, invoice in self.invoices.items()
                if self.get_status(key) != PR_PAID]
import json
import os
import time
from typing import Any, List, Optional, Tuple
import urllib.parse
from bitcoinx import TxOutput, Script, Address, classify_output_script
import requests
from .exceptions import FileImportFailed, FileImportFailedEncrypted, Bip270Exception
from .logs import logs
from .util import bfh
logger = logs.get_logger("paymentrequest")

# HTTP headers used when fetching a BIP270 payment request.
REQUEST_HEADERS = {
    'Accept': 'application/bitcoinsv-paymentrequest',
    'User-Agent': 'ElectrumSV'
}
# HTTP headers used when POSTing a Payment and receiving a PaymentACK.
# NOTE(review): get_payment_request() checks responses for an
# 'application/bitcoin-paymentrequest' Content-Type, which does not match
# the 'bitcoinsv' spelling requested above — confirm which servers send.
ACK_HEADERS = {
    'Content-Type': 'application/bitcoinsv-payment',
    'Accept': 'application/bitcoinsv-paymentack',
    'User-Agent': 'ElectrumSV'
}
# Used for requests: CA bundle path handed to `verify=` for TLS validation.
ca_path = requests.certs.where()
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3 # sent and propagated
class Output:
    """One requested payment output: a locking script plus an optional
    amount and description."""
    # FIXME: this should either be removed in favour of TxOutput, or be a lighter wrapper
    # around it.

    def __init__(self, script: Script, amount: Optional[int]=None,
            description: Optional[str]=None):
        # TODO: Must not have a JSON string length of 100 bytes.
        if description is not None and len(json.dumps(description)) > 100:
            raise Bip270Exception("Output description too long")
        self.script = script
        self.description = description
        self.amount = amount

    def address(self):
        """Classify the locking script via bitcoinx."""
        return classify_output_script(self.script)

    def to_tx_output(self):
        # NOTE(review): self.amount may be None here (e.g. refund outputs);
        # confirm TxOutput tolerates that.
        return TxOutput(self.amount, self.script)

    def to_ui_dict(self) -> dict:
        """Dict for UI display; 'address' is the classified object, not text."""
        return {
            'amount': self.amount,
            'address': self.address(),
        }

    def get_address_string(self):
        return self.address().to_string()

    @classmethod
    def from_dict(cls, data: dict) -> 'Output':
        """Deserialize from a parsed JSON dict, validating field types."""
        if 'script' not in data:
            raise Bip270Exception("Missing required 'script' field")
        script_hex = data['script']
        amount = data.get('amount')
        if amount is not None and type(amount) is not int:
            raise Bip270Exception("Invalid 'amount' field")
        description = data.get('description')
        if description is not None and type(description) is not str:
            raise Bip270Exception("Invalid 'description' field")
        return cls(Script.from_hex(script_hex), amount, description)

    def to_dict(self) -> dict:
        """Serialize for JSON; note a zero amount is (deliberately, per the
        original truthiness test) omitted."""
        payload = {'script': self.script.to_hex()}
        if self.amount and type(self.amount) is int:
            payload['amount'] = self.amount
        if self.description:
            payload['description'] = self.description
        return payload

    @classmethod
    def from_json(cls, s: str) -> 'Output':
        return cls.from_dict(json.loads(s))

    def to_json(self) -> str:
        return json.dumps(self.to_dict())
class PaymentRequest:
    """A BIP270 payment request: the outputs to pay plus request metadata.

    Instances come either from server JSON (from_json) or from local wallet
    receive-request data (from_wallet_entry / make_unsigned_request).
    """

    # Reject pathological JSON before it reaches json.loads().
    MAXIMUM_JSON_LENGTH = 10 * 1000 * 1000

    def __init__(self, outputs, creation_timestamp=None, expiration_timestamp=None, memo=None,
            payment_url=None, merchant_data=None):
        # This is only used if there is a requestor identity (old openalias, needs rewrite).
        self.id = os.urandom(16).hex()
        # This is related to identity.
        self.requestor = None # known after verify
        self.tx = None
        self.outputs = outputs
        # Default the creation time to "now" when the caller supplies none.
        if creation_timestamp is not None:
            creation_timestamp = int(creation_timestamp)
        else:
            creation_timestamp = int(time.time())
        self.creation_timestamp = creation_timestamp
        if expiration_timestamp is not None:
            expiration_timestamp = int(expiration_timestamp)
        self.expiration_timestamp = expiration_timestamp
        self.memo = memo
        self.payment_url = payment_url
        self.merchant_data = merchant_data

    def __str__(self) -> str:
        return self.to_json()

    @classmethod
    def from_wallet_entry(klass, data: dict) -> 'PaymentRequest':
        """Build a request from a wallet receive-request dict with keys
        'address', 'amount', 'memo' and optional 'time' / 'exp'."""
        address = data['address']
        amount = data['amount']
        memo = data['memo']
        creation_timestamp = data.get('time')
        expiration_timestamp = None
        expiration_seconds = data.get('exp')
        if creation_timestamp is not None and expiration_seconds is not None:
            expiration_timestamp = creation_timestamp + expiration_seconds
        outputs = [ Output(address.to_script(), amount) ]
        return klass(outputs, creation_timestamp, expiration_timestamp, memo)

    @classmethod
    def from_json(klass, s: str) -> 'PaymentRequest':
        """Parse and strictly validate a BIP270 JSON payment request.

        Every field is type-checked with exact ``type() is`` tests; any
        violation raises Bip270Exception.
        """
        if len(s) > klass.MAXIMUM_JSON_LENGTH:
            raise Bip270Exception(f"Invalid payment request, too large")
        d = json.loads(s)
        network = d.get('network')
        if network != 'bitcoin':
            raise Bip270Exception(f"Invalid json network: {network}")
        if 'outputs' not in d:
            raise Bip270Exception("Missing required json 'outputs' field")
        if type(d['outputs']) is not list:
            raise Bip270Exception("Invalid json 'outputs' field")
        outputs = []
        for ui_dict in d['outputs']:
            outputs.append(Output.from_dict(ui_dict))
        # The constructor defaults creation_timestamp; it is overwritten
        # below because BIP270 makes creationTimestamp mandatory.
        pr = klass(outputs)
        if 'creationTimestamp' not in d:
            raise Bip270Exception("Missing required json 'creationTimestamp' field")
        creation_timestamp = d['creationTimestamp']
        if type(creation_timestamp) is not int:
            raise Bip270Exception("Invalid json 'creationTimestamp' field")
        pr.creation_timestamp = creation_timestamp
        expiration_timestamp = d.get('expirationTimestamp')
        if expiration_timestamp is not None and type(expiration_timestamp) is not int:
            raise Bip270Exception("Invalid json 'expirationTimestamp' field")
        pr.expiration_timestamp = expiration_timestamp
        memo = d.get('memo')
        if memo is not None and type(memo) is not str:
            raise Bip270Exception("Invalid json 'memo' field")
        pr.memo = memo
        payment_url = d.get('paymentUrl')
        if payment_url is not None and type(payment_url) is not str:
            raise Bip270Exception("Invalid json 'paymentUrl' field")
        pr.payment_url = payment_url
        merchant_data = d.get('merchantData')
        if merchant_data is not None and type(merchant_data) is not str:
            raise Bip270Exception("Invalid json 'merchantData' field")
        pr.merchant_data = merchant_data
        return pr

    def to_json(self) -> str:
        """Serialize to BIP270 JSON; optional fields only when present."""
        d = {}
        d['network'] = 'bitcoin'
        d['outputs'] = [ output.to_dict() for output in self.outputs ]
        d['creationTimestamp'] = self.creation_timestamp
        if self.expiration_timestamp is not None:
            d['expirationTimestamp'] = self.expiration_timestamp
        if self.memo is not None:
            d['memo'] = self.memo
        if self.payment_url is not None:
            d['paymentUrl'] = self.payment_url
        if self.merchant_data is not None:
            d['merchantData'] = self.merchant_data
        return json.dumps(d)

    def is_pr(self):
        # A payment request proper must ask for a non-zero total amount.
        return self.get_amount() != 0

    def verify(self, contacts) -> bool:
        """Stub: BIP270 JSON requests are unsigned, so no requestor identity
        is ever established; the address is displayed as the requestor."""
        # the address will be dispayed as requestor
        self.requestor = None
        return True

    def has_expired(self) -> bool:
        # NOTE(review): returns None (not False) when expiration_timestamp
        # is unset, despite the -> bool annotation.
        return self.expiration_timestamp and self.expiration_timestamp < int(time.time())

    def get_expiration_date(self) -> int:
        """Unix timestamp after which the request is void (None if unset)."""
        return self.expiration_timestamp

    def get_amount(self) -> int:
        """Total requested amount summed over all outputs."""
        return sum(x.amount for x in self.outputs)

    def get_address(self) -> str:
        """Address string of the first output; used as a display fallback."""
        return self.outputs[0].get_address_string()

    def get_requestor(self) -> str:
        return self.requestor if self.requestor else self.get_address()

    def get_verify_status(self) -> str:
        # NOTE(review): self.error is never assigned in this module — this
        # raises AttributeError whenever a requestor exists; confirm whether
        # external code sets it.
        return self.error if self.requestor else "No Signature"

    def get_memo(self) -> str:
        return self.memo

    def get_id(self):
        # The random id is only meaningful with a verified requestor;
        # otherwise the receiving address doubles as the invoice key.
        return self.id if self.requestor else self.get_address()

    def get_outputs(self) -> List[TxOutput]:
        """Convert the BIP270 outputs into bitcoinx TxOutput instances."""
        return [output.to_tx_output() for output in self.outputs]

    def send_payment(self, transaction_hex: str, refund_address: Address) -> Tuple[bool, str]:
        """POST a BIP270 Payment to the payment URL; returns (ok, message).

        The transaction may already be broadcast by the server even when the
        returned ACK cannot be parsed.
        """
        if not self.payment_url:
            return False, "no url"
        payment_memo = "Paid using ElectrumSV"
        payment = Payment(self.merchant_data, transaction_hex, [], payment_memo)
        payment.refund_outputs.append(Output(refund_address.to_script()))
        parsed_url = urllib.parse.urlparse(self.payment_url)
        response = self._make_request(parsed_url.geturl(), payment.to_json())
        if response is None:
            # _make_request returns None only on SSL failure.
            return False, "Payment Message/PaymentACK Failed"
        if response.get_status_code() != 200:
            # Propagate 'Bad request' (HTTP 400) messages to the user since they
            # contain valuable information.
            if response.get_status_code() == 400:
                return False, f"{response.get_reason()}: {response.get_content().decode('UTF-8')}"
            # Some other errors might display an entire HTML document.
            # Hide those and just display the name of the error code.
            return False, response.get_reason()
        try:
            payment_ack = PaymentACK.from_json(response.get_content())
        except Exception:
            return False, ("PaymentACK could not be processed. Payment was sent; "
                "please manually verify that payment was received.")
        logger.debug("PaymentACK message received: %s", payment_ack.memo)
        return True, payment_ack.memo

    # The following function and classes is abstracted to allow unit testing.
    def _make_request(self, url, message):
        """POST with BIP270 headers; None on SSL failure, wrapper otherwise."""
        try:
            r = requests.post(url, data=message, headers=ACK_HEADERS, verify=ca_path)
        except requests.exceptions.SSLError:
            logger.exception("Payment Message/PaymentACK")
            return None
        return self._RequestsResponseWrapper(r)

    class _RequestsResponseWrapper:
        """Thin adapter over requests.Response so tests can fake responses."""

        def __init__(self, response):
            self._response = response

        def get_status_code(self):
            return self._response.status_code

        def get_reason(self):
            return self._response.reason

        def get_content(self):
            return self._response.content
class Payment:
MAXIMUM_JSON_LENGTH = 10 * 1000 * 1000
def __init__(self, merchant_data: Any, transaction_hex: str, refund_outputs: List[Output],
memo: Optional[str]=None):
self.merchant_data = merchant_data
self.transaction_hex = transaction_hex
self.refund_outputs = refund_outputs
self.memo = memo
@classmethod
def from_dict(klass, data: dict) -> 'Payment':
if 'merchantData' not in data:
raise Bip270Exception("Missing required json 'merchantData' field")
merchant_data = data['merchantData']
if 'transaction' not in data:
raise Bip270Exception("Missing required json 'transaction' field")
transaction_hex = data['transaction']
if type(transaction_hex) is not str:
raise Bip270Exception("Invalid json 'transaction' field")
if 'refundTo' not in data:
raise Bip270Exception("Missing required json 'refundTo' field")
refundTo = data['refundTo']
if type(refundTo) is not list:
raise Bip270Exception("Invalid json 'refundTo' field")
refund_outputs = [ Output.from_dict(data) for data in refundTo ]
memo = data.get('memo')
if memo is not None and type(memo) is not str:
raise Bip270Exception("Invalid json 'memo' field")
return klass(merchant_data, transaction_hex, refund_outputs, memo)
def to_dict(self) -> dict:
data = {
'merchantData': self.merchant_data,
'transaction': self.transaction_hex,
'refundTo': [ output.to_dict() for output in self.refund_outputs ],
}
if self.memo:
data['memo'] = self.memo
return data
@classmethod
def from_json(klass, s: str) -> 'Payment':
if len(s) > klass.MAXIMUM_JSON_LENGTH:
raise Bip270Exception(f"Invalid payment, too large")
data = json.loads(s)
return klass.from_dict(data)
def to_json(self) -> str:
return json.dumps(self.to_dict())
class PaymentACK:
MAXIMUM_JSON_LENGTH = 11 * 1000 * 1000
def __init__(self, payment: Payment, memo: Optional[str]=None):
self.payment = payment
self.memo = memo
def to_dict(self):
data = {
'payment': self.payment.to_json(),
}
if self.memo:
data['memo'] = self.memo
return data
@classmethod
def from_dict(klass, data: dict) -> 'PaymentACK':
if 'payment' not in data:
raise Bip270Exception("Missing required json 'payment' field")
memo = data.get('memo')
if memo is not None and type(memo) is not str:
raise Bip270Exception("Invalid json 'memo' field")
payment = Payment.from_json(data['payment'])
return klass(payment, memo)
def to_json(self) -> str:
data = self.to_dict()
return json.dumps(data)
@classmethod
def from_json(klass, s: str) -> 'PaymentACK':
if len(s) > klass.MAXIMUM_JSON_LENGTH:
raise Bip270Exception(f"Invalid payment ACK, too large")
data = json.loads(s)
return klass.from_dict(data)
def get_payment_request(url: str) -> PaymentRequest:
error = None
response = None
u = urllib.parse.urlparse(url)
if u.scheme in ['http', 'https']:
try:
response = requests.request('GET', url, headers=REQUEST_HEADERS)
response.raise_for_status()
# Guard against `bitcoin:`-URIs with invalid payment request URLs
if "Content-Type" not in response.headers \
or response.headers["Content-Type"] != "application/bitcoin-paymentrequest":
data = None
error = "payment URL not pointing to a bitcoinSV payment request handling server"
else:
data = response.content
logger.debug('fetched payment request \'%s\' (%d)', url, len(response.content))
except requests.exceptions.RequestException:
data = None
if response is not None:
error = response.content.decode()
else:
error = "payment URL not pointing to a valid server"
elif u.scheme == 'file':
try:
with open(u.path, 'r', encoding='utf-8') as f:
data = f.read()
except IOError:
data = None
error = "payment URL not pointing to a valid file"
else:
error = f"unknown scheme {url}"
if error:
raise Bip270Exception(error)
return PaymentRequest.from_json(data)
def make_unsigned_request(req: dict) -> PaymentRequest:
address = req['address']
creation_timestamp = req.get('time')
expiration_seconds = req.get('exp')
if creation_timestamp and type(creation_timestamp) is not int:
creation_timestamp = None
if expiration_seconds and type(expiration_seconds) is not int:
expiration_seconds = None
amount = req['amount']
if amount is None:
amount = 0
memo = req['memo']
pr = PaymentRequest([ Output(address.to_script(), amount=amount) ])
pr.creation_timestamp = creation_timestamp
if expiration_seconds is not None:
pr.expiration_timestamp = creation_timestamp + expiration_seconds
pr.memo = memo
return pr
class InvoiceStore(object):
def __init__(self, storage):
self.storage = storage
self.invoices = {}
self.paid = {}
d = self.storage.get('invoices', {})
self.load(d)
def set_paid(self, pr, txid):
pr.tx = txid
self.paid[txid] = pr.get_id()
def load(self, d):
for k, v in d.items():
try:
pr = PaymentRequest(bfh(v.get('hex')))
pr.tx = v.get('txid')
pr.requestor = v.get('requestor')
self.invoices[k] = pr
if pr.tx:
self.paid[pr.tx] = k
except:
continue
def import_file(self, path):
try:
with open(path, 'r') as f:
d = json.loads(f.read())
self.load(d)
except json.decoder.JSONDecodeError:
logger.exception("")
raise FileImportFailedEncrypted()
except Exception:
logger.exception("")
raise FileImportFailed()
self.save()
def save(self):
l = {}
for k, pr in self.invoices.items():
l[k] = {
'requestor': pr.requestor,
'txid': pr.tx
}
self.storage.put('invoices', l)
def get_status(self, key):
pr = self.get(key)
if pr is None:
logger.debug("[InvoiceStore] get_status() can't find pr for %s", key)
return
if pr.tx is not None:
return PR_PAID
if pr.has_expired():
return PR_EXPIRED
return PR_UNPAID
def add(self, pr):
key = pr.get_id()
self.invoices[key] = pr
self.save()
return key
def remove(self, key):
paid_list = self.paid.items()
for p in paid_list:
if p[1] == key:
self.paid.pop(p[0])
break
self.invoices.pop(key)
self.save()
def get(self, k):
return self.invoices.get(k)
def sorted_list(self):
# sort
return self.invoices.values()
def unpaid_invoices(self):
return [invoice for key, invoice in self.invoices.items()
if self.get_status(key) != PR_PAID] | 0.440469 | 0.173919 |
"""Remote TensorFlow (RTF) gRPC service provider."""
import io
import queue
from collections import deque
import threading
import sys
from typing import Iterator
import re
import contextlib
import tensorflow as tf
from .proto import rtf_pb2, rtf_pb2_grpc
def rreplace(s, old, new, occurrence):
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with
    ``new``; a negative count replaces every occurrence."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
class Builder:
    """Accumulates Python statements and compiles them into a function body
    that is executed by calling the instance.

    SECURITY: statements arrive from remote clients and are executed
    verbatim — this service must only ever be exposed to trusted callers.
    """

    HEADER = (
        "import tensorflow as tf\n"
        "import sys\n\n"
        # "@tf.function\n"
        "def f():\n"
    )
    FOOTER = "\nf()\n"

    def __init__(self):
        self._statements = []
        # Captures a statement's leading whitespace so injected flush()
        # calls keep the same indentation.
        self._indent_re = re.compile(r"^(\s*)")

    def _flush_stdout(self, stmt):
        """Rewrite print/tf.print statements so output is flushed
        immediately (needed to stream stdout back to the client).

        TODO: convert stmt to AST or other structure; understand whether
        print is actually a call before appending the flush.  The current
        substring approach is unsafe.
        """
        if "tf.print(" in stmt and "output_stream" not in stmt:
            stmt = rreplace(stmt, ")", ", output_stream=sys.stdout)", -1)
        if "print(" in stmt:
            indent = self._indent_re.search(stmt)[0]
            stmt += f"\n{indent}sys.stdout.flush()"
        return stmt

    def build(self, stmt):
        """Append one (rewritten) statement to the pending function body."""
        # TODO: check stmt correctness using its AST or other structure.
        self._statements.append(self._flush_stdout(stmt))

    def __call__(self):
        """Compile and execute the accumulated statements; returns None."""
        body = "".join(f" {stmt}\n" for stmt in self._statements)
        source = Builder.HEADER + body + Builder.FOOTER
        code = compile(source, "useless", "exec")
        # `exec` (not `eval`) makes explicit that an exec-mode code object
        # yields no value — the old `return eval(code)` always returned None.
        exec(code)  # deliberate: this *is* the remote-execution service
        return None
class DoubleIO(io.StringIO):
    """A StringIO variant where flush() seals everything written since the
    previous flush into one readable chunk.

    A writer (e.g. a contextlib.redirect_stdout target) calls
    write()/flush(); a reader thread consumes completed chunks via
    readline().  The deque provides the producer/consumer hand-off.
    """

    def __init__(self, initial_value="", newline="\n"):
        super().__init__(initial_value, newline)
        self._lines = deque()
        self._buffer = []

    def flush(self):
        """Seal the current buffer contents into one readable chunk."""
        self._lines.append("".join(self._buffer))
        self._buffer.clear()

    def write(self, s):
        self._buffer.append(s)
        # Return the character count as the io.TextIOBase contract requires
        # (the original returned None, breaking callers that rely on it).
        return len(s)

    def close(self):
        # Kept explicit: readers poll `self.closed` to know when to stop.
        super().close()

    def readline(self):
        """Pop the oldest flushed chunk.  Raises IndexError when nothing is
        pending — callers deliberately catch that and retry."""
        return self._lines.popleft()
class RTFServicer(rtf_pb2_grpc.RTFServicer):
    """Remote TensorFlow (RTF) gRPC service provider."""

    def DefineAndCall(self, request_iterator, context) -> Iterator[rtf_pb2.RTFResponse]:
        """Collect streamed statements, execute them, and stream back each
        flushed stdout chunk followed by a final status response."""
        builder = Builder()
        for statement in request_iterator:
            builder.build(statement.stmt)
        fp = DoubleIO()
        stop = False
        response_q = queue.Queue()

        def executor():
            # BUG FIX: without `nonlocal`, `stop = True` below bound a local
            # variable and the streaming loop never terminated.
            nonlocal stop
            with contextlib.redirect_stdout(fp):
                output_value = builder()
            fp.close()
            response = rtf_pb2.RTFResponse()
            if output_value:
                response.body = bytes(output_value)
            response.status = True
            # Queue the final response *before* setting stop, so the
            # blocking get() in the loop below is guaranteed to receive it.
            response_q.put(response)
            stop = True

        def stdout_sender():
            # Forward each flushed stdout chunk as its own response.
            while not fp.closed:
                try:
                    line = fp.readline()
                    response = rtf_pb2.RTFResponse()
                    response.stdout = line
                    response.status = True
                    response_q.put(response)
                except IndexError:
                    # Nothing flushed yet; spin until more output or close.
                    pass

        threads = [
            threading.Thread(target=stdout_sender),
            threading.Thread(target=executor),
        ]
        for thread in threads:
            thread.start()
        while True:
            if stop:
                # Drain anything still queued, then end the response stream.
                while not response_q.empty():
                    yield response_q.get()
                break
            yield response_q.get()
"""Remote TensorFlow (RTF) gRPC service provider."""
import io
import queue
from collections import deque
import threading
import sys
from typing import Iterator
import re
import contextlib
import tensorflow as tf
from .proto import rtf_pb2, rtf_pb2_grpc
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
class Builder:
HEADER = (
"import tensorflow as tf\n"
"import sys\n\n"
# "@tf.function\n"
"def f():\n"
)
FOOTER = "\nf()\n"
def __init__(self):
self._statements = []
self._indent_re = re.compile(r"^(\s*)")
def _flush_stdout(self, stmt):
# TODO: convert stmt to AST or other structure.
# Understand if print is a function call or not
# and if it is a call, then add the flush()
# The current solution is unsafe.
if "tf.print(" in stmt and not "output_stream" in stmt:
stmt = rreplace(stmt, ")", ", output_stream=sys.stdout)", -1)
if "print(" in stmt:
indent = self._indent_re.search(stmt)[0]
stmt += f"\n{indent}sys.stdout.flush()"
return stmt
def build(self, stmt):
# TODO: check stmt correctness using its AST or other structure.
stmt = self._flush_stdout(stmt)
self._statements.append(stmt)
def __call__(self):
source = Builder.HEADER
for stmt in self._statements:
source += f" {stmt}\n"
source += Builder.FOOTER
f = compile(source, "useless", "exec")
return eval(f)
class DoubleIO(io.StringIO):
def __init__(self, initial_value="", newline="\n"):
super().__init__(initial_value, newline)
self._lines = deque()
self._buffer = []
def flush(self):
self._lines.append("".join(self._buffer))
self._buffer.clear()
def write(self, s):
self._buffer.append(s)
def close(self):
super().close()
def readline(self):
return self._lines.popleft()
class RTFServicer(rtf_pb2_grpc.RTFServicer):
    """Remote TensorFlow (RTF) gRPC service provider."""

    def DefineAndCall(self, request_iterator, context) -> Iterator[rtf_pb2.RTFResponse]:
        """Collect streamed statements, execute them, and stream back each
        flushed stdout chunk followed by a final status response."""
        builder = Builder()
        for statement in request_iterator:
            builder.build(statement.stmt)
        fp = DoubleIO()
        stop = False
        response_q = queue.Queue()

        def executor():
            # BUG FIX: without `nonlocal`, `stop = True` below bound a local
            # variable and the streaming loop never terminated.
            nonlocal stop
            with contextlib.redirect_stdout(fp):
                output_value = builder()
            fp.close()
            response = rtf_pb2.RTFResponse()
            if output_value:
                response.body = bytes(output_value)
            response.status = True
            # Queue the final response *before* setting stop, so the
            # blocking get() in the loop below is guaranteed to receive it.
            response_q.put(response)
            stop = True

        def stdout_sender():
            # Forward each flushed stdout chunk as its own response.
            while not fp.closed:
                try:
                    line = fp.readline()
                    response = rtf_pb2.RTFResponse()
                    response.stdout = line
                    response.status = True
                    response_q.put(response)
                except IndexError:
                    # Nothing flushed yet; spin until more output or close.
                    pass

        threads = [
            threading.Thread(target=stdout_sender),
            threading.Thread(target=executor),
        ]
        for thread in threads:
            thread.start()
        while True:
            if stop:
                # Drain anything still queued, then end the response stream.
                while not response_q.empty():
                    yield response_q.get()
                break
            yield response_q.get()
import itertools
from datetime import datetime
from typing import List
import pytest
from pathfinding_service.model import ChannelView
from pathfinding_service.model.token_network import TokenNetwork
from raiden.constants import EMPTY_SIGNATURE
from raiden.messages.path_finding_service import PFSFeeUpdate
from raiden.network.transport.matrix.utils import AddressReachability
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.mediated_transfer.mediation_fee import FeeScheduleState as RaidenFeeSchedule
from raiden.utils.mediation_fees import ppm_fee_per_channel
from raiden.utils.typing import (
Address,
BlockTimeout,
ChainID,
ChannelID,
FeeAmount as FA,
PaymentAmount as PA,
ProportionalFeeAmount,
TokenAmount as TA,
TokenNetworkAddress,
)
from tests.pathfinding.utils import SimpleReachabilityContainer
class PrettyBytes(bytes):
    """bytes subclass whose repr is a compact hex form (``a<hex>``), so an
    address shows up as e.g. ``a1`` in test output instead of byte escapes."""

    def __repr__(self):
        value = int.from_bytes(self, byteorder="big")
        return f"a{value:x}"
def a(int_addr) -> Address:  # pylint: disable=invalid-name
    """Create an address from an int with a short representation.

    This is helpful in tests because
    * Address creation is concise
    * You can easily match `a(1)` in your test with `a1` in your test output
    """
    raw = bytes(19) + bytes([int_addr])
    return Address(PrettyBytes(raw))
class TokenNetworkForTests(TokenNetwork):
    """TokenNetwork wired up for fee tests: opens the given channels, seeds
    per-direction capacities, and marks every node as reachable."""

    def __init__(self, channels: List[dict], default_capacity: TA = TA(1000)):
        super().__init__(token_network_address=TokenNetworkAddress(a(255)))
        # open channels
        channel_ids = itertools.count(100)
        for chan in channels:
            self.handle_channel_opened_event(
                channel_identifier=ChannelID(next(channel_ids)),
                participant1=a(chan["participant1"]),
                participant2=a(chan["participant2"]),
                settle_timeout=BlockTimeout(100),
            )
            # Seed both directed channel views, falling back to
            # default_capacity when no explicit capacity was given.
            cv1: ChannelView = self.G[a(chan["participant1"])][a(chan["participant2"])]["view"]
            cv1.capacity = chan.get("capacity1", default_capacity)
            cv2: ChannelView = self.G[a(chan["participant2"])][a(chan["participant1"])]["view"]
            cv2.capacity = chan.get("capacity2", default_capacity)
        # create reachability mapping for testing
        self.reachability_state = SimpleReachabilityContainer(
            {node: AddressReachability.REACHABLE for node in self.G.nodes}
        )

    def set_fee(self, node1: int, node2: int, **fee_params):
        """Install ``RaidenFeeSchedule(**fee_params)`` as node1's schedule on
        the node1->node2 channel; calling with no params resets to defaults."""
        channel_id = self.G[a(node1)][a(node2)]["view"].channel_id
        self.handle_channel_fee_update(
            PFSFeeUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    token_network_address=self.address,
                    channel_identifier=channel_id,
                ),
                updating_participant=a(node1),
                fee_schedule=RaidenFeeSchedule(**fee_params),
                signature=EMPTY_SIGNATURE,
                timestamp=datetime.utcnow(),
            )
        )

    def estimate_fee(self, initator: int, target: int, value=PA(100), max_paths=1):
        """Estimated fee of the best path from ``initator`` [sic] to
        ``target``, or None when no route exists."""
        paths = self.get_paths(
            source=a(initator),
            target=a(target),
            value=value,
            max_paths=max_paths,
            reachability_state=self.reachability_state,
        )
        if not paths:
            return None
        return paths[0].estimated_fee
def test_fees_in_balanced_routing():  # pylint: disable=too-many-statements
    """ Tests fee estimation in a network where both participants have funds in a channel. """
    # Line topology 1 -- 2 -- 3; node 2 is the only mediator.
    tn = TokenNetworkForTests(
        channels=[dict(participant1=1, participant2=2), dict(participant1=2, participant2=3)]
    )

    # Make sure that routing works and the default fees are zero
    assert tn.estimate_fee(1, 3) == 0

    # Fees for the initiator are ignored
    tn.set_fee(1, 2, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 0

    # Node 2 demands fees for incoming transfers
    tn.set_fee(2, 1, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 1

    # Node 2 demands fees for outgoing transfers
    tn.set_fee(2, 3, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 2

    # Same fee in the opposite direction
    assert tn.estimate_fee(3, 1) == 2

    # Reset fees to zero
    tn.set_fee(1, 2)
    tn.set_fee(2, 1)
    tn.set_fee(2, 3)

    # Let's try imbalance fees
    # When the fees influence the amount so strongly that fee(amount) != fee(amount + fee),
    # the difference is given as an additional summand below.

    # Incoming channel
    # Without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) == -10
    # With fee capping (negative totals are clamped to zero)
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) == 0

    # The opposite fee schedule should give opposite results
    # Without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10 + 1
    assert tn.estimate_fee(3, 1) == 10
    # With fee capping
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 10

    # Outgoing channel
    # Without fee capping
    tn.set_fee(2, 1, cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10
    assert tn.estimate_fee(3, 1) == 10 + 1
    # With fee capping
    tn.set_fee(2, 1, cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 10 + 1

    # The opposite fee schedule should give opposite results
    # Without fee capping
    tn.set_fee(2, 1, cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 10
    assert tn.estimate_fee(3, 1) == -10 + 1
    # With fee capping
    tn.set_fee(2, 1, cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 10
    assert tn.estimate_fee(3, 1) == 0

    # Combined fees cancel out
    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0
    # And with fee capping, as the amounts even out
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0

    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0
    # And with fee capping, as the amounts even out
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0

    # When the range covered by the imbalance_penalty does not include the
    # necessary balance values, the route should be considered invalid.
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(800), FA(200))])
    assert tn.estimate_fee(1, 3) is None
def test_fees_in_unbalanced_routing():  # pylint: disable=too-many-statements
    """ Tests fee estimation in a network where only one participant has funds in a channel. """
    # Line topology 1 -- 2 -- 3 where all capacity sits on the 1->3 direction.
    tn = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2, capacity1=1000, capacity2=0),
            dict(participant1=2, participant2=3, capacity1=1000, capacity2=0),
        ]
    )

    # Make sure that routing works and the default fees are zero
    assert tn.estimate_fee(1, 3) == 0

    # Fees for the initiator are ignored
    tn.set_fee(1, 2, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 0

    # Node 2 demands fees for incoming transfers
    tn.set_fee(2, 1, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 1

    # Node 2 demands fees for outgoing transfers
    tn.set_fee(2, 3, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 2

    # No capacity in the opposite direction
    assert tn.estimate_fee(3, 1) is None

    # Reset fees to zero
    tn.set_fee(1, 2)
    tn.set_fee(2, 1)
    tn.set_fee(2, 3)

    # Let's try imbalance fees!
    # When approximation iterations matter, those are given as sums of the steps.

    # Incoming channel
    # Without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # The opposite fee schedule should give opposite results, without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(100)), (TA(1000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10 + 1
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # Or zero with fee capping
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(100)), (TA(1000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # Outgoing channel
    # Without fee capping
    tn.set_fee(2, 1, cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping
    tn.set_fee(2, 1, cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # The opposite fee schedule should give opposite results
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(100)), (TA(1000), FA(0))])
    assert tn.estimate_fee(1, 3) == 10
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # Combined fees cancel out
    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping fees cancel out as well
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping fees cancel out as well
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # When the range covered by the imbalance_penalty does not include the
    # necessary balance values, the route should be considered invalid.
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(80), FA(200))])
    assert tn.estimate_fee(1, 3) is None
def test_regression_issue_554():
    """ Regression test for https://github.com/raiden-network/raiden-services/issues/554 """
    # Small network with one-sided capacity and a decreasing imbalance penalty.
    tn = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2, capacity1=100, capacity2=0),
            dict(participant1=2, participant2=3, capacity1=100, capacity2=0),
        ]
    )
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(100), FA(0))])
    assert tn.estimate_fee(1, 3) is not None

    # Same scenario at a much larger scale with a V-shaped imbalance penalty.
    capacity = TA(100_000)
    tn2 = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2, capacity1=capacity, capacity2=0),
            dict(participant1=2, participant2=3, capacity1=capacity, capacity2=0),
        ]
    )
    tn2.set_fee(
        2, 1, imbalance_penalty=[(TA(0), FA(1000)), (capacity // 2, 0), (capacity, FA(1000))]
    )
    assert tn2.estimate_fee(1, 3, value=PA(10_000)) is not None
@pytest.mark.parametrize(
    "flat_fee_cli, prop_fee_cli, estimated_fee",
    [
        # flat fees
        (100, 0, 100 + 100),
        (10, 0, 10 + 10),
        # proportional fees
        (0, 1_000_000, 1000 + 2000),  # 100% per hop mediation fee
        (0, 100_000, 100 + 110),  # 10% per hop mediation fee
        (0, 50_000, 50 + 52),  # 5% per hop mediation fee
        (0, 10_000, 10 + 10),  # 1% per hop mediation fee
    ],
)
def test_compounding_fees(flat_fee_cli, prop_fee_cli, estimated_fee):
    """The transferred amount needs to include the fees for all mediators.

    Earlier mediators will apply the proportional fee not only on the payment
    amount, but also on the fees for later mediators.
    """
    # CLI fee values are per mediator node; each is split across its two channels.
    flat_fee = flat_fee_cli // 2
    prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee_cli))
    # Four-node line 1 -- 2 -- 3 -- 4 with two mediators (2 and 3).
    tn = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2),
            dict(participant1=2, participant2=3),
            dict(participant1=3, participant2=4),
        ],
        default_capacity=TA(10_000),
    )
    tn.set_fee(2, 1, flat=flat_fee, proportional=prop_fee)
    tn.set_fee(2, 3, flat=flat_fee, proportional=prop_fee)
    tn.set_fee(3, 2, flat=flat_fee, proportional=prop_fee)
    tn.set_fee(3, 4, flat=flat_fee, proportional=prop_fee)
    assert tn.estimate_fee(1, 4, value=PA(1_000)) == estimated_fee
@pytest.mark.parametrize(
    "flat_fee, prop_fee_cli, max_lin_imbalance_fee, target_amount, expected_fee",
    [
        # proportional fees
        (0, 1_000_000, 0, 1000, 1000),  # 100% per hop mediation fee
        (0, 100_000, 0, 1000, 100),  # 10% per hop mediation fee
        (0, 50_000, 0, 1000, 50),  # 5% per hop mediation fee
        (0, 10_000, 0, 1000, 10),  # 1% per hop mediation fee
        (0, 10_000, 0, 100, 1),  # 1% per hop mediation fee
        (0, 5_000, 0, 100, 1),  # 0,5% per hop mediation
        (0, 4_999, 0, 100, 0),
        (0, 5_000, 0, 99, 0),
        # pure flat fee
        (50, 0, 0, 1000, 100),
        # mixed tests
        (10, 100_000, 0, 1000, 121),
        (100, 500_000, 0, 1000, 750),
        (100, 500_000, 0, 967, 733),
        # imbalance fee
        (0, 0, 100, 1_000, 10),
        (0, 0, 1_000, 1_000, 111),
    ],
)
def test_fee_estimate(flat_fee, prop_fee_cli, max_lin_imbalance_fee, target_amount, expected_fee):
    """ Tests the backwards fee calculation. """
    capacity = TA(10_000)
    prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee_cli))
    imbalance_fee = None
    if max_lin_imbalance_fee > 0:
        # This created a simple asymmetric imbalance fee
        imbalance_fee = [(0, 0), (capacity, 0), (2 * capacity, max_lin_imbalance_fee)]
    tn = TokenNetworkForTests(
        channels=[dict(participant1=1, participant2=2), dict(participant1=2, participant2=3)],
        default_capacity=capacity,
    )
    tn.set_fee(2, 1, flat=flat_fee, proportional=prop_fee, imbalance_penalty=imbalance_fee)
    tn.set_fee(2, 3, flat=flat_fee, proportional=prop_fee, imbalance_penalty=imbalance_fee)
    # NOTE(review): the trailing "| ..." text on the next line is dataset
    # row-delimiter residue fused onto the source; preserved verbatim here,
    # but it is not valid Python and should be stripped at the data layer.
    assert tn.estimate_fee(1, 3, value=PA(target_amount)) == expected_fee | tests/pathfinding/test_fee_schedule.py | import itertools
from datetime import datetime
from typing import List
import pytest
from pathfinding_service.model import ChannelView
from pathfinding_service.model.token_network import TokenNetwork
from raiden.constants import EMPTY_SIGNATURE
from raiden.messages.path_finding_service import PFSFeeUpdate
from raiden.network.transport.matrix.utils import AddressReachability
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.mediated_transfer.mediation_fee import FeeScheduleState as RaidenFeeSchedule
from raiden.utils.mediation_fees import ppm_fee_per_channel
from raiden.utils.typing import (
Address,
BlockTimeout,
ChainID,
ChannelID,
FeeAmount as FA,
PaymentAmount as PA,
ProportionalFeeAmount,
TokenAmount as TA,
TokenNetworkAddress,
)
from tests.pathfinding.utils import SimpleReachabilityContainer
class PrettyBytes(bytes):
    """bytes subclass whose repr is a compact hex form (e.g. ``a1``)."""

    def __repr__(self) -> str:
        # Render as "a" followed by the lowercase hex of the big-endian value,
        # so test output like `a1` matches the `a(1)` helper used to build it.
        value = int.from_bytes(self, byteorder="big")
        return f"a{value:x}"
def a(int_addr) -> Address:  # pylint: disable=invalid-name
    """Create an address from an int with a short representation.

    This is helpful in tests because

    * Address creation is concise
    * You can easily match `a(1)` in your test with `a1` in your test output
    """
    # Pad to the 20-byte address length; only the final byte carries the value,
    # so int_addr must fit in a single byte (0-255).
    return Address(PrettyBytes([0] * 19 + [int_addr]))
class TokenNetworkForTests(TokenNetwork):
    """TokenNetwork test double that is populated from a compact channel spec.

    Each entry of ``channels`` is a dict with integer ``participant1``/
    ``participant2`` keys (converted to addresses via ``a()``) and optional
    ``capacity1``/``capacity2`` overrides for the per-direction capacity.
    """

    def __init__(self, channels: List[dict], default_capacity: TA = TA(1000)):
        # Fixed, arbitrary token network address shared by all test instances.
        super().__init__(token_network_address=TokenNetworkAddress(a(255)))

        # open channels
        channel_ids = itertools.count(100)
        for chan in channels:
            self.handle_channel_opened_event(
                channel_identifier=ChannelID(next(channel_ids)),
                participant1=a(chan["participant1"]),
                participant2=a(chan["participant2"]),
                settle_timeout=BlockTimeout(100),
            )
            # Set the capacity of each channel direction; "capacity1"/"capacity2"
            # override the default for the respective participant.
            cv1: ChannelView = self.G[a(chan["participant1"])][a(chan["participant2"])]["view"]
            cv1.capacity = chan.get("capacity1", default_capacity)
            cv2: ChannelView = self.G[a(chan["participant2"])][a(chan["participant1"])]["view"]
            cv2.capacity = chan.get("capacity2", default_capacity)

        # create reachability mapping for testing; every node is reachable
        self.reachability_state = SimpleReachabilityContainer(
            {node: AddressReachability.REACHABLE for node in self.G.nodes}
        )

    def set_fee(self, node1: int, node2: int, **fee_params):
        """Apply a RaidenFeeSchedule update by node1 for its channel towards node2.

        Calling without fee_params resets the schedule to its defaults.
        """
        channel_id = self.G[a(node1)][a(node2)]["view"].channel_id
        self.handle_channel_fee_update(
            PFSFeeUpdate(
                canonical_identifier=CanonicalIdentifier(
                    chain_identifier=ChainID(61),
                    token_network_address=self.address,
                    channel_identifier=channel_id,
                ),
                updating_participant=a(node1),
                fee_schedule=RaidenFeeSchedule(**fee_params),
                signature=EMPTY_SIGNATURE,
                timestamp=datetime.utcnow(),
            )
        )

    def estimate_fee(self, initator: int, target: int, value=PA(100), max_paths=1):
        """Return the estimated fee of the best path, or None when no path exists."""
        paths = self.get_paths(
            source=a(initator),
            target=a(target),
            value=value,
            max_paths=max_paths,
            reachability_state=self.reachability_state,
        )
        if not paths:
            return None
        return paths[0].estimated_fee
def test_fees_in_balanced_routing():  # pylint: disable=too-many-statements
    """ Tests fee estimation in a network where both participants have funds in a channel. """
    # Line topology 1 -- 2 -- 3; node 2 is the only mediator.
    tn = TokenNetworkForTests(
        channels=[dict(participant1=1, participant2=2), dict(participant1=2, participant2=3)]
    )

    # Make sure that routing works and the default fees are zero
    assert tn.estimate_fee(1, 3) == 0

    # Fees for the initiator are ignored
    tn.set_fee(1, 2, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 0

    # Node 2 demands fees for incoming transfers
    tn.set_fee(2, 1, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 1

    # Node 2 demands fees for outgoing transfers
    tn.set_fee(2, 3, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 2

    # Same fee in the opposite direction
    assert tn.estimate_fee(3, 1) == 2

    # Reset fees to zero
    tn.set_fee(1, 2)
    tn.set_fee(2, 1)
    tn.set_fee(2, 3)

    # Let's try imbalance fees
    # When the fees influence the amount so strongly that fee(amount) != fee(amount + fee),
    # the difference is given as an additional summand below.

    # Incoming channel
    # Without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) == -10
    # With fee capping (negative totals are clamped to zero)
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) == 0

    # The opposite fee schedule should give opposite results
    # Without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10 + 1
    assert tn.estimate_fee(3, 1) == 10
    # With fee capping
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 10

    # Outgoing channel
    # Without fee capping
    tn.set_fee(2, 1, cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10
    assert tn.estimate_fee(3, 1) == 10 + 1
    # With fee capping
    tn.set_fee(2, 1, cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(200))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 10 + 1

    # The opposite fee schedule should give opposite results
    # Without fee capping
    tn.set_fee(2, 1, cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 10
    assert tn.estimate_fee(3, 1) == -10 + 1
    # With fee capping
    tn.set_fee(2, 1, cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(200)), (TA(2000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 10
    assert tn.estimate_fee(3, 1) == 0

    # Combined fees cancel out
    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0
    # And with fee capping, as the amounts even out
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(2000), FA(20))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0

    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0
    # And with fee capping, as the amounts even out
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(2000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) == 0

    # When the range covered by the imbalance_penalty does not include the
    # necessary balance values, the route should be considered invalid.
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(800), FA(200))])
    assert tn.estimate_fee(1, 3) is None
def test_fees_in_unbalanced_routing():  # pylint: disable=too-many-statements
    """ Tests fee estimation in a network where only one participant has funds in a channel. """
    # Line topology 1 -- 2 -- 3 where all capacity sits on the 1->3 direction.
    tn = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2, capacity1=1000, capacity2=0),
            dict(participant1=2, participant2=3, capacity1=1000, capacity2=0),
        ]
    )

    # Make sure that routing works and the default fees are zero
    assert tn.estimate_fee(1, 3) == 0

    # Fees for the initiator are ignored
    tn.set_fee(1, 2, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 0

    # Node 2 demands fees for incoming transfers
    tn.set_fee(2, 1, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 1

    # Node 2 demands fees for outgoing transfers
    tn.set_fee(2, 3, flat=FA(1))
    assert tn.estimate_fee(1, 3) == 2

    # No capacity in the opposite direction
    assert tn.estimate_fee(3, 1) is None

    # Reset fees to zero
    tn.set_fee(1, 2)
    tn.set_fee(2, 1)
    tn.set_fee(2, 3)

    # Let's try imbalance fees!
    # When approximation iterations matter, those are given as sums of the steps.

    # Incoming channel
    # Without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 10 + 1
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # The opposite fee schedule should give opposite results, without fee capping
    tn.set_fee(2, 3, cap_fees=False)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(100)), (TA(1000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10 + 1
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # Or zero with fee capping
    tn.set_fee(2, 3, cap_fees=True)
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(100)), (TA(1000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # Outgoing channel
    # Without fee capping
    tn.set_fee(2, 1, cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == -10
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping
    tn.set_fee(2, 1, cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(100))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # The opposite fee schedule should give opposite results
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(100)), (TA(1000), FA(0))])
    assert tn.estimate_fee(1, 3) == 10
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # Combined fees cancel out
    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping fees cancel out as well
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(1000), FA(20))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # Works without fee capping
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=False)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=False)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel
    # With fee capping fees cancel out as well
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=True)
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(20)), (TA(1000), FA(0))], cap_fees=True)
    assert tn.estimate_fee(1, 3) == 0
    assert tn.estimate_fee(3, 1) is None  # no balance in channel

    # When the range covered by the imbalance_penalty does not include the
    # necessary balance values, the route should be considered invalid.
    tn.set_fee(2, 3, imbalance_penalty=[(TA(0), FA(0)), (TA(80), FA(200))])
    assert tn.estimate_fee(1, 3) is None
def test_regression_issue_554():
    """ Regression test for https://github.com/raiden-network/raiden-services/issues/554 """
    # Small network with one-sided capacity and a decreasing imbalance penalty.
    tn = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2, capacity1=100, capacity2=0),
            dict(participant1=2, participant2=3, capacity1=100, capacity2=0),
        ]
    )
    tn.set_fee(2, 1, imbalance_penalty=[(TA(0), FA(20)), (TA(100), FA(0))])
    assert tn.estimate_fee(1, 3) is not None

    # Same scenario at a much larger scale with a V-shaped imbalance penalty.
    capacity = TA(100_000)
    tn2 = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2, capacity1=capacity, capacity2=0),
            dict(participant1=2, participant2=3, capacity1=capacity, capacity2=0),
        ]
    )
    tn2.set_fee(
        2, 1, imbalance_penalty=[(TA(0), FA(1000)), (capacity // 2, 0), (capacity, FA(1000))]
    )
    assert tn2.estimate_fee(1, 3, value=PA(10_000)) is not None
@pytest.mark.parametrize(
    "flat_fee_cli, prop_fee_cli, estimated_fee",
    [
        # flat fees
        (100, 0, 100 + 100),
        (10, 0, 10 + 10),
        # proportional fees
        (0, 1_000_000, 1000 + 2000),  # 100% per hop mediation fee
        (0, 100_000, 100 + 110),  # 10% per hop mediation fee
        (0, 50_000, 50 + 52),  # 5% per hop mediation fee
        (0, 10_000, 10 + 10),  # 1% per hop mediation fee
    ],
)
def test_compounding_fees(flat_fee_cli, prop_fee_cli, estimated_fee):
    """The transferred amount needs to include the fees for all mediators.

    Earlier mediators will apply the proportional fee not only on the payment
    amount, but also on the fees for later mediators.
    """
    # CLI fee values are per mediator node; each is split across its two channels.
    flat_fee = flat_fee_cli // 2
    prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee_cli))
    # Four-node line 1 -- 2 -- 3 -- 4 with two mediators (2 and 3).
    tn = TokenNetworkForTests(
        channels=[
            dict(participant1=1, participant2=2),
            dict(participant1=2, participant2=3),
            dict(participant1=3, participant2=4),
        ],
        default_capacity=TA(10_000),
    )
    tn.set_fee(2, 1, flat=flat_fee, proportional=prop_fee)
    tn.set_fee(2, 3, flat=flat_fee, proportional=prop_fee)
    tn.set_fee(3, 2, flat=flat_fee, proportional=prop_fee)
    tn.set_fee(3, 4, flat=flat_fee, proportional=prop_fee)
    assert tn.estimate_fee(1, 4, value=PA(1_000)) == estimated_fee
@pytest.mark.parametrize(
    "flat_fee, prop_fee_cli, max_lin_imbalance_fee, target_amount, expected_fee",
    [
        # proportional fees
        (0, 1_000_000, 0, 1000, 1000),  # 100% per hop mediation fee
        (0, 100_000, 0, 1000, 100),  # 10% per hop mediation fee
        (0, 50_000, 0, 1000, 50),  # 5% per hop mediation fee
        (0, 10_000, 0, 1000, 10),  # 1% per hop mediation fee
        (0, 10_000, 0, 100, 1),  # 1% per hop mediation fee
        (0, 5_000, 0, 100, 1),  # 0,5% per hop mediation
        (0, 4_999, 0, 100, 0),
        (0, 5_000, 0, 99, 0),
        # pure flat fee
        (50, 0, 0, 1000, 100),
        # mixed tests
        (10, 100_000, 0, 1000, 121),
        (100, 500_000, 0, 1000, 750),
        (100, 500_000, 0, 967, 733),
        # imbalance fee
        (0, 0, 100, 1_000, 10),
        (0, 0, 1_000, 1_000, 111),
    ],
)
def test_fee_estimate(flat_fee, prop_fee_cli, max_lin_imbalance_fee, target_amount, expected_fee):
    """ Tests the backwards fee calculation. """
    capacity = TA(10_000)
    prop_fee = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee_cli))
    imbalance_fee = None
    if max_lin_imbalance_fee > 0:
        # This created a simple asymmetric imbalance fee
        imbalance_fee = [(0, 0), (capacity, 0), (2 * capacity, max_lin_imbalance_fee)]
    tn = TokenNetworkForTests(
        channels=[dict(participant1=1, participant2=2), dict(participant1=2, participant2=3)],
        default_capacity=capacity,
    )
    tn.set_fee(2, 1, flat=flat_fee, proportional=prop_fee, imbalance_penalty=imbalance_fee)
    tn.set_fee(2, 3, flat=flat_fee, proportional=prop_fee, imbalance_penalty=imbalance_fee)
    # NOTE(review): the trailing "| ..." numbers on the next line are dataset
    # row-delimiter residue fused onto the source; preserved verbatim here,
    # but they are not valid Python and should be stripped at the data layer.
    assert tn.estimate_fee(1, 3, value=PA(target_amount)) == expected_fee | 0.856152 | 0.381076
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class BlockstorageClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.core.BlockstorageClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, work_request_client=None, **kwargs):
    """
    Creates a new BlockstorageClientCompositeOperations object

    :param BlockstorageClient client:
        The service client which will be wrapped by this object

    :param oci.work_requests.WorkRequestClient work_request_client: (optional)
        The work request service client which will be used to wait for work request states. Default is None.
    """
    self.client = client
    # Fall back to building a WorkRequestClient from the wrapped client's own
    # config and constructor kwargs when none is supplied explicitly.
    self._work_request_client = work_request_client if work_request_client else oci.work_requests.WorkRequestClient(self.client._config, **self.client._kwargs)
def copy_boot_volume_backup_and_wait_for_state(self, boot_volume_backup_id, copy_boot_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.copy_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
    to enter the given state(s).

    :param str boot_volume_backup_id: (required)
        The OCID of the boot volume backup.

    :param CopyBootVolumeBackupDetails copy_boot_volume_backup_details: (required)
        Request to create a cross-region copy of given boot volume backup.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.copy_boot_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    operation_result = self.client.copy_boot_volume_backup(boot_volume_backup_id, copy_boot_volume_backup_details, **operation_kwargs)
    # No target states requested: return the operation response without waiting.
    if not wait_for_states:
        return operation_result
    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_boot_volume_backup(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        result_to_return = waiter_result

        return result_to_return
    except Exception as e:
        # Wrap any waiter failure, preserving the already-completed operation
        # result so callers can inspect what succeeded before the error.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def copy_volume_backup_and_wait_for_state(self, volume_backup_id, copy_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.copy_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
    to enter the given state(s).

    :param str volume_backup_id: (required)
        The OCID of the volume backup.

    :param CopyVolumeBackupDetails copy_volume_backup_details: (required)
        Request to create a cross-region copy of given backup.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.copy_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    operation_result = self.client.copy_volume_backup(volume_backup_id, copy_volume_backup_details, **operation_kwargs)
    # No target states requested: return the operation response without waiting.
    if not wait_for_states:
        return operation_result
    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id

    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_backup(wait_for_resource_id),
            evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        result_to_return = waiter_result

        return result_to_return
    except Exception as e:
        # Wrap any waiter failure, preserving the already-completed operation
        # result so callers can inspect what succeeded before the error.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_boot_volume_and_wait_for_state(self, create_boot_volume_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.create_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolume` acted upon
    to enter the given state(s).

    :param CreateBootVolumeDetails create_boot_volume_details: (required)
        Request to create a new boot volume.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolume.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_boot_volume`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.create_boot_volume(create_boot_volume_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_boot_volume(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_boot_volume_backup_and_wait_for_state(self, create_boot_volume_backup_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.create_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
    to enter the given state(s).

    :param CreateBootVolumeBackupDetails create_boot_volume_backup_details: (required)
        Request to create a new backup of given boot volume.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_boot_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.create_boot_volume_backup(create_boot_volume_backup_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_boot_volume_backup(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_and_wait_for_state(self, create_volume_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.create_volume` and waits for the :py:class:`~oci.core.models.Volume` acted upon
    to enter the given state(s).

    :param CreateVolumeDetails create_volume_details: (required)
        Request to create a new volume.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Volume.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.create_volume(create_volume_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_backup_and_wait_for_state(self, create_volume_backup_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.create_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
    to enter the given state(s).

    :param CreateVolumeBackupDetails create_volume_backup_details: (required)
        Request to create a new backup of given volume.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.create_volume_backup(create_volume_backup_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_backup(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_group_and_wait_for_state(self, create_volume_group_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.create_volume_group` and waits for the :py:class:`~oci.core.models.VolumeGroup` acted upon
    to enter the given state(s).

    :param CreateVolumeGroupDetails create_volume_group_details: (required)
        Request to create a new volume group.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume_group`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.create_volume_group(create_volume_group_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_group(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_group_backup_and_wait_for_state(self, create_volume_group_backup_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.create_volume_group_backup` and waits for the :py:class:`~oci.core.models.VolumeGroupBackup` acted upon
    to enter the given state(s).

    :param CreateVolumeGroupBackupDetails create_volume_group_backup_details: (required)
        Request to create a new backup group of given volume group.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroupBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume_group_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.create_volume_group_backup(create_volume_group_backup_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_group_backup(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_boot_volume_and_wait_for_state(self, boot_volume_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.delete_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolume` acted upon
    to enter the given state(s).

    :param str boot_volume_id: (required)
        The OCID of the boot volume.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolume.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_boot_volume`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    # Fetch the resource before deleting it: once deleted, GETs may 404, so
    # the waiter polls starting from this pre-delete snapshot.
    initial_get_result = self.client.get_boot_volume(boot_volume_id)
    operation_result = None
    try:
        operation_result = self.client.delete_boot_volume(boot_volume_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # Resource already gone -- treat as successfully deleted.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    try:
        waiter_result = oci.wait_until(
            self.client,
            initial_get_result,
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            succeed_on_not_found=True,  # a 404 while polling means the delete completed
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_boot_volume_backup_and_wait_for_state(self, boot_volume_backup_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.delete_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
    to enter the given state(s).

    :param str boot_volume_backup_id: (required)
        The OCID of the boot volume backup.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_boot_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    # Fetch the resource before deleting it: once deleted, GETs may 404, so
    # the waiter polls starting from this pre-delete snapshot.
    initial_get_result = self.client.get_boot_volume_backup(boot_volume_backup_id)
    operation_result = None
    try:
        operation_result = self.client.delete_boot_volume_backup(boot_volume_backup_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # Resource already gone -- treat as successfully deleted.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    try:
        waiter_result = oci.wait_until(
            self.client,
            initial_get_result,
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            succeed_on_not_found=True,  # a 404 while polling means the delete completed
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_and_wait_for_state(self, volume_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.delete_volume` and waits for the :py:class:`~oci.core.models.Volume` acted upon
    to enter the given state(s).

    :param str volume_id: (required)
        The OCID of the volume.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Volume.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    # Fetch the resource before deleting it: once deleted, GETs may 404, so
    # the waiter polls starting from this pre-delete snapshot.
    initial_get_result = self.client.get_volume(volume_id)
    operation_result = None
    try:
        operation_result = self.client.delete_volume(volume_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # Resource already gone -- treat as successfully deleted.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    try:
        waiter_result = oci.wait_until(
            self.client,
            initial_get_result,
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            succeed_on_not_found=True,  # a 404 while polling means the delete completed
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_backup_and_wait_for_state(self, volume_backup_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.delete_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
    to enter the given state(s).

    :param str volume_backup_id: (required)
        The OCID of the volume backup.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    # Fetch the resource before deleting it: once deleted, GETs may 404, so
    # the waiter polls starting from this pre-delete snapshot.
    initial_get_result = self.client.get_volume_backup(volume_backup_id)
    operation_result = None
    try:
        operation_result = self.client.delete_volume_backup(volume_backup_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # Resource already gone -- treat as successfully deleted.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    try:
        waiter_result = oci.wait_until(
            self.client,
            initial_get_result,
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            succeed_on_not_found=True,  # a 404 while polling means the delete completed
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_group_and_wait_for_state(self, volume_group_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.delete_volume_group` and waits for the :py:class:`~oci.core.models.VolumeGroup` acted upon
    to enter the given state(s).

    :param str volume_group_id: (required)
        The Oracle Cloud ID (OCID) that uniquely identifies the volume group.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume_group`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    # Fetch the resource before deleting it: once deleted, GETs may 404, so
    # the waiter polls starting from this pre-delete snapshot.
    initial_get_result = self.client.get_volume_group(volume_group_id)
    operation_result = None
    try:
        operation_result = self.client.delete_volume_group(volume_group_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # Resource already gone -- treat as successfully deleted.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    try:
        waiter_result = oci.wait_until(
            self.client,
            initial_get_result,
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            succeed_on_not_found=True,  # a 404 while polling means the delete completed
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_group_backup_and_wait_for_state(self, volume_group_backup_id, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.delete_volume_group_backup` and waits for the :py:class:`~oci.core.models.VolumeGroupBackup` acted upon
    to enter the given state(s).

    :param str volume_group_backup_id: (required)
        The Oracle Cloud ID (OCID) that uniquely identifies the volume group backup.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroupBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume_group_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    # Fetch the resource before deleting it: once deleted, GETs may 404, so
    # the waiter polls starting from this pre-delete snapshot.
    initial_get_result = self.client.get_volume_group_backup(volume_group_backup_id)
    operation_result = None
    try:
        operation_result = self.client.delete_volume_group_backup(volume_group_backup_id, **operation_kwargs)
    except oci.exceptions.ServiceError as e:
        if e.status == 404:
            # Resource already gone -- treat as successfully deleted.
            return WAIT_RESOURCE_NOT_FOUND
        else:
            raise e
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    try:
        waiter_result = oci.wait_until(
            self.client,
            initial_get_result,
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            succeed_on_not_found=True,  # a 404 while polling means the delete completed
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_boot_volume_and_wait_for_state(self, boot_volume_id, update_boot_volume_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.update_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolume` acted upon
    to enter the given state(s).

    :param str boot_volume_id: (required)
        The OCID of the boot volume.

    :param UpdateBootVolumeDetails update_boot_volume_details: (required)
        Update boot volume's display name.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolume.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_boot_volume`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.update_boot_volume(boot_volume_id, update_boot_volume_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_boot_volume(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_boot_volume_backup_and_wait_for_state(self, boot_volume_backup_id, update_boot_volume_backup_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.update_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
    to enter the given state(s).

    :param str boot_volume_backup_id: (required)
        The OCID of the boot volume backup.

    :param UpdateBootVolumeBackupDetails update_boot_volume_backup_details: (required)
        Update boot volume backup fields

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_boot_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.update_boot_volume_backup(boot_volume_backup_id, update_boot_volume_backup_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_boot_volume_backup(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_and_wait_for_state(self, volume_id, update_volume_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.update_volume` and waits for the :py:class:`~oci.core.models.Volume` acted upon
    to enter the given state(s).

    :param str volume_id: (required)
        The OCID of the volume.

    :param UpdateVolumeDetails update_volume_details: (required)
        Update volume's display name. Avoid entering confidential information.

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Volume.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.update_volume(volume_id, update_volume_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_backup_and_wait_for_state(self, volume_backup_id, update_volume_backup_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.update_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
    to enter the given state(s).

    :param str volume_backup_id: (required)
        The OCID of the volume backup.

    :param UpdateVolumeBackupDetails update_volume_backup_details: (required)
        Update volume backup fields

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.update_volume_backup(volume_backup_id, update_volume_backup_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_backup(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_group_and_wait_for_state(self, volume_group_id, update_volume_group_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.update_volume_group` and waits for the :py:class:`~oci.core.models.VolumeGroup` acted upon
    to enter the given state(s).

    :param str volume_group_id: (required)
        The Oracle Cloud ID (OCID) that uniquely identifies the volume group.

    :param UpdateVolumeGroupDetails update_volume_group_details: (required)
        Update volume group's set of volumes and/or display name

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume_group`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.update_volume_group(volume_group_id, update_volume_group_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_group(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_group_backup_and_wait_for_state(self, volume_group_backup_id, update_volume_group_backup_details, wait_for_states=None, operation_kwargs=None, waiter_kwargs=None):
    """
    Calls :py:func:`~oci.core.BlockstorageClient.update_volume_group_backup` and waits for the :py:class:`~oci.core.models.VolumeGroupBackup` acted upon
    to enter the given state(s).

    :param str volume_group_backup_id: (required)
        The Oracle Cloud ID (OCID) that uniquely identifies the volume group backup.

    :param UpdateVolumeGroupBackupDetails update_volume_group_backup_details: (required)
        Update volume group backup fields

    :param list[str] wait_for_states:
        An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroupBackup.lifecycle_state`

    :param dict operation_kwargs:
        A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume_group_backup`

    :param dict waiter_kwargs:
        A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
        as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
    """
    # Avoid mutable default arguments: a shared []/{} default object would be
    # reused (and any mutation leaked) across calls.
    wait_for_states = wait_for_states or []
    operation_kwargs = operation_kwargs or {}
    waiter_kwargs = waiter_kwargs or {}

    operation_result = self.client.update_volume_group_backup(volume_group_backup_id, update_volume_group_backup_details, **operation_kwargs)
    if not wait_for_states:
        return operation_result

    # Lifecycle-state comparison is case-insensitive.
    lowered_wait_for_states = [w.lower() for w in wait_for_states]
    wait_for_resource_id = operation_result.data.id
    try:
        waiter_result = oci.wait_until(
            self.client,
            self.client.get_volume_group_backup(wait_for_resource_id),
            # lifecycle_state may be unset; only match when it has a value.
            evaluate_response=lambda r: r.data.lifecycle_state and r.data.lifecycle_state.lower() in lowered_wait_for_states,
            **waiter_kwargs
        )
        return waiter_result
    except Exception as e:
        # Surface the already-completed operation alongside the waiter failure.
        raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class BlockstorageClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.core.BlockstorageClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, work_request_client=None, **kwargs):
"""
Creates a new BlockstorageClientCompositeOperations object
:param BlockstorageClient client:
The service client which will be wrapped by this object
:param oci.work_requests.WorkRequestClient work_request_client: (optional)
The work request service client which will be used to wait for work request states. Default is None.
"""
self.client = client
self._work_request_client = work_request_client if work_request_client else oci.work_requests.WorkRequestClient(self.client._config, **self.client._kwargs)
def copy_boot_volume_backup_and_wait_for_state(self, boot_volume_backup_id, copy_boot_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.copy_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
to enter the given state(s).
:param str boot_volume_backup_id: (required)
The OCID of the boot volume backup.
:param CopyBootVolumeBackupDetails copy_boot_volume_backup_details: (required)
Request to create a cross-region copy of given boot volume backup.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.copy_boot_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.copy_boot_volume_backup(boot_volume_backup_id, copy_boot_volume_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_boot_volume_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def copy_volume_backup_and_wait_for_state(self, volume_backup_id, copy_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.copy_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
to enter the given state(s).
:param str volume_backup_id: (required)
The OCID of the volume backup.
:param CopyVolumeBackupDetails copy_volume_backup_details: (required)
Request to create a cross-region copy of given backup.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.copy_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.copy_volume_backup(volume_backup_id, copy_volume_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_boot_volume_and_wait_for_state(self, create_boot_volume_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.create_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolume` acted upon
to enter the given state(s).
:param CreateBootVolumeDetails create_boot_volume_details: (required)
Request to create a new boot volume.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolume.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_boot_volume`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_boot_volume(create_boot_volume_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_boot_volume(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_boot_volume_backup_and_wait_for_state(self, create_boot_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.create_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
to enter the given state(s).
:param CreateBootVolumeBackupDetails create_boot_volume_backup_details: (required)
Request to create a new backup of given boot volume.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_boot_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_boot_volume_backup(create_boot_volume_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_boot_volume_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_and_wait_for_state(self, create_volume_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.create_volume` and waits for the :py:class:`~oci.core.models.Volume` acted upon
to enter the given state(s).
:param CreateVolumeDetails create_volume_details: (required)
Request to create a new volume.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Volume.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_volume(create_volume_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_backup_and_wait_for_state(self, create_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.create_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
to enter the given state(s).
:param CreateVolumeBackupDetails create_volume_backup_details: (required)
Request to create a new backup of given volume.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_volume_backup(create_volume_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_group_and_wait_for_state(self, create_volume_group_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.create_volume_group` and waits for the :py:class:`~oci.core.models.VolumeGroup` acted upon
to enter the given state(s).
:param CreateVolumeGroupDetails create_volume_group_details: (required)
Request to create a new volume group.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume_group`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_volume_group(create_volume_group_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_group(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def create_volume_group_backup_and_wait_for_state(self, create_volume_group_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.create_volume_group_backup` and waits for the :py:class:`~oci.core.models.VolumeGroupBackup` acted upon
to enter the given state(s).
:param CreateVolumeGroupBackupDetails create_volume_group_backup_details: (required)
Request to create a new backup group of given volume group.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroupBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.create_volume_group_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.create_volume_group_backup(create_volume_group_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_group_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_boot_volume_and_wait_for_state(self, boot_volume_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.delete_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolume` acted upon
to enter the given state(s).
:param str boot_volume_id: (required)
The OCID of the boot volume.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolume.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_boot_volume`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_boot_volume(boot_volume_id)
operation_result = None
try:
operation_result = self.client.delete_boot_volume(boot_volume_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_boot_volume_backup_and_wait_for_state(self, boot_volume_backup_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.delete_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
to enter the given state(s).
:param str boot_volume_backup_id: (required)
The OCID of the boot volume backup.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_boot_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_boot_volume_backup(boot_volume_backup_id)
operation_result = None
try:
operation_result = self.client.delete_boot_volume_backup(boot_volume_backup_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_and_wait_for_state(self, volume_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.delete_volume` and waits for the :py:class:`~oci.core.models.Volume` acted upon
to enter the given state(s).
:param str volume_id: (required)
The OCID of the volume.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Volume.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_volume(volume_id)
operation_result = None
try:
operation_result = self.client.delete_volume(volume_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_backup_and_wait_for_state(self, volume_backup_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.delete_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
to enter the given state(s).
:param str volume_backup_id: (required)
The OCID of the volume backup.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_volume_backup(volume_backup_id)
operation_result = None
try:
operation_result = self.client.delete_volume_backup(volume_backup_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_group_and_wait_for_state(self, volume_group_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.delete_volume_group` and waits for the :py:class:`~oci.core.models.VolumeGroup` acted upon
to enter the given state(s).
:param str volume_group_id: (required)
The Oracle Cloud ID (OCID) that uniquely identifies the volume group.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume_group`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_volume_group(volume_group_id)
operation_result = None
try:
operation_result = self.client.delete_volume_group(volume_group_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def delete_volume_group_backup_and_wait_for_state(self, volume_group_backup_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.delete_volume_group_backup` and waits for the :py:class:`~oci.core.models.VolumeGroupBackup` acted upon
to enter the given state(s).
:param str volume_group_backup_id: (required)
The Oracle Cloud ID (OCID) that uniquely identifies the volume group backup.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroupBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.delete_volume_group_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
initial_get_result = self.client.get_volume_group_backup(volume_group_backup_id)
operation_result = None
try:
operation_result = self.client.delete_volume_group_backup(volume_group_backup_id, **operation_kwargs)
except oci.exceptions.ServiceError as e:
if e.status == 404:
return WAIT_RESOURCE_NOT_FOUND
else:
raise e
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
try:
waiter_result = oci.wait_until(
self.client,
initial_get_result,
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
succeed_on_not_found=True,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_boot_volume_and_wait_for_state(self, boot_volume_id, update_boot_volume_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.update_boot_volume` and waits for the :py:class:`~oci.core.models.BootVolume` acted upon
to enter the given state(s).
:param str boot_volume_id: (required)
The OCID of the boot volume.
:param UpdateBootVolumeDetails update_boot_volume_details: (required)
Update boot volume's display name.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolume.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_boot_volume`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_boot_volume(boot_volume_id, update_boot_volume_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_boot_volume(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_boot_volume_backup_and_wait_for_state(self, boot_volume_backup_id, update_boot_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.update_boot_volume_backup` and waits for the :py:class:`~oci.core.models.BootVolumeBackup` acted upon
to enter the given state(s).
:param str boot_volume_backup_id: (required)
The OCID of the boot volume backup.
:param UpdateBootVolumeBackupDetails update_boot_volume_backup_details: (required)
Update boot volume backup fields
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.BootVolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_boot_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_boot_volume_backup(boot_volume_backup_id, update_boot_volume_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_boot_volume_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_and_wait_for_state(self, volume_id, update_volume_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.update_volume` and waits for the :py:class:`~oci.core.models.Volume` acted upon
to enter the given state(s).
:param str volume_id: (required)
The OCID of the volume.
:param UpdateVolumeDetails update_volume_details: (required)
Update volume's display name. Avoid entering confidential information.
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.Volume.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_volume(volume_id, update_volume_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_backup_and_wait_for_state(self, volume_backup_id, update_volume_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.update_volume_backup` and waits for the :py:class:`~oci.core.models.VolumeBackup` acted upon
to enter the given state(s).
:param str volume_backup_id: (required)
The OCID of the volume backup.
:param UpdateVolumeBackupDetails update_volume_backup_details: (required)
Update volume backup fields
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_volume_backup(volume_backup_id, update_volume_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_group_and_wait_for_state(self, volume_group_id, update_volume_group_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.update_volume_group` and waits for the :py:class:`~oci.core.models.VolumeGroup` acted upon
to enter the given state(s).
:param str volume_group_id: (required)
The Oracle Cloud ID (OCID) that uniquely identifies the volume group.
:param UpdateVolumeGroupDetails update_volume_group_details: (required)
Update volume group's set of volumes and/or display name
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume_group`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_volume_group(volume_group_id, update_volume_group_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_group(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
def update_volume_group_backup_and_wait_for_state(self, volume_group_backup_id, update_volume_group_backup_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.core.BlockstorageClient.update_volume_group_backup` and waits for the :py:class:`~oci.core.models.VolumeGroupBackup` acted upon
to enter the given state(s).
:param str volume_group_backup_id: (required)
The Oracle Cloud ID (OCID) that uniquely identifies the volume group backup.
:param UpdateVolumeGroupBackupDetails update_volume_group_backup_details: (required)
Update volume group backup fields
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.core.models.VolumeGroupBackup.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.core.BlockstorageClient.update_volume_group_backup`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_volume_group_backup(volume_group_backup_id, update_volume_group_backup_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_volume_group_backup(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) | 0.768255 | 0.348036 |
import argparse
from processfile import ProcessFile
def read_args():
"""
Read the arguments from command line
:return:
"""
parser = argparse.ArgumentParser(description="Decode a given origami matrices to a text file.")
parser.add_argument("-f", "--file_in", help="File to decode", required=True)
parser.add_argument("-o", "--file_out", help="File to write output", required=True)
parser.add_argument("-fz", "--file_size", help="File size that will be decoded", type=int, required=True)
parser.add_argument('-tp', '--threshold_parity',
help='Minimum weight for a parity bit cell to be consider that as an error', default=2, type=int)
parser.add_argument("-td", "--threshold_data",
help='Minimum weight for a data bit cell to be consider as an error', default=2, type=int)
parser.add_argument("-v", "--verbose", help="Print details on the console. "
"0 -> error, 1 -> debug, 2 -> info, 3 -> warning", default=0, type=int)
parser.add_argument("-r", "--redundancy", help="How much redundancy was used during encoding",
default=50, type=float)
parser.add_argument("-ior", "--individual_origami_info", help="Store individual origami information",
action='store_true', default=True)
parser.add_argument("-e", "--error", help="Maximum number of error that the algorithm "
"will try to fix", type=int, default=8)
parser.add_argument("-fp", "--false_positive", help="0 can also be 1.", type=int, default=0)
parser.add_argument("-d", "--degree", help="Degree old/new", default="new", type=str)
parser.add_argument("-cf", "--correct_file", help="Original encoded file. Helps to check the status automatically."
, type=str, default=False)
args = parser.parse_args()
return args
def main():
args = read_args()
dnam_decode = ProcessFile(redundancy=args.redundancy, verbose=args.verbose, degree=args.degree)
dnam_decode.decode(args.file_in, args.file_out, args.file_size,
threshold_data=args.threshold_data,
threshold_parity=args.threshold_parity,
maximum_number_of_error=args.error,
false_positive=args.false_positive,
individual_origami_info=args.individual_origami_info,
correct_file=args.correct_file)
if __name__ == '__main__':
main(); | error_correction/decode.py | import argparse
from processfile import ProcessFile
def read_args():
"""
Read the arguments from command line
:return:
"""
parser = argparse.ArgumentParser(description="Decode a given origami matrices to a text file.")
parser.add_argument("-f", "--file_in", help="File to decode", required=True)
parser.add_argument("-o", "--file_out", help="File to write output", required=True)
parser.add_argument("-fz", "--file_size", help="File size that will be decoded", type=int, required=True)
parser.add_argument('-tp', '--threshold_parity',
help='Minimum weight for a parity bit cell to be consider that as an error', default=2, type=int)
parser.add_argument("-td", "--threshold_data",
help='Minimum weight for a data bit cell to be consider as an error', default=2, type=int)
parser.add_argument("-v", "--verbose", help="Print details on the console. "
"0 -> error, 1 -> debug, 2 -> info, 3 -> warning", default=0, type=int)
parser.add_argument("-r", "--redundancy", help="How much redundancy was used during encoding",
default=50, type=float)
parser.add_argument("-ior", "--individual_origami_info", help="Store individual origami information",
action='store_true', default=True)
parser.add_argument("-e", "--error", help="Maximum number of error that the algorithm "
"will try to fix", type=int, default=8)
parser.add_argument("-fp", "--false_positive", help="0 can also be 1.", type=int, default=0)
parser.add_argument("-d", "--degree", help="Degree old/new", default="new", type=str)
parser.add_argument("-cf", "--correct_file", help="Original encoded file. Helps to check the status automatically."
, type=str, default=False)
args = parser.parse_args()
return args
def main():
args = read_args()
dnam_decode = ProcessFile(redundancy=args.redundancy, verbose=args.verbose, degree=args.degree)
dnam_decode.decode(args.file_in, args.file_out, args.file_size,
threshold_data=args.threshold_data,
threshold_parity=args.threshold_parity,
maximum_number_of_error=args.error,
false_positive=args.false_positive,
individual_origami_info=args.individual_origami_info,
correct_file=args.correct_file)
if __name__ == '__main__':
main(); | 0.635336 | 0.226441 |
# importing defined funcions from file and modules
from functions import draw_shape
import functions
from functions import orders
from functions import sales
from os.path import join
from functions import place_record, rotate_record, drop_needle
from datetime import datetime
# Example of a basic print statement
# Restaurant name
print("Burgers on the Grill")
# Join strings using the join feature
weekdays = "Tue, Wed, Thurs, Fri "
weekends = " Sat, Sun"
opening_hour= "10am"
closing_hour = "8pm"
print("We are OPEN: ", join(weekdays, weekends))
# Create my own join function
def myjoin(*args):
joined_string = args[0]
for arg in args[1:]:
joined_string += '-' + arg
return joined_string
print("Our hours are: ", myjoin(opening_hour, closing_hour), "\n")
# Call a function from another file - "functions.py"
def play_record(album):
place_record(album)
rotate_record(album)
drop_needle(album)
next_album = "Bubble Gum / Mr. Loco"
play_record(next_album)
# Use None as a placeholder in an if statement
session_id = None
if session_id is None:
print("\nWELCOME! Please take a look at our menu!\n")
# Example of .append in Python
def update_order(new_item, current_order=None):
if current_order is None:
current_order = []
current_order.append(new_item)
return current_order
order1 = update_order({'item': 'burger', 'cost': '3.50'})
order1 = update_order({'item': 'soda', 'cost': '1.50'}, order1)
order2 = update_order({'item': 'soda', 'cost': '1.50'})
print("Order#1", order1)
print("Order#2", order2, "\n")
# Extract the cost for the items from order1
values = [val['cost'] for val in order1]
values_floats = []
for item in values:
values_floats.append(float(item))
# Creating a SUM calculator
def SUM(num1, num2):
sum = num1 + num2
return sum
sum = SUM(values_floats[0], values_floats[1])
print("Order#1 Total: $", sum)
# Extract the cost for the items from order2
values = [val['cost'] for val in order2]
values_floats = []
for item in values:
values_floats.append(float(item))
# Creating a SUM calculator
def SUM(num1):
sum = num1
return sum
sum = SUM(values_floats[0])
print("Order#2 Total: $", sum, "\n")
# Control the case of the letters
def loudly(text):
loudly = text.upper()
return loudly
def politely(text):
politely = text.lower()
return politely
loud = loudly("We are preparing your order!")
polite = politely("Please take a seat.")
print(loud, polite, '\n')
# Set an arbitrary number of arguments.
def shout_strings(*args):
for argument in args:
print(argument.upper())
shout_strings("Get ready to taste our delicious burgers! (っ ͡⚈ ͜ʖ ͡⚈)っ",
"AND here is a warm drink on the house",
"""
▄▀ ▄▀
▀ ▀
█▀▀▀▀▀█▄
█░░░░░█─█
▀▄▄▄▄▄▀▀
\n""")
# Cut off your string after a specified length.
def cut_string(length, *sentences):
for sentence in sentences:
print(sentence[:length])
cut_string(26, "Hope you enjoyed the food." , "( ͡▀̿ ̿ ‿っ ͡▀̿ ̿ )✌ \n ")
# Define any number of arguments and call it
def arbitrary_args(**kwargs):
print(kwargs.get('line_break'))
arbitrary_args(secret_recipe="XXXXXXXX", ketchup="mayo", cheese="cheddar", line_break=" ")
# Use {}.format to input more strings into strings
print("The {name} is a {adjective} place to go. I'm going to rate it {feeling}.\n\n".format(name="Burgers on the Grill", adjective="great", feeling="10/10",))
# Build a function to print the total counts
print("Managerial Accounting:")
def accounting(**products_dict):
for name, count in products_dict.items():
orders(name, count)
accounting(Burger='79', Drink='114')
print("\n", end="")
# Use a dictionary item list for the arguments
def accounting(**products_dict):
for name, price in products_dict.items():
sales(name, price)
sales_dict = {'Burger': 276.5, 'Drinks': 171}
accounting(**sales_dict)
print("\n", end="")
# Select the values from the dictionary list and add them and print the total
total = sales_dict.get('Burger') + sales_dict.get('Drinks')
def total_sales(total):
print("Today we made ${}!\n".format(total))
returned_value = total_sales(float(total))
print("\n")
# Use a Python decorator to tell time
def info(func):
def inner():
print("What time is it?")
func()
return inner()
@info
def time():
now = datetime.now()
current_time = now.strftime("%H:%M")
print("It is ", current_time, "\nPSH! No, it's burger time!")
# Call function from file and Print the the Burger Logo
def draw_shape(shape_name, character, line_breaks):
shape = functions.draw_shape(shape_name, character)
if not line_breaks:
print(shape[1:-1])
else:
print(shape)
draw_shape(shape_name="burger", character='o', line_breaks=True) | PRINT.py |
# importing defined funcions from file and modules
from functions import draw_shape
import functions
from functions import orders
from functions import sales
from os.path import join
from functions import place_record, rotate_record, drop_needle
from datetime import datetime
# Example of a basic print statement
# Restaurant name
print("Burgers on the Grill")
# Join strings using the join feature
weekdays = "Tue, Wed, Thurs, Fri "
weekends = " Sat, Sun"
opening_hour= "10am"
closing_hour = "8pm"
print("We are OPEN: ", join(weekdays, weekends))
# Create my own join function
def myjoin(*args):
joined_string = args[0]
for arg in args[1:]:
joined_string += '-' + arg
return joined_string
print("Our hours are: ", myjoin(opening_hour, closing_hour), "\n")
# Call a function from another file - "functions.py"
def play_record(album):
place_record(album)
rotate_record(album)
drop_needle(album)
next_album = "Bubble Gum / Mr. Loco"
play_record(next_album)
# Use None as a placeholder in an if statement
session_id = None
if session_id is None:
print("\nWELCOME! Please take a look at our menu!\n")
# Example of .append in Python
def update_order(new_item, current_order=None):
if current_order is None:
current_order = []
current_order.append(new_item)
return current_order
order1 = update_order({'item': 'burger', 'cost': '3.50'})
order1 = update_order({'item': 'soda', 'cost': '1.50'}, order1)
order2 = update_order({'item': 'soda', 'cost': '1.50'})
print("Order#1", order1)
print("Order#2", order2, "\n")
# Extract the cost for the items from order1
values = [val['cost'] for val in order1]
values_floats = []
for item in values:
values_floats.append(float(item))
# Creating a SUM calculator
def SUM(num1, num2):
sum = num1 + num2
return sum
sum = SUM(values_floats[0], values_floats[1])
print("Order#1 Total: $", sum)
# Extract the cost for the items from order2
values = [val['cost'] for val in order2]
values_floats = []
for item in values:
values_floats.append(float(item))
# Creating a SUM calculator
def SUM(num1):
sum = num1
return sum
sum = SUM(values_floats[0])
print("Order#2 Total: $", sum, "\n")
# Control the case of the letters
def loudly(text):
loudly = text.upper()
return loudly
def politely(text):
politely = text.lower()
return politely
loud = loudly("We are preparing your order!")
polite = politely("Please take a seat.")
print(loud, polite, '\n')
# Set an arbitrary number of arguments.
def shout_strings(*args):
for argument in args:
print(argument.upper())
shout_strings("Get ready to taste our delicious burgers! (っ ͡⚈ ͜ʖ ͡⚈)っ",
"AND here is a warm drink on the house",
"""
▄▀ ▄▀
▀ ▀
█▀▀▀▀▀█▄
█░░░░░█─█
▀▄▄▄▄▄▀▀
\n""")
# Cut off your string after a specified length.
def cut_string(length, *sentences):
for sentence in sentences:
print(sentence[:length])
cut_string(26, "Hope you enjoyed the food." , "( ͡▀̿ ̿ ‿っ ͡▀̿ ̿ )✌ \n ")
# Define any number of arguments and call it
def arbitrary_args(**kwargs):
print(kwargs.get('line_break'))
arbitrary_args(secret_recipe="XXXXXXXX", ketchup="mayo", cheese="cheddar", line_break=" ")
# Use {}.format to input more strings into strings
print("The {name} is a {adjective} place to go. I'm going to rate it {feeling}.\n\n".format(name="Burgers on the Grill", adjective="great", feeling="10/10",))
# Build a function to print the total counts
print("Managerial Accounting:")
def accounting(**products_dict):
for name, count in products_dict.items():
orders(name, count)
accounting(Burger='79', Drink='114')
print("\n", end="")
# Use a dictionary item list for the arguments
def accounting(**products_dict):
for name, price in products_dict.items():
sales(name, price)
sales_dict = {'Burger': 276.5, 'Drinks': 171}
accounting(**sales_dict)
print("\n", end="")
# Select the values from the dictionary list and add them and print the total
total = sales_dict.get('Burger') + sales_dict.get('Drinks')
def total_sales(total):
print("Today we made ${}!\n".format(total))
returned_value = total_sales(float(total))
print("\n")
# Use a Python decorator to tell time
def info(func):
def inner():
print("What time is it?")
func()
return inner()
@info
def time():
now = datetime.now()
current_time = now.strftime("%H:%M")
print("It is ", current_time, "\nPSH! No, it's burger time!")
# Call function from file and Print the the Burger Logo
def draw_shape(shape_name, character, line_breaks):
shape = functions.draw_shape(shape_name, character)
if not line_breaks:
print(shape[1:-1])
else:
print(shape)
draw_shape(shape_name="burger", character='o', line_breaks=True) | 0.438304 | 0.307618 |
import numpy as np
#%% Corresponding Y generation
def Syn_Generation_Y1(X, W, sigma):
# No of samples
n = len(X)
t = len(X[0][:,0])
# Initialization
Output_Y = list()
for i in range(n):
Temp_X = X[i]
Temp_Y = np.zeros([t,])
for j in range(t):
Temp_Y[j] = np.exp(-np.abs(np.sum(W * Temp_X[j,:]))) + np.random.normal(loc = 0, scale = sigma)
Output_Y.append(Temp_Y)
return Output_Y
#%% Corresponding Y generation
def Syn_Generation_Y2(X, W, sigma):
# No of samples
n = len(X)
t = len(X[0][:,0])
d = int(len(X[0][0,:])/2)
# Initialization
Output_Y = list()
for i in range(n):
Temp_X = X[i]
Temp_Y = np.zeros([t,])
for j in range(t):
Temp_Y[j] = np.exp(-np.abs(np.sum(W * Temp_X[j,:d]))) + np.random.normal(loc = 0, scale = sigma)
Output_Y.append(Temp_Y)
return Output_Y
#%% Corresponding Y generation
def Syn_Generation_Y3(X, W, sigma, eta):
# No of samples
n = len(X)
t = len(X[0][:,0])
d = int(len(X[0][0,:])/2)
# Initialization
Output_Y = list()
for i in range(n):
Temp_X = X[i]
Temp_Y = np.zeros([t,])
for j in range(t):
Temp_Y[j] = np.exp(-np.abs(np.sum(W * Temp_X[j,:d]))) + np.random.normal(loc = 0, scale = sigma)
Output_Y.append(Temp_Y)
#%% Cost Generation
Output_C = list()
Output_G = list()
for i in range(n):
Temp = X[i].copy()
Temp_Y = Output_Y[i]
Temp_G = X[i].copy()
for j in range(t):
if (Temp_Y[j] < 0.5):
Temp[j,:] = np.asarray([1,1,1,1,1,1,1,1,1,1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2])
Temp_G[j,:] = np.asarray([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
else:
Temp[j,:] = eta * np.asarray([1,1,1,1,1,1,1,1,1,1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2])
Temp_G[j,:] = np.asarray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Output_C.append(Temp)
Output_G.append(Temp_G)
return Output_Y, Output_C, Output_G | alg/asac/Data_Generation_Y.py |
import numpy as np
#%% Corresponding Y generation
def Syn_Generation_Y1(X, W, sigma):
# No of samples
n = len(X)
t = len(X[0][:,0])
# Initialization
Output_Y = list()
for i in range(n):
Temp_X = X[i]
Temp_Y = np.zeros([t,])
for j in range(t):
Temp_Y[j] = np.exp(-np.abs(np.sum(W * Temp_X[j,:]))) + np.random.normal(loc = 0, scale = sigma)
Output_Y.append(Temp_Y)
return Output_Y
#%% Corresponding Y generation
def Syn_Generation_Y2(X, W, sigma):
# No of samples
n = len(X)
t = len(X[0][:,0])
d = int(len(X[0][0,:])/2)
# Initialization
Output_Y = list()
for i in range(n):
Temp_X = X[i]
Temp_Y = np.zeros([t,])
for j in range(t):
Temp_Y[j] = np.exp(-np.abs(np.sum(W * Temp_X[j,:d]))) + np.random.normal(loc = 0, scale = sigma)
Output_Y.append(Temp_Y)
return Output_Y
#%% Corresponding Y generation
def Syn_Generation_Y3(X, W, sigma, eta):
# No of samples
n = len(X)
t = len(X[0][:,0])
d = int(len(X[0][0,:])/2)
# Initialization
Output_Y = list()
for i in range(n):
Temp_X = X[i]
Temp_Y = np.zeros([t,])
for j in range(t):
Temp_Y[j] = np.exp(-np.abs(np.sum(W * Temp_X[j,:d]))) + np.random.normal(loc = 0, scale = sigma)
Output_Y.append(Temp_Y)
#%% Cost Generation
Output_C = list()
Output_G = list()
for i in range(n):
Temp = X[i].copy()
Temp_Y = Output_Y[i]
Temp_G = X[i].copy()
for j in range(t):
if (Temp_Y[j] < 0.5):
Temp[j,:] = np.asarray([1,1,1,1,1,1,1,1,1,1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2])
Temp_G[j,:] = np.asarray([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
else:
Temp[j,:] = eta * np.asarray([1,1,1,1,1,1,1,1,1,1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2])
Temp_G[j,:] = np.asarray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
Output_C.append(Temp)
Output_G.append(Temp_G)
return Output_Y, Output_C, Output_G | 0.276886 | 0.473353 |
from perceptron import *
import numpy as np
import copy
import time
import random
def rastrigin(x,y):
return 20+x**2-10*np.cos(2*np.pi*x)+y**2-10*np.cos(2*np.pi*y)
np.random.seed(7)
with open('training_data.csv','w') as f:
for i in range(1000):
x=np.random.sample()*4-2
y=np.random.sample()*4-2
print('{0},{1},{2}'.format(x,y,rastrigin(x,y)),file=f)
with open('test_data.csv','w') as f:
for i in range(1000):
x=np.random.sample()*4-2
y=np.random.sample()*4-2
print('{0},{1},{2}'.format(x,y,rastrigin(x,y)),file=f)
np.random.seed(7)
dataSet=np.loadtxt('training_data.csv',delimiter=',')
inputData = dataSet[:,0:2]
expected = dataSet[:,2]
testDataSet=np.loadtxt('test_data.csv',delimiter=',')
inputTestData = testDataSet[:,0:2]
expectedTestData = testDataSet[:,2]
INTERN_LAYERS=2
LAYERS=[20,10]
ACTIV_FUNCS=[Sigm()(1) for x in range(INTERN_LAYERS)]
ACTIV_FUNC_DERIVS=[Sigm().derivative(1) for x in range(INTERN_LAYERS)]
LEARN_RATES=[0.01 for x in range(INTERN_LAYERS)]
WEIGHTS=[None for x in range(INTERN_LAYERS)]
BIASES=[0.0 for x in range(INTERN_LAYERS)]
multOrig=Multilayer(
[2,*LAYERS,1],
[ident,*ACTIV_FUNCS,ident],
[zero,*ACTIV_FUNC_DERIVS,one],
[[1.0],*WEIGHTS,[1.0 for x in range(5)]],
[0.0,*LEARN_RATES,0.01],
[0.0,*BIASES,0.0]
)
error=1.0
while error>0.0001:
multilayer=copy.deepcopy(multOrig)
i=0
start=time.clock()
run=True
while(run):
samples=list(zip(inputData,expected))
while(run and samples):
inp=samples.pop(np.random.randint(0,len(samples)))
multilayer.learn(inp[0],[inp[1]])
results=[]
for inp in inputData:
results.extend(multilayer.process(inp))
error=MSE(results,expected)
if error<0.0001:
run=False
i+=1
print("{0:9};{1: 8.5f}".format(i,time.clock()-start),error,sep=';') | 03-siec/siec.py | from perceptron import *
import numpy as np
import copy
import time
import random
def rastrigin(x,y):
return 20+x**2-10*np.cos(2*np.pi*x)+y**2-10*np.cos(2*np.pi*y)
np.random.seed(7)
with open('training_data.csv','w') as f:
for i in range(1000):
x=np.random.sample()*4-2
y=np.random.sample()*4-2
print('{0},{1},{2}'.format(x,y,rastrigin(x,y)),file=f)
with open('test_data.csv','w') as f:
for i in range(1000):
x=np.random.sample()*4-2
y=np.random.sample()*4-2
print('{0},{1},{2}'.format(x,y,rastrigin(x,y)),file=f)
np.random.seed(7)
dataSet=np.loadtxt('training_data.csv',delimiter=',')
inputData = dataSet[:,0:2]
expected = dataSet[:,2]
testDataSet=np.loadtxt('test_data.csv',delimiter=',')
inputTestData = testDataSet[:,0:2]
expectedTestData = testDataSet[:,2]
INTERN_LAYERS=2
LAYERS=[20,10]
ACTIV_FUNCS=[Sigm()(1) for x in range(INTERN_LAYERS)]
ACTIV_FUNC_DERIVS=[Sigm().derivative(1) for x in range(INTERN_LAYERS)]
LEARN_RATES=[0.01 for x in range(INTERN_LAYERS)]
WEIGHTS=[None for x in range(INTERN_LAYERS)]
BIASES=[0.0 for x in range(INTERN_LAYERS)]
multOrig=Multilayer(
[2,*LAYERS,1],
[ident,*ACTIV_FUNCS,ident],
[zero,*ACTIV_FUNC_DERIVS,one],
[[1.0],*WEIGHTS,[1.0 for x in range(5)]],
[0.0,*LEARN_RATES,0.01],
[0.0,*BIASES,0.0]
)
error=1.0
while error>0.0001:
multilayer=copy.deepcopy(multOrig)
i=0
start=time.clock()
run=True
while(run):
samples=list(zip(inputData,expected))
while(run and samples):
inp=samples.pop(np.random.randint(0,len(samples)))
multilayer.learn(inp[0],[inp[1]])
results=[]
for inp in inputData:
results.extend(multilayer.process(inp))
error=MSE(results,expected)
if error<0.0001:
run=False
i+=1
print("{0:9};{1: 8.5f}".format(i,time.clock()-start),error,sep=';') | 0.196942 | 0.182353 |
import numpy as np
def lsfd(lambdak, f, frf):
"""
LSFD (Least-Squares Frequency domain) method is used in order
to determine the residues and mode shapes from complex natural frquencies
and the measured frequency response functions.
:param lambdak: a vector of selected complex natural frequencies
:param f: frequecy vector
:param frf: frequency response functions
:return: reconstructed FRF, modal constant(residue), lower residual, upper residual
"""
ni = frf.shape[0] # number of references
no = frf.shape[1] # number of responses
n = frf.shape[2] # length of frequency vector
nmodes = lambdak.shape[0] # number of modes
omega = 2 * np.pi * f # angular frequency
# Factors in the freqeuncy response function
b = 1 / np.subtract.outer(1j * omega, lambdak).T
c = 1 / np.subtract.outer(1j * omega, np.conj(lambdak)).T
# Separate complex data to real and imaginary part
hr = frf.real
hi = frf.imag
br = b.real
bi = b.imag
cr = c.real
ci = c.imag
# Stack the data together in order to obtain 2D matrix
hri = np.dstack((hr, hi))
bri = np.hstack((br+cr, bi+ci))
cri = np.hstack((-bi+ci, br-cr))
ur_multiplyer = np.ones(n)
ur_zeros = np.zeros(n)
lr_multiplyer = -1/(omega**2)
urr = np.hstack((ur_multiplyer, ur_zeros))
uri = np.hstack((ur_zeros, ur_multiplyer))
lrr = np.hstack((lr_multiplyer, ur_zeros))
lri = np.hstack((ur_zeros, lr_multiplyer))
bcri = np.vstack((bri, cri, urr, uri, lrr, lri))
# Reshape 3D array to 2D for least squares coputation
hri = hri.reshape(ni*no, 2*n)
# Compute the modal constants (residuals) and upper and lower residuals
uv, _, _, _ = np.linalg.lstsq(bcri.T,hri.T)
# Reshape 2D results to 3D
uv = uv.T.reshape(ni, no, 2*nmodes+4)
u = uv[:, :, :nmodes]
v = uv[:, :, nmodes:-4]
urr = uv[:, :, -4]
uri = uv[:, :, -3]
lrr = uv[:, :, -2]
lri = uv[:, :, -1]
a = u + 1j * v # Modal constant (residue)
ur = urr + 1j * uri # Upper residual
lr = lrr + 1j * lri # Lower residual
# Reconstructed FRF matrix
h = np.dot(uv, bcri)
h = h[:, :, :n] + 1j * h[:, :, n:]
return h, a, lr, ur | OpenModal/analysis/lsfd.py |
import numpy as np
def lsfd(lambdak, f, frf):
"""
LSFD (Least-Squares Frequency domain) method is used in order
to determine the residues and mode shapes from complex natural frquencies
and the measured frequency response functions.
:param lambdak: a vector of selected complex natural frequencies
:param f: frequecy vector
:param frf: frequency response functions
:return: reconstructed FRF, modal constant(residue), lower residual, upper residual
"""
ni = frf.shape[0] # number of references
no = frf.shape[1] # number of responses
n = frf.shape[2] # length of frequency vector
nmodes = lambdak.shape[0] # number of modes
omega = 2 * np.pi * f # angular frequency
# Factors in the freqeuncy response function
b = 1 / np.subtract.outer(1j * omega, lambdak).T
c = 1 / np.subtract.outer(1j * omega, np.conj(lambdak)).T
# Separate complex data to real and imaginary part
hr = frf.real
hi = frf.imag
br = b.real
bi = b.imag
cr = c.real
ci = c.imag
# Stack the data together in order to obtain 2D matrix
hri = np.dstack((hr, hi))
bri = np.hstack((br+cr, bi+ci))
cri = np.hstack((-bi+ci, br-cr))
ur_multiplyer = np.ones(n)
ur_zeros = np.zeros(n)
lr_multiplyer = -1/(omega**2)
urr = np.hstack((ur_multiplyer, ur_zeros))
uri = np.hstack((ur_zeros, ur_multiplyer))
lrr = np.hstack((lr_multiplyer, ur_zeros))
lri = np.hstack((ur_zeros, lr_multiplyer))
bcri = np.vstack((bri, cri, urr, uri, lrr, lri))
# Reshape 3D array to 2D for least squares coputation
hri = hri.reshape(ni*no, 2*n)
# Compute the modal constants (residuals) and upper and lower residuals
uv, _, _, _ = np.linalg.lstsq(bcri.T,hri.T)
# Reshape 2D results to 3D
uv = uv.T.reshape(ni, no, 2*nmodes+4)
u = uv[:, :, :nmodes]
v = uv[:, :, nmodes:-4]
urr = uv[:, :, -4]
uri = uv[:, :, -3]
lrr = uv[:, :, -2]
lri = uv[:, :, -1]
a = u + 1j * v # Modal constant (residue)
ur = urr + 1j * uri # Upper residual
lr = lrr + 1j * lri # Lower residual
# Reconstructed FRF matrix
h = np.dot(uv, bcri)
h = h[:, :, :n] + 1j * h[:, :, n:]
return h, a, lr, ur | 0.903598 | 0.723505 |
from __future__ import unicode_literals
import os
import threading
import time
import uuid
import xml.etree.ElementTree as ET
from django.contrib import auth
from django.contrib import messages
from django.contrib.auth.models import User
from django.core import signing
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response, HttpResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_protect
from easy_pdf.views import render_to_pdf_response
from selenium import webdriver
from stronghold.decorators import public
from archerysettings import load_settings, save_settings
from networkscanners.models import scan_save_db
from projects.models import project_db
from scanners.scanner_parser.web_scanner import zap_xml_parser, \
arachni_xml_parser, netsparker_xml_parser, webinspect_xml_parser, acunetix_xml_parser
from scanners.scanner_plugin.web_scanner import burp_plugin
from scanners.scanner_plugin.web_scanner import zap_plugin
from webscanners.models import zap_scan_results_db, \
zap_scans_db, \
zap_spider_db, \
zap_spider_results, \
cookie_db, excluded_db, \
burp_scan_db, burp_scan_result_db, \
arachni_scan_db, arachni_scan_result_db, \
task_schedule_db, \
acunetix_scan_db, acunetix_scan_result_db
from background_task import background
from datetime import datetime
from background_task.models import Task
import os
from jiraticketing.models import jirasetting
from webscanners.models import netsparker_scan_db, \
netsparker_scan_result_db, \
webinspect_scan_db, \
webinspect_scan_result_db
from webscanners.zapscanner.views import launch_zap_scan
from archerysettings.models import zap_settings_db, burp_setting_db, openvas_setting_db, nmap_vulners_setting_db
import hashlib
# Path of the JSON file that stores Archery's scanner/API settings.
setting_file = os.getcwd() + '/' + 'apidata.json'
# All global variable
# NOTE(review): these module-level names are mutated from several views via
# ``global`` statements and therefore act as process-wide shared state;
# they are not safe under concurrent requests -- confirm before refactoring.
spider_status = "0"
scans_status = "0"
spider_alert = ""
target_url = ""
driver = ""
new_uri = ""
cookies = ""
excluded_url = ""
vul_col = ""
note = ""
rtt = ""
tags = ""
timestamp = ""
responseHeader = ""
requestBody = ""
responseBody = ""
requestHeader = ""
cookieParams = ""
res_type = ""
res_id = ""
alert = ""
project_id = None
# NOTE(review): ``target_url`` is re-assigned here (previously "") -- the
# second binding wins at import time.
target_url = None
scan_ip = None
burp_status = 0
serialNumber = ""
types = ""
name = ""
host = ""
path = ""
location = ""
severity = ""
confidence = ""
issueBackground = ""
remediationBackground = ""
references = ""
vulnerabilityClassifications = ""
issueDetail = ""
requestresponse = ""
vuln_id = ""
methods = ""
dec_res = ""
dec_req = ""
decd_req = ""
scanner = ""
all_scan_url = ""
all_url_vuln = ""
zap_apikey = None
zap_host = None
zap_port = None
# Login View
@public
@csrf_protect
def login(request):
    """
    Render the login page.

    :param request: incoming HTTP request.
    :return: rendered ``login.html`` response.
    """
    c = {}
    # NOTE(review): dict.update() is called with the request object itself;
    # the usual Django pattern is ``c.update(csrf(request))`` -- confirm
    # this call actually works as intended before relying on it.
    c.update(request)
    return render(request, "login.html", c)
@public
def auth_view(request):
    """
    Authenticate the submitted credentials and start a session.

    Redirects to the index page whether or not authentication succeeds.
    """
    creds = {
        'username': request.POST.get('username', '', ),
        'password': request.POST.get('password', '', ),
    }
    user = auth.authenticate(**creds)
    if user is not None:
        auth.login(request, user)
    # Same destination on success and failure.
    return HttpResponseRedirect('/')
@public
def logout(request):
    """Terminate the current session and show the logout page."""
    template = "logout.html"
    auth.logout(request)
    return render_to_response(template)
@public
def signup(request):
    """
    Register a new user account.

    On POST, create the user from the submitted credentials and redirect
    to the login page; otherwise render the signup form.
    """
    if request.method != 'POST':
        return render(request, 'signup.html')
    username = request.POST.get('username')
    password = request.POST.get('password')
    email = request.POST.get('email')
    new_user = User.objects.create_user(username, email, password)
    new_user.save()
    return HttpResponseRedirect('/login/')
def loggedin(request):
    """Landing page shown once a user has logged in."""
    return render(request, 'webscanner.html')
def invalid_login():
    """Render the invalid-login error page."""
    return render_to_response('invalid_login.html')
def index(request):
    """
    Render the main web-scanner dashboard.

    Collects every stored spider/scan/cookie/exclusion record plus the
    module-level spider and scan status flags into one template context.
    """
    context = {
        'all_urls': zap_spider_db.objects.all(),
        'spider_status': spider_status,
        'scans_status': scans_status,
        'all_scans': zap_scans_db.objects.all(),
        'all_spider_results': zap_spider_results.objects.all(),
        'spider_alert': spider_alert,
        'all_excluded_url': excluded_db.objects.all(),
        'all_cookies': cookie_db.objects.all(),
        'all_scans_db': project_db.objects.all(),
    }
    return render(request, 'webscanner.html', context)
@background(schedule=60)
def task(target_url, project_id, scanner):
    """
    Background job: launch a scan for every comma-separated target.

    Each target gets its own daemon worker thread so the background
    runner is not blocked by a long scan.
    """
    rescan_id = ''
    rescan = 'No'
    for target in target_url.split(','):
        if scanner == 'zap_scan':
            worker = threading.Thread(
                target=launch_zap_scan,
                args=(target, project_id, rescan_id, rescan))
            worker.daemon = True
            worker.start()
        elif scanner == 'burp_scan':
            scan_id = uuid.uuid4()
            do_scan = burp_plugin.burp_scans(
                project_id,
                target,
                scan_id)
            worker = threading.Thread(target=do_scan.scan_launch)
            worker.daemon = True
            worker.start()
    return HttpResponse(status=200)
def web_task_launch(request):
if request.method == 'GET':
task_time = request.GET['time']
t = Task.objects.all()
# t.delete()
print task_time
for ta in t:
print ta.run_at
print ta.id
return HttpResponse(status=200)
def web_scan_schedule(request):
"""
:param request:
:return:
"""
all_scans_db = project_db.objects.all()
all_scheduled_scans = task_schedule_db.objects.all()
if request.method == 'POST':
scan_url = request.POST.get('url')
scan_schedule_time = request.POST.get('datetime')
project_id = request.POST.get('project_id')
scanner = request.POST.get('scanner')
# periodic_task = request.POST.get('periodic_task')
periodic_task_value = request.POST.get('periodic_task_value')
# periodic_task = 'Yes'
print 'scanner-', scanner
if periodic_task_value == 'HOURLY':
periodic_time = Task.HOURLY
elif periodic_task_value == 'DAILY':
periodic_time = Task.DAILY
elif periodic_task_value == 'WEEKLY':
periodic_time = Task.WEEKLY
elif periodic_task_value == 'EVERY_2_WEEKS':
periodic_time = Task.EVERY_2_WEEKS
elif periodic_task_value == 'EVERY_4_WEEKS':
periodic_time = Task.EVERY_4_WEEKS
else:
periodic_time = None
dt_str = scan_schedule_time
dt_obj = datetime.strptime(dt_str, '%d/%m/%Y %H:%M:%S %p')
print "scan_url", scan_url
print "schedule", scan_schedule_time
# task(scan_url, project_id, schedule=dt_obj)
target__split = scan_url.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
if scanner == 'zap_scan':
if periodic_task_value == 'None':
my_task = task(target, project_id, scanner, schedule=dt_obj)
task_id = my_task.id
print "Savedddddd taskid", task_id
else:
my_task = task(target, project_id, scanner, repeat=periodic_time, repeat_until=None)
task_id = my_task.id
print "Savedddddd taskid", task_id
elif scanner == 'burp_scan':
if periodic_task_value == 'None':
my_task = task(target, project_id, scanner, schedule=dt_obj)
task_id = my_task.id
else:
my_task = task(target, project_id, scanner, repeat=periodic_time, repeat_until=None)
task_id = my_task.id
print "Savedddddd taskid", task_id
save_scheadule = task_schedule_db(task_id=task_id, target=target,
schedule_time=scan_schedule_time,
project_id=project_id,
scanner=scanner,
periodic_task=periodic_task_value)
save_scheadule.save()
return render(request, 'web_scan_schedule.html',
{'all_scans_db': all_scans_db,
'all_scheduled_scans': all_scheduled_scans}
)
def del_web_scan_schedule(request):
"""
:param request:
:return:
"""
if request.method == "POST":
task_id = request.POST.get('task_id')
scan_item = str(task_id)
taskid = scan_item.replace(" ", "")
target_split = taskid.split(',')
split_length = target_split.__len__()
print "split_length", split_length
for i in range(0, split_length):
task_id = target_split.__getitem__(i)
del_task = task_schedule_db.objects.filter(task_id=task_id)
del_task.delete()
del_task_schedule = Task.objects.filter(id=task_id)
del_task_schedule.delete()
return HttpResponseRedirect('/webscanners/web_scan_schedule')
def setting(request):
    """
    Render the settings page with the current scanner, e-mail and JIRA
    configuration loaded from the settings file and the database.

    NOTE(review): the ``<PASSWORD>`` tokens below are redaction
    placeholders left by a data-extraction step; they are not valid Python
    and the original expressions must be restored from upstream.

    :param request:
    :return:
    """
    jira_url = None
    username = None
    password = <PASSWORD>
    # Loading settings
    settings = load_settings.ArcherySettings(setting_file)
    # Loading OpenVAS Settings
    # ov_user = settings.openvas_username()
    # ov_pass = settings.openvas_pass()
    # ov_ip = settings.openvas_host()
    lod_ov_user = settings.openvas_username()
    lod_ov_pass = settings.openvas_pass()
    lod_ov_host = settings.openvas_host()
    lod_ov_port = settings.openvas_port()
    lod_ov_enabled = settings.openvas_enabled()
    # Loading ZAP Settings
    zap_api_key = ''
    zap_hosts = ''
    zap_ports = ''
    all_zap = zap_settings_db.objects.all()
    # Last database row wins: show the most recently stored ZAP settings.
    for zap in all_zap:
        zap_api_key = zap.zap_api
        zap_hosts = zap.zap_url
        zap_ports = zap.zap_port
    lod_apikey = zap_api_key
    zap_host = zap_hosts
    zap_port = zap_ports
    # Loading NMAP Vulners Settings
    nv_enabled = False
    nv_online = False
    nv_version = False
    nv_timing = 0
    all_nv = nmap_vulners_setting_db.objects.all()
    for nv in all_nv:
        nv_enabled = bool(nv.enabled)
        nv_online = bool(nv.online)
        nv_version = bool(nv.version)
        nv_timing = int(nv.timing)
    # Loading Burp Settings
    burp_host = settings.burp_host()
    burp_port = settings.burp_port()
    # Loading Email Settings
    email_subject = settings.email_subject()
    email_from = settings.email_from()
    to_email = settings.email_to()
    # Load JIRA Setting
    jira_setting = jirasetting.objects.all()
    for jira in jira_setting:
        jira_url = jira.jira_server
        username = jira.jira_username
        password = <PASSWORD>
    jira_server = jira_url
    # Credentials are stored signed; unsign them only if present.
    if username is None:
        jira_username = None
    else:
        jira_username = signing.loads(username)
    if password is None:
        jira_password = None
    else:
        jira_password = signing.loads(password)
    return render(request, 'setting.html',
                  {'apikey': lod_apikey,
                   'zapath': zap_host,
                   'zap_port': zap_port,
                   'lod_ov_user': lod_ov_user,
                   'lod_ov_pass': lod_ov_pass,
                   'lod_ov_host': lod_ov_host,
                   'lod_ov_enabled': lod_ov_enabled,
                   'lod_ov_port': lod_ov_port,
                   'burp_path': burp_host,
                   'burp_port': burp_port,
                   'email_subject': email_subject,
                   'email_from': email_from,
                   'to_email': to_email,
                   'jira_server': jira_server,
                   'jira_username': jira_username,
                   'jira_password': <PASSWORD>,
                   'nv_enabled': nv_enabled,
                   'nv_version': nv_version,
                   'nv_online': nv_online,
                   'nv_timing': nv_timing,
                   })
def email_setting(request):
    """
    Display and, on POST, update the outbound e-mail settings.
    """
    # Writer for the JSON settings file.
    email_settings_writer = save_settings.SaveSettings(setting_file)
    if request.method == 'POST':
        email_settings_writer.save_email_settings(
            email_subject=request.POST.get("email_subject"),
            email_from=request.POST.get("from_email"),
            email_to=request.POST.get("to_email")
        )
    return render(request, 'email_setting_form.html')
def burp_setting(request):
    """
    Display and update the Burp proxy settings.

    GET renders the form pre-filled with the most recently stored
    host/port pair; POST persists a new pair and redirects back to the
    settings page.

    :param request: incoming HTTP request.
    :return: rendered form, or a redirect after saving.
    """
    # Fix: the previous version declared these names ``global`` *after*
    # assigning them locally, which is invalid (SyntaxWarning) and
    # needlessly leaked view state into module globals. They are plain
    # locals now; nothing else reads module-level copies.
    burp_url = None
    burp_port = None
    all_burp_setting = burp_setting_db.objects.all()
    # Last row wins: show the most recently stored configuration.
    for data in all_burp_setting:
        burp_url = data.burp_url
        burp_port = data.burp_port
    if request.method == 'POST':
        burphost = request.POST.get("burpath")
        burport = request.POST.get("burport")
        save_burp_settings = burp_setting_db(burp_url=burphost, burp_port=burport)
        save_burp_settings.save()
        return HttpResponseRedirect('/webscanners/setting/')
    return render(request, 'burp_setting_form.html', {'burp_url': burp_url, 'burp_port': burp_port})
def burp_scan_launch(request):
"""
Burp Scan Trigger.
:param request:
:return:
"""
global vuln_id, burp_status
if request.POST.get("url"):
target_url = request.POST.get('url')
project_id = request.POST.get('project_id')
target__split = target_url.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
print "Targets", target
scan_id = uuid.uuid4()
date_time = datetime.now()
scan_dump = burp_scan_db(scan_id=scan_id,
project_id=project_id,
url=target,
date_time=date_time)
scan_dump.save()
try:
do_scan = burp_plugin.burp_scans(
project_id,
target,
scan_id)
# do_scan.scan_lauch(project_id,
# target,
# scan_id)
thread = threading.Thread(
target=do_scan.scan_launch,
)
thread.daemon = True
thread.start()
time.sleep(5)
except Exception as e:
print e
return render(request, 'scan_list.html')
def xml_upload(request):
"""
Handling XML upload files.
:param request:
:return:
"""
all_project = project_db.objects.all()
if request.method == "POST":
project_id = request.POST.get("project_id")
scanner = request.POST.get("scanner")
xml_file = request.FILES['xmlfile']
scan_url = request.POST.get("scan_url")
scan_id = uuid.uuid4()
scan_status = "100"
if scanner == "zap_scan":
date_time = datetime.now()
scan_dump = zap_scans_db(scan_url=scan_url,
scan_scanid=scan_id,
date_time=date_time,
project_id=project_id,
vul_status=scan_status,
rescan='No')
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
zap_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
return HttpResponseRedirect("/webscanners/scans_list/")
elif scanner == "burp_scan":
date_time = datetime.now()
scan_dump = burp_scan_db(url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status)
scan_dump.save()
# Burp scan XML parser
tree = ET.parse(xml_file)
root_xml = tree.getroot()
do_xml_data = burp_plugin.burp_scans(project_id,
target_url,
scan_id)
do_xml_data.burp_scan_data(root_xml)
print "Save scan Data"
return HttpResponseRedirect("/webscanners/burp_scan_list")
elif scanner == "arachni":
date_time = datetime.now()
scan_dump = arachni_scan_db(url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
arachni_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print "Save scan Data"
return HttpResponseRedirect("/webscanners/arachni_scan_list")
elif scanner == 'netsparker':
date_time = datetime.now()
scan_dump = netsparker_scan_db(
url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
netsparker_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print("Saved scan data")
return HttpResponseRedirect("/webscanners/netsparker_scan_list/")
elif scanner == 'webinspect':
date_time = datetime.now()
scan_dump = webinspect_scan_db(
url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
webinspect_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print("Saved scan data")
return HttpResponseRedirect("/webscanners/webinspect_scan_list/")
elif scanner == 'acunetix':
date_time = datetime.now()
scan_dump = acunetix_scan_db(
url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
acunetix_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print("Saved scan data")
return HttpResponseRedirect("/webscanners/acunetix_scan_list/")
return render(request, 'upload_xml.html', {'all_project': all_project})
def add_cookies(request):
    """
    Cookies storing into Archery Database.

    Updates the stored cookie when an exactly-matching URL already exists,
    otherwise inserts a new row.

    :param request:
    :return:
    """
    if request.method == 'POST':
        target_url = request.POST.get('url')
        target_cookies = request.POST.get('cookies')
        # Substring match to find candidate rows for this URL.
        all_cookie_url = cookie_db.objects.filter(Q(url__icontains=target_url))
        for da in all_cookie_url:
            # NOTE(review): the module-level ``cookies`` global carries the
            # last matching URL out of this loop; confirm no other view
            # depends on that side effect before refactoring.
            global cookies
            cookies = da.url
        if cookies == target_url:
            cookie_db.objects.filter(Q(url__icontains=target_url)).update(cookie=target_cookies)
            return HttpResponseRedirect("/webscanners/")
        else:
            data_dump = cookie_db(url=target_url,
                                  cookie=target_cookies)
            data_dump.save()
            return HttpResponseRedirect("/webscanners/")
    return render(request, 'cookie_add.html')
def slem(driver, url):
"""
Selenium calling function.
:param driver:
:param url:
:return:
"""
global new_uri
new_uri = url
try:
driver.get(url, )
except Exception as e:
print "Error Got !!!"
return
def save_cookie(driver):
    """
    Persist the browser's session cookies to ``cookies.txt``.

    :param driver: selenium webdriver whose cookies are captured; the
        browser window is closed afterwards.
    :return: redirect response to the ZAP scanner page.
    """
    all_cookies = driver.get_cookies()
    # "name=value;" pairs, concatenated in cookie-header order.
    pairs = [cookie['name'] + '=' + cookie['value'] + ';'
             for cookie in all_cookies]
    # Fix: use a context manager so the file handle is closed even if a
    # write fails (the previous version leaked the handle on error).
    with open('cookies.txt', 'w+') as f:
        f.write(''.join(pairs))
    driver.close()
    return HttpResponseRedirect('/zapscanner/')
def cookies_list(request):
    """Render the list of all stored cookies."""
    context = {'all_cookies': cookie_db.objects.all()}
    return render(request, 'cookies_list.html', context)
def del_cookies(request):
if request.method == 'POST':
# cookie_id = request.POST.get('id')
cookie_url = request.POST.get('url')
cookies_item = str(cookie_url)
cooki_split = cookies_item.replace(" ", "")
target_split = cooki_split.split(',')
split_length = target_split.__len__()
print "split_length", split_length
for i in range(0, split_length):
cookies_target = target_split.__getitem__(i)
print(cookies_target)
del_cookie = cookie_db.objects.filter(url=cookies_target)
del_cookie.delete()
zap_plugin.zap_replacer(target_url=cookies_target)
return HttpResponseRedirect('/zapscanner/')
return render(request, 'cookies_list.html')
def sel_login(request):
    """
    Perform a login via Selenium and capture the resulting cookies.

    ``action == "open_page"`` launches Firefox at the submitted URL;
    ``action == "save_cookie"`` dumps the browser cookies to cookies.txt
    and stores them against the URL recorded in the module-level
    ``new_uri``.

    :param request:
    :return:
    """
    action_vul = request.POST.get("action", )
    url_da = request.POST.get("url_login", )
    # print(url_da)
    if action_vul == "open_page":
        # The driver is kept in a module global so the later
        # "save_cookie" request can reuse the same browser session.
        global driver
        driver = webdriver.Firefox()
        slem(driver, url_da)
    elif action_vul == "save_cookie":
        save_cookie(driver)
        read_f = open('cookies.txt', 'r')
        for cookie_data in read_f:
            # cookie_save = cookie_db(url=new_uri, cookie=cookie_data)
            # cookie_save.save()
            # target_url = request.POST.get('url')
            # target_cookies = request.POST.get('cookies')
            print(cookie_data)
            all_cookie_url = cookie_db.objects.filter(Q(url__icontains=new_uri))
            for da in all_cookie_url:
                global cookies
                cookies = da.url
            # NOTE(review): both branches below return on the first line of
            # the cookie file, so the success message further down is
            # unreachable on this path and ``read_f`` is never closed --
            # confirm intended.
            if cookies == new_uri:
                cookie_db.objects.filter(Q(url__icontains=new_uri)).update(cookie=cookie_data)
                return HttpResponseRedirect("/zapscanner/")
            else:
                data_dump = cookie_db(url=new_uri,
                                      cookie=cookie_data)
                data_dump.save()
                return HttpResponseRedirect("/zapscanner/")
        messages.add_message(request, messages.SUCCESS, 'Cookies stored')
        return HttpResponseRedirect('/zapscanner/')
    return render(request, 'webscanner.html')
def exclude_url(request):
    """
    Persist a URL that scanners should skip.
    """
    exclud = request.POST.get("exclude_url", )
    excluded_row = excluded_db(exclude_url=exclud)
    excluded_row.save()
    return render(request, 'webscanner.html', )
def exluded_url_list(request):
    """
    List the excluded URLs; on POST delete each comma-separated URL given.

    :param request:
    :return:
    """
    all_excluded_url = excluded_db.objects.all()
    if request.method == 'POST':
        exclude_url = request.POST.get('exclude_url')
        exluded_item = str(exclude_url)
        # Strip spaces so "a, b" and "a,b" are treated the same.
        exclude_split = exluded_item.replace(" ", "")
        target_split = exclude_split.split(',')
        split_length = target_split.__len__()
        for i in range(0, split_length):
            exclude_target = target_split.__getitem__(i)
            del_excluded = excluded_db.objects.filter(exclude_url=exclude_target)
            del_excluded.delete()
        return HttpResponseRedirect('/zapscanner/excluded_url_list')
    # NOTE(review): the trailing "| webscanners/web_views.py |" on the next
    # line is dataset-extraction residue fused onto the source, not code.
    return render(request, 'excludedurl_list.html', {'all_excluded_url': all_excluded_url}) | webscanners/web_views.py |
from __future__ import unicode_literals
import os
import threading
import time
import uuid
import xml.etree.ElementTree as ET
from django.contrib import auth
from django.contrib import messages
from django.contrib.auth.models import User
from django.core import signing
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response, HttpResponse
from django.utils import timezone
from django.views.decorators.csrf import csrf_protect
from easy_pdf.views import render_to_pdf_response
from selenium import webdriver
from stronghold.decorators import public
from archerysettings import load_settings, save_settings
from networkscanners.models import scan_save_db
from projects.models import project_db
from scanners.scanner_parser.web_scanner import zap_xml_parser, \
arachni_xml_parser, netsparker_xml_parser, webinspect_xml_parser, acunetix_xml_parser
from scanners.scanner_plugin.web_scanner import burp_plugin
from scanners.scanner_plugin.web_scanner import zap_plugin
from webscanners.models import zap_scan_results_db, \
zap_scans_db, \
zap_spider_db, \
zap_spider_results, \
cookie_db, excluded_db, \
burp_scan_db, burp_scan_result_db, \
arachni_scan_db, arachni_scan_result_db, \
task_schedule_db, \
acunetix_scan_db, acunetix_scan_result_db
from background_task import background
from datetime import datetime
from background_task.models import Task
import os
from jiraticketing.models import jirasetting
from webscanners.models import netsparker_scan_db, \
netsparker_scan_result_db, \
webinspect_scan_db, \
webinspect_scan_result_db
from webscanners.zapscanner.views import launch_zap_scan
from archerysettings.models import zap_settings_db, burp_setting_db, openvas_setting_db, nmap_vulners_setting_db
import hashlib
setting_file = os.getcwd() + '/' + 'apidata.json'
# All global variable
spider_status = "0"
scans_status = "0"
spider_alert = ""
target_url = ""
driver = ""
new_uri = ""
cookies = ""
excluded_url = ""
vul_col = ""
note = ""
rtt = ""
tags = ""
timestamp = ""
responseHeader = ""
requestBody = ""
responseBody = ""
requestHeader = ""
cookieParams = ""
res_type = ""
res_id = ""
alert = ""
project_id = None
target_url = None
scan_ip = None
burp_status = 0
serialNumber = ""
types = ""
name = ""
host = ""
path = ""
location = ""
severity = ""
confidence = ""
issueBackground = ""
remediationBackground = ""
references = ""
vulnerabilityClassifications = ""
issueDetail = ""
requestresponse = ""
vuln_id = ""
methods = ""
dec_res = ""
dec_req = ""
decd_req = ""
scanner = ""
all_scan_url = ""
all_url_vuln = ""
zap_apikey = None
zap_host = None
zap_port = None
# Login View
@public
@csrf_protect
def login(request):
"""
Login Request
:param request:
:return:
"""
c = {}
c.update(request)
return render(request, "login.html", c)
@public
def auth_view(request):
"""
Authentication request.
:param request:
:return:
"""
username = request.POST.get('username', '', )
password = request.POST.get('password', '', )
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
@public
def logout(request):
"""
Logout request
:param request:
:return:
"""
auth.logout(request)
return render_to_response("logout.html")
@public
def signup(request):
"""
Signup Request.
:param request:
:return:
"""
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
email = request.POST.get('email')
user = User.objects.create_user(username, email, password)
user.save()
return HttpResponseRedirect('/login/')
return render(request,
'signup.html')
def loggedin(request):
"""
After login request.
:param request:
:return:
"""
return render(request, 'webscanner.html')
def invalid_login():
"""
Validate user login.
:return:
"""
return render_to_response('invalid_login.html')
def index(request):
"""
The function calling web scan Page.
:param request:
:return:
"""
all_urls = zap_spider_db.objects.all()
all_scans = zap_scans_db.objects.all()
all_spider_results = zap_spider_results.objects.all()
all_excluded_url = excluded_db.objects.all()
all_cookies = cookie_db.objects.all()
all_scans_db = project_db.objects.all()
return render(request,
'webscanner.html',
{
'all_urls': all_urls,
'spider_status': spider_status,
'scans_status': scans_status,
'all_scans': all_scans,
'all_spider_results': all_spider_results,
'spider_alert': spider_alert,
'all_excluded_url': all_excluded_url,
'all_cookies': all_cookies,
'all_scans_db': all_scans_db
}
)
@background(schedule=60)
def task(target_url, project_id, scanner):
rescan_id = ''
rescan = 'No'
target__split = target_url.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
if scanner == 'zap_scan':
thread = threading.Thread(
target=launch_zap_scan,
args=(target, project_id, rescan_id, rescan))
thread.daemon = True
thread.start()
elif scanner == 'burp_scan':
scan_id = uuid.uuid4()
do_scan = burp_plugin.burp_scans(
project_id,
target,
scan_id)
thread = threading.Thread(
target=do_scan.scan_launch,
)
thread.daemon = True
thread.start()
return HttpResponse(status=200)
def web_task_launch(request):
if request.method == 'GET':
task_time = request.GET['time']
t = Task.objects.all()
# t.delete()
print task_time
for ta in t:
print ta.run_at
print ta.id
return HttpResponse(status=200)
def web_scan_schedule(request):
"""
:param request:
:return:
"""
all_scans_db = project_db.objects.all()
all_scheduled_scans = task_schedule_db.objects.all()
if request.method == 'POST':
scan_url = request.POST.get('url')
scan_schedule_time = request.POST.get('datetime')
project_id = request.POST.get('project_id')
scanner = request.POST.get('scanner')
# periodic_task = request.POST.get('periodic_task')
periodic_task_value = request.POST.get('periodic_task_value')
# periodic_task = 'Yes'
print 'scanner-', scanner
if periodic_task_value == 'HOURLY':
periodic_time = Task.HOURLY
elif periodic_task_value == 'DAILY':
periodic_time = Task.DAILY
elif periodic_task_value == 'WEEKLY':
periodic_time = Task.WEEKLY
elif periodic_task_value == 'EVERY_2_WEEKS':
periodic_time = Task.EVERY_2_WEEKS
elif periodic_task_value == 'EVERY_4_WEEKS':
periodic_time = Task.EVERY_4_WEEKS
else:
periodic_time = None
dt_str = scan_schedule_time
dt_obj = datetime.strptime(dt_str, '%d/%m/%Y %H:%M:%S %p')
print "scan_url", scan_url
print "schedule", scan_schedule_time
# task(scan_url, project_id, schedule=dt_obj)
target__split = scan_url.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
if scanner == 'zap_scan':
if periodic_task_value == 'None':
my_task = task(target, project_id, scanner, schedule=dt_obj)
task_id = my_task.id
print "Savedddddd taskid", task_id
else:
my_task = task(target, project_id, scanner, repeat=periodic_time, repeat_until=None)
task_id = my_task.id
print "Savedddddd taskid", task_id
elif scanner == 'burp_scan':
if periodic_task_value == 'None':
my_task = task(target, project_id, scanner, schedule=dt_obj)
task_id = my_task.id
else:
my_task = task(target, project_id, scanner, repeat=periodic_time, repeat_until=None)
task_id = my_task.id
print "Savedddddd taskid", task_id
save_scheadule = task_schedule_db(task_id=task_id, target=target,
schedule_time=scan_schedule_time,
project_id=project_id,
scanner=scanner,
periodic_task=periodic_task_value)
save_scheadule.save()
return render(request, 'web_scan_schedule.html',
{'all_scans_db': all_scans_db,
'all_scheduled_scans': all_scheduled_scans}
)
def del_web_scan_schedule(request):
"""
:param request:
:return:
"""
if request.method == "POST":
task_id = request.POST.get('task_id')
scan_item = str(task_id)
taskid = scan_item.replace(" ", "")
target_split = taskid.split(',')
split_length = target_split.__len__()
print "split_length", split_length
for i in range(0, split_length):
task_id = target_split.__getitem__(i)
del_task = task_schedule_db.objects.filter(task_id=task_id)
del_task.delete()
del_task_schedule = Task.objects.filter(id=task_id)
del_task_schedule.delete()
return HttpResponseRedirect('/webscanners/web_scan_schedule')
def setting(request):
"""
The function calling setting page.
:param request:
:return:
"""
jira_url = None
username = None
password = <PASSWORD>
# Loading settings
settings = load_settings.ArcherySettings(setting_file)
# Loading OpenVAS Settings
# ov_user = settings.openvas_username()
# ov_pass = settings.openvas_pass()
# ov_ip = settings.openvas_host()
lod_ov_user = settings.openvas_username()
lod_ov_pass = settings.openvas_pass()
lod_ov_host = settings.openvas_host()
lod_ov_port = settings.openvas_port()
lod_ov_enabled = settings.openvas_enabled()
# Loading ZAP Settings
zap_api_key = ''
zap_hosts = ''
zap_ports = ''
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
zap_api_key = zap.zap_api
zap_hosts = zap.zap_url
zap_ports = zap.zap_port
lod_apikey = zap_api_key
zap_host = zap_hosts
zap_port = zap_ports
# Loading NMAP Vulners Settings
nv_enabled = False
nv_online = False
nv_version = False
nv_timing = 0
all_nv = nmap_vulners_setting_db.objects.all()
for nv in all_nv:
nv_enabled = bool(nv.enabled)
nv_online = bool(nv.online)
nv_version = bool(nv.version)
nv_timing = int(nv.timing)
# Loading Burp Settings
burp_host = settings.burp_host()
burp_port = settings.burp_port()
# Loading Email Settings
email_subject = settings.email_subject()
email_from = settings.email_from()
to_email = settings.email_to()
# Load JIRA Setting
jira_setting = jirasetting.objects.all()
for jira in jira_setting:
jira_url = jira.jira_server
username = jira.jira_username
password = <PASSWORD>
jira_server = jira_url
if username is None:
jira_username = None
else:
jira_username = signing.loads(username)
if password is None:
jira_password = None
else:
jira_password = signing.loads(password)
return render(request, 'setting.html',
{'apikey': lod_apikey,
'zapath': zap_host,
'zap_port': zap_port,
'lod_ov_user': lod_ov_user,
'lod_ov_pass': lod_ov_pass,
'lod_ov_host': lod_ov_host,
'lod_ov_enabled': lod_ov_enabled,
'lod_ov_port': lod_ov_port,
'burp_path': burp_host,
'burp_port': burp_port,
'email_subject': email_subject,
'email_from': email_from,
'to_email': to_email,
'jira_server': jira_server,
'jira_username': jira_username,
'jira_password': <PASSWORD>,
'nv_enabled': nv_enabled,
'nv_version': nv_version,
'nv_online': nv_online,
'nv_timing': nv_timing,
})
def email_setting(request):
"""
The function calling and updating Email Settings.
:param request:
:return:
"""
# Load Email Setting function
save_email_setting = save_settings.SaveSettings(setting_file)
if request.method == 'POST':
subject = request.POST.get("email_subject")
from_email = request.POST.get("from_email")
email_to = request.POST.get("to_email")
save_email_setting.save_email_settings(
email_subject=subject,
email_from=from_email,
email_to=email_to
)
return render(request, 'email_setting_form.html')
def burp_setting(request):
"""
Load Burp Settings.
:param request:
:return:
"""
burp_url = None
burp_port = None
all_burp_setting = burp_setting_db.objects.all()
for data in all_burp_setting:
global burp_url, burp_port
burp_url = data.burp_url
burp_port = data.burp_port
if request.method == 'POST':
burphost = request.POST.get("burpath")
burport = request.POST.get("burport")
save_burp_settings = burp_setting_db(burp_url=burphost, burp_port=burport)
save_burp_settings.save()
return HttpResponseRedirect('/webscanners/setting/')
return render(request, 'burp_setting_form.html', {'burp_url': burp_url, 'burp_port': burp_port})
def burp_scan_launch(request):
"""
Burp Scan Trigger.
:param request:
:return:
"""
global vuln_id, burp_status
if request.POST.get("url"):
target_url = request.POST.get('url')
project_id = request.POST.get('project_id')
target__split = target_url.split(',')
split_length = target__split.__len__()
for i in range(0, split_length):
target = target__split.__getitem__(i)
print "Targets", target
scan_id = uuid.uuid4()
date_time = datetime.now()
scan_dump = burp_scan_db(scan_id=scan_id,
project_id=project_id,
url=target,
date_time=date_time)
scan_dump.save()
try:
do_scan = burp_plugin.burp_scans(
project_id,
target,
scan_id)
# do_scan.scan_lauch(project_id,
# target,
# scan_id)
thread = threading.Thread(
target=do_scan.scan_launch,
)
thread.daemon = True
thread.start()
time.sleep(5)
except Exception as e:
print e
return render(request, 'scan_list.html')
def xml_upload(request):
"""
Handling XML upload files.
:param request:
:return:
"""
all_project = project_db.objects.all()
if request.method == "POST":
project_id = request.POST.get("project_id")
scanner = request.POST.get("scanner")
xml_file = request.FILES['xmlfile']
scan_url = request.POST.get("scan_url")
scan_id = uuid.uuid4()
scan_status = "100"
if scanner == "zap_scan":
date_time = datetime.now()
scan_dump = zap_scans_db(scan_url=scan_url,
scan_scanid=scan_id,
date_time=date_time,
project_id=project_id,
vul_status=scan_status,
rescan='No')
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
zap_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
return HttpResponseRedirect("/webscanners/scans_list/")
elif scanner == "burp_scan":
date_time = datetime.now()
scan_dump = burp_scan_db(url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status)
scan_dump.save()
# Burp scan XML parser
tree = ET.parse(xml_file)
root_xml = tree.getroot()
do_xml_data = burp_plugin.burp_scans(project_id,
target_url,
scan_id)
do_xml_data.burp_scan_data(root_xml)
print "Save scan Data"
return HttpResponseRedirect("/webscanners/burp_scan_list")
elif scanner == "arachni":
date_time = datetime.now()
scan_dump = arachni_scan_db(url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
arachni_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print "Save scan Data"
return HttpResponseRedirect("/webscanners/arachni_scan_list")
elif scanner == 'netsparker':
date_time = datetime.now()
scan_dump = netsparker_scan_db(
url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
netsparker_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print("Saved scan data")
return HttpResponseRedirect("/webscanners/netsparker_scan_list/")
elif scanner == 'webinspect':
date_time = datetime.now()
scan_dump = webinspect_scan_db(
url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
webinspect_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print("Saved scan data")
return HttpResponseRedirect("/webscanners/webinspect_scan_list/")
elif scanner == 'acunetix':
date_time = datetime.now()
scan_dump = acunetix_scan_db(
url=scan_url,
scan_id=scan_id,
date_time=date_time,
project_id=project_id,
scan_status=scan_status
)
scan_dump.save()
tree = ET.parse(xml_file)
root_xml = tree.getroot()
acunetix_xml_parser.xml_parser(project_id=project_id,
scan_id=scan_id,
root=root_xml)
print("Saved scan data")
return HttpResponseRedirect("/webscanners/acunetix_scan_list/")
return render(request, 'upload_xml.html', {'all_project': all_project})
def add_cookies(request):
    """
    Store or update cookies for a target URL in the Archery database.

    POST params:
        url     -- target URL the cookies belong to.
        cookies -- raw cookie string to store.

    Updates the existing row when the URL is already known, otherwise
    inserts a new one.  Redirects to /webscanners/ after a POST and
    renders the add form on GET.

    :param request: Django request object.
    :return: HttpResponseRedirect or rendered cookie_add.html.
    """
    if request.method == 'POST':
        target_url = request.POST.get('url')
        target_cookies = request.POST.get('cookies')
        # Direct existence check replaces the old loop that leaked state
        # through a module-level global and only honoured the last
        # icontains match.
        if cookie_db.objects.filter(url=target_url).exists():
            cookie_db.objects.filter(url=target_url).update(cookie=target_cookies)
        else:
            data_dump = cookie_db(url=target_url,
                                  cookie=target_cookies)
            data_dump.save()
        return HttpResponseRedirect("/webscanners/")
    return render(request, 'cookie_add.html')
def slem(driver, url):
    """
    Open *url* in the given Selenium driver.

    Side effect: records the URL in the module-level global ``new_uri``
    so that ``sel_login`` can later associate captured cookies with it.

    :param driver: Selenium WebDriver (anything exposing .get(url)).
    :param url: URL to open.
    :return: None
    """
    global new_uri
    new_uri = url
    try:
        driver.get(url)
    except Exception as e:
        # print() works on both Python 2 and 3; the original
        # Python-2-only print statement was a syntax error under 3,
        # and the caught exception was silently discarded.
        print("Error while opening %s: %s" % (url, e))
    return
def save_cookie(driver):
    """
    Dump all cookies from the driver's current session to cookies.txt.

    Each cookie is written as ``name=value;`` on a single line.  The
    driver window is closed afterwards.

    :param driver: Selenium WebDriver instance.
    :return: redirect to the ZAP scanner page.
    """
    all_cookies = driver.get_cookies()
    # Context manager guarantees the handle is closed even if a
    # malformed cookie raises; the old explicit open/close leaked the
    # file descriptor on error.
    with open('cookies.txt', 'w+') as f:
        for cookie in all_cookies:
            f.write(cookie['name'] + '=' + cookie['value'] + ';')
    driver.close()
    return HttpResponseRedirect('/zapscanner/')
def cookies_list(request):
    """
    Render the stored-cookies overview page.

    :param request: Django request object.
    :return: cookies_list.html populated with every cookie_db row.
    """
    stored_cookies = cookie_db.objects.all()
    return render(request, 'cookies_list.html', {'all_cookies': stored_cookies})
def del_cookies(request):
    """
    Delete stored cookies for one or more target URLs.

    Expects a POST with ``url`` holding a comma-separated URL list.
    For every URL the matching cookie_db rows are removed and the ZAP
    replacer rule for that URL is refreshed.

    :param request: Django request object.
    :return: redirect to /zapscanner/ on POST, cookies_list.html on GET.
    """
    if request.method == 'POST':
        cookie_url = request.POST.get('url')
        # Normalise "a, b ,c" -> ["a", "b", "c"]; dropping empty tokens
        # prevents a trailing comma from matching url="" rows.
        targets = [t for t in str(cookie_url).replace(" ", "").split(',') if t]
        for cookies_target in targets:
            print(cookies_target)
            cookie_db.objects.filter(url=cookies_target).delete()
            zap_plugin.zap_replacer(target_url=cookies_target)
        return HttpResponseRedirect('/zapscanner/')
    return render(request, 'cookies_list.html')
def sel_login(request):
    """
    Perform a login via Selenium and capture the session cookies.

    Two-step flow driven by the ``action`` POST field:
      * ``open_page``   -- launch Firefox and open ``url_login`` so the
        user can log in manually (driver kept in a module-level global).
      * ``save_cookie`` -- dump the driver's cookies to cookies.txt and
        store/update them in cookie_db keyed by the global ``new_uri``
        (set earlier by slem()).

    :param request: Django request object.
    :return: redirect to /zapscanner/ after saving, else webscanner.html.
    """
    action_vul = request.POST.get("action", )
    url_da = request.POST.get("url_login", )
    # print(url_da)
    if action_vul == "open_page":
        # NOTE(review): the driver is shared through a module-level
        # global so the later "save_cookie" request can reach the same
        # browser session; this is not safe for concurrent users.
        global driver
        driver = webdriver.Firefox()
        slem(driver, url_da)
    elif action_vul == "save_cookie":
        save_cookie(driver)
        read_f = open('cookies.txt', 'r')
        for cookie_data in read_f:
            # cookie_save = cookie_db(url=new_uri, cookie=cookie_data)
            # cookie_save.save()
            # target_url = request.POST.get('url')
            # target_cookies = request.POST.get('cookies')
            print(cookie_data)
            all_cookie_url = cookie_db.objects.filter(Q(url__icontains=new_uri))
            for da in all_cookie_url:
                # Remembers only the *last* matching row's URL in a global.
                global cookies
                cookies = da.url
            if cookies == new_uri:
                # Existing entry: overwrite the stored cookie string.
                # NOTE(review): 'cookies' may be stale from a previous
                # request when no row matched here — confirm intended.
                cookie_db.objects.filter(Q(url__icontains=new_uri)).update(cookie=cookie_data)
                return HttpResponseRedirect("/zapscanner/")
            else:
                # No exact match: insert a fresh row for this URL.
                data_dump = cookie_db(url=new_uri,
                                      cookie=cookie_data)
                data_dump.save()
                return HttpResponseRedirect("/zapscanner/")
        # Only reached when cookies.txt is empty (the loop body returns).
        messages.add_message(request, messages.SUCCESS, 'Cookies stored')
        return HttpResponseRedirect('/zapscanner/')
    return render(request, 'webscanner.html')
def exclude_url(request):
    """
    Persist a URL that scanners should skip.

    Reads the ``exclude_url`` POST field and saves it to the
    excluded_db table in the Archery database.

    :param request: Django request object.
    :return: rendered webscanner.html.
    """
    excluded_target = request.POST.get("exclude_url", )
    record = excluded_db(exclude_url=excluded_target)
    record.save()
    return render(request, 'webscanner.html', )
def exluded_url_list(request):
    """
    List excluded URLs and, on POST, delete the selected ones.

    POST param ``exclude_url`` is a comma-separated list of URLs whose
    excluded_db rows should be removed.

    :param request: Django request object.
    :return: redirect after deletion, otherwise the rendered list page.
    """
    all_excluded_url = excluded_db.objects.all()
    if request.method == 'POST':
        exclude_url = request.POST.get('exclude_url')
        # Normalise the comma-separated input; dropping empty tokens
        # avoids deleting rows matched by "" from a trailing comma.
        targets = [t for t in str(exclude_url).replace(" ", "").split(',') if t]
        for exclude_target in targets:
            excluded_db.objects.filter(exclude_url=exclude_target).delete()
        return HttpResponseRedirect('/zapscanner/excluded_url_list')
    return render(request, 'excludedurl_list.html', {'all_excluded_url': all_excluded_url})
import asyncio
from concurrent.futures import ProcessPoolExecutor
import pickle
import time
from thrift.perf.load.services import LoadTestInterface
from thrift.perf.load.types import LoadError
def us_to_sec(microseconds):
    """Convert a duration given in microseconds into seconds."""
    us_per_second = 1000000
    return microseconds / us_per_second
def burn_in_executor(us):
    """Busy-spin for roughly *us* microseconds of wall-clock time.

    Intended to run inside a ProcessPoolExecutor worker so that the
    CPU burn does not block the asyncio event loop.
    """
    deadline = time.time() + us / 1000000
    while time.time() < deadline:
        pass
class LoadTestHandler(LoadTestInterface):
    """asyncio implementation of the Thrift load-test service.

    CPU-burning calls are shipped to a ProcessPoolExecutor so they do
    not block the event loop; the deliberately "bad" variants run on
    the loop thread to let benchmarks measure that anti-pattern.
    """

    def __init__(self, loop=None):
        super().__init__()
        # Fall back to the current event loop when none is supplied.
        self.loop = loop or asyncio.get_event_loop()
        # Worker pool for the CPU-bound burn* calls.
        self.pool = ProcessPoolExecutor()
        # Use the fastest pickle protocol for arguments shipped to the
        # worker processes.  NOTE(review): this mutates a module-level
        # pickle setting, affecting the whole process, not just this pool.
        pickle.DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL

    async def noop(self):
        """Do nothing; measures bare request/response overhead."""
        pass

    async def onewayNoop(self):
        """One-way variant of noop (no response expected by the caller)."""
        pass

    async def asyncNoop(self):
        """Async-flavored noop; identical body to noop."""
        pass

    async def sleep(self, us):
        """Sleep *us* microseconds without blocking the event loop."""
        await asyncio.sleep(us_to_sec(us))

    async def onewaySleep(self, us):
        """One-way variant of sleep."""
        await asyncio.sleep(us_to_sec(us))

    async def burn(self, us):
        """Burn CPU for *us* microseconds in a worker process."""
        return await self.loop.run_in_executor(
            self.pool,
            burn_in_executor,
            us)

    async def onewayBurn(self, us):
        """One-way variant of burn."""
        return await self.loop.run_in_executor(
            self.pool,
            burn_in_executor,
            us)

    async def badSleep(self, us):
        # "bad" because it sleeps on the main thread
        time.sleep(us_to_sec(us))

    async def badBurn(self, us):
        """'Bad' burn: spins on the event-loop thread, blocking it."""
        return burn_in_executor(us)

    async def throwError(self, code):
        """Raise the declared service exception with the given code."""
        raise LoadError(code=code)

    async def throwUnexpected(self, code):
        # NOTE(review): raises the *declared* LoadError just like
        # throwError — confirm against the IDL whether an undeclared
        # exception type was intended here.
        raise LoadError(code=code)

    async def send(self, data):
        """Receive a payload and discard it (upload throughput test)."""
        pass

    async def onewaySend(self, data):
        """One-way variant of send."""
        pass

    async def recv(self, bytes):
        # NOTE: the parameter name 'bytes' (shadowing the builtin)
        # comes from the service definition.
        return 'a' * bytes

    async def sendrecv(self, data, recvBytes):
        """Discard *data*, return a payload of *recvBytes* characters."""
        return 'a' * recvBytes

    async def echo(self, data):
        """Return the payload unchanged."""
        return data

    async def add(self, a, b):
        """Return the sum of the two operands."""
        return a + b

    async def largeContainer(self, data):
        """Accept a large container and discard it (deserialization cost)."""
        pass

    async def iterAllFields(self, data):
        """Touch every field of every item, then return *data* unchanged."""
        for item in data:
            _ = item.stringField
            for _ in item.stringList:
                pass
        return data
from concurrent.futures import ProcessPoolExecutor
import pickle
import time
from thrift.perf.load.services import LoadTestInterface
from thrift.perf.load.types import LoadError
def us_to_sec(microseconds):
return microseconds / 1000000
def burn_in_executor(us):
start = time.time()
end = start + us_to_sec(us)
while time.time() < end:
pass
class LoadTestHandler(LoadTestInterface):
def __init__(self, loop=None):
super().__init__()
self.loop = loop or asyncio.get_event_loop()
self.pool = ProcessPoolExecutor()
pickle.DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
async def noop(self):
pass
async def onewayNoop(self):
pass
async def asyncNoop(self):
pass
async def sleep(self, us):
await asyncio.sleep(us_to_sec(us))
async def onewaySleep(self, us):
await asyncio.sleep(us_to_sec(us))
async def burn(self, us):
return await self.loop.run_in_executor(
self.pool,
burn_in_executor,
us)
async def onewayBurn(self, us):
return await self.loop.run_in_executor(
self.pool,
burn_in_executor,
us)
async def badSleep(self, us):
# "bad" because it sleeps on the main thread
time.sleep(us_to_sec(us))
async def badBurn(self, us):
return burn_in_executor(us)
async def throwError(self, code):
raise LoadError(code=code)
async def throwUnexpected(self, code):
raise LoadError(code=code)
async def send(self, data):
pass
async def onewaySend(self, data):
pass
async def recv(self, bytes):
return 'a' * bytes
async def sendrecv(self, data, recvBytes):
return 'a' * recvBytes
async def echo(self, data):
return data
async def add(self, a, b):
return a + b
async def largeContainer(self, data):
pass
async def iterAllFields(self, data):
for item in data:
_ = item.stringField
for _ in item.stringList:
pass
return data | 0.455441 | 0.235218 |
import json
import os
import mimetypes
from uuid import UUID
import requests
from typing import Dict, List, Optional
from autoretouch_api_client.model import (
ApiConfig, Organization, Page, Workflow, DeviceCodeResponse, AccessTokenResponse, WorkflowExecution)
# Default connection settings for the production autoretouch API.
DEFAULT_API_CONFIG = ApiConfig(
    BASE_API_URL="https://api.autoretouch.com",
    BASE_API_URL_CURRENT="https://api.autoretouch.com/v1",
    CLIENT_ID="V8EkfbxtBi93cAySTVWAecEum4d6pt4J",  # public OAuth device-flow client id
    SCOPE="offline_access",
    AUDIENCE="https://api.autoretouch.com",
    AUTH_DOMAIN="https://auth.autoretouch.com"
)
# Sent as the User-Agent header on every request unless overridden.
DEFAULT_USER_AGENT = "Autoretouch-Python-Api-Client-0.0.1"
class AutoretouchClient:
    """Synchronous client for the autoretouch HTTP API.

    Covers the OAuth2 device-code flow, organization/workflow listing,
    image upload/download and workflow-execution management.  Every
    call raises RuntimeError when the API answers with a status other
    than 200 or 201.
    """

    def __init__(self, user_agent: str = DEFAULT_USER_AGENT, api_config: ApiConfig = DEFAULT_API_CONFIG):
        self.USER_AGENT = user_agent
        self.API_CONFIG = api_config

    def __auth_headers(self, access_token: str, content_type: Optional[str] = None) -> Dict[str, str]:
        """Build the standard headers for an authenticated request.

        :param content_type: optional Content-Type value.  NOTE(review):
            several endpoints historically send the non-standard value
            "json"; that behavior is preserved by the callers.
        """
        headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
        if content_type is not None:
            headers["Content-Type"] = content_type
        return headers

    def get_api_status(self) -> int:
        """Return the HTTP status code of the base health endpoint."""
        return requests.get(f"{self.API_CONFIG.BASE_API_URL}/health").status_code

    def get_api_status_current(self) -> int:
        """Return the HTTP status code of the versioned health endpoint."""
        return requests.get(f"{self.API_CONFIG.BASE_API_URL_CURRENT}/health").status_code

    def get_device_code(self) -> DeviceCodeResponse:
        """Start the OAuth2 device-code flow and return the device code."""
        url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/device/code"
        payload = f"client_id={self.API_CONFIG.CLIENT_ID}&scope={self.API_CONFIG.SCOPE}&audience={self.API_CONFIG.AUDIENCE}"
        headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/x-www-form-urlencoded"}
        response = requests.post(url=url, headers=headers, data=payload)
        self.__assert_response_ok(response)
        return DeviceCodeResponse(**response.json())

    def get_access_and_refresh_token(self, device_code: str) -> AccessTokenResponse:
        """Exchange a device code for access and refresh tokens."""
        url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/token"
        payload = f"grant_type=urn:ietf:params:oauth:grant-type:device_code" \
                  f"&device_code={device_code}" \
                  f"&client_id={self.API_CONFIG.CLIENT_ID}"
        headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/x-www-form-urlencoded"}
        response = requests.post(url=url, headers=headers, data=payload)
        self.__assert_response_ok(response)
        return AccessTokenResponse(**response.json())

    def get_refreshed_access_token(self, refresh_token: str) -> AccessTokenResponse:
        """Obtain a fresh access token using a refresh token."""
        url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/token"
        payload = f"grant_type=refresh_token" \
                  f"&refresh_token={refresh_token}" \
                  f"&client_id={self.API_CONFIG.CLIENT_ID}"
        headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/x-www-form-urlencoded"}
        response = requests.post(url=url, headers=headers, data=payload)
        self.__assert_response_ok(response)
        # The token endpoint does not echo the refresh token back, so it
        # is re-attached to keep the response object complete.
        return AccessTokenResponse(**response.json(), refresh_token=refresh_token)

    def revoke_refresh_token(self, refresh_token: str) -> int:
        """Revoke a refresh token; returns the HTTP status code."""
        url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/revoke"
        payload = {
            "client_id": self.API_CONFIG.CLIENT_ID,
            "token": refresh_token
        }
        headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/json"}
        response = requests.post(url=url, headers=headers, data=json.dumps(payload))
        self.__assert_response_ok(response)
        return response.status_code

    def get_organizations(self, access_token: str) -> List[Organization]:
        """List the first 50 organizations visible to the caller."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/organization?limit=50&offset=0"
        response = requests.get(url=url, headers=self.__auth_headers(access_token, "json"))
        self.__assert_response_ok(response)
        page = Page(**response.json())
        return [Organization(**entry) for entry in page.entries]

    def get_workflows(self, access_token: str, organization_id: UUID) -> List[Workflow]:
        """List the first 50 workflows of an organization."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow?limit=50&offset=0&organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token, "json"))
        self.__assert_response_ok(response)
        page = Page(**response.json())
        return [Workflow(**entry) for entry in page.entries]

    def get_workflow_executions(self, access_token: str, organization_id: UUID, workflow_id: UUID) -> Page:
        """Return a page of executions for the given workflow."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution?workflow={workflow_id}&limit=50&offset=0&organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token))
        self.__assert_response_ok(response)
        page = Page(**response.json())
        page.entries = [WorkflowExecution(**entry) for entry in page.entries]
        return page

    def upload_image(self, access_token: str, organization_id: UUID, filepath: str) -> str:
        """Upload an image file; returns the server-assigned content hash."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/upload?organization={organization_id}"
        with open(filepath, 'rb') as file:
            filename = os.path.basename(file.name)
            mimetype, _ = mimetypes.guess_type(file.name)
            files = [('file', (filename, file, mimetype))]
            response = requests.post(url=url, headers=self.__auth_headers(access_token), files=files)
        self.__assert_response_ok(response)
        # NOTE(review): response.encoding may be None for some servers;
        # this relies on the API always sending a charset.
        return response.content.decode(response.encoding)

    def download_image(self, access_token: str, organization_id: UUID, content_hash: str, output_filename: str) -> bytes:
        """Download an image by content hash; returns the raw bytes."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/image/{content_hash}/{output_filename}?organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token))
        self.__assert_response_ok(response)
        return response.content

    def create_workflow_execution_for_image_reference(
            self, access_token: str, workflow_id: UUID, workflow_version_id: Optional[UUID], organization_id: UUID,
            image_content_hash: str, image_name: str, mimetype: str, labels: Dict[str, str]) -> UUID:
        """Start a workflow execution for an already-uploaded image."""
        version_str = f"&version={workflow_version_id}" if workflow_version_id else ""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/create" \
              f"?workflow={workflow_id}" \
              f"{version_str}" \
              f"&organization={organization_id}"
        payload = {
            "image": {
                "name": image_name,
                "contentHash": image_content_hash,
                "contentType": mimetype
            },
            "labels": labels
        }
        response = requests.post(url=url, headers=self.__auth_headers(access_token, "application/json"),
                                 data=json.dumps(payload))
        self.__assert_response_ok(response)
        return UUID(response.content.decode(response.encoding))

    def create_workflow_execution_for_image_file(
            self, access_token: str, workflow_id: UUID, workflow_version_id: Optional[UUID], organization_id: UUID,
            filepath: str, labels: Dict[str, str]) -> UUID:
        """Upload an image file and start a workflow execution in one call."""
        labels_encoded = "".join([f"&label[{key}]={value}" for key, value in labels.items()])
        version_str = f"&version={workflow_version_id}" if workflow_version_id else ""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/create" \
              f"?workflow={workflow_id}" \
              f"{version_str}" \
              f"&organization={organization_id}" \
              f"{labels_encoded}"
        with open(filepath, 'rb') as file:
            filename = os.path.basename(file.name)
            mimetype, _ = mimetypes.guess_type(file.name)
            files = [('file', (filename, file, mimetype))]
            response = requests.post(url=url, headers=self.__auth_headers(access_token), files=files)
        self.__assert_response_ok(response)
        return UUID(response.content.decode(response.encoding))

    def get_workflow_execution_details(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> WorkflowExecution:
        """Fetch the full record of one workflow execution."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}?organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token, "json"))
        self.__assert_response_ok(response)
        return WorkflowExecution(**response.json())

    def get_workflow_execution_status_blocking(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> str:
        """Block until the status stream completes; returns the raw body."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/status?organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token, "text/event-stream"))
        self.__assert_response_ok(response)
        # TODO: decode event stream format
        return response.content.decode(response.encoding)

    def download_workflow_execution_result_blocking(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> bytes:
        """Block until the default result is ready and return its bytes."""
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/result/default?organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token))
        self.__assert_response_ok(response)
        return response.content

    def download_workflow_execution_result(self, access_token: str, organization_id: UUID, result_path: str) -> bytes:
        """Download a result by its server-provided /image/... path."""
        assert result_path.startswith("/image/")
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}{result_path}?organization={organization_id}"
        response = requests.get(url=url, headers=self.__auth_headers(access_token))
        self.__assert_response_ok(response)
        return response.content

    def retry_workflow_execution(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> int:
        """Ask the API to retry an execution; returns the HTTP status code.

        NOTE(review): unlike the other calls this one deliberately does
        not raise on error — callers inspect the status code.
        """
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/retry?organization={organization_id}"
        return requests.post(url=url, headers=self.__auth_headers(access_token, "application/json"),
                             data={}).status_code

    def send_feedback(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID, thumbs_up: bool,
                      expected_images_content_hashes: Optional[List[str]] = None):
        """Send thumbs-up/down feedback for an execution.

        :param expected_images_content_hashes: optional content hashes of
            the images the user expected; defaults to an empty list.
        """
        # None sentinel replaces the original mutable default argument
        # (a shared list across calls).
        if expected_images_content_hashes is None:
            expected_images_content_hashes = []
        url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/feedback?organization={organization_id}"
        payload = {
            "thumbsUp": thumbs_up,
            "expectedImages": expected_images_content_hashes
        }
        response = requests.post(url=url, headers=self.__auth_headers(access_token, "application/json"),
                                 data=json.dumps(payload))
        self.__assert_response_ok(response)

    @staticmethod
    def __assert_response_ok(response):
        """Raise RuntimeError unless the response status is 200 or 201."""
        if response.status_code != 200 and response.status_code != 201:
            raise RuntimeError(f"API responded with Status Code {response.status_code}, reason: {response.reason}")
import os
import mimetypes
from uuid import UUID
import requests
from typing import Dict, List, Optional
from autoretouch_api_client.model import (
ApiConfig, Organization, Page, Workflow, DeviceCodeResponse, AccessTokenResponse, WorkflowExecution)
DEFAULT_API_CONFIG = ApiConfig(
BASE_API_URL="https://api.autoretouch.com",
BASE_API_URL_CURRENT="https://api.autoretouch.com/v1",
CLIENT_ID="V8EkfbxtBi93cAySTVWAecEum4d6pt4J",
SCOPE="offline_access",
AUDIENCE="https://api.autoretouch.com",
AUTH_DOMAIN="https://auth.autoretouch.com"
)
DEFAULT_USER_AGENT = "Autoretouch-Python-Api-Client-0.0.1"
class AutoretouchClient:
def __init__(self, user_agent: str = DEFAULT_USER_AGENT, api_config: ApiConfig = DEFAULT_API_CONFIG):
self.USER_AGENT = user_agent
self.API_CONFIG = api_config
def get_api_status(self) -> int:
return requests.get(f"{self.API_CONFIG.BASE_API_URL}/health").status_code
def get_api_status_current(self, ) -> int:
return requests.get(f"{self.API_CONFIG.BASE_API_URL_CURRENT}/health").status_code
def get_device_code(self) -> DeviceCodeResponse:
url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/device/code"
payload = f"client_id={self.API_CONFIG.CLIENT_ID}&scope={self.API_CONFIG.SCOPE}&audience={self.API_CONFIG.AUDIENCE}"
headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url=url, headers=headers, data=payload)
self.__assert_response_ok(response)
return DeviceCodeResponse(**response.json())
def get_access_and_refresh_token(self, device_code: str) -> AccessTokenResponse:
url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/token"
payload = f"grant_type=urn:ietf:params:oauth:grant-type:device_code" \
f"&device_code={device_code}" \
f"&client_id={self.API_CONFIG.CLIENT_ID}"
headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url=url, headers=headers, data=payload)
self.__assert_response_ok(response)
return AccessTokenResponse(**response.json())
def get_refreshed_access_token(self, refresh_token: str) -> AccessTokenResponse:
url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/token"
payload = f"grant_type=refresh_token" \
f"&refresh_token={refresh_token}" \
f"&client_id={self.API_CONFIG.CLIENT_ID}"
headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url=url, headers=headers, data=payload)
self.__assert_response_ok(response)
return AccessTokenResponse(**response.json(), refresh_token=refresh_token)
def revoke_refresh_token(self, refresh_token: str) -> int:
url = f"{self.API_CONFIG.AUTH_DOMAIN}/oauth/revoke"
payload = {
"client_id": self.API_CONFIG.CLIENT_ID,
"token": refresh_token
}
headers = {"User-Agent": self.USER_AGENT, "Content-Type": "application/json"}
response = requests.post(url=url, headers=headers, data=json.dumps(payload))
self.__assert_response_ok(response)
return response.status_code
def get_organizations(self, access_token: str) -> List[Organization]:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/organization?limit=50&offset=0"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}", "Content-Type": "json"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
page = Page(**response.json())
organizations = [Organization(**entry) for entry in page.entries]
return organizations
def get_workflows(self, access_token: str, organization_id: UUID) -> List[Workflow]:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow?limit=50&offset=0&organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}", "Content-Type": "json"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
page = Page(**response.json())
workflows = [Workflow(**entry) for entry in page.entries]
return workflows
def get_workflow_executions(self, access_token: str, organization_id: UUID, workflow_id: UUID) -> Page:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution?workflow={workflow_id}&limit=50&offset=0&organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
page = Page(**response.json())
page.entries = [WorkflowExecution(**entry) for entry in page.entries]
return page
def upload_image(self, access_token: str, organization_id: UUID, filepath: str) -> str:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/upload?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
with open(filepath, 'rb') as file:
filename = os.path.basename(file.name)
mimetype, _ = mimetypes.guess_type(file.name)
files = [('file', (filename, file, mimetype))]
response = requests.post(url=url, headers=headers, files=files)
self.__assert_response_ok(response)
return response.content.decode(response.encoding)
def download_image(self, access_token: str, organization_id: UUID, content_hash: str, output_filename: str) -> bytes:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/image/{content_hash}/{output_filename}?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
return response.content
def create_workflow_execution_for_image_reference(
self, access_token: str, workflow_id: UUID, workflow_version_id: Optional[UUID], organization_id: UUID,
image_content_hash: str, image_name: str, mimetype: str, labels: Dict[str, str]) -> UUID:
version_str = f"&version={workflow_version_id}" if workflow_version_id else ""
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/create" \
f"?workflow={workflow_id}" \
f"{version_str}" \
f"&organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}", "Content-Type": "application/json"}
payload = {
"image": {
"name": image_name,
"contentHash": image_content_hash,
"contentType": mimetype
},
"labels": labels
}
response = requests.post(url=url, headers=headers, data=json.dumps(payload))
self.__assert_response_ok(response)
return UUID(response.content.decode(response.encoding))
def create_workflow_execution_for_image_file(
self, access_token: str, workflow_id: UUID, workflow_version_id: Optional[UUID], organization_id: UUID,
filepath: str, labels: Dict[str, str]) -> UUID:
labels_encoded = "".join([f"&label[{key}]={value}" for key, value in labels.items()])
version_str = f"&version={workflow_version_id}" if workflow_version_id else ""
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/create" \
f"?workflow={workflow_id}" \
f"{version_str}" \
f"&organization={organization_id}" \
f"{labels_encoded}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
with open(filepath, 'rb') as file:
filename = os.path.basename(file.name)
mimetype, _ = mimetypes.guess_type(file.name)
files = [('file', (filename, file, mimetype))]
response = requests.post(url=url, headers=headers, files=files)
self.__assert_response_ok(response)
return UUID(response.content.decode(response.encoding))
def get_workflow_execution_details(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> WorkflowExecution:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}", "Content-Type": "json"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
return WorkflowExecution(**response.json())
def get_workflow_execution_status_blocking(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> str:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/status?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}", "Content-Type": "text/event-stream"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
# TODO: decode event stream format
return response.content.decode(response.encoding)
def download_workflow_execution_result_blocking(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> bytes:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/result/default?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
return response.content
def download_workflow_execution_result(self, access_token: str, organization_id: UUID, result_path: str) -> bytes:
assert result_path.startswith("/image/")
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}{result_path}?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}"}
response = requests.get(url=url, headers=headers)
self.__assert_response_ok(response)
return response.content
def retry_workflow_execution(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID) -> int:
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/retry?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"}
return requests.post(url=url, headers=headers, data={}).status_code
def send_feedback(self, access_token: str, organization_id: UUID, workflow_execution_id: UUID, thumbs_up: bool,
expected_images_content_hashes: List[str] = []):
url = f"{self.API_CONFIG.BASE_API_URL_CURRENT}/workflow/execution/{workflow_execution_id}/feedback?organization={organization_id}"
headers = {"User-Agent": self.USER_AGENT, "Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"}
payload = {
"thumbsUp": thumbs_up,
"expectedImages": expected_images_content_hashes
}
response = requests.post(url=url, headers=headers, data=json.dumps(payload))
self.__assert_response_ok(response)
@staticmethod
def __assert_response_ok(response):
if response.status_code != 200 and response.status_code != 201:
raise RuntimeError(f"API responded with Status Code {response.status_code}, reason: {response.reason}") | 0.670824 | 0.098903 |
from sly import Lexer
r_ID = r'[a-zA-Z_][a-zA-Z0-9_]*'
r_KW = r_ID + r':'
r_CHAR = r'\S'
r_OPCHAR = r'[\!\@\#\$\%\^\&\*\-\+\=\~\/\?\<\>\,\;\|\‘\\]'
r_SQ_STRING = r"\'(\\\'|[^\'])*\'"
r_DQ_STRING = r'\"(\\\"|[^\"])*\"'
r_BQ_STRING = r'\`(\\\`|[^\`])*\`'
r_BIN_DIGIT = r'[0-1]'
r_OCT_DIGIT = r'[0-7]'
r_DEC_DIGIT = r'[0-9]'
r_HEX_DIGIT = r'[0-9A-Za-z]'
r_HEX_NUMBER = r'0[xX]' + r_HEX_DIGIT + r'+'
r_OCT_NUMBER = r'0[oO]' + r_OCT_DIGIT + r'+'
r_BIN_NUMBER = r'0[bB]' + r_BIN_DIGIT + r'+'
r_INTEGER = r'\-?' + r_DEC_DIGIT + r'+'
r_FLOAT = r'\-?' + r_DEC_DIGIT + r'*\.' + r_INTEGER
class SPLexer(Lexer):
    """Tokenizer for the SP (Smalltalk-like) language, built on sly.Lexer.

    NOTE: sly's metaclass lets token names be written as bare
    identifiers inside the class body and makes string-valued class
    attributes into token patterns — the seemingly-undefined names
    below are intentional, and declaration order determines match
    priority.  Do not rename or reorder without consulting the sly docs.
    """

    tokens = {
        # Literals
        # NOTE(review): LITERAL_OCT appears twice; harmless in a set
        # literal but one occurrence is redundant.
        LITERAL_HEX, LITERAL_OCT, LITERAL_OCT, LITERAL_BIN, LITERAL_DEC, LITERAL_FLOAT,
        LITERAL_STRING, LITERAL_COMMENT, LITERAL_CHAR,
        # SYMBOLS
        SYMBOL_KW_SELECTOR, SYMBOL_OP_SELECTOR, SYMBOL_UN_SELECTOR,
        # IDs
        ID_KEYWORD, ID_INPUT_ARG, ID_NAME,
        # Keywords
        KW_SELF, KW_SUPER, KW_THIS_CONTEXT, KW_NIL, KW_TRUE, KW_FALSE,
        # OPs
        OP_RETURN, OP_ASSIGN, OP_CASCADE, OP_CUSTOM,
        # Delimiters
        NEWLINE, COMMA
    }

    # Inter-token whitespace to skip (newlines are a real token below).
    ignore = ' \t\r'
    literals = {'[', ']', '(', ')', '{', '}', '|', '#{', '<', '>'}

    def __init__(self):
        # Tracks [ ... ] context nesting depth (see context_open/close).
        self.nesting_level = 0

    # Literals
    LITERAL_HEX = r'0[xX]' + r_HEX_DIGIT + r'+'
    LITERAL_OCT = r'0[oO]' + r_OCT_DIGIT + r'+'
    LITERAL_BIN = r'0[bB]' + r_BIN_DIGIT + r'+'
    LITERAL_DEC = r_INTEGER
    LITERAL_FLOAT = r_FLOAT
    LITERAL_STRING = r_SQ_STRING
    # Double-quoted strings are Smalltalk-style comments in SP.
    LITERAL_COMMENT = r_DQ_STRING
    LITERAL_CHAR = r'\$' + r_CHAR
    # IDs
    SYMBOL_KW_SELECTOR = r'#(' + r_KW + r')+'
    SYMBOL_OP_SELECTOR = r'#(' + r_OPCHAR + r')+'
    SYMBOL_UN_SELECTOR = r'#' + r_ID
    ID_KEYWORD = r_ID + r':'
    ID_INPUT_ARG = r':' + r_ID
    ID_NAME = r_ID
    # Keywords — sly remaps an ID_NAME match whose text equals one of
    # these strings to the dedicated keyword token type.
    ID_NAME['self'] = KW_SELF
    ID_NAME['super'] = KW_SUPER
    ID_NAME['thisContext'] = KW_THIS_CONTEXT
    ID_NAME['nil'] = KW_NIL
    ID_NAME['True'] = KW_TRUE
    ID_NAME['False'] = KW_FALSE
    # Reserved OPs
    OP_RETURN = r'\^'
    OP_ASSIGN = r':='
    OP_CASCADE = r'\;'
    OP_CUSTOM = r_OPCHAR + r'+'

    # NOTE(review): the CONTEXT_OPEN / CONTEXT_CLOSE types assigned
    # below are not declared in `tokens`; confirm sly accepts retyped
    # tokens outside the declared set.
    @_(r'\[')
    def context_open(self, t):
        t.type = 'CONTEXT_OPEN'
        self.nesting_level += 1
        return t

    @_(r'\]')
    def context_close(self, t):
        t.type = 'CONTEXT_CLOSE'
        self.nesting_level -= 1
        return t

    # '.' is the statement separator in SP, tokenized as COMMA.
    COMMA = r'\.'

    @_(r'\n+')
    def NEWLINE(self, t):
        # Keep line numbers accurate for error reporting.
        self.lineno += len(t.value)
        return t

    def error(self, t):
        # Report and skip a single bad character, then continue lexing.
        print('Line %d: Bad character %r' % (self.lineno, t.value[0]))
        self.index += 1
r_ID = r'[a-zA-Z_][a-zA-Z0-9_]*'
r_KW = r_ID + r':'
r_CHAR = r'\S'
r_OPCHAR = r'[\!\@\#\$\%\^\&\*\-\+\=\~\/\?\<\>\,\;\|\‘\\]'
r_SQ_STRING = r"\'(\\\'|[^\'])*\'"
r_DQ_STRING = r'\"(\\\"|[^\"])*\"'
r_BQ_STRING = r'\`(\\\`|[^\`])*\`'
r_BIN_DIGIT = r'[0-1]'
r_OCT_DIGIT = r'[0-7]'
r_DEC_DIGIT = r'[0-9]'
r_HEX_DIGIT = r'[0-9A-Za-z]'
r_HEX_NUMBER = r'0[xX]' + r_HEX_DIGIT + r'+'
r_OCT_NUMBER = r'0[oO]' + r_OCT_DIGIT + r'+'
r_BIN_NUMBER = r'0[bB]' + r_BIN_DIGIT + r'+'
r_INTEGER = r'\-?' + r_DEC_DIGIT + r'+'
r_FLOAT = r'\-?' + r_DEC_DIGIT + r'*\.' + r_INTEGER
class SPLexer(Lexer):
tokens = {
# Literals
LITERAL_HEX, LITERAL_OCT, LITERAL_OCT, LITERAL_BIN, LITERAL_DEC, LITERAL_FLOAT,
LITERAL_STRING, LITERAL_COMMENT, LITERAL_CHAR,
# SYMBOLS
SYMBOL_KW_SELECTOR, SYMBOL_OP_SELECTOR, SYMBOL_UN_SELECTOR,
# IDs
ID_KEYWORD, ID_INPUT_ARG, ID_NAME,
# Keywords
KW_SELF, KW_SUPER, KW_THIS_CONTEXT, KW_NIL, KW_TRUE, KW_FALSE,
# OPs
OP_RETURN, OP_ASSIGN, OP_CASCADE, OP_CUSTOM,
# Delimiters
NEWLINE, COMMA
}
ignore = ' \t\r'
literals = {'[', ']', '(', ')', '{', '}', '|', '#{', '<', '>'}
def __init__(self):
self.nesting_level = 0
# Literals
LITERAL_HEX = r'0[xX]' + r_HEX_DIGIT + r'+'
LITERAL_OCT = r'0[oO]' + r_OCT_DIGIT + r'+'
LITERAL_BIN = r'0[bB]' + r_BIN_DIGIT + r'+'
LITERAL_DEC = r_INTEGER
LITERAL_FLOAT = r_FLOAT
LITERAL_STRING = r_SQ_STRING
LITERAL_COMMENT = r_DQ_STRING
LITERAL_CHAR = r'\$' + r_CHAR
# IDs
SYMBOL_KW_SELECTOR = r'#(' + r_KW + r')+'
SYMBOL_OP_SELECTOR = r'#(' + r_OPCHAR + r')+'
SYMBOL_UN_SELECTOR = r'#' + r_ID
ID_KEYWORD = r_ID + r':'
ID_INPUT_ARG = r':' + r_ID
ID_NAME = r_ID
# Keywords
ID_NAME['self'] = KW_SELF
ID_NAME['super'] = KW_SUPER
ID_NAME['thisContext'] = KW_THIS_CONTEXT
ID_NAME['nil'] = KW_NIL
ID_NAME['True'] = KW_TRUE
ID_NAME['False'] = KW_FALSE
# Reserved OPs
OP_RETURN = r'\^'
OP_ASSIGN = r':='
OP_CASCADE = r'\;'
OP_CUSTOM = r_OPCHAR + r'+'
@_(r'\[')
def context_open(self, t):
t.type = 'CONTEXT_OPEN'
self.nesting_level += 1
return t
@_(r'\]')
def context_close(self, t):
t.type = 'CONTEXT_CLOSE'
self.nesting_level -= 1
return t
COMMA = r'\.'
@_(r'\n+')
def NEWLINE(self, t):
self.lineno += len(t.value)
return t
def error(self, t):
print('Line %d: Bad character %r' % (self.lineno, t.value[0]))
self.index += 1 | 0.358353 | 0.131563 |
import os
# Input/output directory for the generated plotting cards (same location).
indir="/afs/cern.ch/work/e/edreyer/public/madgraph5atlasval/source/MCVal/events_DM/"
outdir="/afs/cern.ch/work/e/edreyer/public/madgraph5atlasval/source/MCVal/events_DM/"
# Scan grid: dilepton decay channels, mediator masses [GeV], couplings.
channels=["mumu","ee"]
masses=["500","1000","2000"]
couplings=["0p02"]
# Full kinematic variable list kept for reference:
#variables=["n_l1","pdgid_l1","e_l1","px_l1","py_l1","pz_l1","pt_l1","eta_l1","phi_l1","n_l2","pdgid_l2","e_l2","px_l2","py_l2","pz_l2","pt_l2","eta_l2","phi_l2","m_ll"]
variables=["pt_l1","pt_l2","eta_l1","eta_l2","phi_l1","phi_l2","m_ll"]
# Mass string as encoded in the input ROOT file names.
massString={
"500": "0p5",
"1000": "1p0",
"2000": "2p0",
}
# X-axis titles (ROOT TLatex); unlisted variables fall back to their own name.
titlex={
"m_ll": "m_{ll} [GeV]",
"pt_l1": "p_{T} (l1) [GeV]",
"pt_l2": "p_{T} (l2) [GeV]",
"phi_l1": "#phi (l1)",
"phi_l2": "#phi (l2)",
"eta_l1": "#eta (l1)",
"eta_l2": "#eta (l2)",
}
# X-range overrides; anything not listed uses [0, 4000].
minx={
"eta_l1": "-4",
"eta_l2": "-4",
"phi_l1": "-3.5",
"phi_l2": "-3.5",
}
maxx={
"eta_l1": "4",
"eta_l2": "4",
"phi_l1": "3.5",
"phi_l2": "3.5",
}
channellabel={
"mumu": "#mu#mu",
"ee": "ee",
}
# ROOT line colour per mass point.
# FIX: the last key was "3000", which matches no entry of `masses`, so the
# 2000 GeV sample silently fell through to the default colour.  The fallback
# also happens to be "2", so the generated cards are unchanged by this fix.
linecolor={
"500": 8,
"1000": 9,
"2000": 2,
}
# Write one card file per (channel, mass, coupling, variable) combination.
# NOTE(review): prefer `with open(...)` over the manual f=open/f.close pair
# (the close happens on the line just after this block).
for channel in channels:
for mass in masses:
for coupling in couplings:
for var in variables:
card="card_%s_%s_%s_%s.dat" % (channel, mass, coupling, var)
f=open(outdir + card,"w+")
f.write("file: %sevents_%s_%s_%s_DM.root\n" % (indir,channel,massString[mass],coupling))
#f.write("name: can_%s\n" % var)
f.write("name: events\n")
f.write("var: %s\n" % var)
# Selection: require both leptons to be present in the event.
f.write("cond: n_l1>0 && n_l2>0\n")
f.write("logy: 1\n")
f.write("title: %s GeV\n" % (mass))
f.write("titley: Events\n")
# Fall back to the raw variable name / default axis range when not configured.
f.write("titlex: %s\n" % (titlex[var] if var in titlex else var))
f.write("minx: %s\n" % (minx[var] if var in minx else "0"))
f.write("maxx: %s\n" % (maxx[var] if var in maxx else "4000"))
f.write("miny: 0.5\n")
f.write("maxy: 50000\n")
f.write("linecolor: %s\n" % (linecolor[mass] if mass in linecolor else "2"))
f.write("atlas: Simulation\n")
f.write("atlasx: 0.6\n")
f.write("latex: Z'_{DM} \\rightarrow %s\n" % (channellabel[channel]))
f.write("latexx: 0.73\n")
f.write("latexy: 0.77\n")
f.write("sublatex: 10k events\n")
f.write("sublatexx: 0.73\n")
f.write("sublatexy: 0.69\n")
f.close() | excursion/testcases/madgraph5atlasval/share/makeCards_DM.py | import os
# --- parsed_code duplicate of the card-generation script above (this dataset
# dump stores each row's source twice); kept byte-identical, notes only. ---
indir="/afs/cern.ch/work/e/edreyer/public/madgraph5atlasval/source/MCVal/events_DM/"
outdir="/afs/cern.ch/work/e/edreyer/public/madgraph5atlasval/source/MCVal/events_DM/"
channels=["mumu","ee"]
masses=["500","1000","2000"]
couplings=["0p02"]
#variables=["n_l1","pdgid_l1","e_l1","px_l1","py_l1","pz_l1","pt_l1","eta_l1","phi_l1","n_l2","pdgid_l2","e_l2","px_l2","py_l2","pz_l2","pt_l2","eta_l2","phi_l2","m_ll"]
variables=["pt_l1","pt_l2","eta_l1","eta_l2","phi_l1","phi_l2","m_ll"]
massString={
"500": "0p5",
"1000": "1p0",
"2000": "2p0",
}
titlex={
"m_ll": "m_{ll} [GeV]",
"pt_l1": "p_{T} (l1) [GeV]",
"pt_l2": "p_{T} (l2) [GeV]",
"phi_l1": "#phi (l1)",
"phi_l2": "#phi (l2)",
"eta_l1": "#eta (l1)",
"eta_l2": "#eta (l2)",
}
minx={
"eta_l1": "-4",
"eta_l2": "-4",
"phi_l1": "-3.5",
"phi_l2": "-3.5",
}
maxx={
"eta_l1": "4",
"eta_l2": "4",
"phi_l1": "3.5",
"phi_l2": "3.5",
}
channellabel={
"mumu": "#mu#mu",
"ee": "ee",
}
# NOTE(review): key "3000" matches no entry of `masses`; the 2000 GeV sample
# falls back to the default colour "2" (same value, so plots are unaffected).
linecolor={
"500": 8,
"1000": 9,
"3000": 2,
}
for channel in channels:
for mass in masses:
for coupling in couplings:
for var in variables:
card="card_%s_%s_%s_%s.dat" % (channel, mass, coupling, var)
f=open(outdir + card,"w+")
f.write("file: %sevents_%s_%s_%s_DM.root\n" % (indir,channel,massString[mass],coupling))
#f.write("name: can_%s\n" % var)
f.write("name: events\n")
f.write("var: %s\n" % var)
f.write("cond: n_l1>0 && n_l2>0\n")
f.write("logy: 1\n")
f.write("title: %s GeV\n" % (mass))
f.write("titley: Events\n")
f.write("titlex: %s\n" % (titlex[var] if var in titlex else var))
f.write("minx: %s\n" % (minx[var] if var in minx else "0"))
f.write("maxx: %s\n" % (maxx[var] if var in maxx else "4000"))
f.write("miny: 0.5\n")
f.write("maxy: 50000\n")
f.write("linecolor: %s\n" % (linecolor[mass] if mass in linecolor else "2"))
f.write("atlas: Simulation\n")
f.write("atlasx: 0.6\n")
f.write("latex: Z'_{DM} \\rightarrow %s\n" % (channellabel[channel]))
f.write("latexx: 0.73\n")
f.write("latexy: 0.77\n")
f.write("sublatex: 10k events\n")
f.write("sublatexx: 0.73\n")
f.write("sublatexy: 0.69\n")
f.close() | 0.305179 | 0.239683 |
import numpy as np
import pytest
from foolbox.attacks import BoundaryAttack
from foolbox.attacks import DeepFoolAttack
from foolbox.attacks import BlendedUniformNoiseAttack
# The bn_* / gl_bn_* arguments below are pytest fixtures — presumably defined
# in conftest.py (not visible in this file); their names are part of the
# pytest interface and must not be renamed.
# Smoke test: a default BoundaryAttack run must find an adversarial image.
def test_attack(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
# Same run with verbose output disabled.
def test_attack_non_verbose(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=False)
assert adv.image is not None
assert adv.distance.value < np.inf
# Continuing from a previous attack's result must strictly improve distance.
def test_attack_continue(bn_adversarial):
adv = bn_adversarial
attack1 = BlendedUniformNoiseAttack()
attack1(adv)
d1 = adv.distance.value
attack2 = BoundaryAttack()
attack2(adv, iterations=200, verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
assert adv.distance.value < d1
# Exercise the full keyword-parameter surface with a fixed random start point.
def test_attack_parameters(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
o = adv.original_image
# Seeded so the starting point is reproducible across runs.
np.random.seed(2)
starting_point = np.random.uniform(
0, 1, size=o.shape).astype(o.dtype)
attack(
adv,
iterations=200,
starting_point=starting_point,
log_every_n_steps=2,
tune_batch_size=False,
threaded_rnd=False,
threaded_gen=False,
alternative_generator=True,
verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
# Alternative generator with otherwise default parameters.
def test_attack_parameters2(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
attack(
adv,
iterations=200,
alternative_generator=True,
verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
# Batch-size tuning path (warns because 200 iterations is few for tuning).
@pytest.mark.filterwarnings("ignore:Batch size tuning after so few steps")
def test_attack_parameters3(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
o = adv.original_image
np.random.seed(2)
starting_point = np.random.uniform(
0, 1, size=o.shape).astype(o.dtype)
attack(
adv,
iterations=200,
starting_point=starting_point,
log_every_n_steps=2,
tune_batch_size=30,
threaded_rnd=False,
threaded_gen=False,
verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
# Same smoke test against the gl_* model fixture.
def test_attack_gl(gl_bn_adversarial):
adv = gl_bn_adversarial
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
# When no adversarial can exist the attack must report failure cleanly.
def test_attack_impossible(bn_impossible):
adv = bn_impossible
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=True)
assert adv.image is None
assert adv.distance.value == np.inf
# Long run seeded by DeepFool; checks the attack converges.
@pytest.mark.filterwarnings("ignore:Internal inconsistency, probably caused")
def test_attack_convergence(bn_adversarial):
adv = bn_adversarial
attack1 = DeepFoolAttack()
attack1(adv)
attack2 = BoundaryAttack()
attack2(adv, iterations=5000, verbose=True)
# should converge
assert adv.image is not None
assert adv.distance.value < np.inf | foolbox/tests/test_attacks_boundary.py | import numpy as np
# --- parsed_code duplicate of the BoundaryAttack tests above (this dataset
# dump stores each row's source twice); kept byte-identical. ---
import pytest
from foolbox.attacks import BoundaryAttack
from foolbox.attacks import DeepFoolAttack
from foolbox.attacks import BlendedUniformNoiseAttack
def test_attack(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
def test_attack_non_verbose(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=False)
assert adv.image is not None
assert adv.distance.value < np.inf
def test_attack_continue(bn_adversarial):
adv = bn_adversarial
attack1 = BlendedUniformNoiseAttack()
attack1(adv)
d1 = adv.distance.value
attack2 = BoundaryAttack()
attack2(adv, iterations=200, verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
assert adv.distance.value < d1
def test_attack_parameters(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
o = adv.original_image
np.random.seed(2)
starting_point = np.random.uniform(
0, 1, size=o.shape).astype(o.dtype)
attack(
adv,
iterations=200,
starting_point=starting_point,
log_every_n_steps=2,
tune_batch_size=False,
threaded_rnd=False,
threaded_gen=False,
alternative_generator=True,
verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
def test_attack_parameters2(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
attack(
adv,
iterations=200,
alternative_generator=True,
verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
@pytest.mark.filterwarnings("ignore:Batch size tuning after so few steps")
def test_attack_parameters3(bn_adversarial):
adv = bn_adversarial
attack = BoundaryAttack()
o = adv.original_image
np.random.seed(2)
starting_point = np.random.uniform(
0, 1, size=o.shape).astype(o.dtype)
attack(
adv,
iterations=200,
starting_point=starting_point,
log_every_n_steps=2,
tune_batch_size=30,
threaded_rnd=False,
threaded_gen=False,
verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
def test_attack_gl(gl_bn_adversarial):
adv = gl_bn_adversarial
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=True)
assert adv.image is not None
assert adv.distance.value < np.inf
def test_attack_impossible(bn_impossible):
adv = bn_impossible
attack = BoundaryAttack()
attack(adv, iterations=200, verbose=True)
assert adv.image is None
assert adv.distance.value == np.inf
@pytest.mark.filterwarnings("ignore:Internal inconsistency, probably caused")
def test_attack_convergence(bn_adversarial):
adv = bn_adversarial
attack1 = DeepFoolAttack()
attack1(adv)
attack2 = BoundaryAttack()
attack2(adv, iterations=5000, verbose=True)
# should converge
assert adv.image is not None
assert adv.distance.value < np.inf | 0.674372 | 0.706384 |
import logging
import shutil
import subprocess
import sys
import zipfile
from io import BytesIO
from pathlib import Path
from urllib import request
# Pinned source archives for the sql.js build.
amalgamation_url = 'https://sqlite.org/2021/sqlite-amalgamation-3360000.zip'
# Extension-functions
# ===================
# It breaks amalgamation if appended as other extension because it redefines
# several functions, so build it separately. Note that sql.js registers these
# extension functions by calling ``registerExtensionFunctions`` itself.
contrib_functions_url = 'https://sqlite.org/contrib/download/extension-functions.c?get=25'
# (source url, C init entry point) pairs: each file is appended to the
# amalgamation and its entry point auto-registered via extra_init().
extension_urls = (
# Miscellaneous extensions
# ========================
('https://sqlite.org/src/raw/c6bd5d24?at=series.c', 'sqlite3_series_init'),
('https://sqlite.org/src/raw/dbfd8543?at=closure.c', 'sqlite3_closure_init'),
('https://sqlite.org/src/raw/5bb2264c?at=uuid.c', 'sqlite3_uuid_init'),
('https://sqlite.org/src/raw/5853b0e5?at=regexp.c', 'sqlite3_regexp_init'),
# Third-party extension
# =====================
('https://github.com/jakethaw/pivot_vtab/raw/08ab0797/pivot_vtab.c', 'sqlite3_pivotvtab_init'),
)
sqljs_url = 'https://github.com/sql-js/sql.js/archive/refs/tags/v1.5.0.zip'
def _generate_extra_init_c_function(init_function_names):
auto_ext_calls = '\n'.join([
'nErr += sqlite3_auto_extension((void*){});'.format(init_fn)
for init_fn in init_function_names
])
return '''
int extra_init(const char* dummy)
{
int nErr = 0;
%s
return nErr ? SQLITE_ERROR : SQLITE_OK;
}
''' % auto_ext_calls
def _get_amalgamation(tgt: Path):
"""Download the SQLite amalgamation zip and unpack its files into *tgt*."""
logging.info('Downloading and extracting SQLite amalgamation %s', amalgamation_url)
archive = zipfile.ZipFile(BytesIO(request.urlopen(amalgamation_url).read()))
# Assumes the first zip entry is the archive's single root directory —
# holds for sqlite.org release zips; files are flattened into tgt.
archive_root_dir = zipfile.Path(archive, archive.namelist()[0])
for zpath in archive_root_dir.iterdir():
with zpath.open() as fr, (tgt / zpath.name).open('wb') as fw:
shutil.copyfileobj(fr, fw)
def _get_contrib_functions(tgt: Path):
"""Download the contrib extension-functions.c source into *tgt*."""
request.urlretrieve(contrib_functions_url, tgt / 'extension-functions.c')
def _get_extensions(tgt: Path):
"""Append each extension's C source to tgt/sqlite3.c plus an extra_init()."""
init_functions = []
sqlite3_c = tgt / 'sqlite3.c'
# Binary append mode: the downloaded bytes are copied untouched onto the
# amalgamation produced by _get_amalgamation().
with sqlite3_c.open('ab') as f:
for url, init_fn in extension_urls:
logging.info('Downloading and appending to amalgamation %s', url)
with request.urlopen(url) as resp:
shutil.copyfileobj(resp, f)
init_functions.append(init_fn)
logging.info('Appending SQLITE_EXTRA_INIT to amalgamation')
f.write(_generate_extra_init_c_function(init_functions).encode())
def _get_sqljs(tgt: Path):
"""Download the sql.js release zip and extract its src/ into tgt/sqljs."""
logging.info('Downloading and extracting sql.js %s', sqljs_url)
archive = zipfile.ZipFile(BytesIO(request.urlopen(sqljs_url).read()))
# Assumes the first zip entry is the archive's single root directory
# (GitHub release archives are laid out this way).
archive_root_dir = zipfile.Path(archive, archive.namelist()[0])
(tgt / 'sqljs').mkdir()
for zpath in (archive_root_dir / 'src').iterdir():
with zpath.open() as fr, (tgt / 'sqljs' / zpath.name).open('wb') as fw:
shutil.copyfileobj(fr, fw)
def configure(tgt: Path):
"""Populate *tgt* with all build inputs, then sanity-check emcc is present."""
_get_amalgamation(tgt)
_get_contrib_functions(tgt)
# Must run after _get_amalgamation: it appends to tgt/sqlite3.c.
_get_extensions(tgt)
_get_sqljs(tgt)
subprocess.check_call(['emcc', '--version'])
if __name__ == '__main__':
# zipfile.Path (used above) requires Python 3.8+.
if sys.version_info < (3, 8):
print('Python 3.8 or higher is expected', file=sys.stderr)
sys.exit(1)
logging.basicConfig(level='INFO', format='%(asctime)s %(levelname)s %(name)s %(message)s')
src = Path('src')
# NOTE(review): mkdir() raises FileExistsError on a re-run — confirm whether
# failing on an existing src/ directory is intended.
src.mkdir()
configure(src) | lib/sql-js/configure.py | import logging
# --- parsed_code duplicate of lib/sql-js/configure.py above (this dataset
# dump stores each row's source twice); kept byte-identical. ---
import shutil
import subprocess
import sys
import zipfile
from io import BytesIO
from pathlib import Path
from urllib import request
amalgamation_url = 'https://sqlite.org/2021/sqlite-amalgamation-3360000.zip'
# Extension-functions
# ===================
# It breaks amalgamation if appended as other extension because it redefines
# several functions, so build it separately. Note that sql.js registers these
# extension functions by calling ``registerExtensionFunctions`` itself.
contrib_functions_url = 'https://sqlite.org/contrib/download/extension-functions.c?get=25'
extension_urls = (
# Miscellaneous extensions
# ========================
('https://sqlite.org/src/raw/c6bd5d24?at=series.c', 'sqlite3_series_init'),
('https://sqlite.org/src/raw/dbfd8543?at=closure.c', 'sqlite3_closure_init'),
('https://sqlite.org/src/raw/5bb2264c?at=uuid.c', 'sqlite3_uuid_init'),
('https://sqlite.org/src/raw/5853b0e5?at=regexp.c', 'sqlite3_regexp_init'),
# Third-party extension
# =====================
('https://github.com/jakethaw/pivot_vtab/raw/08ab0797/pivot_vtab.c', 'sqlite3_pivotvtab_init'),
)
sqljs_url = 'https://github.com/sql-js/sql.js/archive/refs/tags/v1.5.0.zip'
def _generate_extra_init_c_function(init_function_names):
auto_ext_calls = '\n'.join([
'nErr += sqlite3_auto_extension((void*){});'.format(init_fn)
for init_fn in init_function_names
])
return '''
int extra_init(const char* dummy)
{
int nErr = 0;
%s
return nErr ? SQLITE_ERROR : SQLITE_OK;
}
''' % auto_ext_calls
def _get_amalgamation(tgt: Path):
logging.info('Downloading and extracting SQLite amalgamation %s', amalgamation_url)
archive = zipfile.ZipFile(BytesIO(request.urlopen(amalgamation_url).read()))
archive_root_dir = zipfile.Path(archive, archive.namelist()[0])
for zpath in archive_root_dir.iterdir():
with zpath.open() as fr, (tgt / zpath.name).open('wb') as fw:
shutil.copyfileobj(fr, fw)
def _get_contrib_functions(tgt: Path):
request.urlretrieve(contrib_functions_url, tgt / 'extension-functions.c')
def _get_extensions(tgt: Path):
init_functions = []
sqlite3_c = tgt / 'sqlite3.c'
with sqlite3_c.open('ab') as f:
for url, init_fn in extension_urls:
logging.info('Downloading and appending to amalgamation %s', url)
with request.urlopen(url) as resp:
shutil.copyfileobj(resp, f)
init_functions.append(init_fn)
logging.info('Appending SQLITE_EXTRA_INIT to amalgamation')
f.write(_generate_extra_init_c_function(init_functions).encode())
def _get_sqljs(tgt: Path):
logging.info('Downloading and extracting sql.js %s', sqljs_url)
archive = zipfile.ZipFile(BytesIO(request.urlopen(sqljs_url).read()))
archive_root_dir = zipfile.Path(archive, archive.namelist()[0])
(tgt / 'sqljs').mkdir()
for zpath in (archive_root_dir / 'src').iterdir():
with zpath.open() as fr, (tgt / 'sqljs' / zpath.name).open('wb') as fw:
shutil.copyfileobj(fr, fw)
def configure(tgt: Path):
_get_amalgamation(tgt)
_get_contrib_functions(tgt)
_get_extensions(tgt)
_get_sqljs(tgt)
subprocess.check_call(['emcc', '--version'])
if __name__ == '__main__':
if sys.version_info < (3, 8):
print('Python 3.8 or higher is expected', file=sys.stderr)
sys.exit(1)
logging.basicConfig(level='INFO', format='%(asctime)s %(levelname)s %(name)s %(message)s')
src = Path('src')
src.mkdir()
configure(src) | 0.261331 | 0.209834 |
from discord.ext import commands
import discord
import os
import random
import datetime
import asyncio
class FunCommands(commands.Cog):
    """Novelty/joke commands cog for the bot."""

    def __init__(self, client):
        self.client = client

    @staticmethod
    def _random_line(relname):
        """Return a random line from a data file; shared by `pun` and `roast`.

        NOTE(review): the original resolved the path with
        os.path.relpath(relname, parent_dir) and a Windows-style separator;
        that logic is kept unchanged — confirm it resolves correctly from
        the bot's working directory.
        """
        # Get path of current directory
        cur_path = os.path.dirname(__file__)
        # Go down one directory to the parent and open file
        parent_path = os.path.split(cur_path)[0]
        new_path = os.path.relpath(relname, parent_path)
        with open(new_path, 'r') as f:
            lines = f.readlines()
        # FIX: the original indexed lines[random.randint(0, len(lines))];
        # randint's upper bound is inclusive, so roughly one call in len+1
        # raised IndexError.  random.choice covers exactly the valid range.
        return random.choice(lines)

    # Bedtime command, because everyone should go to sleep at 7 pm like civilized people
    @commands.command()
    async def bedtime(self, ctx):
        """Tells you to go to sleep"""
        current = datetime.datetime.now().time()
        # FIX: the past-midnight branch was unreachable — the original first
        # test (hour >= 19 or hour < 6) already matched every hour below 6,
        # so the midnight message never fired.  The narrower midnight window
        # is now checked first.
        # NOTE(review): the `current.minute > 0` guard makes the first minute
        # of each hour fall through to the default message — confirm intent.
        if ((current.hour >= 0) and (current.hour < 6)) and (current.minute > 0):
            await ctx.send("Boi it's past midnight you have school tomorrow, <NAME> will slap you silly!")
        elif (current.hour >= 19) and (current.minute > 0):
            await ctx.send("You should really go to sleep, You are past your bedtime of 7:00 pm")
        else:
            await ctx.send("It is not 7:00 pm yet, keep on doing what you're doing!")

    # puns command, reads puns from a file and selects one at random
    @commands.command()
    async def pun(self, ctx):
        """Tells a random dad joke that nobody likes"""
        await ctx.send(self._random_line('data\\puns.txt'))

    # colossal command, injects a bunch of garbage into the channel it was called in
    @commands.command()
    async def colossal(self, ctx):
        """Creates a colossal mess in chat. Not for the faint of heart"""
        trash = ["the bruh momentum is the event leading up to a bruh moment",
                 "hf hf hf hf hf hf hf hf\nhf hf hf hf hf hf hf hf",
                 "anyways, {} is getting their ass exposed".format(ctx.author.mention),
                 "dedotated W A M",
                 "Help my pee is orange I'm rotting",
                 "Meet marcel toing. Proud owner of restaurant ratatatatoing\nchef toing toing.\nserving only the freshest toing.",
                 "seeya idot",
                 "THAT'S IT, I\'M GONNA DDOS YOU",
                 "mmmmm c r e a m y",
                 "snans",
                 "SNANS",
                 "MINECRAP",
                 "Ok yeah sure thing that's valid whatever you say Mr. Galaxy Brain you're the real colossal mess",
                 "wan go diney wurl\nflawda?\northano\nme wanna go flawda\ndindlee whirld!",
                 "This is an absolute colossal mess",
                 "Don't mind me I'm just a bit of a mess",
                 "My treehouse, my rules. No trees allowed",
                 "S<NAME>",
                 "Is it CRONCH time?\nIt is always spicy big C R O N C H time",
                 "I have my pocket right here in my pocket"]
        # Send 40 random picks, 1.5 s apart.  random.choice replaces the
        # original hard-coded trash[random.randint(0, 19)] so the index range
        # cannot go stale when entries are added or removed.
        for _ in range(40):
            await ctx.send(random.choice(trash))
            await asyncio.sleep(1.5)

    # Command to 🅱ify a sentence
    @commands.command(pass_context=True)
    async def b(self, ctx, *args):
        """Fills a sentence with the b emoji. Must have at least one word following command"""
        # check if command was used properly
        if len(args) == 0:
            await ctx.send("Include at least one word in that command you garfield lookin ass")
            # FIX: the original fell through and ended up sending an empty
            # string, which the Discord API rejects; bail out instead.
            return
        new_list = []
        for word in args:
            # Replace every 'b' (after lower-casing) and force the leading
            # character to 🅱 as well; lists are used because str is immutable.
            chars = list(word.lower().replace('b', '🅱'))
            chars[0] = '🅱'
            new_list.append("".join(chars))
        await ctx.send(" ".join(new_list))

    # A terrible explanation of what a pointer is
    @commands.command()
    async def pointer(self, ctx):
        """A terrible explanation of how a pointer works in c"""
        await ctx.send("A pointer is something that points to a point in memory. This point is where the pointer is stored, allowing you to do stuff. You can even have pointers that point to pointers, which is called a double pointer. For example, you could have a pointer point to a pointer that points to a structure that has a variable which is a pointer to a pointer to an integer. There is no point.\n-Professor Kremer, phd in bigbrain")

    # funky kong command
    @commands.command()
    async def funky(self, ctx):
        """Funky consoles you in your time of need"""
        await ctx.send("Funky cares for all your needs. Funky loves you for who you are.")
        await asyncio.sleep(1.5)
        await ctx.send("https://www.youtube.com/watch?v=68JQtxTzjqc")
        await asyncio.sleep(1.5)
        await ctx.send("https://www.youtube.com/watch?v=Dj7K2Bql6D4")
        await asyncio.sleep(1.5)
        embed = discord.Embed()
        embed.set_image(url='https://vignette.wikia.nocookie.net/mario/images/b/b0/Funky_Kong_Artwork_-_Mario_Kart_Wii.png/revision/latest?cb=20120424225007')
        await ctx.send(embed=embed)

    # cronch command to tell if you should order a big cronch right now. Based on normal eating times
    @commands.command()
    async def cronch(self, ctx):
        """Tells you if it is time to get a big cronch"""
        # Meal hours on a 24 h clock: breakfast 7-9, lunch 11-13, dinner 17-19.
        cronch_time = [11, 12, 13, 17, 18, 19, 7, 8, 9]
        current = datetime.datetime.now().time()
        if current.hour in cronch_time:
            message = "It is time to CRONCH"
        else:
            message = "It is not CRONCH time right now"
        await ctx.send(message)

    # roast command, same idea as pun command but with roasts
    @commands.command()
    async def roast(self, ctx):
        """Roasts the living hell out of the person who called this command"""
        await ctx.send(self._random_line('data\\roast.txt'))
# adds the cog to the main bot
# discord.py extension entry point, invoked by client.load_extension().
def setup(client):
client.add_cog(FunCommands(client)) | cogs/fun.py | from discord.ext import commands
# --- parsed_code duplicate of cogs/fun.py above (this dataset dump stores
# each row's source twice); kept byte-identical, review notes only. ---
import discord
import os
import random
import datetime
import asyncio
class FunCommands(commands.Cog):
def __init__(self, client):
self.client = client
# Bedtime command, because everyone should go to sleep at 7 pm like civilized people
@commands.command()
async def bedtime(self, ctx):
"""Tells you to go to sleep"""
current = datetime.datetime.now().time()
if ((current.hour >= 19) or (current.hour < 6)) and (current.minute > 0):
await ctx.send("You should really go to sleep, You are past your bedtime of 7:00 pm")
# NOTE(review): unreachable — hours 0-5 already match the branch above.
elif ((current.hour >= 0) and (current.hour < 6)) and (current.minute > 0):
await ctx.send("Boi it's past midnight you have school tomorrow, <NAME> will slap you silly!")
else:
await ctx.send("It is not 7:00 pm yet, keep on doing what you're doing!")
# puns command, reads puns fro ma file and selects one at random
@commands.command()
async def pun(self, ctx):
"""Tells a random dad joke that nobody likes"""
list_of_puns = []
# Get path of current directory
cur_path = os.path.dirname(__file__)
# Go down one directory to the parent and open file
parent_path = os.path.split(cur_path)[0]
new_path = os.path.relpath('data\\puns.txt', parent_path)
# open file and append each line to the list
with open(new_path, 'r') as f:
for line in f:
list_of_puns.append(line)
# select a random pun and print it
# NOTE(review): randint's upper bound is inclusive, so this index can be
# len(list_of_puns) and raise IndexError; random.choice would be safe.
i = random.randint(0, len(list_of_puns))
await ctx.send(list_of_puns[i])
# colossal command, injects a bunch of garbage into the channel it was called in
@commands.command()
async def colossal(self, ctx):
"""Creates a colossal mess in chat. Not for the faint of heart"""
trash = ["the bruh momentum is the event leading up to a bruh moment",
"hf hf hf hf hf hf hf hf\nhf hf hf hf hf hf hf hf",
"anyways, {} is getting their ass exposed".format(ctx.author.mention),
"dedotated W A M",
"Help my pee is orange I'm rotting",
"Meet marcel toing. Proud owner of restaurant ratatatatoing\nchef toing toing.\nserving only the freshest toing.",
"seeya idot",
"THAT'S IT, I\'M GONNA DDOS YOU",
"mmmmm c r e a m y",
"snans",
"SNANS",
"MINECRAP",
"Ok yeah sure thing that's valid whatever you say Mr. Galaxy Brain you're the real colossal mess",
"wan go diney wurl\nflawda?\northano\nme wanna go flawda\ndindlee whirld!",
"This is an absolute colossal mess",
"Don't mind me I'm just a bit of a mess",
"My treehouse, my rules. No trees allowed",
"S<NAME>",
"Is it CRONCH time?\nIt is always spicy big C R O N C H time",
"I have my pocket right here in my pocket"]
# loop and choose a random t r a s h to print
# NOTE(review): hard-coded randint(0, 19) goes stale if entries change;
# random.choice(trash) would track the list length automatically.
for i in range(40):
await ctx.send(trash[random.randint(0, 19)])
await asyncio.sleep(1.5)
# Command to 🅱ify a sentence
@commands.command(pass_context=True)
async def b(self, ctx, *args):
"""Fills a sentence with the b emoji. Must have at least one word following command"""
new_list = []
# check if command was used properly
# NOTE(review): no early return here — with no args the code falls through
# and sends an empty string below, which the Discord API rejects.
if len(args) == 0:
await ctx.send("Include at least one word in that command you garfield lookin ass")
# loop through arguments
for i in args:
# edit the string, replacing b with emoji B. Also need to set string to lowercase
new_str = i.lower().replace('b', '🅱')
# Create a temp list in order to edit the first character because python strings are immutable
temp_list = list(new_str)
temp_list[0] = '🅱'
final_str = "".join(temp_list)
# append final version of the word
new_list.append(final_str)
await ctx.send(" ".join(new_list))
# A terrible explanation of what a pointer is
@commands.command()
async def pointer(self, ctx):
"""A terrible explanation of how a pointer works in c"""
await ctx.send("A pointer is something that points to a point in memory. This point is where the pointer is stored, allowing you to do stuff. You can even have pointers that point to pointers, which is called a double pointer. For example, you could have a pointer point to a pointer that points to a structure that has a variable which is a pointer to a pointer to an integer. There is no point.\n-Professor Kremer, phd in bigbrain")
# funky kong command
@commands.command()
async def funky(self, ctx):
"""Funky consoles you in your time of need"""
await ctx.send("Funky cares for all your needs. Funky loves you for who you are.")
await asyncio.sleep(1.5)
await ctx.send("https://www.youtube.com/watch?v=68JQtxTzjqc")
await asyncio.sleep(1.5)
await ctx.send("https://www.youtube.com/watch?v=Dj7K2Bql6D4")
await asyncio.sleep(1.5)
embed = discord.Embed()
embed.set_image(url='https://vignette.wikia.nocookie.net/mario/images/b/b0/Funky_Kong_Artwork_-_Mario_Kart_Wii.png/revision/latest?cb=20120424225007')
await ctx.send(embed=embed)
# cronch command to tell if you should order a big cronch right now. Based on normal eating times
@commands.command()
async def cronch(self, ctx):
"""Tells you if it is time to get a big cronch"""
cronch_time = [11, 12, 13, 17, 18, 19, 7, 8, 9]
current = datetime.datetime.now().time()
if current.hour in cronch_time:
message = "It is time to CRONCH"
else:
message = "It is not CRONCH time right now"
await ctx.send(message)
# roast command, same idea as pun command but with roasts
@commands.command()
async def roast(self, ctx):
"""Roasts the living hell out of the person who called this command"""
list_of_roasts = []
# Get path of pun file
cur_path = os.path.dirname(__file__)
# Go down one directory to the parent and open file
parent_path = os.path.split(cur_path)[0]
new_path = os.path.relpath('data\\roast.txt', parent_path)
# open file and append each line to the list
with open(new_path, 'r') as f:
for line in f:
list_of_roasts.append(line)
# select a random pun and print it
# NOTE(review): same inclusive-randint IndexError hazard as in pun().
i = random.randint(0, len(list_of_roasts))
await ctx.send(list_of_roasts[i])
# adds the cog to the main bot
def setup(client):
client.add_cog(FunCommands(client)) | 0.209187 | 0.150621 |
import sqlite3
# Standard Library
from argparse import ArgumentParser, Namespace
from sqlite3 import Cursor
# 3rd Party
from pygments.styles import STYLE_MAP
from tabulate import tabulate
from .context import Context, SqliteCtxt
# Relative
from .meta_cmds import meta_cmds
from .utils import set_db_con, log, set_prompt_sess, set_toolbar, set_env_vars, set_verbosity
def main() -> None:
"""CLI entry point: parse arguments, build the REPL context, run the loop."""
parser: ArgumentParser = ArgumentParser(
prog='SQLiteREPL',
description='A dead simple REPL for SQLite',
epilog='bye!')
parser.add_argument(
'database',
help='path to database',
nargs='?',
default=':memory:')
parser.add_argument(
'-H',
'--history',
metavar='PATH',
help='path to history file',
nargs='?',
default='~/.SqliteREPL_history')
# NOTE(review): default=False for a FILE option means the "no script" case
# is a bool, not a path — confirm downstream code checks truthiness only.
parser.add_argument(
'-e',
'--eval',
default=False,
metavar='FILE',
nargs='?',
help='eval SQL script before running the REPL')
parser.add_argument(
'-m',
'--multiline',
help='enable multiline mode (useful for creating tables)',
action='store_true',
default=False)
parser.add_argument(
'-v',
'--verbose',
help='enable verbose logging',
action='store_true',
default=False)
parser.add_argument(
'-M',
'--memory',
help='in memory database',
action='store_true',
default=False)
parser.add_argument(
'--no-history-search',
dest='history_search',
help='disable history search',
action='store_false',
default=True)
parser.add_argument(
'--no-complete-while-typing',
dest='complete_while_typing',
help='disable completion while typing',
action='store_false',
default=True)
parser.add_argument(
'--no-infobar',
dest='infobar',
help='disable info bar at the bottom of the screen',
action='store_false',
default=True)
# NOTE(review): help-text typo — "open the database is READ-ONLY mode"
# should read "in READ-ONLY mode" (user-visible string left unchanged here).
parser.add_argument(
'--readonly',
help='open the database is READ-ONLY mode',
action='store_true',
default=False)
parser.add_argument(
'--no-editor',
dest='editor',
help='disable opening in $EDITOR',
action='store_false',
default=True)
parser.add_argument(
'-t',
'--table_style',
help='set table style to <STYLE>, (see https://pypi.org/project/tabulate/) (hint: try "simple", "orgtbl", "pipe", "html" or "latex")',
metavar='STYLE',
choices=[
"fancy_grid",
"grid",
"html",
"jira",
"latex",
"latex_booktabs",
"latex_raw",
"mediawiki",
"moinmoin",
"orgtbl",
"pipe",
"plain",
"presto",
"psql",
"rst",
"simple",
"textile",
"youtrack",
],
default='simple')
parser.add_argument(
'-s',
'--style',
metavar='STYLE',
help='pygments style (see http://pygments.org/docs/styles/#builtin-styles)',
choices=list(STYLE_MAP.keys()),
default='default')
parser.add_argument(
'-p',
'--prompt',
metavar='STRING',
help='prompt string',
default='SQLite >> ')
args: Namespace = parser.parse_args()
# Build the REPL context from CLI args and initialise its collaborators.
context: SqliteCtxt = Context.from_namespace(args)
set_verbosity(context)
set_db_con(context)
set_prompt_sess(context)
set_env_vars(context)
# REPL loop: read input, try meta-commands first, otherwise execute as SQL.
while True:
try:
log.debug(context)
# refreshes it so that it displays up-to-date info
set_toolbar(context)
context.user_input = context.prompt_session.prompt().strip()
fired = False
for cmd in meta_cmds:
if cmd.test(context.user_input):
cmd.fire(context)
fired = True
break
if fired:
continue
elif context.user_input:
try:
# The connection as a context manager commits/rolls back.
with context.con as c:
cursor: Cursor = c.cursor()
cursor.execute(context.user_input)
print(tabulate(cursor.fetchall(), tablefmt=context.table_style))
cursor.close()
except (sqlite3.Error, sqlite3.IntegrityError) as e:
print(f"An error occurred: {e.args[0]}")
# Ctrl-D / Ctrl-C exits the REPL (the break is on the line below).
except (EOFError, KeyboardInterrupt):
break | sqliterepl/main.py |
import sqlite3
# Standard Library
from argparse import ArgumentParser, Namespace
from sqlite3 import Cursor
# 3rd Party
from pygments.styles import STYLE_MAP
from tabulate import tabulate
from .context import Context, SqliteCtxt
# Relative
from .meta_cmds import meta_cmds
from .utils import set_db_con, log, set_prompt_sess, set_toolbar, set_env_vars, set_verbosity
def main() -> None:
    """Entry point for the SQLiteREPL executable.

    Builds the argument parser, constructs the REPL context from the parsed
    arguments, initialises the database connection, prompt session and
    environment variables, then runs the read-eval-print loop until EOF
    (Ctrl-D) or a keyboard interrupt (Ctrl-C).
    """
    parser: ArgumentParser = ArgumentParser(
        prog='SQLiteREPL',
        description='A dead simple REPL for SQLite',
        epilog='bye!')
    parser.add_argument(
        'database',
        help='path to database',
        nargs='?',
        default=':memory:')
    parser.add_argument(
        '-H',
        '--history',
        metavar='PATH',
        help='path to history file',
        nargs='?',
        default='~/.SqliteREPL_history')
    parser.add_argument(
        '-e',
        '--eval',
        default=False,
        metavar='FILE',
        nargs='?',
        help='eval SQL script before running the REPL')
    parser.add_argument(
        '-m',
        '--multiline',
        help='enable multiline mode (useful for creating tables)',
        action='store_true',
        default=False)
    parser.add_argument(
        '-v',
        '--verbose',
        help='enable verbose logging',
        action='store_true',
        default=False)
    parser.add_argument(
        '-M',
        '--memory',
        help='in memory database',
        action='store_true',
        default=False)
    parser.add_argument(
        '--no-history-search',
        dest='history_search',
        help='disable history search',
        action='store_false',
        default=True)
    parser.add_argument(
        '--no-complete-while-typing',
        dest='complete_while_typing',
        help='disable completion while typing',
        action='store_false',
        default=True)
    parser.add_argument(
        '--no-infobar',
        dest='infobar',
        help='disable info bar at the bottom of the screen',
        action='store_false',
        default=True)
    parser.add_argument(
        '--readonly',
        # FIX: help text previously read "open the database is READ-ONLY mode"
        help='open the database in READ-ONLY mode',
        action='store_true',
        default=False)
    parser.add_argument(
        '--no-editor',
        dest='editor',
        help='disable opening in $EDITOR',
        action='store_false',
        default=True)
    parser.add_argument(
        '-t',
        '--table_style',
        help='set table style to <STYLE>, (see https://pypi.org/project/tabulate/) (hint: try "simple", "orgtbl", "pipe", "html" or "latex")',
        metavar='STYLE',
        choices=[
            "fancy_grid",
            "grid",
            "html",
            "jira",
            "latex",
            "latex_booktabs",
            "latex_raw",
            "mediawiki",
            "moinmoin",
            "orgtbl",
            "pipe",
            "plain",
            "presto",
            "psql",
            "rst",
            "simple",
            "textile",
            "youtrack",
        ],
        default='simple')
    parser.add_argument(
        '-s',
        '--style',
        metavar='STYLE',
        help='pygments style (see http://pygments.org/docs/styles/#builtin-styles)',
        choices=list(STYLE_MAP.keys()),
        default='default')
    parser.add_argument(
        '-p',
        '--prompt',
        metavar='STRING',
        help='prompt string',
        default='SQLite >> ')
    args: Namespace = parser.parse_args()
    context: SqliteCtxt = Context.from_namespace(args)
    set_verbosity(context)
    set_db_con(context)
    set_prompt_sess(context)
    set_env_vars(context)
    # Read-eval-print loop: exits cleanly on Ctrl-D (EOF) or Ctrl-C.
    while True:
        try:
            log.debug(context)
            # refreshes the bottom toolbar so that it displays up-to-date info
            set_toolbar(context)
            context.user_input = context.prompt_session.prompt().strip()
            # Meta commands take priority over raw SQL; the first match wins.
            fired = False
            for cmd in meta_cmds:
                if cmd.test(context.user_input):
                    cmd.fire(context)
                    fired = True
                    break
            if fired:
                continue
            elif context.user_input:
                try:
                    # The connection context manager commits on success and
                    # rolls back on exception.
                    with context.con as c:
                        cursor: Cursor = c.cursor()
                        cursor.execute(context.user_input)
                        print(tabulate(cursor.fetchall(), tablefmt=context.table_style))
                        cursor.close()
                # FIX: sqlite3.IntegrityError subclasses sqlite3.Error, so
                # listing it separately in the except tuple was redundant.
                except sqlite3.Error as e:
                    print(f"An error occurred: {e.args[0]}")
        except (EOFError, KeyboardInterrupt):
            break
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiConnectivityGetconnectionendpointdetailsInput(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, cep_id_or_name=None, nep_id_or_name=None, node_id_or_name=None, topology_id_or_name=None):  # noqa: E501
        """TapiConnectivityGetconnectionendpointdetailsInput - a model defined in OpenAPI

        :param cep_id_or_name: value for the ``cep-id-or-name`` field  # noqa: E501
        :type cep_id_or_name: str
        :param nep_id_or_name: value for the ``nep-id-or-name`` field  # noqa: E501
        :type nep_id_or_name: str
        :param node_id_or_name: value for the ``node-id-or-name`` field  # noqa: E501
        :type node_id_or_name: str
        :param topology_id_or_name: value for the ``topology-id-or-name`` field  # noqa: E501
        :type topology_id_or_name: str
        """
        field_names = ('cep_id_or_name', 'nep_id_or_name', 'node_id_or_name', 'topology_id_or_name')
        # Every field of this model is a plain string.
        self.openapi_types = dict.fromkeys(field_names, str)
        # The JSON wire format uses hyphens where Python identifiers use underscores.
        self.attribute_map = {name: name.replace('_', '-') for name in field_names}
        self._cep_id_or_name = cep_id_or_name
        self._nep_id_or_name = nep_id_or_name
        self._node_id_or_name = node_id_or_name
        self._topology_id_or_name = topology_id_or_name

    @classmethod
    def from_dict(cls, dikt) -> 'TapiConnectivityGetconnectionendpointdetailsInput':
        """Deserialize a plain dict into a model instance.

        :param dikt: A dict.
        :type: dict
        :return: The tapi.connectivity.getconnectionendpointdetails.Input of this TapiConnectivityGetconnectionendpointdetailsInput.  # noqa: E501
        :rtype: TapiConnectivityGetconnectionendpointdetailsInput
        """
        return util.deserialize_model(dikt, cls)

    @property
    def cep_id_or_name(self):
        """str: the ``cep-id-or-name`` field of this input."""
        return self._cep_id_or_name

    @cep_id_or_name.setter
    def cep_id_or_name(self, cep_id_or_name):
        """Set the ``cep-id-or-name`` field of this input."""
        self._cep_id_or_name = cep_id_or_name

    @property
    def nep_id_or_name(self):
        """str: the ``nep-id-or-name`` field of this input."""
        return self._nep_id_or_name

    @nep_id_or_name.setter
    def nep_id_or_name(self, nep_id_or_name):
        """Set the ``nep-id-or-name`` field of this input."""
        self._nep_id_or_name = nep_id_or_name

    @property
    def node_id_or_name(self):
        """str: the ``node-id-or-name`` field of this input."""
        return self._node_id_or_name

    @node_id_or_name.setter
    def node_id_or_name(self, node_id_or_name):
        """Set the ``node-id-or-name`` field of this input."""
        self._node_id_or_name = node_id_or_name

    @property
    def topology_id_or_name(self):
        """str: the ``topology-id-or-name`` field of this input."""
        return self._topology_id_or_name

    @topology_id_or_name.setter
    def topology_id_or_name(self, topology_id_or_name):
        """Set the ``topology-id-or-name`` field of this input."""
        self._topology_id_or_name = topology_id_or_name
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiConnectivityGetconnectionendpointdetailsInput(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, cep_id_or_name=None, nep_id_or_name=None, node_id_or_name=None, topology_id_or_name=None):  # noqa: E501
        """TapiConnectivityGetconnectionendpointdetailsInput - a model defined in OpenAPI

        :param cep_id_or_name: value for the ``cep-id-or-name`` field  # noqa: E501
        :type cep_id_or_name: str
        :param nep_id_or_name: value for the ``nep-id-or-name`` field  # noqa: E501
        :type nep_id_or_name: str
        :param node_id_or_name: value for the ``node-id-or-name`` field  # noqa: E501
        :type node_id_or_name: str
        :param topology_id_or_name: value for the ``topology-id-or-name`` field  # noqa: E501
        :type topology_id_or_name: str
        """
        field_names = ('cep_id_or_name', 'nep_id_or_name', 'node_id_or_name', 'topology_id_or_name')
        # Every field of this model is a plain string.
        self.openapi_types = dict.fromkeys(field_names, str)
        # The JSON wire format uses hyphens where Python identifiers use underscores.
        self.attribute_map = {name: name.replace('_', '-') for name in field_names}
        self._cep_id_or_name = cep_id_or_name
        self._nep_id_or_name = nep_id_or_name
        self._node_id_or_name = node_id_or_name
        self._topology_id_or_name = topology_id_or_name

    @classmethod
    def from_dict(cls, dikt) -> 'TapiConnectivityGetconnectionendpointdetailsInput':
        """Deserialize a plain dict into a model instance.

        :param dikt: A dict.
        :type: dict
        :return: The tapi.connectivity.getconnectionendpointdetails.Input of this TapiConnectivityGetconnectionendpointdetailsInput.  # noqa: E501
        :rtype: TapiConnectivityGetconnectionendpointdetailsInput
        """
        return util.deserialize_model(dikt, cls)

    @property
    def cep_id_or_name(self):
        """str: the ``cep-id-or-name`` field of this input."""
        return self._cep_id_or_name

    @cep_id_or_name.setter
    def cep_id_or_name(self, cep_id_or_name):
        """Set the ``cep-id-or-name`` field of this input."""
        self._cep_id_or_name = cep_id_or_name

    @property
    def nep_id_or_name(self):
        """str: the ``nep-id-or-name`` field of this input."""
        return self._nep_id_or_name

    @nep_id_or_name.setter
    def nep_id_or_name(self, nep_id_or_name):
        """Set the ``nep-id-or-name`` field of this input."""
        self._nep_id_or_name = nep_id_or_name

    @property
    def node_id_or_name(self):
        """str: the ``node-id-or-name`` field of this input."""
        return self._node_id_or_name

    @node_id_or_name.setter
    def node_id_or_name(self, node_id_or_name):
        """Set the ``node-id-or-name`` field of this input."""
        self._node_id_or_name = node_id_or_name

    @property
    def topology_id_or_name(self):
        """str: the ``topology-id-or-name`` field of this input."""
        return self._topology_id_or_name

    @topology_id_or_name.setter
    def topology_id_or_name(self, topology_id_or_name):
        """Set the ``topology-id-or-name`` field of this input."""
        self._topology_id_or_name = topology_id_or_name
import collections
import json
import logging
import traceback
from logging import Formatter, LogRecord
# colorama is an optional dependency: without it, log output is uncolored.
try:
    import colorama
except ImportError:
    colorama = None

# Mapping of log level -> ANSI color escape; stays empty when colorama is absent.
LOG_COLORS = {}
if colorama:
    colorama.init()
    # Default color scheme (DEBUG intentionally left uncolored).
    LOG_COLORS.update({
        logging.CRITICAL: colorama.Fore.BLUE,
        logging.ERROR: colorama.Fore.RED,
        logging.WARNING: colorama.Fore.YELLOW,
        logging.INFO: colorama.Fore.GREEN,
    })

# Attribute names present on every stock LogRecord; anything else is "extra".
_LOG_RECORD_FIELDS = set(vars(logging.makeLogRecord({})))
def set_gmt(enable=True):
    """Switch logging timestamp formatting between GMT and local time.

    Affects every ``logging.Formatter`` process-wide, since ``converter``
    is set on the class itself.

    :param enable: Whether to set GMT or localtime
    """
    import time

    if enable:
        Formatter.converter = time.gmtime
    else:
        Formatter.converter = time.localtime
class ColorFormatter(Formatter):
    """Formatter that wraps the rendered message in a level-specific color."""

    def formatMessage(self, record):  # noqa: N802
        # Mutating record.message is safe: each call to format() recomputes it.
        color = LOG_COLORS.get(record.levelno)
        if color is not None:
            record.message = f"{color}{record.message}{colorama.Style.RESET_ALL}"
        return super().formatMessage(record)
class JSONFormatter(Formatter):
    """Format the log message as a single-line JSON dict"""
    def format(self, record: LogRecord) -> str:
        # OrderedDict keeps the key order stable in the emitted JSON.
        d = collections.OrderedDict()
        if self.usesTime():
            d['time'] = self.formatTime(record, self.datefmt)
        d['level'] = record.levelname
        d['message'] = self.formatMessage(record)
        # Where the logging call was made (path / module / line / function).
        d['location'] = {
            label: getattr(record, key, None)
            for label, key in [
                ('path_name', 'pathname'),
                # ('file_name', 'filename'),
                ('module', 'module'),
                ('line', 'lineno'),
                ('function', 'funcName'),
            ]
        }
        if record.process:
            d['process'] = {'id': record.process, 'name': record.processName}
        if record.thread:
            d['thread'] = {'id': record.thread, 'name': record.threadName}
        if record.exc_info:
            d['exception'] = self.formatException(record.exc_info)
        if record.stack_info:
            d['stack_info'] = self.formatStack(record.stack_info)
        # Attributes absent from a stock LogRecord were attached via the
        # `extra=` kwarg of the logging call; surface them under 'extra'.
        extra = {k: v for k, v in record.__dict__.items() if k not in _LOG_RECORD_FIELDS}
        if extra:
            d['extra'] = extra
        # default stringifies non-JSON-serializable values instead of raising.
        # NOTE(review): `lambda v: str(v)` is equivalent to plain `str`.
        return json.dumps(d, check_circular=False, default=lambda v: str(v))
    def usesTime(self) -> bool:  # noqa: N802
        # Always emit the 'time' key, regardless of the format string.
        return True
    def formatMessage(self, record: LogRecord) -> str:  # noqa: N802
        # Interpolate the record's %-style args into its message.
        return record.getMessage()
    def formatException(self, ei):  # noqa: N802
        # Return exception info as a JSON-friendly dict (empty if no exception).
        return (
            {
                'type': ei[0].__name__,
                'message': str(ei[1]),
                'detail': traceback.format_exception(*ei),
            }
            if ei
            else {}
) | alphaconf/logging_util.py | import collections
import json
import logging
import traceback
from logging import Formatter, LogRecord
# colorama is an optional dependency: without it, log output is uncolored.
try:
    import colorama
except ImportError:
    colorama = None

# Mapping of log level -> ANSI color escape; stays empty when colorama is absent.
LOG_COLORS = {}
if colorama:
    colorama.init()
    # Default color scheme (DEBUG intentionally left uncolored).
    LOG_COLORS.update({
        logging.CRITICAL: colorama.Fore.BLUE,
        logging.ERROR: colorama.Fore.RED,
        logging.WARNING: colorama.Fore.YELLOW,
        logging.INFO: colorama.Fore.GREEN,
    })

# Attribute names present on every stock LogRecord; anything else is "extra".
_LOG_RECORD_FIELDS = set(vars(logging.makeLogRecord({})))
def set_gmt(enable=True):
    """Switch logging timestamp formatting between GMT and local time.

    Affects every ``logging.Formatter`` process-wide, since ``converter``
    is set on the class itself.

    :param enable: Whether to set GMT or localtime
    """
    import time

    if enable:
        Formatter.converter = time.gmtime
    else:
        Formatter.converter = time.localtime
class ColorFormatter(Formatter):
    """Formatter that wraps the rendered message in a level-specific color."""

    def formatMessage(self, record):  # noqa: N802
        # Mutating record.message is safe: each call to format() recomputes it.
        color = LOG_COLORS.get(record.levelno)
        if color is not None:
            record.message = f"{color}{record.message}{colorama.Style.RESET_ALL}"
        return super().formatMessage(record)
class JSONFormatter(Formatter):
    """Format the log message as a single-line JSON dict."""

    def format(self, record: LogRecord) -> str:
        """Render *record* as a one-line JSON object string."""
        # OrderedDict keeps the key order stable in the emitted JSON.
        d = collections.OrderedDict()
        if self.usesTime():
            d['time'] = self.formatTime(record, self.datefmt)
        d['level'] = record.levelname
        d['message'] = self.formatMessage(record)
        # Where the logging call was made (path / module / line / function).
        d['location'] = {
            label: getattr(record, key, None)
            for label, key in [
                ('path_name', 'pathname'),
                # ('file_name', 'filename'),
                ('module', 'module'),
                ('line', 'lineno'),
                ('function', 'funcName'),
            ]
        }
        if record.process:
            d['process'] = {'id': record.process, 'name': record.processName}
        if record.thread:
            d['thread'] = {'id': record.thread, 'name': record.threadName}
        if record.exc_info:
            d['exception'] = self.formatException(record.exc_info)
        if record.stack_info:
            d['stack_info'] = self.formatStack(record.stack_info)
        # Attributes absent from a stock LogRecord were attached via the
        # `extra=` kwarg of the logging call; surface them under 'extra'.
        extra = {k: v for k, v in record.__dict__.items() if k not in _LOG_RECORD_FIELDS}
        if extra:
            d['extra'] = extra
        # FIX: `default=lambda v: str(v)` was a redundant wrapper around `str`;
        # `default=str` is equivalent and idiomatic. It stringifies any
        # non-JSON-serializable value instead of raising TypeError.
        return json.dumps(d, check_circular=False, default=str)

    def usesTime(self) -> bool:  # noqa: N802
        """Always include the 'time' key, regardless of the format string."""
        return True

    def formatMessage(self, record: LogRecord) -> str:  # noqa: N802
        """Interpolate the record's %-style args into its message."""
        return record.getMessage()

    def formatException(self, ei):  # noqa: N802
        """Return exception info as a JSON-friendly dict (empty if none).

        :param ei: an ``(exc_type, exc_value, traceback)`` triple, or falsy
        """
        return (
            {
                'type': ei[0].__name__,
                'message': str(ei[1]),
                'detail': traceback.format_exception(*ei),
            }
            if ei
            else {}
        )