body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
b3dea12fbe3fb96bb84a57917085cfb746c5bb1ea5f14f9be2d5de97a37851c8
def ftnp(x): '\n Fraction to Negative Power\n Converts the denominator in a fraction representation of a time signature\n into a value used by midi.TimesignatureEvent data[1] i.e. negative power\n ' return round(math.log2(x))
Fraction to Negative Power Converts the denominator in a fraction representation of a time signature into a value used by midi.TimesignatureEvent data[1] i.e. negative power
mu7ron/utils.py
ftnp
eM7RON/mu7RON
0
python
def ftnp(x): '\n Fraction to Negative Power\n Converts the denominator in a fraction representation of a time signature\n into a value used by midi.TimesignatureEvent data[1] i.e. negative power\n ' return round(math.log2(x))
def ftnp(x): '\n Fraction to Negative Power\n Converts the denominator in a fraction representation of a time signature\n into a value used by midi.TimesignatureEvent data[1] i.e. negative power\n ' return round(math.log2(x))<|docstring|>Fraction to Negative Power Converts the denominator in a fraction representation of a time signature into a value used by midi.TimesignatureEvent data[1] i.e. negative power<|endoftext|>
fb516355bbab4d9dd62bc50f9eff1919fa0afe01db6d29cf8066c996560d6b73
def bpm_to_mspt(bpm, res=480): '\n Coverts an integer value of beats per minute to miliseconds per quarter note\n ' return ((60000 / res) / bpm)
Coverts an integer value of beats per minute to miliseconds per quarter note
mu7ron/utils.py
bpm_to_mspt
eM7RON/mu7RON
0
python
def bpm_to_mspt(bpm, res=480): '\n \n ' return ((60000 / res) / bpm)
def bpm_to_mspt(bpm, res=480): '\n \n ' return ((60000 / res) / bpm)<|docstring|>Coverts an integer value of beats per minute to miliseconds per quarter note<|endoftext|>
39eeca84cdd556f6dcbc81f4070dabfc135475641655845e79319bcbaa181d53
def mspt_to_bpm(mspt, res=480): '\n Coverts miliseconds per quarter note to an integer value of beats per minute\n ' return ((60000 / res) / mspt)
Coverts miliseconds per quarter note to an integer value of beats per minute
mu7ron/utils.py
mspt_to_bpm
eM7RON/mu7RON
0
python
def mspt_to_bpm(mspt, res=480): '\n \n ' return ((60000 / res) / mspt)
def mspt_to_bpm(mspt, res=480): '\n \n ' return ((60000 / res) / mspt)<|docstring|>Coverts miliseconds per quarter note to an integer value of beats per minute<|endoftext|>
de06294593f0b33a0ab5c9bf55f0c92e8e9a4ca8f61af36c232476068201edb1
def ticks_per_bar_division(res, **kwargs): '\n Resolution and Time Signature to Ticks Per Measure\n Converts a resolution and time signature to ticks per beat\n if raw=1 the the denominator of the tsig is in the python-midi format \n i.e. a negative power of 2: 2 = quarter, 3 = eight notes etc...\n The bar division (bdiv) is tied to frequency of the pulse. For example,\n values of 1., 2., and 4., will return tpb required to feel pulses every\n whole, half and quarter of a bar respectively. Please note this is \n independent of note divisions.\n ' tsig = kwargs.get('tsig', [4, 4]) raw = kwargs.get('raw', 0) div = kwargs.get('div', 1.0) tsig = (tsig[0] / (utils.nptf(tsig[1]) if raw else tsig[1])) return round((((res * tsig) * 4.0) / div))
Resolution and Time Signature to Ticks Per Measure Converts a resolution and time signature to ticks per beat if raw=1 the the denominator of the tsig is in the python-midi format i.e. a negative power of 2: 2 = quarter, 3 = eight notes etc... The bar division (bdiv) is tied to frequency of the pulse. For example, values of 1., 2., and 4., will return tpb required to feel pulses every whole, half and quarter of a bar respectively. Please note this is independent of note divisions.
mu7ron/utils.py
ticks_per_bar_division
eM7RON/mu7RON
0
python
def ticks_per_bar_division(res, **kwargs): '\n Resolution and Time Signature to Ticks Per Measure\n Converts a resolution and time signature to ticks per beat\n if raw=1 the the denominator of the tsig is in the python-midi format \n i.e. a negative power of 2: 2 = quarter, 3 = eight notes etc...\n The bar division (bdiv) is tied to frequency of the pulse. For example,\n values of 1., 2., and 4., will return tpb required to feel pulses every\n whole, half and quarter of a bar respectively. Please note this is \n independent of note divisions.\n ' tsig = kwargs.get('tsig', [4, 4]) raw = kwargs.get('raw', 0) div = kwargs.get('div', 1.0) tsig = (tsig[0] / (utils.nptf(tsig[1]) if raw else tsig[1])) return round((((res * tsig) * 4.0) / div))
def ticks_per_bar_division(res, **kwargs): '\n Resolution and Time Signature to Ticks Per Measure\n Converts a resolution and time signature to ticks per beat\n if raw=1 the the denominator of the tsig is in the python-midi format \n i.e. a negative power of 2: 2 = quarter, 3 = eight notes etc...\n The bar division (bdiv) is tied to frequency of the pulse. For example,\n values of 1., 2., and 4., will return tpb required to feel pulses every\n whole, half and quarter of a bar respectively. Please note this is \n independent of note divisions.\n ' tsig = kwargs.get('tsig', [4, 4]) raw = kwargs.get('raw', 0) div = kwargs.get('div', 1.0) tsig = (tsig[0] / (utils.nptf(tsig[1]) if raw else tsig[1])) return round((((res * tsig) * 4.0) / div))<|docstring|>Resolution and Time Signature to Ticks Per Measure Converts a resolution and time signature to ticks per beat if raw=1 the the denominator of the tsig is in the python-midi format i.e. a negative power of 2: 2 = quarter, 3 = eight notes etc... The bar division (bdiv) is tied to frequency of the pulse. For example, values of 1., 2., and 4., will return tpb required to feel pulses every whole, half and quarter of a bar respectively. Please note this is independent of note divisions.<|endoftext|>
f045cf35fdfeb1267a5d434228d2b6f1b050b5e96ea072b33569c6c86467f529
def ticks_per_note_division(res, **kwargs) -> int: '\n Args:\n res - int, midi.Pattern.resolution, ticks per quarter note\n div - int, a note division e.g. 1, 2, 4 or 8 for whole, half\n quarter or eight notes respectively. Unlike real music\n it is possible to use a number that is not 1 or 2 and\n not divisible by 4, e.g. 3 or 6.\n Returns:\n int, ticks per note division\n ' div = kwargs.get('div', 1.0) return round(((res * 4.0) / div))
Args: res - int, midi.Pattern.resolution, ticks per quarter note div - int, a note division e.g. 1, 2, 4 or 8 for whole, half quarter or eight notes respectively. Unlike real music it is possible to use a number that is not 1 or 2 and not divisible by 4, e.g. 3 or 6. Returns: int, ticks per note division
mu7ron/utils.py
ticks_per_note_division
eM7RON/mu7RON
0
python
def ticks_per_note_division(res, **kwargs) -> int: '\n Args:\n res - int, midi.Pattern.resolution, ticks per quarter note\n div - int, a note division e.g. 1, 2, 4 or 8 for whole, half\n quarter or eight notes respectively. Unlike real music\n it is possible to use a number that is not 1 or 2 and\n not divisible by 4, e.g. 3 or 6.\n Returns:\n int, ticks per note division\n ' div = kwargs.get('div', 1.0) return round(((res * 4.0) / div))
def ticks_per_note_division(res, **kwargs) -> int: '\n Args:\n res - int, midi.Pattern.resolution, ticks per quarter note\n div - int, a note division e.g. 1, 2, 4 or 8 for whole, half\n quarter or eight notes respectively. Unlike real music\n it is possible to use a number that is not 1 or 2 and\n not divisible by 4, e.g. 3 or 6.\n Returns:\n int, ticks per note division\n ' div = kwargs.get('div', 1.0) return round(((res * 4.0) / div))<|docstring|>Args: res - int, midi.Pattern.resolution, ticks per quarter note div - int, a note division e.g. 1, 2, 4 or 8 for whole, half quarter or eight notes respectively. Unlike real music it is possible to use a number that is not 1 or 2 and not divisible by 4, e.g. 3 or 6. Returns: int, ticks per note division<|endoftext|>
0395010889d8c70cd1c946506a867dd83c1c15a63d826a97eacb890ef56c9871
def translate_tsig(tsig_data): '\n Translates the data from a midi.TimeSignatureEvent into a string \n representation.\n ' tsig_data = list(tsig_data) tsig_data[1] = nptf(tsig_data[1]) tsig_data = (('[' + ', '.join(map(str, tsig_data))) + ']') return tsig_data
Translates the data from a midi.TimeSignatureEvent into a string representation.
mu7ron/utils.py
translate_tsig
eM7RON/mu7RON
0
python
def translate_tsig(tsig_data): '\n Translates the data from a midi.TimeSignatureEvent into a string \n representation.\n ' tsig_data = list(tsig_data) tsig_data[1] = nptf(tsig_data[1]) tsig_data = (('[' + ', '.join(map(str, tsig_data))) + ']') return tsig_data
def translate_tsig(tsig_data): '\n Translates the data from a midi.TimeSignatureEvent into a string \n representation.\n ' tsig_data = list(tsig_data) tsig_data[1] = nptf(tsig_data[1]) tsig_data = (('[' + ', '.join(map(str, tsig_data))) + ']') return tsig_data<|docstring|>Translates the data from a midi.TimeSignatureEvent into a string representation.<|endoftext|>
acc4cadd643d0a7afac6bf132f54827b7a207f80c4c7ca6750f4ca82eb2eab04
def quantize(x: int, q: int) -> int: '\n Will quantize a continuous or discrete range into steps of q\n where anything between 0 and q will be clipped to q.\n ' return ((q * (x // q)) if (not (0 < x <= q)) else q)
Will quantize a continuous or discrete range into steps of q where anything between 0 and q will be clipped to q.
mu7ron/utils.py
quantize
eM7RON/mu7RON
0
python
def quantize(x: int, q: int) -> int: '\n Will quantize a continuous or discrete range into steps of q\n where anything between 0 and q will be clipped to q.\n ' return ((q * (x // q)) if (not (0 < x <= q)) else q)
def quantize(x: int, q: int) -> int: '\n Will quantize a continuous or discrete range into steps of q\n where anything between 0 and q will be clipped to q.\n ' return ((q * (x // q)) if (not (0 < x <= q)) else q)<|docstring|>Will quantize a continuous or discrete range into steps of q where anything between 0 and q will be clipped to q.<|endoftext|>
5ba6c8b7271a837e6e70a34a783c9e82e91c1d89074fc385c3cb47479fd5235a
def dynamic_order(x: int, q: int) -> int: '\n Returns the number of steps that will exist if a range(0, x)\n is quantized into steps of q.\n ' return math.ceil((x / q))
Returns the number of steps that will exist if a range(0, x) is quantized into steps of q.
mu7ron/utils.py
dynamic_order
eM7RON/mu7RON
0
python
def dynamic_order(x: int, q: int) -> int: '\n Returns the number of steps that will exist if a range(0, x)\n is quantized into steps of q.\n ' return math.ceil((x / q))
def dynamic_order(x: int, q: int) -> int: '\n Returns the number of steps that will exist if a range(0, x)\n is quantized into steps of q.\n ' return math.ceil((x / q))<|docstring|>Returns the number of steps that will exist if a range(0, x) is quantized into steps of q.<|endoftext|>
8320649cf9a07910527f8c4ec728f2ff2090be2487330960addb4a6d0e74ff83
def dynamic_range(x: int, q: int) -> list: '\n Returns a list of values if range(0, x) is quantized into steps of q.\n ' return sorted(set((quantize(n, q) for n in range(x))))
Returns a list of values if range(0, x) is quantized into steps of q.
mu7ron/utils.py
dynamic_range
eM7RON/mu7RON
0
python
def dynamic_range(x: int, q: int) -> list: '\n \n ' return sorted(set((quantize(n, q) for n in range(x))))
def dynamic_range(x: int, q: int) -> list: '\n \n ' return sorted(set((quantize(n, q) for n in range(x))))<|docstring|>Returns a list of values if range(0, x) is quantized into steps of q.<|endoftext|>
f8131bc017b002ac6e2d7f363e6c40642ed69e8ba2b0d144b5fb2659e37aaa8f
def __init__(self, path=None, ptrn=None, sn=None): '\n Args:\n ptrn - midi.Pattern()\n path - str, relative path to midi file\n ' if (not ('midi_map' in globals())): global midi_map midi_map = maps.load_midi_map() has_path = (path is not None) has_ptrn = (ptrn is not None) assert any([has_path, has_ptrn]), 'MidiObj requires midi.Pattern or path to a saved midi.Pattern as input to __init__' working_dir = os.path.split(__file__)[0] for s in ['data', 'midi', 'temp', 'working']: working_dir = os.path.join(working_dir, s) if (not os.path.isdir(working_dir)): os.mkdir(working_dir) if (has_ptrn and (not has_path)): assert (sn is not None), 'A midi.ptrn without a path requires a serial number (sn) argument to act as an identifier and generate a unique path' revert_dir = os.path.split(__file__)[0] for s in ['data', 'midi', 'temp', 'working']: revert_dir = os.path.join(revert_dir, s) if (not os.path.isdir(revert_dir)): os.mkdir(revert_dir) if (not sn.endswith('.mid')): sn += '.mid' self.fn = sn self.org_path = os.path.join(revert_dir, sn) self.path = os.path.abspath(os.path.join(working_dir, self.fn)) self.dir = os.path.split(self.path)[0] elif has_path: path = os.path.normpath(path) self.fn = os.path.split(path)[(- 1)] self.org_path = path self.path = os.path.abspath(os.path.join(working_dir, self.fn)) self.dir = os.path.split(self.path)[0] if (ptrn is not None): self.ptrn = ptrn else: self.load(path) self._init()
Args: ptrn - midi.Pattern() path - str, relative path to midi file
mu7ron/utils.py
__init__
eM7RON/mu7RON
0
python
def __init__(self, path=None, ptrn=None, sn=None): '\n Args:\n ptrn - midi.Pattern()\n path - str, relative path to midi file\n ' if (not ('midi_map' in globals())): global midi_map midi_map = maps.load_midi_map() has_path = (path is not None) has_ptrn = (ptrn is not None) assert any([has_path, has_ptrn]), 'MidiObj requires midi.Pattern or path to a saved midi.Pattern as input to __init__' working_dir = os.path.split(__file__)[0] for s in ['data', 'midi', 'temp', 'working']: working_dir = os.path.join(working_dir, s) if (not os.path.isdir(working_dir)): os.mkdir(working_dir) if (has_ptrn and (not has_path)): assert (sn is not None), 'A midi.ptrn without a path requires a serial number (sn) argument to act as an identifier and generate a unique path' revert_dir = os.path.split(__file__)[0] for s in ['data', 'midi', 'temp', 'working']: revert_dir = os.path.join(revert_dir, s) if (not os.path.isdir(revert_dir)): os.mkdir(revert_dir) if (not sn.endswith('.mid')): sn += '.mid' self.fn = sn self.org_path = os.path.join(revert_dir, sn) self.path = os.path.abspath(os.path.join(working_dir, self.fn)) self.dir = os.path.split(self.path)[0] elif has_path: path = os.path.normpath(path) self.fn = os.path.split(path)[(- 1)] self.org_path = path self.path = os.path.abspath(os.path.join(working_dir, self.fn)) self.dir = os.path.split(self.path)[0] if (ptrn is not None): self.ptrn = ptrn else: self.load(path) self._init()
def __init__(self, path=None, ptrn=None, sn=None): '\n Args:\n ptrn - midi.Pattern()\n path - str, relative path to midi file\n ' if (not ('midi_map' in globals())): global midi_map midi_map = maps.load_midi_map() has_path = (path is not None) has_ptrn = (ptrn is not None) assert any([has_path, has_ptrn]), 'MidiObj requires midi.Pattern or path to a saved midi.Pattern as input to __init__' working_dir = os.path.split(__file__)[0] for s in ['data', 'midi', 'temp', 'working']: working_dir = os.path.join(working_dir, s) if (not os.path.isdir(working_dir)): os.mkdir(working_dir) if (has_ptrn and (not has_path)): assert (sn is not None), 'A midi.ptrn without a path requires a serial number (sn) argument to act as an identifier and generate a unique path' revert_dir = os.path.split(__file__)[0] for s in ['data', 'midi', 'temp', 'working']: revert_dir = os.path.join(revert_dir, s) if (not os.path.isdir(revert_dir)): os.mkdir(revert_dir) if (not sn.endswith('.mid')): sn += '.mid' self.fn = sn self.org_path = os.path.join(revert_dir, sn) self.path = os.path.abspath(os.path.join(working_dir, self.fn)) self.dir = os.path.split(self.path)[0] elif has_path: path = os.path.normpath(path) self.fn = os.path.split(path)[(- 1)] self.org_path = path self.path = os.path.abspath(os.path.join(working_dir, self.fn)) self.dir = os.path.split(self.path)[0] if (ptrn is not None): self.ptrn = ptrn else: self.load(path) self._init()<|docstring|>Args: ptrn - midi.Pattern() path - str, relative path to midi file<|endoftext|>
07c4514039db5423fd1a18005616258c6ac4ba7f55e5f3390544b3454c1ee580
def __repr__(self): '\n Returns a clearly formatted string for displaying the \n midi objects attributes\n ' repr_str = ('<%s.%s object at %s>' % (self.__class__.__module__, self.__class__.__name__, hex(id(self)))) sep = ('\n' + (24 * ' ')) inst_data = ['i | Group | Instrument', ('-' * 25)] inst_data.extend([(f"{str((x + 1))}{(' ' * (3 - len(str((x + 1)))))}| " + f"{midi_map['grp'][str((x + 1))]}{(' ' * (10 - len(midi_map['grp'][str((x + 1))])))}| {midi_map['inst'][str((x + 1))]}") for x in self.inst_data]) inst_data = sep.join(inst_data) tsig_data = ['tick | data', ('-' * 25)] tsig_data.extend([f"{x[0]}{(' ' * (10 - len(str(x[0]))))}| {translate_tsig(x[1])}" for x in self.tsig_data]) tsig_data = sep.join(tsig_data) tmpo_data = ['tick | bpm', ('-' * 25)] tmpo_data.extend([f"{x[0]}{(' ' * (10 - len(str(x[0]))))}| {round(x[1], 3)}" for x in self.tmpo_data]) tmpo_data = sep.join(tmpo_data) attr_str = f''' __________________________________________________________________________ File : {self.fn} Location : {self.dir} Repr : {repr_str} Resolution : {self.ptrn.resolution} --------------------------------------------------------------------------- Voices : n. voice : {self.n_vce} n. u_inst : {self.n_uinst} data. : {inst_data} u. trck : {self.uniq_pgm_trcks} Time sig. : n. : {self.n_tsig} data : {tsig_data} Tempo : n. : {self.n_tmpo} data : {tmpo_data} __________________________________________________________________________ ''' return attr_str
Returns a clearly formatted string for displaying the midi objects attributes
mu7ron/utils.py
__repr__
eM7RON/mu7RON
0
python
def __repr__(self): '\n Returns a clearly formatted string for displaying the \n midi objects attributes\n ' repr_str = ('<%s.%s object at %s>' % (self.__class__.__module__, self.__class__.__name__, hex(id(self)))) sep = ('\n' + (24 * ' ')) inst_data = ['i | Group | Instrument', ('-' * 25)] inst_data.extend([(f"{str((x + 1))}{(' ' * (3 - len(str((x + 1)))))}| " + f"{midi_map['grp'][str((x + 1))]}{(' ' * (10 - len(midi_map['grp'][str((x + 1))])))}| {midi_map['inst'][str((x + 1))]}") for x in self.inst_data]) inst_data = sep.join(inst_data) tsig_data = ['tick | data', ('-' * 25)] tsig_data.extend([f"{x[0]}{(' ' * (10 - len(str(x[0]))))}| {translate_tsig(x[1])}" for x in self.tsig_data]) tsig_data = sep.join(tsig_data) tmpo_data = ['tick | bpm', ('-' * 25)] tmpo_data.extend([f"{x[0]}{(' ' * (10 - len(str(x[0]))))}| {round(x[1], 3)}" for x in self.tmpo_data]) tmpo_data = sep.join(tmpo_data) attr_str = f' __________________________________________________________________________ File : {self.fn} Location : {self.dir} Repr : {repr_str} Resolution : {self.ptrn.resolution} --------------------------------------------------------------------------- Voices : n. voice : {self.n_vce} n. u_inst : {self.n_uinst} data. : {inst_data} u. trck : {self.uniq_pgm_trcks} Time sig. : n. : {self.n_tsig} data : {tsig_data} Tempo : n. : {self.n_tmpo} data : {tmpo_data} __________________________________________________________________________ ' return attr_str
def __repr__(self): '\n Returns a clearly formatted string for displaying the \n midi objects attributes\n ' repr_str = ('<%s.%s object at %s>' % (self.__class__.__module__, self.__class__.__name__, hex(id(self)))) sep = ('\n' + (24 * ' ')) inst_data = ['i | Group | Instrument', ('-' * 25)] inst_data.extend([(f"{str((x + 1))}{(' ' * (3 - len(str((x + 1)))))}| " + f"{midi_map['grp'][str((x + 1))]}{(' ' * (10 - len(midi_map['grp'][str((x + 1))])))}| {midi_map['inst'][str((x + 1))]}") for x in self.inst_data]) inst_data = sep.join(inst_data) tsig_data = ['tick | data', ('-' * 25)] tsig_data.extend([f"{x[0]}{(' ' * (10 - len(str(x[0]))))}| {translate_tsig(x[1])}" for x in self.tsig_data]) tsig_data = sep.join(tsig_data) tmpo_data = ['tick | bpm', ('-' * 25)] tmpo_data.extend([f"{x[0]}{(' ' * (10 - len(str(x[0]))))}| {round(x[1], 3)}" for x in self.tmpo_data]) tmpo_data = sep.join(tmpo_data) attr_str = f' __________________________________________________________________________ File : {self.fn} Location : {self.dir} Repr : {repr_str} Resolution : {self.ptrn.resolution} --------------------------------------------------------------------------- Voices : n. voice : {self.n_vce} n. u_inst : {self.n_uinst} data. : {inst_data} u. trck : {self.uniq_pgm_trcks} Time sig. : n. : {self.n_tsig} data : {tsig_data} Tempo : n. : {self.n_tmpo} data : {tmpo_data} __________________________________________________________________________ ' return attr_str<|docstring|>Returns a clearly formatted string for displaying the midi objects attributes<|endoftext|>
75f4947c5e45f0f25dd2a051fa473b5aa1eaccae7d8a17193e0b7efa7b85863e
def play(self, t=None): '\n Play audio of self.ptrn\n ' play(self.ptrn, t)
Play audio of self.ptrn
mu7ron/utils.py
play
eM7RON/mu7RON
0
python
def play(self, t=None): '\n \n ' play(self.ptrn, t)
def play(self, t=None): '\n \n ' play(self.ptrn, t)<|docstring|>Play audio of self.ptrn<|endoftext|>
d0f89428b14ed23b5e74e7c330bc8e869fa7d2196cee66934bd5165621cb9f0c
@staticmethod def stop(): '\n Stop audio of self.ptrn\n ' pygame.mixer.music.stop()
Stop audio of self.ptrn
mu7ron/utils.py
stop
eM7RON/mu7RON
0
python
@staticmethod def stop(): '\n \n ' pygame.mixer.music.stop()
@staticmethod def stop(): '\n \n ' pygame.mixer.music.stop()<|docstring|>Stop audio of self.ptrn<|endoftext|>
8f17522f2b3646ce4b9596f6238fd394acccc3f93471abb1d000086f18ef95ba
def save(self, path=None): '\n Store self at self.path\n ' if (path is not None): self.path = path midi.write_midifile(self.path, self.ptrn)
Store self at self.path
mu7ron/utils.py
save
eM7RON/mu7RON
0
python
def save(self, path=None): '\n \n ' if (path is not None): self.path = path midi.write_midifile(self.path, self.ptrn)
def save(self, path=None): '\n \n ' if (path is not None): self.path = path midi.write_midifile(self.path, self.ptrn)<|docstring|>Store self at self.path<|endoftext|>
999cb5889c41714ae10efc103f370818a651ec9b18db9bdd3c35765002a534bc
def load(self, path=None): '\n Load from backed up MidiObj\n ' self.ptrn = midi.read_midifile((path if (path is not None) else self.path))
Load from backed up MidiObj
mu7ron/utils.py
load
eM7RON/mu7RON
0
python
def load(self, path=None): '\n \n ' self.ptrn = midi.read_midifile((path if (path is not None) else self.path))
def load(self, path=None): '\n \n ' self.ptrn = midi.read_midifile((path if (path is not None) else self.path))<|docstring|>Load from backed up MidiObj<|endoftext|>
5aa6cd6db7a113024676a46a50d781d39d76f2d1e9c1533f0609c6330d287c6f
def revert(self): '\n Load up the original midi file and start fresh\n ' self.load(self.org_path) self.save()
Load up the original midi file and start fresh
mu7ron/utils.py
revert
eM7RON/mu7RON
0
python
def revert(self): '\n \n ' self.load(self.org_path) self.save()
def revert(self): '\n \n ' self.load(self.org_path) self.save()<|docstring|>Load up the original midi file and start fresh<|endoftext|>
22249b63ebc50ffa15c966eb42952d7ddc825632150347817934808978fb7a26
def chge_inst(self, idx, new_inst): '\n Change the instruments of some voices (ProgramChangeEvents)\n \n Args:\n idx - list, numbers of voices/insts to change, please note that they\n are numbered in the order in which they appear when looping\n ptrn > trck > evnt\n new_inst - int, the number of the new midi instrument\n ' i = 0 for trck in self.ptrn: for evnt in trck: if isinstance(evnt, midi.ProgramChangeEvent): if (i in idx): evnt.data[0] = new_inst i += 1 self.save()
Change the instruments of some voices (ProgramChangeEvents) Args: idx - list, numbers of voices/insts to change, please note that they are numbered in the order in which they appear when looping ptrn > trck > evnt new_inst - int, the number of the new midi instrument
mu7ron/utils.py
chge_inst
eM7RON/mu7RON
0
python
def chge_inst(self, idx, new_inst): '\n Change the instruments of some voices (ProgramChangeEvents)\n \n Args:\n idx - list, numbers of voices/insts to change, please note that they\n are numbered in the order in which they appear when looping\n ptrn > trck > evnt\n new_inst - int, the number of the new midi instrument\n ' i = 0 for trck in self.ptrn: for evnt in trck: if isinstance(evnt, midi.ProgramChangeEvent): if (i in idx): evnt.data[0] = new_inst i += 1 self.save()
def chge_inst(self, idx, new_inst): '\n Change the instruments of some voices (ProgramChangeEvents)\n \n Args:\n idx - list, numbers of voices/insts to change, please note that they\n are numbered in the order in which they appear when looping\n ptrn > trck > evnt\n new_inst - int, the number of the new midi instrument\n ' i = 0 for trck in self.ptrn: for evnt in trck: if isinstance(evnt, midi.ProgramChangeEvent): if (i in idx): evnt.data[0] = new_inst i += 1 self.save()<|docstring|>Change the instruments of some voices (ProgramChangeEvents) Args: idx - list, numbers of voices/insts to change, please note that they are numbered in the order in which they appear when looping ptrn > trck > evnt new_inst - int, the number of the new midi instrument<|endoftext|>
58a243764fbf04fd43a1f74dc1cb62dc22a24dca44f17ce78181fc4c989eeb4d
def make_sampler(things): ' Make generator that samples randomly from a list of things. ' nb_things = len(things) shuffled_things = deepcopy(things) for i in cycle(range(nb_things)): if (i == 0): random.shuffle(shuffled_things) (yield shuffled_things[i])
Make generator that samples randomly from a list of things.
crim_data.py
make_sampler
jfarrugia-uom/hyperstar
0
python
def make_sampler(things): ' ' nb_things = len(things) shuffled_things = deepcopy(things) for i in cycle(range(nb_things)): if (i == 0): random.shuffle(shuffled_things) (yield shuffled_things[i])
def make_sampler(things): ' ' nb_things = len(things) shuffled_things = deepcopy(things) for i in cycle(range(nb_things)): if (i == 0): random.shuffle(shuffled_things) (yield shuffled_things[i])<|docstring|>Make generator that samples randomly from a list of things.<|endoftext|>
43315839d1dd6e25cff06a1b81b260f7904fa69ad30c24cb3909bacc51ffbcd7
def tearDown(self): 'delete the file we have created' os.unlink('__unittests.xml')
delete the file we have created
pywinauto/unittests/test_xml_helpers.py
tearDown
smalinux/pywinauto
3,544
python
def tearDown(self): os.unlink('__unittests.xml')
def tearDown(self): os.unlink('__unittests.xml')<|docstring|>delete the file we have created<|endoftext|>
4bbaf4460c66ad5821d9bee7bf88cf8d52d18b780f5f56c48a66067a00988100
def assertReadWriteSame(self, props): 'Make sure that roundtripping produces identical file' WriteDialogToFile('__unittests.xml', props) read_props = ReadPropertiesFromFile('__unittests.xml') self.assertEqual(props, read_props)
Make sure that roundtripping produces identical file
pywinauto/unittests/test_xml_helpers.py
assertReadWriteSame
smalinux/pywinauto
3,544
python
def assertReadWriteSame(self, props): WriteDialogToFile('__unittests.xml', props) read_props = ReadPropertiesFromFile('__unittests.xml') self.assertEqual(props, read_props)
def assertReadWriteSame(self, props): WriteDialogToFile('__unittests.xml', props) read_props = ReadPropertiesFromFile('__unittests.xml') self.assertEqual(props, read_props)<|docstring|>Make sure that roundtripping produces identical file<|endoftext|>
282757eb608138819d4beb1cd36b7a20f0728396768e98cb5224961fefc3c2b3
def testOneUnicode(self): 'Test writing/reading a unicode string' props = [dict(test=u'hiya')] self.assertReadWriteSame(props)
Test writing/reading a unicode string
pywinauto/unittests/test_xml_helpers.py
testOneUnicode
smalinux/pywinauto
3,544
python
def testOneUnicode(self): props = [dict(test=u'hiya')] self.assertReadWriteSame(props)
def testOneUnicode(self): props = [dict(test=u'hiya')] self.assertReadWriteSame(props)<|docstring|>Test writing/reading a unicode string<|endoftext|>
740129ccb805d3f0ba1f4316416920133cbed7dce4343d3ad6ca9aeb53a61567
def testOneString(self): 'Test writing/reading a string' props = [dict(test='hiya')] self.assertReadWriteSame(props)
Test writing/reading a string
pywinauto/unittests/test_xml_helpers.py
testOneString
smalinux/pywinauto
3,544
python
def testOneString(self): props = [dict(test='hiya')] self.assertReadWriteSame(props)
def testOneString(self): props = [dict(test='hiya')] self.assertReadWriteSame(props)<|docstring|>Test writing/reading a string<|endoftext|>
1e57ac88f6f872ace5e8bbacc10b3dec06d7255647963246a2d282a3525d2b41
def testSomeEscapes(self): 'Test writing/reading a dictionary with some escape sequences' test_string = [] for i in range(0, 50000): test_string.append(six.unichr(i)) test_string = ''.join(test_string) props = [dict(test=test_string)] self.assertReadWriteSame(props)
Test writing/reading a dictionary with some escape sequences
pywinauto/unittests/test_xml_helpers.py
testSomeEscapes
smalinux/pywinauto
3,544
python
def testSomeEscapes(self): test_string = [] for i in range(0, 50000): test_string.append(six.unichr(i)) test_string = .join(test_string) props = [dict(test=test_string)] self.assertReadWriteSame(props)
def testSomeEscapes(self): test_string = [] for i in range(0, 50000): test_string.append(six.unichr(i)) test_string = .join(test_string) props = [dict(test=test_string)] self.assertReadWriteSame(props)<|docstring|>Test writing/reading a dictionary with some escape sequences<|endoftext|>
553fa38f7989cce1fad87da457c5700d5d8ceee6f681459604970eadde7f3dc9
def testOneBool(self): 'Test writing/reading Bool' props = [dict(test=True)] self.assertReadWriteSame(props)
Test writing/reading Bool
pywinauto/unittests/test_xml_helpers.py
testOneBool
smalinux/pywinauto
3,544
python
def testOneBool(self): props = [dict(test=True)] self.assertReadWriteSame(props)
def testOneBool(self): props = [dict(test=True)] self.assertReadWriteSame(props)<|docstring|>Test writing/reading Bool<|endoftext|>
5b8b8102c3cf8527d73e1477c6e4b6baa499907d8a8bd9b44424f4a7ef581955
def testOneList(self): 'Test writing/reading a list' props = [dict(test=[1, 2, 3, 4, 5, 6])] self.assertReadWriteSame(props)
Test writing/reading a list
pywinauto/unittests/test_xml_helpers.py
testOneList
smalinux/pywinauto
3,544
python
def testOneList(self): props = [dict(test=[1, 2, 3, 4, 5, 6])] self.assertReadWriteSame(props)
def testOneList(self): props = [dict(test=[1, 2, 3, 4, 5, 6])] self.assertReadWriteSame(props)<|docstring|>Test writing/reading a list<|endoftext|>
e7a77c9ff86db833c803842724b6beb39d799e14d826afa9cbc2d33044856e71
def testOneDict(self): 'Test writing/reading a dictionary with one element' props = [dict(test_value=dict(test=1))] self.assertReadWriteSame(props)
Test writing/reading a dictionary with one element
pywinauto/unittests/test_xml_helpers.py
testOneDict
smalinux/pywinauto
3,544
python
def testOneDict(self): props = [dict(test_value=dict(test=1))] self.assertReadWriteSame(props)
def testOneDict(self): props = [dict(test_value=dict(test=1))] self.assertReadWriteSame(props)<|docstring|>Test writing/reading a dictionary with one element<|endoftext|>
2355052855e07fd89049999f8f4aa9d96acde92a4f1a56aa1ac786db28f36487
def testOneLong(self): 'Test writing/reading one long is correct' props = [dict(test=1)] self.assertReadWriteSame(props)
Test writing/reading one long is correct
pywinauto/unittests/test_xml_helpers.py
testOneLong
smalinux/pywinauto
3,544
python
def testOneLong(self): props = [dict(test=1)] self.assertReadWriteSame(props)
def testOneLong(self): props = [dict(test=1)] self.assertReadWriteSame(props)<|docstring|>Test writing/reading one long is correct<|endoftext|>
c8dd182a14d37d2c0d9fc8a281866baab18d5499990646bde655b88f3c3535fc
def testLOGFONTW(self): 'Test writing/reading one LOGFONTW is correct' font = LOGFONTW() font.lfWeight = 23 font.lfFaceName = u'wowow' props = [dict(test=font)] self.assertReadWriteSame(props)
Test writing/reading one LOGFONTW is correct
pywinauto/unittests/test_xml_helpers.py
testLOGFONTW
smalinux/pywinauto
3,544
python
def testLOGFONTW(self): font = LOGFONTW() font.lfWeight = 23 font.lfFaceName = u'wowow' props = [dict(test=font)] self.assertReadWriteSame(props)
def testLOGFONTW(self): font = LOGFONTW() font.lfWeight = 23 font.lfFaceName = u'wowow' props = [dict(test=font)] self.assertReadWriteSame(props)<|docstring|>Test writing/reading one LOGFONTW is correct<|endoftext|>
9fc00243d1c58cc9885a4e6b35f524e124dc83745975e895fad40fe93bde96ef
def testRECT(self): 'Test writing/reading one RECT is correct' props = [dict(test=RECT(1, 2, 3, 4))] self.assertReadWriteSame(props)
Test writing/reading one RECT is correct
pywinauto/unittests/test_xml_helpers.py
testRECT
smalinux/pywinauto
3,544
python
def testRECT(self): props = [dict(test=RECT(1, 2, 3, 4))] self.assertReadWriteSame(props)
def testRECT(self): props = [dict(test=RECT(1, 2, 3, 4))] self.assertReadWriteSame(props)<|docstring|>Test writing/reading one RECT is correct<|endoftext|>
45bb72b310afc96d54a60fcaae4352678079f3a7db96768757aa8d9fc8be44c6
def testTwoLong(self): 'Test writing/reading two longs is correct' props = [dict(test=1), dict(test_blah=2)] self.assertReadWriteSame(props)
Test writing/reading two longs is correct
pywinauto/unittests/test_xml_helpers.py
testTwoLong
smalinux/pywinauto
3,544
python
def testTwoLong(self): props = [dict(test=1), dict(test_blah=2)] self.assertReadWriteSame(props)
def testTwoLong(self): props = [dict(test=1), dict(test_blah=2)] self.assertReadWriteSame(props)<|docstring|>Test writing/reading two longs is correct<|endoftext|>
3d4c93b37ebca02368ed29660287cd41b5d7dccd3d5ae1d70b284d44d3802343
def testEmptyList(self): 'Test writing/reading empty list' props = [dict(test=[])] self.assertReadWriteSame(props)
Test writing/reading empty list
pywinauto/unittests/test_xml_helpers.py
testEmptyList
smalinux/pywinauto
3,544
python
def testEmptyList(self): props = [dict(test=[])] self.assertReadWriteSame(props)
def testEmptyList(self): props = [dict(test=[])] self.assertReadWriteSame(props)<|docstring|>Test writing/reading empty list<|endoftext|>
67e9d3ba0906ea0d657ce79c404a5e981e14411db5377ac751efd7aae1d3572e
def testEmptyDict(self): 'Test writing/reading empty dict' props = [dict(test={})] self.assertReadWriteSame(props)
Test writing/reading empty dict
pywinauto/unittests/test_xml_helpers.py
testEmptyDict
smalinux/pywinauto
3,544
python
def testEmptyDict(self): props = [dict(test={})] self.assertReadWriteSame(props)
def testEmptyDict(self): props = [dict(test={})] self.assertReadWriteSame(props)<|docstring|>Test writing/reading empty dict<|endoftext|>
427ef7f6a292336f8eeacd64d06fd913ec294badfefff7bb1a299e424abd3892
@add_group('sudo') async def cmd_sudo(self, *datas: Union[(discord.Message, String)], destination: Optional[common.Channel]=None, from_attachment: bool=True, mention: bool=False): '\n ->type More admin commands\n ->signature pg!sudo <*datas> [destination=] [from_attachment=True]\n ->description Send a message through the bot\n ->extended description\n Send a sequence of messages contain text from the given\n data using the specified arguments.\n\n __Args__:\n `*datas: (Message|String)`\n > A sequence of discord messages whose text\n > or text attachment should be used as input,\n > or strings.\n\n `destination (TextChannel) = `\n > A destination channel to send the output to.\n\n `from_attachment (bool) = True`\n > Whether the attachment of an input message should be\n > used to create a message.\n\n `mention (bool) = False`\n > Whether any mentions in the given input text\n > should ping their target. If set to `True`,\n > any role/user/member that the bot is allowed to ping will\n > be pinged.\n\n __Returns__:\n > One or more generated messages based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command\n pg!sudo "lol" "that" "was" "funny /s" destination=#general\n pg!sudo 987654321987654321 "Additionally, ..." 
123456739242423 from_attachment=True\n -----\n Implement pg!sudo, for admins to send messages via the bot\n ' if (destination is None): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') for (i, data) in enumerate(datas): if isinstance(data, discord.Message): if (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if (not (i % 50)): (await asyncio.sleep(0)) output_strings = [] load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False), ('⠀', '`...`', False))) data_count = len(datas) for (i, data) in enumerate(datas): if ((data_count > 2) and (not (i % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Inputs', value=(f'''`{i}/{data_count}` inputs processed {((i / data_count) * 100):.01f}% | ''' + utils.progress_bar((i / data_count), divisions=30))), 0)) attachment_msg = None if isinstance(data, String): if (not data.string): attachment_msg = self.invoke_msg else: msg_text = data.string output_strings.append(msg_text) elif isinstance(data, discord.Message): if (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if from_attachment: attachment_msg = data else: src_msg_txt = data.content if src_msg_txt: output_strings.append(src_msg_txt) else: raise BotException(f'Input {i}: No message text found!', 'The message given as input does not have any text content.') if attachment_msg: if (not 
attachment_msg.attachments): raise BotException(f'Input {i}: No valid attachment found in message.', 'It must be a `.txt` file containing text data. If you want to retrieve the content of the given message(s) instead, set the` from_attachment=` argument to `False`') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException(f'Input {i}: No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (0 < len(msg_text) <= 2000): output_strings.append(msg_text) else: raise BotException(f'Input {i}: Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') (await asyncio.sleep(0)) if (not datas): data_count = 1 attachment_msg = self.invoke_msg if (not attachment_msg.attachments): raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (0 < len(msg_text) <= 2000): output_strings.append(msg_text) else: raise BotException('Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') if (data_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'''`{data_count}/{data_count}` inputs processed 100% | ''' + utils.progress_bar(1.0, divisions=30))), 0)) allowed_mentions = (discord.AllowedMentions.all() if mention else 
discord.AllowedMentions.none()) output_count = len(output_strings) for (j, msg_txt) in enumerate(output_strings): if ((output_count > 2) and (not (j % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Creating Messages', value=(f'''`{j}/{output_count}` messages created {((j / output_count) * 100):.01f}% | ''' + utils.progress_bar((j / output_count), divisions=30))), 1)) (await destination.send(content=msg_txt, allowed_mentions=allowed_mentions)) if (data_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Creation Completed', value=(f'''`{output_count}/{output_count}` messages created 100% | ''' + utils.progress_bar(1.0, divisions=30))), 1)) try: (await self.invoke_msg.delete()) (await self.response_msg.delete(delay=(10.0 if (data_count > 2) else 0.0))) except discord.NotFound: pass
->type More admin commands ->signature pg!sudo <*datas> [destination=] [from_attachment=True] ->description Send a message through the bot ->extended description Send a sequence of messages contain text from the given data using the specified arguments. __Args__: `*datas: (Message|String)` > A sequence of discord messages whose text > or text attachment should be used as input, > or strings. `destination (TextChannel) = ` > A destination channel to send the output to. `from_attachment (bool) = True` > Whether the attachment of an input message should be > used to create a message. `mention (bool) = False` > Whether any mentions in the given input text > should ping their target. If set to `True`, > any role/user/member that the bot is allowed to ping will > be pinged. __Returns__: > One or more generated messages based on the given input. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. ->example command pg!sudo "lol" "that" "was" "funny /s" destination=#general pg!sudo 987654321987654321 "Additionally, ..." 123456739242423 from_attachment=True ----- Implement pg!sudo, for admins to send messages via the bot
pgbot/commands/admin/sudo.py
cmd_sudo
gresm/PygameCommunityBot
77
python
@add_group('sudo') async def cmd_sudo(self, *datas: Union[(discord.Message, String)], destination: Optional[common.Channel]=None, from_attachment: bool=True, mention: bool=False): '\n ->type More admin commands\n ->signature pg!sudo <*datas> [destination=] [from_attachment=True]\n ->description Send a message through the bot\n ->extended description\n Send a sequence of messages contain text from the given\n data using the specified arguments.\n\n __Args__:\n `*datas: (Message|String)`\n > A sequence of discord messages whose text\n > or text attachment should be used as input,\n > or strings.\n\n `destination (TextChannel) = `\n > A destination channel to send the output to.\n\n `from_attachment (bool) = True`\n > Whether the attachment of an input message should be\n > used to create a message.\n\n `mention (bool) = False`\n > Whether any mentions in the given input text\n > should ping their target. If set to `True`,\n > any role/user/member that the bot is allowed to ping will\n > be pinged.\n\n __Returns__:\n > One or more generated messages based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command\n pg!sudo "lol" "that" "was" "funny /s" destination=#general\n pg!sudo 987654321987654321 "Additionally, ..." 
123456739242423 from_attachment=True\n -----\n Implement pg!sudo, for admins to send messages via the bot\n ' if (destination is None): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') for (i, data) in enumerate(datas): if isinstance(data, discord.Message): if (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if (not (i % 50)): (await asyncio.sleep(0)) output_strings = [] load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False), ('⠀', '`...`', False))) data_count = len(datas) for (i, data) in enumerate(datas): if ((data_count > 2) and (not (i % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Inputs', value=(f'`{i}/{data_count}` inputs processed {((i / data_count) * 100):.01f}% | ' + utils.progress_bar((i / data_count), divisions=30))), 0)) attachment_msg = None if isinstance(data, String): if (not data.string): attachment_msg = self.invoke_msg else: msg_text = data.string output_strings.append(msg_text) elif isinstance(data, discord.Message): if (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if from_attachment: attachment_msg = data else: src_msg_txt = data.content if src_msg_txt: output_strings.append(src_msg_txt) else: raise BotException(f'Input {i}: No message text found!', 'The message given as input does not have any text content.') if attachment_msg: if (not 
attachment_msg.attachments): raise BotException(f'Input {i}: No valid attachment found in message.', 'It must be a `.txt` file containing text data. If you want to retrieve the content of the given message(s) instead, set the` from_attachment=` argument to `False`') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException(f'Input {i}: No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (0 < len(msg_text) <= 2000): output_strings.append(msg_text) else: raise BotException(f'Input {i}: Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') (await asyncio.sleep(0)) if (not datas): data_count = 1 attachment_msg = self.invoke_msg if (not attachment_msg.attachments): raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (0 < len(msg_text) <= 2000): output_strings.append(msg_text) else: raise BotException('Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') if (data_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{data_count}/{data_count}` inputs processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0)) allowed_mentions = (discord.AllowedMentions.all() if mention else discord.AllowedMentions.none()) 
output_count = len(output_strings) for (j, msg_txt) in enumerate(output_strings): if ((output_count > 2) and (not (j % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Creating Messages', value=(f'`{j}/{output_count}` messages created {((j / output_count) * 100):.01f}% | ' + utils.progress_bar((j / output_count), divisions=30))), 1)) (await destination.send(content=msg_txt, allowed_mentions=allowed_mentions)) if (data_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Creation Completed', value=(f'`{output_count}/{output_count}` messages created 100% | ' + utils.progress_bar(1.0, divisions=30))), 1)) try: (await self.invoke_msg.delete()) (await self.response_msg.delete(delay=(10.0 if (data_count > 2) else 0.0))) except discord.NotFound: pass
@add_group('sudo') async def cmd_sudo(self, *datas: Union[(discord.Message, String)], destination: Optional[common.Channel]=None, from_attachment: bool=True, mention: bool=False): '\n ->type More admin commands\n ->signature pg!sudo <*datas> [destination=] [from_attachment=True]\n ->description Send a message through the bot\n ->extended description\n Send a sequence of messages contain text from the given\n data using the specified arguments.\n\n __Args__:\n `*datas: (Message|String)`\n > A sequence of discord messages whose text\n > or text attachment should be used as input,\n > or strings.\n\n `destination (TextChannel) = `\n > A destination channel to send the output to.\n\n `from_attachment (bool) = True`\n > Whether the attachment of an input message should be\n > used to create a message.\n\n `mention (bool) = False`\n > Whether any mentions in the given input text\n > should ping their target. If set to `True`,\n > any role/user/member that the bot is allowed to ping will\n > be pinged.\n\n __Returns__:\n > One or more generated messages based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command\n pg!sudo "lol" "that" "was" "funny /s" destination=#general\n pg!sudo 987654321987654321 "Additionally, ..." 
123456739242423 from_attachment=True\n -----\n Implement pg!sudo, for admins to send messages via the bot\n ' if (destination is None): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') for (i, data) in enumerate(datas): if isinstance(data, discord.Message): if (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if (not (i % 50)): (await asyncio.sleep(0)) output_strings = [] load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False), ('⠀', '`...`', False))) data_count = len(datas) for (i, data) in enumerate(datas): if ((data_count > 2) and (not (i % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Inputs', value=(f'`{i}/{data_count}` inputs processed {((i / data_count) * 100):.01f}% | ' + utils.progress_bar((i / data_count), divisions=30))), 0)) attachment_msg = None if isinstance(data, String): if (not data.string): attachment_msg = self.invoke_msg else: msg_text = data.string output_strings.append(msg_text) elif isinstance(data, discord.Message): if (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if from_attachment: attachment_msg = data else: src_msg_txt = data.content if src_msg_txt: output_strings.append(src_msg_txt) else: raise BotException(f'Input {i}: No message text found!', 'The message given as input does not have any text content.') if attachment_msg: if (not 
attachment_msg.attachments): raise BotException(f'Input {i}: No valid attachment found in message.', 'It must be a `.txt` file containing text data. If you want to retrieve the content of the given message(s) instead, set the` from_attachment=` argument to `False`') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException(f'Input {i}: No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (0 < len(msg_text) <= 2000): output_strings.append(msg_text) else: raise BotException(f'Input {i}: Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') (await asyncio.sleep(0)) if (not datas): data_count = 1 attachment_msg = self.invoke_msg if (not attachment_msg.attachments): raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (0 < len(msg_text) <= 2000): output_strings.append(msg_text) else: raise BotException('Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') if (data_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{data_count}/{data_count}` inputs processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0)) allowed_mentions = (discord.AllowedMentions.all() if mention else discord.AllowedMentions.none()) 
output_count = len(output_strings) for (j, msg_txt) in enumerate(output_strings): if ((output_count > 2) and (not (j % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Creating Messages', value=(f'`{j}/{output_count}` messages created {((j / output_count) * 100):.01f}% | ' + utils.progress_bar((j / output_count), divisions=30))), 1)) (await destination.send(content=msg_txt, allowed_mentions=allowed_mentions)) if (data_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Creation Completed', value=(f'`{output_count}/{output_count}` messages created 100% | ' + utils.progress_bar(1.0, divisions=30))), 1)) try: (await self.invoke_msg.delete()) (await self.response_msg.delete(delay=(10.0 if (data_count > 2) else 0.0))) except discord.NotFound: pass<|docstring|>->type More admin commands ->signature pg!sudo <*datas> [destination=] [from_attachment=True] ->description Send a message through the bot ->extended description Send a sequence of messages contain text from the given data using the specified arguments. __Args__: `*datas: (Message|String)` > A sequence of discord messages whose text > or text attachment should be used as input, > or strings. `destination (TextChannel) = ` > A destination channel to send the output to. `from_attachment (bool) = True` > Whether the attachment of an input message should be > used to create a message. `mention (bool) = False` > Whether any mentions in the given input text > should ping their target. If set to `True`, > any role/user/member that the bot is allowed to ping will > be pinged. __Returns__: > One or more generated messages based on the given input. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. ->example command pg!sudo "lol" "that" "was" "funny /s" destination=#general pg!sudo 987654321987654321 "Additionally, ..." 
123456739242423 from_attachment=True ----- Implement pg!sudo, for admins to send messages via the bot<|endoftext|>
c809ca22ad184d7650b2dbda3be49880601785bff96aee98b911cda88130cc85
@add_group('sudo', 'edit') async def cmd_sudo_edit(self, msg: discord.Message, data: Union[(discord.Message, String)], from_attachment: bool=True): '\n ->type More admin commands\n ->signature pg!sudo_edit <msg> <data> [from_attachment=True]\n ->description Replace a message that the bot sent\n ->extended description\n Replace the text content of a message using the given attributes.\n\n __Args__:\n `msg: (Message)`\n > A discord message whose text content\n > should be replaced.\n\n `data: (Message|String)`\n > The text data that should be used to replace\n > the input message text.\n\n `from_attachment (bool) = True`\n > Whether the attachment of the input message in `data`\n > should be used to edit the target message. If set to\n > `False`, the text content of the input message in\n > `data` will be used.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command\n pg!sudo edit 9876543211234676789 "bruh"\n pg!sudo edit 1234567890876543345/9876543211234676789 2345678427483744843\n -----\n ' if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') elif (isinstance(data, discord.Message) and (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',)))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') attachment_msg: Optional[discord.Message] = None msg_text = '' if isinstance(data, String): if (not data.string): attachment_msg = self.invoke_msg else: msg_text = data.string elif isinstance(data, discord.Message): if from_attachment: attachment_msg = data else: src_msg_txt = data.content if src_msg_txt: msg_text = src_msg_txt else: raise BotException('No 
message text found!', 'The message given as input does not have any text content.') if attachment_msg: if (not attachment_msg.attachments): raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (not (0 < len(msg_text) <= 2000)): raise BotException('Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') try: (await msg.edit(content=msg_text)) except discord.HTTPException as e: raise BotException('An exception occured while handling the command!', e.args[0]) try: (await self.invoke_msg.delete()) (await self.response_msg.delete()) except discord.NotFound: pass
->type More admin commands ->signature pg!sudo_edit <msg> <data> [from_attachment=True] ->description Replace a message that the bot sent ->extended description Replace the text content of a message using the given attributes. __Args__: `msg: (Message)` > A discord message whose text content > should be replaced. `data: (Message|String)` > The text data that should be used to replace > the input message text. `from_attachment (bool) = True` > Whether the attachment of the input message in `data` > should be used to edit the target message. If set to > `False`, the text content of the input message in > `data` will be used. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. ->example command pg!sudo edit 9876543211234676789 "bruh" pg!sudo edit 1234567890876543345/9876543211234676789 2345678427483744843 -----
pgbot/commands/admin/sudo.py
cmd_sudo_edit
gresm/PygameCommunityBot
77
python
@add_group('sudo', 'edit') async def cmd_sudo_edit(self, msg: discord.Message, data: Union[(discord.Message, String)], from_attachment: bool=True): '\n ->type More admin commands\n ->signature pg!sudo_edit <msg> <data> [from_attachment=True]\n ->description Replace a message that the bot sent\n ->extended description\n Replace the text content of a message using the given attributes.\n\n __Args__:\n `msg: (Message)`\n > A discord message whose text content\n > should be replaced.\n\n `data: (Message|String)`\n > The text data that should be used to replace\n > the input message text.\n\n `from_attachment (bool) = True`\n > Whether the attachment of the input message in `data`\n > should be used to edit the target message. If set to\n > `False`, the text content of the input message in\n > `data` will be used.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command\n pg!sudo edit 9876543211234676789 "bruh"\n pg!sudo edit 1234567890876543345/9876543211234676789 2345678427483744843\n -----\n ' if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') elif (isinstance(data, discord.Message) and (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',)))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') attachment_msg: Optional[discord.Message] = None msg_text = if isinstance(data, String): if (not data.string): attachment_msg = self.invoke_msg else: msg_text = data.string elif isinstance(data, discord.Message): if from_attachment: attachment_msg = data else: src_msg_txt = data.content if src_msg_txt: msg_text = src_msg_txt else: raise BotException('No 
message text found!', 'The message given as input does not have any text content.') if attachment_msg: if (not attachment_msg.attachments): raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (not (0 < len(msg_text) <= 2000)): raise BotException('Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') try: (await msg.edit(content=msg_text)) except discord.HTTPException as e: raise BotException('An exception occured while handling the command!', e.args[0]) try: (await self.invoke_msg.delete()) (await self.response_msg.delete()) except discord.NotFound: pass
@add_group('sudo', 'edit') async def cmd_sudo_edit(self, msg: discord.Message, data: Union[(discord.Message, String)], from_attachment: bool=True): '\n ->type More admin commands\n ->signature pg!sudo_edit <msg> <data> [from_attachment=True]\n ->description Replace a message that the bot sent\n ->extended description\n Replace the text content of a message using the given attributes.\n\n __Args__:\n `msg: (Message)`\n > A discord message whose text content\n > should be replaced.\n\n `data: (Message|String)`\n > The text data that should be used to replace\n > the input message text.\n\n `from_attachment (bool) = True`\n > Whether the attachment of the input message in `data`\n > should be used to edit the target message. If set to\n > `False`, the text content of the input message in\n > `data` will be used.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command\n pg!sudo edit 9876543211234676789 "bruh"\n pg!sudo edit 1234567890876543345/9876543211234676789 2345678427483744843\n -----\n ' if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') elif (isinstance(data, discord.Message) and (not utils.check_channel_permissions(self.author, data.channel, permissions=('view_channel',)))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') attachment_msg: Optional[discord.Message] = None msg_text = if isinstance(data, String): if (not data.string): attachment_msg = self.invoke_msg else: msg_text = data.string elif isinstance(data, discord.Message): if from_attachment: attachment_msg = data else: src_msg_txt = data.content if src_msg_txt: msg_text = src_msg_txt else: raise BotException('No 
message text found!', 'The message given as input does not have any text content.') if attachment_msg: if (not attachment_msg.attachments): raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') for attachment in attachment_msg.attachments: if ((attachment.content_type is not None) and attachment.content_type.startswith('text')): attachment_obj = attachment break else: raise BotException('No valid attachment found in message.', 'It must be a `.txt` file containing text data.') msg_text = (await attachment_obj.read()) msg_text = msg_text.decode() if (not (0 < len(msg_text) <= 2000)): raise BotException('Too little/many characters!', 'a Discord message must contain at least one character and cannot contain more than 2000.') try: (await msg.edit(content=msg_text)) except discord.HTTPException as e: raise BotException('An exception occured while handling the command!', e.args[0]) try: (await self.invoke_msg.delete()) (await self.response_msg.delete()) except discord.NotFound: pass<|docstring|>->type More admin commands ->signature pg!sudo_edit <msg> <data> [from_attachment=True] ->description Replace a message that the bot sent ->extended description Replace the text content of a message using the given attributes. __Args__: `msg: (Message)` > A discord message whose text content > should be replaced. `data: (Message|String)` > The text data that should be used to replace > the input message text. `from_attachment (bool) = True` > Whether the attachment of the input message in `data` > should be used to edit the target message. If set to > `False`, the text content of the input message in > `data` will be used. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. ->example command pg!sudo edit 9876543211234676789 "bruh" pg!sudo edit 1234567890876543345/9876543211234676789 2345678427483744843 -----<|endoftext|>
635d487dece368605e9f0abe315ceaf5d39adeb7a2e70249398dd08e85b71f2c
@add_group('sudo', 'swap')
async def cmd_sudo_swap(
    self,
    msg_a: discord.Message,
    msg_b: discord.Message,
    embeds: bool = True,
):
    '\n        ->type More admin commands\n        ->signature pg!sudo swap <message> <message>\n        ->description Swap message contents and embeds between messages through the bot\n        ->extended description\n        Swap message contents and embeds between the two given messages.\n\n        __Args__:\n            `message_a: (Message)`\n            > A discord message whose embed\n            > should be swapped with that of `message_b`.\n\n            `message_b: (Message)`\n            > Another discord message whose embed\n            > should be swapped with that of `message_a`.\n\n            `embeds: (bool) = True`\n            > If set to `True`, the first embeds will also\n            > (when present) be swapped between the given messages.\n\n        __Raises__:\n            > `BotException`: One or more given arguments are invalid.\n            > `HTTPException`: An invalid operation was blocked by Discord.\n\n        ->example command pg!sudo swap 123456789123456789 69696969969669420\n        -----\n        '
    # View + send permissions are required in both messages' channels.
    if not utils.check_channel_permissions(
        self.author, msg_a.channel, permissions=('view_channel', 'send_messages')
    ) or not utils.check_channel_permissions(
        self.author, msg_b.channel, permissions=('view_channel', 'send_messages')
    ):
        raise BotException(
            'Not enough permissions',
            'You do not have enough permissions to run this command with the specified arguments.',
        )

    # Each message must carry at least some text or an embed to swap.
    if (not msg_a.content and not msg_a.embeds) or (
        not msg_b.content and not msg_b.embeds
    ):
        raise BotException(
            'Cannot execute command:',
            'Not enough data found in one or more of the given messages.',
        )
    elif common.bot.user.id not in (msg_a.author.id, msg_b.author.id):
        # The bot can only edit messages it authored itself.
        raise BotException(
            'Cannot execute command:',
            f'Both messages must have been authored by me, {common.bot.user.mention}.',
        )

    # Snapshot both sides before editing, so the second edit
    # does not read already-swapped data.
    msg_embed_a = msg_a.embeds[0] if msg_a.embeds else None
    msg_embed_b = msg_b.embeds[0] if msg_b.embeds else None
    msg_content_a = msg_a.content
    msg_content_b = msg_b.content

    if embeds:
        await msg_a.edit(content=msg_content_b, embed=msg_embed_b)
        await msg_b.edit(content=msg_content_a, embed=msg_embed_a)
    else:
        # Text-only swap; embeds stay where they are.
        await msg_a.edit(content=msg_content_b)
        await msg_b.edit(content=msg_content_a)

    try:
        await self.response_msg.delete()
    except discord.NotFound:
        pass
->type More admin commands ->signature pg!sudo swap <message> <message> ->description Swap message contents and embeds between messages through the bot ->extended description Swap message contents and embeds between the two given messages. __Args__: `message_a: (Message)` > A discord message whose embed > should be swapped with that of `message_b`. `message_b: (Message)` > Another discord message whose embed > should be swapped with that of `message_a`. `embeds: (bool) = True` > If set to `True`, the first embeds will also > (when present) be swapped between the given messages. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. ->example command pg!sudo swap 123456789123456789 69696969969669420 -----
pgbot/commands/admin/sudo.py
cmd_sudo_swap
gresm/PygameCommunityBot
77
python
@add_group('sudo', 'swap') async def cmd_sudo_swap(self, msg_a: discord.Message, msg_b: discord.Message, embeds: bool=True): '\n ->type More admin commands\n ->signature pg!sudo swap <message> <message>\n ->description Swap message contents and embeds between messages through the bot\n ->extended description\n Swap message contents and embeds between the two given messages.\n\n __Args__:\n `message_a: (Message)`\n > A discord message whose embed\n > should be swapped with that of `message_b`.\n\n `message_b: (Message)`\n > Another discord message whose embed\n > should be swapped with that of `message_a`.\n\n `embeds: (bool) = True`\n > If set to `True`, the first embeds will also\n > (when present) be swapped between the given messages.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command pg!sudo swap 123456789123456789 69696969969669420\n -----\n ' if ((not utils.check_channel_permissions(self.author, msg_a.channel, permissions=('view_channel', 'send_messages'))) or (not utils.check_channel_permissions(self.author, msg_b.channel, permissions=('view_channel', 'send_messages')))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if (((not msg_a.content) and (not msg_a.embeds)) or ((not msg_b.content) and (not msg_b.embeds))): raise BotException('Cannot execute command:', 'Not enough data found in one or more of the given messages.') elif (common.bot.user.id not in (msg_a.author.id, msg_b.author.id)): raise BotException('Cannot execute command:', f'Both messages must have been authored by me, {common.bot.user.mention}.') msg_embed_a = (msg_a.embeds[0] if msg_a.embeds else None) msg_embed_b = (msg_b.embeds[0] if msg_b.embeds else None) msg_content_a = msg_a.content msg_content_b = msg_b.content if embeds: (await msg_a.edit(content=msg_content_b, embed=msg_embed_b)) (await 
msg_b.edit(content=msg_content_a, embed=msg_embed_a)) else: (await msg_a.edit(content=msg_content_b)) (await msg_b.edit(content=msg_content_a)) try: (await self.response_msg.delete()) except discord.NotFound: pass
@add_group('sudo', 'swap') async def cmd_sudo_swap(self, msg_a: discord.Message, msg_b: discord.Message, embeds: bool=True): '\n ->type More admin commands\n ->signature pg!sudo swap <message> <message>\n ->description Swap message contents and embeds between messages through the bot\n ->extended description\n Swap message contents and embeds between the two given messages.\n\n __Args__:\n `message_a: (Message)`\n > A discord message whose embed\n > should be swapped with that of `message_b`.\n\n `message_b: (Message)`\n > Another discord message whose embed\n > should be swapped with that of `message_a`.\n\n `embeds: (bool) = True`\n > If set to `True`, the first embeds will also\n > (when present) be swapped between the given messages.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n\n ->example command pg!sudo swap 123456789123456789 69696969969669420\n -----\n ' if ((not utils.check_channel_permissions(self.author, msg_a.channel, permissions=('view_channel', 'send_messages'))) or (not utils.check_channel_permissions(self.author, msg_b.channel, permissions=('view_channel', 'send_messages')))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') if (((not msg_a.content) and (not msg_a.embeds)) or ((not msg_b.content) and (not msg_b.embeds))): raise BotException('Cannot execute command:', 'Not enough data found in one or more of the given messages.') elif (common.bot.user.id not in (msg_a.author.id, msg_b.author.id)): raise BotException('Cannot execute command:', f'Both messages must have been authored by me, {common.bot.user.mention}.') msg_embed_a = (msg_a.embeds[0] if msg_a.embeds else None) msg_embed_b = (msg_b.embeds[0] if msg_b.embeds else None) msg_content_a = msg_a.content msg_content_b = msg_b.content if embeds: (await msg_a.edit(content=msg_content_b, embed=msg_embed_b)) (await 
msg_b.edit(content=msg_content_a, embed=msg_embed_a)) else: (await msg_a.edit(content=msg_content_b)) (await msg_b.edit(content=msg_content_a)) try: (await self.response_msg.delete()) except discord.NotFound: pass<|docstring|>->type More admin commands ->signature pg!sudo swap <message> <message> ->description Swap message contents and embeds between messages through the bot ->extended description Swap message contents and embeds between the two given messages. __Args__: `message_a: (Message)` > A discord message whose embed > should be swapped with that of `message_b`. `message_b: (Message)` > Another discord message whose embed > should be swapped with that of `message_a`. `embeds: (bool) = True` > If set to `True`, the first embeds will also > (when present) be swapped between the given messages. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. ->example command pg!sudo swap 123456789123456789 69696969969669420 -----<|endoftext|>
bd2d6ce55f7148bed2aeaf225e77a19294a6cedf35e96884949e25a50186d305
@add_group('sudo', 'get')
async def cmd_sudo_get(
    self,
    *msgs: discord.Message,
    destination: Optional[common.Channel] = None,
    as_attachment: bool = False,
    attachments: bool = True,
    embeds: bool = True,
    info: bool = False,
    author_info: bool = True,
):
    '\n        ->type More admin commands\n        ->signature pg!sudo_get <*messages> [destination=] [as_attachment=False] [attachments=True]\n        [embeds=True] [info=False] [author_info=False]\n        ->description Get the text of messages through the bot\n        ->extended description\n        Get the contents, attachments and serialized embeds of the given messages and send them to the given destination channel.\n\n        __Args__:\n            `*messages: (Message)`\n            > A sequence of discord messages whose text,\n            contents, attachments or embeds should be retrieved.\n\n            `destination: (Channel) =`\n            > A destination channel to send the generated outputs to.\n            > If omitted, the destination will be the channel where\n            > this command was invoked.\n\n            `as_attachment: (bool) = False`\n            > Whether the text content (if present) of the given\n            > messages should be sent as an attachment (`.txt`)\n            > or as embed containing it inside a code block in its\n            > description. This will always occur if the text\n            > content is above 2000 characters.\n\n            `attachments: (bool) = True`\n            > Whether the attachments of the given messages\n            > should be retrieved (when possible).\n\n            `embeds: (bool) = True`\n            > Whether the embeds of the given messages\n            > should be retrieved (as serialized JSON data).\n\n            +===+\n\n            `info: (bool) = False`\n            > If set to `True`, an embed containing info\n            > about each message will be sent along with\n            > the message data output.\n\n            `author_info: (bool) = True`\n            > If set to `True`, extra information about\n            > the message authors will be added to the\n            > info embed which is sent if `info` is set\n            > to `True`.\n\n        __Returns__:\n            > One or more messages with attachents or embeds\n            > based on the given input.\n\n        __Raises__:\n            > `BotException`: One or more given arguments are invalid.\n            > `HTTPException`: An invalid operation was blocked by Discord.\n        -----\n        '
    # Fall back to the invoking channel when no valid text channel was given.
    if not isinstance(destination, discord.TextChannel):
        destination = self.channel

    if not utils.check_channel_permissions(
        self.author, destination, permissions=('view_channel', 'send_messages')
    ):
        raise BotException(
            'Not enough permissions',
            'You do not have enough permissions to run this command with the specified arguments.',
        )

    # Verify read access once per distinct source channel; yield to the
    # event loop every 50 messages so large inputs don't block the bot.
    checked_channels = set()
    for i, msg in enumerate(msgs):
        if msg.channel not in checked_channels:
            if not utils.check_channel_permissions(
                self.author, msg.channel, permissions=('view_channel',)
            ):
                raise BotException(
                    'Not enough permissions',
                    'You do not have enough permissions to run this command with the specified arguments.',
                )
            else:
                checked_channels.add(msg.channel)
        if not i % 50:
            await asyncio.sleep(0)

    if not msgs:
        raise BotException('Invalid arguments!', 'No messages given as input.')

    load_embed = embed_utils.create(
        title='Your command is being processed:',
        fields=(('⠀', '`...`', False),),
    )
    msg_count = len(msgs)
    for i, msg in enumerate(msgs):
        # Progress feedback every third message, but only for batches > 2.
        if msg_count > 2 and not i % 3:
            await embed_utils.edit_field_from_dict(
                self.response_msg,
                load_embed,
                dict(
                    name='Processing Messages',
                    value=f'`{i}/{msg_count}` messages processed\n'
                    f'{(i / msg_count) * 100:.01f}% | '
                    + utils.progress_bar(i / msg_count, divisions=30),
                ),
                0,
            )
        await destination.trigger_typing()

        # Escape code fences so the content can be shown inside an embed code block.
        escaped_msg_content = msg.content.replace('```', '\\`\\`\\`')

        attached_files = None
        if attachments:
            # Files above the upload limit are replaced by a placeholder text file.
            with io.StringIO('This file was too large to be duplicated.') as fobj:
                attached_files = [
                    (await a.to_file(spoiler=a.is_spoiler()))
                    if a.size <= self.filesize_limit
                    else discord.File(fobj, f'filetoolarge - {a.filename}.txt')
                    for a in msg.attachments
                ]

        if info:
            info_embed = embed_utils.get_msg_info_embed(msg, author_info)
            info_embed.set_author(name='Message data & info')
            info_embed.title = ''
            info_embed.description = ''.join(
                (
                    '__Text'
                    + (' (Shortened)' if len(escaped_msg_content) > 2000 else '')
                    + '__:',
                    (f'\n```\n{escaped_msg_content[:2001]}\n[...]\n```' + '\n⠀')
                    if len(escaped_msg_content) > 2000
                    else '\n⠀',
                )
            )
            content_file = None
            if as_attachment or len(msg.content) > 2000:
                with io.StringIO(msg.content) as fobj:
                    content_file = discord.File(fobj, 'messagedata.txt')
            await destination.send(embed=info_embed, file=content_file)
        elif as_attachment:
            with io.StringIO(msg.content) as fobj:
                await destination.send(
                    file=discord.File(fobj, 'messagedata.txt'),
                    embed=embed_utils.create(
                        author_name='Message data',
                        description=f'**[View Original Message]({msg.jump_url})**',
                    ),
                )
        elif len(msg.content) > 2000 or len(escaped_msg_content) > 2000:
            # Too long for an embed code block: fall back to a file attachment.
            with io.StringIO(msg.content) as fobj:
                await destination.send(
                    file=discord.File(fobj, 'messagedata.txt'),
                    embed=embed_utils.create(
                        author_name='Message data',
                        description=f'**[View Original Message]({msg.jump_url})**',
                    ),
                )
        else:
            # NOTE(review): this branch (and the attachment/embed sends below)
            # targets self.channel rather than `destination` — confirm intended.
            await embed_utils.send(
                self.channel,
                author_name='Message data',
                description='```\n{0}```'.format(escaped_msg_content),
                fields=(
                    ('⠀', f'**[View Original Message]({msg.jump_url})**', False),
                ),
            )

        if attached_files:
            for n in range(len(attached_files)):
                await self.channel.send(
                    content=f'**Message attachment** ({n + 1}):',
                    file=attached_files[n],
                )

        if embeds and msg.embeds:
            # Serialize each embed to pretty-printed JSON and send as a file.
            embed_data_fobjs = []
            for embed in msg.embeds:
                embed_data_fobj = io.StringIO()
                embed_utils.export_embed_data(
                    embed.to_dict(), fp=embed_data_fobj, indent=4, as_json=True
                )
                embed_data_fobj.seek(0)
                embed_data_fobjs.append(embed_data_fobj)
            for n in range(len(embed_data_fobjs)):
                await self.channel.send(
                    content=f'**Message embed** ({n + 1}):',
                    file=discord.File(embed_data_fobjs[n], filename='embeddata.json'),
                )
            for embed_data_fobj in embed_data_fobjs:
                embed_data_fobj.close()

        await asyncio.sleep(0)

    if msg_count > 2:
        await embed_utils.edit_field_from_dict(
            self.response_msg,
            load_embed,
            dict(
                name='Processing Completed',
                value=f'`{msg_count}/{msg_count}` messages processed\n100% | '
                + utils.progress_bar(1.0, divisions=30),
            ),
            0,
        )

    # Keep the progress message visible briefly for larger batches.
    try:
        await self.response_msg.delete(delay=10 if msg_count > 2 else 0)
    except discord.NotFound:
        pass
->type More admin commands ->signature pg!sudo_get <*messages> [destination=] [as_attachment=False] [attachments=True] [embeds=True] [info=False] [author_info=False] ->description Get the text of messages through the bot ->extended description Get the contents, attachments and serialized embeds of the given messages and send them to the given destination channel. __Args__: `*messages: (Message)` > A sequence of discord messages whose text, contents, attachments or embeds should be retrieved. `destination: (Channel) =` > A destination channel to send the generated outputs to. > If omitted, the destination will be the channel where > this command was invoked. `as_attachment: (bool) = False` > Whether the text content (if present) of the given > messages should be sent as an attachment (`.txt`) > or as embed containing it inside a code block in its > description. This will always occur if the text > content is above 2000 characters. `attachments: (bool) = True` > Whether the attachments of the given messages > should be retrieved (when possible). `embeds: (bool) = True` > Whether the embeds of the given messages > should be retrieved (as serialized JSON data). +===+ `info: (bool) = False` > If set to `True`, an embed containing info > about each message will be sent along with > the message data output. `author_info: (bool) = True` > If set to `True`, extra information about > the message authors will be added to the > info embed which is sent if `info` is set > to `True`. __Returns__: > One or more messages with attachents or embeds > based on the given input. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. -----
pgbot/commands/admin/sudo.py
cmd_sudo_get
gresm/PygameCommunityBot
77
python
@add_group('sudo', 'get') async def cmd_sudo_get(self, *msgs: discord.Message, destination: Optional[common.Channel]=None, as_attachment: bool=False, attachments: bool=True, embeds: bool=True, info: bool=False, author_info: bool=True): '\n ->type More admin commands\n ->signature pg!sudo_get <*messages> [destination=] [as_attachment=False] [attachments=True]\n [embeds=True] [info=False] [author_info=False]\n ->description Get the text of messages through the bot\n ->extended description\n Get the contents, attachments and serialized embeds of the given messages and send them to the given destination channel.\n\n __Args__:\n `*messages: (Message)`\n > A sequence of discord messages whose text,\n contents, attachments or embeds should be retrieved.\n\n `destination: (Channel) =`\n > A destination channel to send the generated outputs to.\n > If omitted, the destination will be the channel where\n > this command was invoked.\n\n `as_attachment: (bool) = False`\n > Whether the text content (if present) of the given\n > messages should be sent as an attachment (`.txt`)\n > or as embed containing it inside a code block in its\n > description. 
This will always occur if the text\n > content is above 2000 characters.\n\n `attachments: (bool) = True`\n > Whether the attachments of the given messages\n > should be retrieved (when possible).\n\n `embeds: (bool) = True`\n > Whether the embeds of the given messages\n > should be retrieved (as serialized JSON data).\n\n +===+\n\n `info: (bool) = False`\n > If set to `True`, an embed containing info\n > about each message will be sent along with\n > the message data output.\n\n `author_info: (bool) = True`\n > If set to `True`, extra information about\n > the message authors will be added to the\n > info embed which is sent if `info` is set\n > to `True`.\n\n __Returns__:\n > One or more messages with attachents or embeds\n > based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n -----\n ' if (not isinstance(destination, discord.TextChannel)): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') checked_channels = set() for (i, msg) in enumerate(msgs): if (msg.channel not in checked_channels): if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') else: checked_channels.add(msg.channel) if (not (i % 50)): (await asyncio.sleep(0)) if (not msgs): raise BotException('Invalid arguments!', 'No messages given as input.') load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False),)) msg_count = len(msgs) for (i, msg) in enumerate(msgs): if ((msg_count > 2) and (not (i % 3))): (await 
embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Messages', value=(f'`{i}/{msg_count}` messages processed {((i / msg_count) * 100):.01f}% | ' + utils.progress_bar((i / msg_count), divisions=30))), 0)) (await destination.trigger_typing()) escaped_msg_content = msg.content.replace('```', '\\`\\`\\`') attached_files = None if attachments: with io.StringIO('This file was too large to be duplicated.') as fobj: attached_files = [((await a.to_file(spoiler=a.is_spoiler())) if (a.size <= self.filesize_limit) else discord.File(fobj, f'filetoolarge - {a.filename}.txt')) for a in msg.attachments] if info: info_embed = embed_utils.get_msg_info_embed(msg, author_info) info_embed.set_author(name='Message data & info') info_embed.title = info_embed.description = .join(((('__Text' + (' (Shortened)' if (len(escaped_msg_content) > 2000) else )) + '__:'), ((f' ``` {escaped_msg_content[:2001]} [...] ```' + '\n⠀') if (len(escaped_msg_content) > 2000) else '\n⠀'))) content_file = None if (as_attachment or (len(msg.content) > 2000)): with io.StringIO(msg.content) as fobj: content_file = discord.File(fobj, 'messagedata.txt') (await destination.send(embed=info_embed, file=content_file)) elif as_attachment: with io.StringIO(msg.content) as fobj: (await destination.send(file=discord.File(fobj, 'messagedata.txt'), embed=embed_utils.create(author_name='Message data', description=f'**[View Original Message]({msg.jump_url})**'))) elif ((len(msg.content) > 2000) or (len(escaped_msg_content) > 2000)): with io.StringIO(msg.content) as fobj: (await destination.send(file=discord.File(fobj, 'messagedata.txt'), embed=embed_utils.create(author_name='Message data', description=f'**[View Original Message]({msg.jump_url})**'))) else: (await embed_utils.send(self.channel, author_name='Message data', description='```\n{0}```'.format(escaped_msg_content), fields=(('⠀', f'**[View Original Message]({msg.jump_url})**', False),))) if attached_files: for i in 
range(len(attached_files)): (await self.channel.send(content=f'**Message attachment** ({(i + 1)}):', file=attached_files[i])) if (embeds and msg.embeds): embed_data_fobjs = [] for embed in msg.embeds: embed_data_fobj = io.StringIO() embed_utils.export_embed_data(embed.to_dict(), fp=embed_data_fobj, indent=4, as_json=True) embed_data_fobj.seek(0) embed_data_fobjs.append(embed_data_fobj) for i in range(len(embed_data_fobjs)): (await self.channel.send(content=f'**Message embed** ({(i + 1)}):', file=discord.File(embed_data_fobjs[i], filename='embeddata.json'))) for embed_data_fobj in embed_data_fobjs: embed_data_fobj.close() (await asyncio.sleep(0)) if (msg_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{msg_count}/{msg_count}` messages processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0)) try: (await self.response_msg.delete(delay=(10 if (msg_count > 2) else 0))) except discord.NotFound: pass
@add_group('sudo', 'get') async def cmd_sudo_get(self, *msgs: discord.Message, destination: Optional[common.Channel]=None, as_attachment: bool=False, attachments: bool=True, embeds: bool=True, info: bool=False, author_info: bool=True): '\n ->type More admin commands\n ->signature pg!sudo_get <*messages> [destination=] [as_attachment=False] [attachments=True]\n [embeds=True] [info=False] [author_info=False]\n ->description Get the text of messages through the bot\n ->extended description\n Get the contents, attachments and serialized embeds of the given messages and send them to the given destination channel.\n\n __Args__:\n `*messages: (Message)`\n > A sequence of discord messages whose text,\n contents, attachments or embeds should be retrieved.\n\n `destination: (Channel) =`\n > A destination channel to send the generated outputs to.\n > If omitted, the destination will be the channel where\n > this command was invoked.\n\n `as_attachment: (bool) = False`\n > Whether the text content (if present) of the given\n > messages should be sent as an attachment (`.txt`)\n > or as embed containing it inside a code block in its\n > description. 
This will always occur if the text\n > content is above 2000 characters.\n\n `attachments: (bool) = True`\n > Whether the attachments of the given messages\n > should be retrieved (when possible).\n\n `embeds: (bool) = True`\n > Whether the embeds of the given messages\n > should be retrieved (as serialized JSON data).\n\n +===+\n\n `info: (bool) = False`\n > If set to `True`, an embed containing info\n > about each message will be sent along with\n > the message data output.\n\n `author_info: (bool) = True`\n > If set to `True`, extra information about\n > the message authors will be added to the\n > info embed which is sent if `info` is set\n > to `True`.\n\n __Returns__:\n > One or more messages with attachents or embeds\n > based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n -----\n ' if (not isinstance(destination, discord.TextChannel)): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') checked_channels = set() for (i, msg) in enumerate(msgs): if (msg.channel not in checked_channels): if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') else: checked_channels.add(msg.channel) if (not (i % 50)): (await asyncio.sleep(0)) if (not msgs): raise BotException('Invalid arguments!', 'No messages given as input.') load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False),)) msg_count = len(msgs) for (i, msg) in enumerate(msgs): if ((msg_count > 2) and (not (i % 3))): (await 
embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Messages', value=(f'`{i}/{msg_count}` messages processed {((i / msg_count) * 100):.01f}% | ' + utils.progress_bar((i / msg_count), divisions=30))), 0)) (await destination.trigger_typing()) escaped_msg_content = msg.content.replace('```', '\\`\\`\\`') attached_files = None if attachments: with io.StringIO('This file was too large to be duplicated.') as fobj: attached_files = [((await a.to_file(spoiler=a.is_spoiler())) if (a.size <= self.filesize_limit) else discord.File(fobj, f'filetoolarge - {a.filename}.txt')) for a in msg.attachments] if info: info_embed = embed_utils.get_msg_info_embed(msg, author_info) info_embed.set_author(name='Message data & info') info_embed.title = info_embed.description = .join(((('__Text' + (' (Shortened)' if (len(escaped_msg_content) > 2000) else )) + '__:'), ((f' ``` {escaped_msg_content[:2001]} [...] ```' + '\n⠀') if (len(escaped_msg_content) > 2000) else '\n⠀'))) content_file = None if (as_attachment or (len(msg.content) > 2000)): with io.StringIO(msg.content) as fobj: content_file = discord.File(fobj, 'messagedata.txt') (await destination.send(embed=info_embed, file=content_file)) elif as_attachment: with io.StringIO(msg.content) as fobj: (await destination.send(file=discord.File(fobj, 'messagedata.txt'), embed=embed_utils.create(author_name='Message data', description=f'**[View Original Message]({msg.jump_url})**'))) elif ((len(msg.content) > 2000) or (len(escaped_msg_content) > 2000)): with io.StringIO(msg.content) as fobj: (await destination.send(file=discord.File(fobj, 'messagedata.txt'), embed=embed_utils.create(author_name='Message data', description=f'**[View Original Message]({msg.jump_url})**'))) else: (await embed_utils.send(self.channel, author_name='Message data', description='```\n{0}```'.format(escaped_msg_content), fields=(('⠀', f'**[View Original Message]({msg.jump_url})**', False),))) if attached_files: for i in 
range(len(attached_files)): (await self.channel.send(content=f'**Message attachment** ({(i + 1)}):', file=attached_files[i])) if (embeds and msg.embeds): embed_data_fobjs = [] for embed in msg.embeds: embed_data_fobj = io.StringIO() embed_utils.export_embed_data(embed.to_dict(), fp=embed_data_fobj, indent=4, as_json=True) embed_data_fobj.seek(0) embed_data_fobjs.append(embed_data_fobj) for i in range(len(embed_data_fobjs)): (await self.channel.send(content=f'**Message embed** ({(i + 1)}):', file=discord.File(embed_data_fobjs[i], filename='embeddata.json'))) for embed_data_fobj in embed_data_fobjs: embed_data_fobj.close() (await asyncio.sleep(0)) if (msg_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{msg_count}/{msg_count}` messages processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0)) try: (await self.response_msg.delete(delay=(10 if (msg_count > 2) else 0))) except discord.NotFound: pass<|docstring|>->type More admin commands ->signature pg!sudo_get <*messages> [destination=] [as_attachment=False] [attachments=True] [embeds=True] [info=False] [author_info=False] ->description Get the text of messages through the bot ->extended description Get the contents, attachments and serialized embeds of the given messages and send them to the given destination channel. __Args__: `*messages: (Message)` > A sequence of discord messages whose text, contents, attachments or embeds should be retrieved. `destination: (Channel) =` > A destination channel to send the generated outputs to. > If omitted, the destination will be the channel where > this command was invoked. `as_attachment: (bool) = False` > Whether the text content (if present) of the given > messages should be sent as an attachment (`.txt`) > or as embed containing it inside a code block in its > description. This will always occur if the text > content is above 2000 characters. 
`attachments: (bool) = True` > Whether the attachments of the given messages > should be retrieved (when possible). `embeds: (bool) = True` > Whether the embeds of the given messages > should be retrieved (as serialized JSON data). +===+ `info: (bool) = False` > If set to `True`, an embed containing info > about each message will be sent along with > the message data output. `author_info: (bool) = True` > If set to `True`, extra information about > the message authors will be added to the > info embed which is sent if `info` is set > to `True`. __Returns__: > One or more messages with attachents or embeds > based on the given input. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. -----<|endoftext|>
ab574b88b6b30f64ca58d099c58cc694a2fa3d87885c92b0e3ccebcb25ccece3
@add_group('sudo', 'fetch')
async def cmd_sudo_fetch(self, origin: discord.TextChannel, quantity: int,
                         channel_ids: bool = False, urls: bool = False,
                         pinned: bool = False, pin_range: Optional[range] = None,
                         before: Optional[Union[(discord.PartialMessage, datetime.datetime)]] = None,
                         after: Optional[Union[(discord.PartialMessage, datetime.datetime)]] = None,
                         around: Optional[Union[(discord.PartialMessage, datetime.datetime)]] = None,
                         oldest_first: bool = True,
                         prefix: String = String(''), sep: String = String(' '),
                         suffix: String = String('')):
    """
    ->type More admin commands
    ->signature pg!sudo fetch <origin channel> <quantity> [urls=False] [pinned=False] [pin_range=]
    [before=None] [after=None] [around=None] [oldest_first=True] [prefix=""] [sep=" "] [suffix=""]
    ->description Fetch message IDs or URLs

    Collects messages from `origin` (either its pins or its history), formats
    each as a URL, a `channel_id/message_id` pair, or a bare message ID, and
    uploads the joined result as a text file attachment to the invoking channel.
    -----
    """
    # The invoker must at least be able to see the origin channel.
    if not utils.check_channel_permissions(self.author, origin,
                                           permissions=('view_channel',)):
        raise BotException(
            'Not enough permissions',
            'You do not have enough permissions to run this command on the specified channel.')

    prefix, sep, suffix = prefix.string, sep.string, suffix.string
    output_str = prefix
    destination = self.channel

    if pinned:
        # Operate on the channel's pin list instead of its history.
        messages = await origin.pins()
        if not messages:
            raise BotException(
                'No pinned messages found',
                'No pinned messages were found in the specified channel.')
        if oldest_first:
            messages.reverse()
        if quantity > 0:
            # NOTE(review): keeps quantity + 1 pins, not quantity; preserved
            # from the original behavior -- confirm intent.
            messages = messages[:(quantity + 1)]
            if oldest_first:
                messages.reverse()
        elif quantity == 0:
            if pin_range:
                messages = messages[pin_range.start:pin_range.stop:pin_range.step]
                # A step of -1 already reverses; don't reverse twice.
                if pin_range.step != -1 and oldest_first:
                    messages.reverse()
        elif quantity < 0:
            raise BotException('Invalid `quantity` argument',
                               'Quantity has to be a positive integer (`=> 0`).')
    else:
        # Message anchors must belong to the origin channel itself.
        if (isinstance(before, discord.PartialMessage)
                and before.channel.id != origin.id):
            raise BotException(
                'Invalid `before` argument',
                '`before` has to be an ID to a message from the origin channel')
        if (isinstance(after, discord.PartialMessage)
                and after.channel.id != origin.id):
            raise BotException(
                'Invalid `after` argument',
                '`after` has to be an ID to a message from the origin channel')
        if (isinstance(around, discord.PartialMessage)
                and around.channel.id != origin.id):
            raise BotException(
                'Invalid `around` argument',
                '`around` has to be an ID to a message from the origin channel')
        if quantity <= 0:
            if quantity == 0 and not after:
                raise BotException(
                    'Invalid `quantity` argument',
                    '`quantity` must be above 0 when `after=` is not specified.')
            elif quantity != 0:
                raise BotException(
                    'Invalid `quantity` argument',
                    'Quantity has to be a positive integer (or `0` when `after=` is specified).')
        await destination.trigger_typing()
        # quantity == 0 (only valid together with after=) means "no limit".
        messages = await origin.history(
            limit=(quantity if quantity != 0 else None),
            before=before, after=after, around=around).flatten()
        if not messages:
            raise BotException(
                'Invalid message/time range',
                'No messages were found for the specified input values.')
        if not after and oldest_first:
            messages.reverse()

    msg_count = len(messages)
    msgs_per_loop = 200  # yield to the event loop after this many messages

    # Pick the output flavor and a per-message formatter.
    if urls:
        output_filename = 'message_urls.txt'

        def render(message):
            return message.jump_url
    elif channel_ids:
        output_filename = 'message_and_channel_ids.txt'

        def render(message):
            return f'{message.channel.id}/{message.id}'
    else:
        # Fixed: this branch previously reused 'message_and_channel_ids.txt'
        # even though its output contains only message IDs.
        output_filename = 'message_ids.txt'

        def render(message):
            return f'{message.id}'

    # Fixed: the original chunked concatenation dropped the first message
    # whenever fewer than `msgs_per_loop` messages were fetched (its trailing
    # range started at end_idx + 1 with end_idx = 0), and omitted `sep`
    # between adjacent 200-message chunks. Rendering every message into one
    # list and joining once avoids both issues; we still yield to the event
    # loop periodically so long lists don't starve other tasks.
    parts = []
    for idx in range(msg_count):
        parts.append(render(messages[idx]))
        if not (idx % msgs_per_loop):
            await asyncio.sleep(0)
    output_str += sep.join(parts) + suffix

    with io.StringIO(output_str) as fobj:
        await destination.send(file=discord.File(fobj, filename=output_filename))

    try:
        await self.response_msg.delete()
    except discord.NotFound:
        # The invoking message was already removed; nothing to clean up.
        pass
->type More admin commands ->signature pg!sudo fetch <origin channel> <quantity> [urls=False] [pinned=False] [pin_range=] [before=None] [after=None] [around=None] [oldest_first=True] [prefix=""] [sep=" "] [suffix=""] ->description Fetch message IDs or URLs -----
pgbot/commands/admin/sudo.py
cmd_sudo_fetch
gresm/PygameCommunityBot
77
python
@add_group('sudo', 'fetch') async def cmd_sudo_fetch(self, origin: discord.TextChannel, quantity: int, channel_ids: bool=False, urls: bool=False, pinned: bool=False, pin_range: Optional[range]=None, before: Optional[Union[(discord.PartialMessage, datetime.datetime)]]=None, after: Optional[Union[(discord.PartialMessage, datetime.datetime)]]=None, around: Optional[Union[(discord.PartialMessage, datetime.datetime)]]=None, oldest_first: bool=True, prefix: String=String(), sep: String=String(' '), suffix: String=String()): '\n ->type More admin commands\n ->signature pg!sudo fetch <origin channel> <quantity> [urls=False] [pinned=False] [pin_range=]\n [before=None] [after=None] [around=None] [oldest_first=True] [prefix=] [sep=" "] [suffix=]\n ->description Fetch message IDs or URLs\n -----\n ' if (not utils.check_channel_permissions(self.author, origin, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command on the specified channel.') (prefix, sep, suffix) = (prefix.string, sep.string, suffix.string) output_str = prefix destination = self.channel if pinned: messages = (await origin.pins()) if (not messages): raise BotException('No pinned messages found', 'No pinned messages were found in the specified channel.') if oldest_first: messages.reverse() if (quantity > 0): messages = messages[:(quantity + 1)] if oldest_first: messages.reverse() elif (quantity == 0): if pin_range: messages = messages[pin_range.start:pin_range.stop:pin_range.step] if ((pin_range.step != (- 1)) and oldest_first): messages.reverse() elif (quantity < 0): raise BotException('Invalid `quantity` argument', 'Quantity has to be a positive integer (`=> 0`).') else: if (isinstance(before, discord.PartialMessage) and (before.channel.id != origin.id)): raise BotException('Invalid `before` argument', '`before` has to be an ID to a message from the origin channel') if (isinstance(after, discord.PartialMessage) and (after.channel.id 
!= origin.id)): raise BotException('Invalid `after` argument', '`after` has to be an ID to a message from the origin channel') if (isinstance(around, discord.PartialMessage) and (around.channel.id != origin.id)): raise BotException('Invalid `around` argument', '`around` has to be an ID to a message from the origin channel') if (quantity <= 0): if ((quantity == 0) and (not after)): raise BotException('Invalid `quantity` argument', '`quantity` must be above 0 when `after=` is not specified.') elif (quantity != 0): raise BotException('Invalid `quantity` argument', 'Quantity has to be a positive integer (or `0` when `after=` is specified).') (await destination.trigger_typing()) messages = (await origin.history(limit=(quantity if (quantity != 0) else None), before=before, after=after, around=around).flatten()) if (not messages): raise BotException('Invalid message/time range', 'No messages were found for the specified input values.') if ((not after) and oldest_first): messages.reverse() msg_count = len(messages) msgs_per_loop = 200 if urls: output_filename = 'message_urls.txt' start_idx = 0 end_idx = 0 for i in range((msg_count // msgs_per_loop)): start_idx = (msgs_per_loop * i) end_idx = ((start_idx + msgs_per_loop) - 1) output_str += sep.join((messages[j].jump_url for j in range(start_idx, (start_idx + msgs_per_loop)))) (await asyncio.sleep(0)) output_str += (sep.join((messages[j].jump_url for j in range((end_idx + 1), msg_count))) + suffix) elif channel_ids: output_filename = 'message_and_channel_ids.txt' start_idx = 0 end_idx = 0 for i in range((msg_count // msgs_per_loop)): start_idx = (msgs_per_loop * i) end_idx = ((start_idx + msgs_per_loop) - 1) output_str += sep.join((f'{messages[j].channel.id}/{messages[j].id}' for j in range(start_idx, (start_idx + msgs_per_loop)))) (await asyncio.sleep(0)) output_str += (sep.join((f'{messages[j].channel.id}/{messages[j].id}' for j in range((end_idx + 1), msg_count))) + suffix) else: output_filename = 
'message_and_channel_ids.txt' start_idx = 0 end_idx = 0 for i in range((msg_count // msgs_per_loop)): start_idx = (msgs_per_loop * i) end_idx = ((start_idx + msgs_per_loop) - 1) output_str += sep.join((f'{messages[j].id}' for j in range(start_idx, (start_idx + msgs_per_loop)))) (await asyncio.sleep(0)) output_str += (sep.join((f'{messages[j].id}' for j in range((end_idx + 1), msg_count))) + suffix) with io.StringIO(output_str) as fobj: (await destination.send(file=discord.File(fobj, filename=output_filename))) try: (await self.response_msg.delete()) except discord.NotFound: pass
@add_group('sudo', 'fetch') async def cmd_sudo_fetch(self, origin: discord.TextChannel, quantity: int, channel_ids: bool=False, urls: bool=False, pinned: bool=False, pin_range: Optional[range]=None, before: Optional[Union[(discord.PartialMessage, datetime.datetime)]]=None, after: Optional[Union[(discord.PartialMessage, datetime.datetime)]]=None, around: Optional[Union[(discord.PartialMessage, datetime.datetime)]]=None, oldest_first: bool=True, prefix: String=String(), sep: String=String(' '), suffix: String=String()): '\n ->type More admin commands\n ->signature pg!sudo fetch <origin channel> <quantity> [urls=False] [pinned=False] [pin_range=]\n [before=None] [after=None] [around=None] [oldest_first=True] [prefix=] [sep=" "] [suffix=]\n ->description Fetch message IDs or URLs\n -----\n ' if (not utils.check_channel_permissions(self.author, origin, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command on the specified channel.') (prefix, sep, suffix) = (prefix.string, sep.string, suffix.string) output_str = prefix destination = self.channel if pinned: messages = (await origin.pins()) if (not messages): raise BotException('No pinned messages found', 'No pinned messages were found in the specified channel.') if oldest_first: messages.reverse() if (quantity > 0): messages = messages[:(quantity + 1)] if oldest_first: messages.reverse() elif (quantity == 0): if pin_range: messages = messages[pin_range.start:pin_range.stop:pin_range.step] if ((pin_range.step != (- 1)) and oldest_first): messages.reverse() elif (quantity < 0): raise BotException('Invalid `quantity` argument', 'Quantity has to be a positive integer (`=> 0`).') else: if (isinstance(before, discord.PartialMessage) and (before.channel.id != origin.id)): raise BotException('Invalid `before` argument', '`before` has to be an ID to a message from the origin channel') if (isinstance(after, discord.PartialMessage) and (after.channel.id 
!= origin.id)): raise BotException('Invalid `after` argument', '`after` has to be an ID to a message from the origin channel') if (isinstance(around, discord.PartialMessage) and (around.channel.id != origin.id)): raise BotException('Invalid `around` argument', '`around` has to be an ID to a message from the origin channel') if (quantity <= 0): if ((quantity == 0) and (not after)): raise BotException('Invalid `quantity` argument', '`quantity` must be above 0 when `after=` is not specified.') elif (quantity != 0): raise BotException('Invalid `quantity` argument', 'Quantity has to be a positive integer (or `0` when `after=` is specified).') (await destination.trigger_typing()) messages = (await origin.history(limit=(quantity if (quantity != 0) else None), before=before, after=after, around=around).flatten()) if (not messages): raise BotException('Invalid message/time range', 'No messages were found for the specified input values.') if ((not after) and oldest_first): messages.reverse() msg_count = len(messages) msgs_per_loop = 200 if urls: output_filename = 'message_urls.txt' start_idx = 0 end_idx = 0 for i in range((msg_count // msgs_per_loop)): start_idx = (msgs_per_loop * i) end_idx = ((start_idx + msgs_per_loop) - 1) output_str += sep.join((messages[j].jump_url for j in range(start_idx, (start_idx + msgs_per_loop)))) (await asyncio.sleep(0)) output_str += (sep.join((messages[j].jump_url for j in range((end_idx + 1), msg_count))) + suffix) elif channel_ids: output_filename = 'message_and_channel_ids.txt' start_idx = 0 end_idx = 0 for i in range((msg_count // msgs_per_loop)): start_idx = (msgs_per_loop * i) end_idx = ((start_idx + msgs_per_loop) - 1) output_str += sep.join((f'{messages[j].channel.id}/{messages[j].id}' for j in range(start_idx, (start_idx + msgs_per_loop)))) (await asyncio.sleep(0)) output_str += (sep.join((f'{messages[j].channel.id}/{messages[j].id}' for j in range((end_idx + 1), msg_count))) + suffix) else: output_filename = 
'message_and_channel_ids.txt' start_idx = 0 end_idx = 0 for i in range((msg_count // msgs_per_loop)): start_idx = (msgs_per_loop * i) end_idx = ((start_idx + msgs_per_loop) - 1) output_str += sep.join((f'{messages[j].id}' for j in range(start_idx, (start_idx + msgs_per_loop)))) (await asyncio.sleep(0)) output_str += (sep.join((f'{messages[j].id}' for j in range((end_idx + 1), msg_count))) + suffix) with io.StringIO(output_str) as fobj: (await destination.send(file=discord.File(fobj, filename=output_filename))) try: (await self.response_msg.delete()) except discord.NotFound: pass<|docstring|>->type More admin commands ->signature pg!sudo fetch <origin channel> <quantity> [urls=False] [pinned=False] [pin_range=] [before=None] [after=None] [around=None] [oldest_first=True] [prefix=""] [sep=" "] [suffix=""] ->description Fetch message IDs or URLs -----<|endoftext|>
7f9aa8e4a2cb803e6f0599dc7bdd72eb18ab8a36c467225e6f52360355ceac20
@add_group('sudo', 'clone')
async def cmd_sudo_clone(self, *msgs: discord.Message, destination: Optional[common.Channel]=None, embeds: bool=True, attachments: bool=True, as_spoiler: bool=False, info: bool=False, author_info: bool=True, skip_empty: bool=True):
    """
    ->type More admin commands
    ->signature pg!sudo clone <*messages> [destination=] [embeds=True] [attachments=True] [as_spoiler=False]
    [info=False] [author_info=True]
    ->description Clone a message through the bot
    ->extended description
    Clone the given messages and send them to the given destination channel.

    __Args__:
        `*messages: (Message)`
        > A sequence of discord messages whose text,
        contents, attachments or embeds should be cloned.

        `destination: (Channel) =`
        > A destination channel to send the cloned outputs to.
        > If omitted, the destination will be the channel where
        > this command was invoked.

        `attachments: (bool) = True`
        > Whether the attachments of the given messages
        > should be cloned as well (if possible).

        `embeds: (bool) = True`
        > Whether the embeds of the given messages
        > should be cloned along with the output messages.

        `as_spoiler: (bool) = False`
        > If set to `True`, the attachments of the input messages
        > will be explicitly marked as spoilers when sent to the
        > destination channel.

        `info: (bool) = False`
        > If set to `True`, an embed containing info
        > about each message will be sent along with
        > the message data output.

        `author_info: (bool) = True`
        > If set to `True`, extra information about
        > the message authors will be added to the
        > info embed which is sent if `info` is set
        > to `True`.

        `skip_empty: (bool) = True`
        > Whether empty messages
        > should be skipped.

    __Returns__:
        > One or more cloned messages with attachments
        > or embeds based on the given input.

    __Raises__:
        > `BotException`: One or more given arguments are invalid.
        > `HTTPException`: An invalid operation was blocked by Discord.
    -----
    """
    # Default destination is the channel the command was invoked in.
    if (not isinstance(destination, discord.TextChannel)):
        destination = self.channel
    if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))):
        raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.')
    # The invoker must be able to see every channel the input messages live in.
    checked_channels = set()
    for (i, msg) in enumerate(msgs):
        if (msg.channel not in checked_channels):
            if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel',))):
                raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.')
            else:
                checked_channels.add(msg.channel)
        # Yield to the event loop periodically while validating long inputs.
        if (not (i % 50)):
            (await asyncio.sleep(0))
    if (not msgs):
        raise BotException('Invalid arguments!', 'No messages given as input.')
    # Progress-report embed, updated in-place as messages are processed.
    load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False),))
    msg_count = len(msgs)
    no_mentions = discord.AllowedMentions.none()
    for (i, msg) in enumerate(msgs):
        # Refresh the progress embed every third message for larger batches.
        if ((msg_count > 2) and (not (i % 3))):
            (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Messages', value=(f'`{i}/{msg_count}` messages processed {((i / msg_count) * 100):.01f}% | ' + utils.progress_bar((i / msg_count), divisions=30))), 0))
        (await destination.trigger_typing())
        cloned_msg0 = None  # first cloned message; used as the `info` reply anchor
        attached_files = []
        if (msg.attachments and attachments):
            # Attachments above the bot's upload limit are replaced by a
            # small placeholder .txt file instead of being dropped.
            with io.StringIO('This file was too large to be cloned.') as fobj:
                attached_files = [((await a.to_file(spoiler=(a.is_spoiler() or as_spoiler))) if (a.size <= self.filesize_limit) else discord.File(fobj, f'filetoolarge - {a.filename}.txt')) for a in msg.attachments]
        if (msg.content or msg.embeds or attached_files):
            if (len(msg.content) > 2000):
                # Content exceeds Discord's per-message limit: send it in
                # 2000-character slices, then the remainder plus a full-text
                # file, then the first embed/attachment separately.
                # NOTE(review): the inner loop reuses `i`; harmless only
                # because enumerate() rebinds `i` at each outer iteration.
                start_idx = 0
                stop_idx = 0
                for i in range((len(msg.content) // 2000)):
                    start_idx = (2000 * i)
                    stop_idx = (2000 + (2000 * i))
                    if (not i):
                        cloned_msg0 = (await destination.send(content=msg.content[start_idx:stop_idx], allowed_mentions=no_mentions))
                    else:
                        (await destination.send(content=msg.content[start_idx:stop_idx], allowed_mentions=no_mentions))
                with io.StringIO(msg.content) as fobj:
                    (await destination.send(content=msg.content[stop_idx:], embed=embed_utils.create(footer_text='Full message data'), file=discord.File(fobj, filename='messagedata.txt'), allowed_mentions=no_mentions))
                (await destination.send(embed=(msg.embeds[0] if (msg.embeds and embeds) else None), file=(attached_files[0] if attached_files else None)))
            else:
                # Content fits in a single message: clone it in one send.
                cloned_msg0 = (await destination.send(content=msg.content, embed=(msg.embeds[0] if (msg.embeds and embeds) else None), file=(attached_files[0] if attached_files else None), allowed_mentions=no_mentions))
        elif (not skip_empty):
            raise BotException('Cannot clone an empty message!', '')
        # NOTE(review): the extra attachments/embeds and the info embed below
        # go to self.channel (the invoking channel), not `destination` --
        # looks like it should be `destination`; confirm before changing.
        for i in range(1, len(attached_files)):
            (await self.channel.send(file=attached_files[i]))
        for i in range(1, len(msg.embeds)):
            (await self.channel.send(embed=msg.embeds[i]))
        if info:
            (await self.channel.send(embed=embed_utils.get_msg_info_embed(msg, author=author_info), reference=cloned_msg0))
        (await asyncio.sleep(0))
    # Final progress update once every message has been cloned.
    if (msg_count > 2):
        (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{msg_count}/{msg_count}` messages processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0))
    try:
        # Remove the command invocation (delayed for larger batches).
        (await self.response_msg.delete(delay=(10 if (msg_count > 2) else 0)))
    except discord.NotFound:
        # Invoking message already gone; nothing to clean up.
        pass
->type More admin commands ->signature pg!sudo clone <*messages> [destination=] [embeds=True] [attachments=True] [as_spoiler=False] [info=False] [author_info=True] ->description Clone a message through the bot ->extended description Clone the given messages and send them to the given destination channel. __Args__: `*messages: (Message)` > A sequence of discord messages whose text, contents, attachments or embeds should be cloned. `destination: (Channel) =` > A destination channel to send the cloned outputs to. > If omitted, the destination will be the channel where > this command was invoked. `as_attachment: (bool) = False` > Whether the text content (if present) of the given > messages should be sent as an attachment (`.txt`) > or as embed containing it inside a code block in its > description. `attachments: (bool) = True` > Whether the attachments of the given messages > should be cloned as well (if possible). `embeds: (bool) = True` > Whether the embeds of the given messages > should be cloned along with the outut messages. +===+ `as_spoiler: (bool) = False` > If set to `True`, the attachments of the input messages > will be explicitly marked as spoilers when sent to the > destination channel. `info: (bool) = False` > If set to `True`, an embed containing info > about each message will be sent along with > the message data output. `author_info: (bool) = True` > If set to `True`, extra information about > the message authors will be added to the > info embed which is sent if `info` is set > to `True`. `skip_empty: (bool) = True` > Whether empty messages > should be skipped. __Returns__: > One or more cloned messages with attachents > or embeds based on the given input. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. -----
pgbot/commands/admin/sudo.py
cmd_sudo_clone
gresm/PygameCommunityBot
77
python
@add_group('sudo', 'clone') async def cmd_sudo_clone(self, *msgs: discord.Message, destination: Optional[common.Channel]=None, embeds: bool=True, attachments: bool=True, as_spoiler: bool=False, info: bool=False, author_info: bool=True, skip_empty: bool=True): '\n ->type More admin commands\n ->signature pg!sudo clone <*messages> [destination=] [embeds=True] [attachments=True] [as_spoiler=False]\n [info=False] [author_info=True]\n ->description Clone a message through the bot\n ->extended description\n Clone the given messages and send them to the given destination channel.\n\n __Args__:\n `*messages: (Message)`\n > A sequence of discord messages whose text,\n contents, attachments or embeds should be cloned.\n\n `destination: (Channel) =`\n > A destination channel to send the cloned outputs to.\n > If omitted, the destination will be the channel where\n > this command was invoked.\n\n `as_attachment: (bool) = False`\n > Whether the text content (if present) of the given\n > messages should be sent as an attachment (`.txt`)\n > or as embed containing it inside a code block in its\n > description.\n\n `attachments: (bool) = True`\n > Whether the attachments of the given messages\n > should be cloned as well (if possible).\n\n `embeds: (bool) = True`\n > Whether the embeds of the given messages\n > should be cloned along with the outut messages.\n\n +===+\n\n `as_spoiler: (bool) = False`\n > If set to `True`, the attachments of the input messages\n > will be explicitly marked as spoilers when sent to the\n > destination channel.\n\n `info: (bool) = False`\n > If set to `True`, an embed containing info\n > about each message will be sent along with\n > the message data output.\n\n `author_info: (bool) = True`\n > If set to `True`, extra information about\n > the message authors will be added to the\n > info embed which is sent if `info` is set\n > to `True`.\n\n `skip_empty: (bool) = True`\n > Whether empty messages\n > should be skipped.\n\n __Returns__:\n > One or 
more cloned messages with attachents\n > or embeds based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n -----\n ' if (not isinstance(destination, discord.TextChannel)): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') checked_channels = set() for (i, msg) in enumerate(msgs): if (msg.channel not in checked_channels): if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') else: checked_channels.add(msg.channel) if (not (i % 50)): (await asyncio.sleep(0)) if (not msgs): raise BotException('Invalid arguments!', 'No messages given as input.') load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False),)) msg_count = len(msgs) no_mentions = discord.AllowedMentions.none() for (i, msg) in enumerate(msgs): if ((msg_count > 2) and (not (i % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Messages', value=(f'`{i}/{msg_count}` messages processed {((i / msg_count) * 100):.01f}% | ' + utils.progress_bar((i / msg_count), divisions=30))), 0)) (await destination.trigger_typing()) cloned_msg0 = None attached_files = [] if (msg.attachments and attachments): with io.StringIO('This file was too large to be cloned.') as fobj: attached_files = [((await a.to_file(spoiler=(a.is_spoiler() or as_spoiler))) if (a.size <= self.filesize_limit) else discord.File(fobj, f'filetoolarge - {a.filename}.txt')) for a in msg.attachments] if (msg.content or msg.embeds or attached_files): if 
(len(msg.content) > 2000): start_idx = 0 stop_idx = 0 for i in range((len(msg.content) // 2000)): start_idx = (2000 * i) stop_idx = (2000 + (2000 * i)) if (not i): cloned_msg0 = (await destination.send(content=msg.content[start_idx:stop_idx], allowed_mentions=no_mentions)) else: (await destination.send(content=msg.content[start_idx:stop_idx], allowed_mentions=no_mentions)) with io.StringIO(msg.content) as fobj: (await destination.send(content=msg.content[stop_idx:], embed=embed_utils.create(footer_text='Full message data'), file=discord.File(fobj, filename='messagedata.txt'), allowed_mentions=no_mentions)) (await destination.send(embed=(msg.embeds[0] if (msg.embeds and embeds) else None), file=(attached_files[0] if attached_files else None))) else: cloned_msg0 = (await destination.send(content=msg.content, embed=(msg.embeds[0] if (msg.embeds and embeds) else None), file=(attached_files[0] if attached_files else None), allowed_mentions=no_mentions)) elif (not skip_empty): raise BotException('Cannot clone an empty message!', ) for i in range(1, len(attached_files)): (await self.channel.send(file=attached_files[i])) for i in range(1, len(msg.embeds)): (await self.channel.send(embed=msg.embeds[i])) if info: (await self.channel.send(embed=embed_utils.get_msg_info_embed(msg, author=author_info), reference=cloned_msg0)) (await asyncio.sleep(0)) if (msg_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{msg_count}/{msg_count}` messages processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0)) try: (await self.response_msg.delete(delay=(10 if (msg_count > 2) else 0))) except discord.NotFound: pass
@add_group('sudo', 'clone') async def cmd_sudo_clone(self, *msgs: discord.Message, destination: Optional[common.Channel]=None, embeds: bool=True, attachments: bool=True, as_spoiler: bool=False, info: bool=False, author_info: bool=True, skip_empty: bool=True): '\n ->type More admin commands\n ->signature pg!sudo clone <*messages> [destination=] [embeds=True] [attachments=True] [as_spoiler=False]\n [info=False] [author_info=True]\n ->description Clone a message through the bot\n ->extended description\n Clone the given messages and send them to the given destination channel.\n\n __Args__:\n `*messages: (Message)`\n > A sequence of discord messages whose text,\n contents, attachments or embeds should be cloned.\n\n `destination: (Channel) =`\n > A destination channel to send the cloned outputs to.\n > If omitted, the destination will be the channel where\n > this command was invoked.\n\n `as_attachment: (bool) = False`\n > Whether the text content (if present) of the given\n > messages should be sent as an attachment (`.txt`)\n > or as embed containing it inside a code block in its\n > description.\n\n `attachments: (bool) = True`\n > Whether the attachments of the given messages\n > should be cloned as well (if possible).\n\n `embeds: (bool) = True`\n > Whether the embeds of the given messages\n > should be cloned along with the outut messages.\n\n +===+\n\n `as_spoiler: (bool) = False`\n > If set to `True`, the attachments of the input messages\n > will be explicitly marked as spoilers when sent to the\n > destination channel.\n\n `info: (bool) = False`\n > If set to `True`, an embed containing info\n > about each message will be sent along with\n > the message data output.\n\n `author_info: (bool) = True`\n > If set to `True`, extra information about\n > the message authors will be added to the\n > info embed which is sent if `info` is set\n > to `True`.\n\n `skip_empty: (bool) = True`\n > Whether empty messages\n > should be skipped.\n\n __Returns__:\n > One or 
more cloned messages with attachents\n > or embeds based on the given input.\n\n __Raises__:\n > `BotException`: One or more given arguments are invalid.\n > `HTTPException`: An invalid operation was blocked by Discord.\n -----\n ' if (not isinstance(destination, discord.TextChannel)): destination = self.channel if (not utils.check_channel_permissions(self.author, destination, permissions=('view_channel', 'send_messages'))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') checked_channels = set() for (i, msg) in enumerate(msgs): if (msg.channel not in checked_channels): if (not utils.check_channel_permissions(self.author, msg.channel, permissions=('view_channel',))): raise BotException('Not enough permissions', 'You do not have enough permissions to run this command with the specified arguments.') else: checked_channels.add(msg.channel) if (not (i % 50)): (await asyncio.sleep(0)) if (not msgs): raise BotException('Invalid arguments!', 'No messages given as input.') load_embed = embed_utils.create(title='Your command is being processed:', fields=(('⠀', '`...`', False),)) msg_count = len(msgs) no_mentions = discord.AllowedMentions.none() for (i, msg) in enumerate(msgs): if ((msg_count > 2) and (not (i % 3))): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Messages', value=(f'`{i}/{msg_count}` messages processed {((i / msg_count) * 100):.01f}% | ' + utils.progress_bar((i / msg_count), divisions=30))), 0)) (await destination.trigger_typing()) cloned_msg0 = None attached_files = [] if (msg.attachments and attachments): with io.StringIO('This file was too large to be cloned.') as fobj: attached_files = [((await a.to_file(spoiler=(a.is_spoiler() or as_spoiler))) if (a.size <= self.filesize_limit) else discord.File(fobj, f'filetoolarge - {a.filename}.txt')) for a in msg.attachments] if (msg.content or msg.embeds or attached_files): if 
(len(msg.content) > 2000): start_idx = 0 stop_idx = 0 for i in range((len(msg.content) // 2000)): start_idx = (2000 * i) stop_idx = (2000 + (2000 * i)) if (not i): cloned_msg0 = (await destination.send(content=msg.content[start_idx:stop_idx], allowed_mentions=no_mentions)) else: (await destination.send(content=msg.content[start_idx:stop_idx], allowed_mentions=no_mentions)) with io.StringIO(msg.content) as fobj: (await destination.send(content=msg.content[stop_idx:], embed=embed_utils.create(footer_text='Full message data'), file=discord.File(fobj, filename='messagedata.txt'), allowed_mentions=no_mentions)) (await destination.send(embed=(msg.embeds[0] if (msg.embeds and embeds) else None), file=(attached_files[0] if attached_files else None))) else: cloned_msg0 = (await destination.send(content=msg.content, embed=(msg.embeds[0] if (msg.embeds and embeds) else None), file=(attached_files[0] if attached_files else None), allowed_mentions=no_mentions)) elif (not skip_empty): raise BotException('Cannot clone an empty message!', ) for i in range(1, len(attached_files)): (await self.channel.send(file=attached_files[i])) for i in range(1, len(msg.embeds)): (await self.channel.send(embed=msg.embeds[i])) if info: (await self.channel.send(embed=embed_utils.get_msg_info_embed(msg, author=author_info), reference=cloned_msg0)) (await asyncio.sleep(0)) if (msg_count > 2): (await embed_utils.edit_field_from_dict(self.response_msg, load_embed, dict(name='Processing Completed', value=(f'`{msg_count}/{msg_count}` messages processed 100% | ' + utils.progress_bar(1.0, divisions=30))), 0)) try: (await self.response_msg.delete(delay=(10 if (msg_count > 2) else 0))) except discord.NotFound: pass<|docstring|>->type More admin commands ->signature pg!sudo clone <*messages> [destination=] [embeds=True] [attachments=True] [as_spoiler=False] [info=False] [author_info=True] ->description Clone a message through the bot ->extended description Clone the given messages and send them to the given 
destination channel. __Args__: `*messages: (Message)` > A sequence of discord messages whose text, contents, attachments or embeds should be cloned. `destination: (Channel) =` > A destination channel to send the cloned outputs to. > If omitted, the destination will be the channel where > this command was invoked. `as_attachment: (bool) = False` > Whether the text content (if present) of the given > messages should be sent as an attachment (`.txt`) > or as embed containing it inside a code block in its > description. `attachments: (bool) = True` > Whether the attachments of the given messages > should be cloned as well (if possible). `embeds: (bool) = True` > Whether the embeds of the given messages > should be cloned along with the outut messages. +===+ `as_spoiler: (bool) = False` > If set to `True`, the attachments of the input messages > will be explicitly marked as spoilers when sent to the > destination channel. `info: (bool) = False` > If set to `True`, an embed containing info > about each message will be sent along with > the message data output. `author_info: (bool) = True` > If set to `True`, extra information about > the message authors will be added to the > info embed which is sent if `info` is set > to `True`. `skip_empty: (bool) = True` > Whether empty messages > should be skipped. __Returns__: > One or more cloned messages with attachents > or embeds based on the given input. __Raises__: > `BotException`: One or more given arguments are invalid. > `HTTPException`: An invalid operation was blocked by Discord. -----<|endoftext|>
d93273a4c3783ea440b0e041113b09acd0be0ce0f34719765dcd0308a2947484
def timediff_min_sec(self): ' Get timediff in format 1m48s ' tdiff = (self.timediff - self.time) if (tdiff.days < 0): diff_secs = (tdiff.seconds - 86400) else: diff_secs = tdiff.seconds (minutes, seconds) = divmod(diff_secs, 60) if ((minutes == 0) and (seconds == 0)): return '-' if minutes: return '{0}m{1:02d}s'.format(minutes, seconds) return '{0:02d}s'.format(seconds)
Get timediff in format 1m48s
focli/foline.py
timediff_min_sec
joohoi/focli
8
python
def timediff_min_sec(self): ' ' tdiff = (self.timediff - self.time) if (tdiff.days < 0): diff_secs = (tdiff.seconds - 86400) else: diff_secs = tdiff.seconds (minutes, seconds) = divmod(diff_secs, 60) if ((minutes == 0) and (seconds == 0)): return '-' if minutes: return '{0}m{1:02d}s'.format(minutes, seconds) return '{0:02d}s'.format(seconds)
def timediff_min_sec(self): ' ' tdiff = (self.timediff - self.time) if (tdiff.days < 0): diff_secs = (tdiff.seconds - 86400) else: diff_secs = tdiff.seconds (minutes, seconds) = divmod(diff_secs, 60) if ((minutes == 0) and (seconds == 0)): return '-' if minutes: return '{0}m{1:02d}s'.format(minutes, seconds) return '{0:02d}s'.format(seconds)<|docstring|>Get timediff in format 1m48s<|endoftext|>
803dd6060fa4e32fe3640ea306f7be1739c5fa78d07855b7fd542dfb9c117a1d
def run(self): ' Perform request, and populate journey data table ' try: data = json.loads(self.get_data()) except ValueError: raise exceptions.FoliParseDataException('Got bad data from url: {0}'.format(self.url)) if (not self.stop_name): self.stop_name = self.stopnr self.journeys = [] try: for jo in data['result']: jo_time = jo['aimeddeparturetime'] line_time = datetime.datetime.fromtimestamp(jo_time) destination = jo['destinationdisplay'] new_line = FoliLine(jo['lineref'], line_time, realtime=True, dest=destination) if ('delay' in jo.keys()): new_line.ontime = False new_line.timediff = datetime.datetime.fromtimestamp(jo['expecteddeparturetime']) self.journeys.append(new_line) except KeyError: raise exceptions.FoliParseDataError('Error while parsing data for line {0}'.format(self.stopnr))
Perform request, and populate journey data table
focli/foline.py
run
joohoi/focli
8
python
def run(self): ' ' try: data = json.loads(self.get_data()) except ValueError: raise exceptions.FoliParseDataException('Got bad data from url: {0}'.format(self.url)) if (not self.stop_name): self.stop_name = self.stopnr self.journeys = [] try: for jo in data['result']: jo_time = jo['aimeddeparturetime'] line_time = datetime.datetime.fromtimestamp(jo_time) destination = jo['destinationdisplay'] new_line = FoliLine(jo['lineref'], line_time, realtime=True, dest=destination) if ('delay' in jo.keys()): new_line.ontime = False new_line.timediff = datetime.datetime.fromtimestamp(jo['expecteddeparturetime']) self.journeys.append(new_line) except KeyError: raise exceptions.FoliParseDataError('Error while parsing data for line {0}'.format(self.stopnr))
def run(self): ' ' try: data = json.loads(self.get_data()) except ValueError: raise exceptions.FoliParseDataException('Got bad data from url: {0}'.format(self.url)) if (not self.stop_name): self.stop_name = self.stopnr self.journeys = [] try: for jo in data['result']: jo_time = jo['aimeddeparturetime'] line_time = datetime.datetime.fromtimestamp(jo_time) destination = jo['destinationdisplay'] new_line = FoliLine(jo['lineref'], line_time, realtime=True, dest=destination) if ('delay' in jo.keys()): new_line.ontime = False new_line.timediff = datetime.datetime.fromtimestamp(jo['expecteddeparturetime']) self.journeys.append(new_line) except KeyError: raise exceptions.FoliParseDataError('Error while parsing data for line {0}'.format(self.stopnr))<|docstring|>Perform request, and populate journey data table<|endoftext|>
f7fd87c0b2733ca231886ed0de295d1492546880039ee3f074fcd279e1159d82
def normalize_stopnr(self, nr): ' Simple validation and normalization of stop nr,\n raise FoliStopNameException if b0rked\n ' try: if (nr[0].lower() == 't'): retnr = 'T{0}'.format(int(nr[1:])) else: retnr = '{0}'.format(int(nr)) except ValueError: raise exceptions.FoliStopNameException('{0} is not a valid stop id'.format(nr)) return retnr
Simple validation and normalization of stop nr, raise FoliStopNameException if b0rked
focli/foline.py
normalize_stopnr
joohoi/focli
8
python
def normalize_stopnr(self, nr): ' Simple validation and normalization of stop nr,\n raise FoliStopNameException if b0rked\n ' try: if (nr[0].lower() == 't'): retnr = 'T{0}'.format(int(nr[1:])) else: retnr = '{0}'.format(int(nr)) except ValueError: raise exceptions.FoliStopNameException('{0} is not a valid stop id'.format(nr)) return retnr
def normalize_stopnr(self, nr): ' Simple validation and normalization of stop nr,\n raise FoliStopNameException if b0rked\n ' try: if (nr[0].lower() == 't'): retnr = 'T{0}'.format(int(nr[1:])) else: retnr = '{0}'.format(int(nr)) except ValueError: raise exceptions.FoliStopNameException('{0} is not a valid stop id'.format(nr)) return retnr<|docstring|>Simple validation and normalization of stop nr, raise FoliStopNameException if b0rked<|endoftext|>
11fd33fd5496f977b0610dc2a630592507e9b87a8f1431791dd1667ba3b2d0c4
def abort_everything(self, ensure_ready=True): ' Tell the client to cancel any task submitted via this instance\n\n joblib.Parallel will never access those results\n ' self.client.cancel(self.task_futures) self.task_futures.clear()
Tell the client to cancel any task submitted via this instance joblib.Parallel will never access those results
venv/Lib/site-packages/sklearn/externals/joblib/_dask.py
abort_everything
jashrathod0/CV_on_the_Cloud2
8
python
def abort_everything(self, ensure_ready=True): ' Tell the client to cancel any task submitted via this instance\n\n joblib.Parallel will never access those results\n ' self.client.cancel(self.task_futures) self.task_futures.clear()
def abort_everything(self, ensure_ready=True): ' Tell the client to cancel any task submitted via this instance\n\n joblib.Parallel will never access those results\n ' self.client.cancel(self.task_futures) self.task_futures.clear()<|docstring|>Tell the client to cancel any task submitted via this instance joblib.Parallel will never access those results<|endoftext|>
ffc1bb0fc66a659f4244f7deab981505924d5d8257bc7df45944dd7db2656370
@contextlib.contextmanager def retrieval_context(self): "Override ParallelBackendBase.retrieval_context to avoid deadlocks.\n\n This removes thread from the worker's thread pool (using 'secede').\n Seceding avoids deadlock in nested parallelism settings.\n " if hasattr(thread_state, 'execution_state'): secede() (yield) if hasattr(thread_state, 'execution_state'): rejoin()
Override ParallelBackendBase.retrieval_context to avoid deadlocks. This removes thread from the worker's thread pool (using 'secede'). Seceding avoids deadlock in nested parallelism settings.
venv/Lib/site-packages/sklearn/externals/joblib/_dask.py
retrieval_context
jashrathod0/CV_on_the_Cloud2
8
python
@contextlib.contextmanager def retrieval_context(self): "Override ParallelBackendBase.retrieval_context to avoid deadlocks.\n\n This removes thread from the worker's thread pool (using 'secede').\n Seceding avoids deadlock in nested parallelism settings.\n " if hasattr(thread_state, 'execution_state'): secede() (yield) if hasattr(thread_state, 'execution_state'): rejoin()
@contextlib.contextmanager def retrieval_context(self): "Override ParallelBackendBase.retrieval_context to avoid deadlocks.\n\n This removes thread from the worker's thread pool (using 'secede').\n Seceding avoids deadlock in nested parallelism settings.\n " if hasattr(thread_state, 'execution_state'): secede() (yield) if hasattr(thread_state, 'execution_state'): rejoin()<|docstring|>Override ParallelBackendBase.retrieval_context to avoid deadlocks. This removes thread from the worker's thread pool (using 'secede'). Seceding avoids deadlock in nested parallelism settings.<|endoftext|>
fc92f4d22b992f8b8eff06d2bf4df0c8bef8665bdac71912b73dd8622564f354
def main(): '\n This script converts all pdf files in a given directory\n into images\n ' for doc in glob.glob('./PDFs/*.pdf'): if (not os.path.exists(('./temp/images/' + doc[7:(- 4)]))): os.makedirs(('./temp/images/' + doc[7:(- 4)])) print('Converting document', doc[7:(- 4)], '...') images = convert_from_path(doc, dpi=600, fmt='tiff', output_folder=('./temp/images/' + doc[7:(- 4)]))
This script converts all pdf files in a given directory into images
dags/pdfs_to_images.py
main
HebaNAS/Patent-Similarity-Check
0
python
def main(): '\n This script converts all pdf files in a given directory\n into images\n ' for doc in glob.glob('./PDFs/*.pdf'): if (not os.path.exists(('./temp/images/' + doc[7:(- 4)]))): os.makedirs(('./temp/images/' + doc[7:(- 4)])) print('Converting document', doc[7:(- 4)], '...') images = convert_from_path(doc, dpi=600, fmt='tiff', output_folder=('./temp/images/' + doc[7:(- 4)]))
def main(): '\n This script converts all pdf files in a given directory\n into images\n ' for doc in glob.glob('./PDFs/*.pdf'): if (not os.path.exists(('./temp/images/' + doc[7:(- 4)]))): os.makedirs(('./temp/images/' + doc[7:(- 4)])) print('Converting document', doc[7:(- 4)], '...') images = convert_from_path(doc, dpi=600, fmt='tiff', output_folder=('./temp/images/' + doc[7:(- 4)]))<|docstring|>This script converts all pdf files in a given directory into images<|endoftext|>
93723593046211696818963b3cc62c933fe3b17ed814552c3993d396edd70fb3
def define_parameters(self): "\n bin_size:\n visibility: basic\n dtype: int\n description: Bin Size for the downsample.\n default: 3\n mode:\n visibility: basic\n dtype: str\n description: One of 'mean', 'median', 'min', 'max'.\n default: mean\n options: [mean,median,min,max]\n pattern:\n visibility: basic\n dtype: str\n description: One of 'PROJECTION' or 'SINOGRAM' or 'VOLUME_XZ'.\n default: PROJECTION\n options: ['PROJECTION', 'SINOGRAM', 'VOLUME_XZ']\n num_bit:\n visibility: basic\n dtype: int\n description: Bit depth of the rescaled data (8, 16 or 32).\n default: 32\n options: [8,16,32]\n flip_updown:\n visibility: basic\n dtype: bool\n description: Flip images up-down.\n default: True\n flip_leftright:\n visibility: basic\n dtype: bool\n description: Flip images left-right.\n default: False\n rotate_angle:\n visibility: basic\n dtype: [float, str, list[float], dict{int:float}]\n description: Rotate images by a given angle (Degree).\n default: 0.0\n max:\n visibility: basic\n dtype: [None,float]\n description: Global max for scaling.\n default: None\n min:\n visibility: basic\n dtype: [None,float]\n description: Global min for scaling.\n default: None\n\n "
bin_size: visibility: basic dtype: int description: Bin Size for the downsample. default: 3 mode: visibility: basic dtype: str description: One of 'mean', 'median', 'min', 'max'. default: mean options: [mean,median,min,max] pattern: visibility: basic dtype: str description: One of 'PROJECTION' or 'SINOGRAM' or 'VOLUME_XZ'. default: PROJECTION options: ['PROJECTION', 'SINOGRAM', 'VOLUME_XZ'] num_bit: visibility: basic dtype: int description: Bit depth of the rescaled data (8, 16 or 32). default: 32 options: [8,16,32] flip_updown: visibility: basic dtype: bool description: Flip images up-down. default: True flip_leftright: visibility: basic dtype: bool description: Flip images left-right. default: False rotate_angle: visibility: basic dtype: [float, str, list[float], dict{int:float}] description: Rotate images by a given angle (Degree). default: 0.0 max: visibility: basic dtype: [None,float] description: Global max for scaling. default: None min: visibility: basic dtype: [None,float] description: Global min for scaling. default: None
savu/plugins/reshape/downsample_filter_tools.py
define_parameters
jessicavers/Savu
39
python
def define_parameters(self): "\n bin_size:\n visibility: basic\n dtype: int\n description: Bin Size for the downsample.\n default: 3\n mode:\n visibility: basic\n dtype: str\n description: One of 'mean', 'median', 'min', 'max'.\n default: mean\n options: [mean,median,min,max]\n pattern:\n visibility: basic\n dtype: str\n description: One of 'PROJECTION' or 'SINOGRAM' or 'VOLUME_XZ'.\n default: PROJECTION\n options: ['PROJECTION', 'SINOGRAM', 'VOLUME_XZ']\n num_bit:\n visibility: basic\n dtype: int\n description: Bit depth of the rescaled data (8, 16 or 32).\n default: 32\n options: [8,16,32]\n flip_updown:\n visibility: basic\n dtype: bool\n description: Flip images up-down.\n default: True\n flip_leftright:\n visibility: basic\n dtype: bool\n description: Flip images left-right.\n default: False\n rotate_angle:\n visibility: basic\n dtype: [float, str, list[float], dict{int:float}]\n description: Rotate images by a given angle (Degree).\n default: 0.0\n max:\n visibility: basic\n dtype: [None,float]\n description: Global max for scaling.\n default: None\n min:\n visibility: basic\n dtype: [None,float]\n description: Global min for scaling.\n default: None\n\n "
def define_parameters(self): "\n bin_size:\n visibility: basic\n dtype: int\n description: Bin Size for the downsample.\n default: 3\n mode:\n visibility: basic\n dtype: str\n description: One of 'mean', 'median', 'min', 'max'.\n default: mean\n options: [mean,median,min,max]\n pattern:\n visibility: basic\n dtype: str\n description: One of 'PROJECTION' or 'SINOGRAM' or 'VOLUME_XZ'.\n default: PROJECTION\n options: ['PROJECTION', 'SINOGRAM', 'VOLUME_XZ']\n num_bit:\n visibility: basic\n dtype: int\n description: Bit depth of the rescaled data (8, 16 or 32).\n default: 32\n options: [8,16,32]\n flip_updown:\n visibility: basic\n dtype: bool\n description: Flip images up-down.\n default: True\n flip_leftright:\n visibility: basic\n dtype: bool\n description: Flip images left-right.\n default: False\n rotate_angle:\n visibility: basic\n dtype: [float, str, list[float], dict{int:float}]\n description: Rotate images by a given angle (Degree).\n default: 0.0\n max:\n visibility: basic\n dtype: [None,float]\n description: Global max for scaling.\n default: None\n min:\n visibility: basic\n dtype: [None,float]\n description: Global min for scaling.\n default: None\n\n "<|docstring|>bin_size: visibility: basic dtype: int description: Bin Size for the downsample. default: 3 mode: visibility: basic dtype: str description: One of 'mean', 'median', 'min', 'max'. default: mean options: [mean,median,min,max] pattern: visibility: basic dtype: str description: One of 'PROJECTION' or 'SINOGRAM' or 'VOLUME_XZ'. default: PROJECTION options: ['PROJECTION', 'SINOGRAM', 'VOLUME_XZ'] num_bit: visibility: basic dtype: int description: Bit depth of the rescaled data (8, 16 or 32). default: 32 options: [8,16,32] flip_updown: visibility: basic dtype: bool description: Flip images up-down. default: True flip_leftright: visibility: basic dtype: bool description: Flip images left-right. 
default: False rotate_angle: visibility: basic dtype: [float, str, list[float], dict{int:float}] description: Rotate images by a given angle (Degree). default: 0.0 max: visibility: basic dtype: [None,float] description: Global max for scaling. default: None min: visibility: basic dtype: [None,float] description: Global min for scaling. default: None<|endoftext|>
795cf87fcaf605023141b364593e6be1b86ad679259a503f95cbb9520d52c5c2
def init_connection_engine(): ' initialize database setup\n Takes in os variables from environment if on GCP\n Reads in local variables that will be ignored in public repository.\n Returns:\n pool -- a connection to GCP MySQL\n ' if (os.environ.get('GAE_ENV') != 'standard'): try: variables = load(open('app.yaml'), Loader=Loader) except OSError as e: print('Make sure you have the app.yaml file setup') os.exit() env_variables = variables['env_variables'] for var in env_variables: os.environ[var] = env_variables[var] pool = sqlalchemy.create_engine(sqlalchemy.engine.url.URL(drivername='mysql+pymysql', username=os.environ.get('MYSQL_USER'), password=os.environ.get('MYSQL_PASSWORD'), database=os.environ.get('MYSQL_DB'), host=os.environ.get('MYSQL_HOST'), query={'unix_socket': '{}/{}'.format('/cloudsql', 'ezapartment:us-central1:yoursql')})) return pool
initialize database setup Takes in os variables from environment if on GCP Reads in local variables that will be ignored in public repository. Returns: pool -- a connection to GCP MySQL
app/__init__.py
init_connection_engine
zatch10/ezapartment
0
python
def init_connection_engine(): ' initialize database setup\n Takes in os variables from environment if on GCP\n Reads in local variables that will be ignored in public repository.\n Returns:\n pool -- a connection to GCP MySQL\n ' if (os.environ.get('GAE_ENV') != 'standard'): try: variables = load(open('app.yaml'), Loader=Loader) except OSError as e: print('Make sure you have the app.yaml file setup') os.exit() env_variables = variables['env_variables'] for var in env_variables: os.environ[var] = env_variables[var] pool = sqlalchemy.create_engine(sqlalchemy.engine.url.URL(drivername='mysql+pymysql', username=os.environ.get('MYSQL_USER'), password=os.environ.get('MYSQL_PASSWORD'), database=os.environ.get('MYSQL_DB'), host=os.environ.get('MYSQL_HOST'), query={'unix_socket': '{}/{}'.format('/cloudsql', 'ezapartment:us-central1:yoursql')})) return pool
def init_connection_engine(): ' initialize database setup\n Takes in os variables from environment if on GCP\n Reads in local variables that will be ignored in public repository.\n Returns:\n pool -- a connection to GCP MySQL\n ' if (os.environ.get('GAE_ENV') != 'standard'): try: variables = load(open('app.yaml'), Loader=Loader) except OSError as e: print('Make sure you have the app.yaml file setup') os.exit() env_variables = variables['env_variables'] for var in env_variables: os.environ[var] = env_variables[var] pool = sqlalchemy.create_engine(sqlalchemy.engine.url.URL(drivername='mysql+pymysql', username=os.environ.get('MYSQL_USER'), password=os.environ.get('MYSQL_PASSWORD'), database=os.environ.get('MYSQL_DB'), host=os.environ.get('MYSQL_HOST'), query={'unix_socket': '{}/{}'.format('/cloudsql', 'ezapartment:us-central1:yoursql')})) return pool<|docstring|>initialize database setup Takes in os variables from environment if on GCP Reads in local variables that will be ignored in public repository. Returns: pool -- a connection to GCP MySQL<|endoftext|>
79abf3ee1236d2d0dc898275c99816d17e1451e1d7511cbf0ffd3d4b90e69e54
def get_model_fields(model): "Devolve os atributos de um modelo que correspondem a 'fields'!!!" ordered_fields_dict = {} for var in vars(model): if ('widgets' in str(vars(model)[var])): field = eval(('model.' + var)) ordered_fields_dict[field.view_order] = (var, field) for key in ordered_fields_dict.keys(): (yield ordered_fields_dict[key])
Devolve os atributos de um modelo que correspondem a 'fields'!!!
core/auth.py
get_model_fields
aanacleto/ERP4R
0
python
def get_model_fields(model): ordered_fields_dict = {} for var in vars(model): if ('widgets' in str(vars(model)[var])): field = eval(('model.' + var)) ordered_fields_dict[field.view_order] = (var, field) for key in ordered_fields_dict.keys(): (yield ordered_fields_dict[key])
def get_model_fields(model): ordered_fields_dict = {} for var in vars(model): if ('widgets' in str(vars(model)[var])): field = eval(('model.' + var)) ordered_fields_dict[field.view_order] = (var, field) for key in ordered_fields_dict.keys(): (yield ordered_fields_dict[key])<|docstring|>Devolve os atributos de um modelo que correspondem a 'fields'!!!<|endoftext|>
e1f0f9417b161c934eed804e44fe6df7106d46c98494734e2f29e1739507db24
def verify_orm_rights(target): 'Method decorator to verify if the user have the right to do what is requiring\n Esta abordagem só serve no ORM' def secure(self, *args, **kargs): result = verify_rights(model=self, action=target.__name__) if (result == True): return target(self, *args, **kargs) else: return result return secure
Method decorator to verify if the user have the right to do what is requiring Esta abordagem só serve no ORM
core/auth.py
verify_orm_rights
aanacleto/ERP4R
0
python
def verify_orm_rights(target): 'Method decorator to verify if the user have the right to do what is requiring\n Esta abordagem só serve no ORM' def secure(self, *args, **kargs): result = verify_rights(model=self, action=target.__name__) if (result == True): return target(self, *args, **kargs) else: return result return secure
def verify_orm_rights(target): 'Method decorator to verify if the user have the right to do what is requiring\n Esta abordagem só serve no ORM' def secure(self, *args, **kargs): result = verify_rights(model=self, action=target.__name__) if (result == True): return target(self, *args, **kargs) else: return result return secure<|docstring|>Method decorator to verify if the user have the right to do what is requiring Esta abordagem só serve no ORM<|endoftext|>
61ed99793ca67fe5936aafde44751e99109ae0d0b7e53808a8052df7f8b73d8b
def verify_form_rights(target): 'Method decorator to verify if the user have the right to do what is requiring\n Esta abordagem só serve no formulário' def secure(*args, **kargs): from utils import get_context, set_context import objs window_id = kargs.get('window_id') ctx_dict = get_context(window_id) model_name = ctx_dict.get('model_name') model = eval('objs.{model_name}()'.format(model_name=model_name)) result = verify_rights(model=model, action=target.__name__) if (result == True): return target(*args, **kargs) elif isinstance(result, list): ctx_dict['rights'] = result set_context(window_id, ctx_dict) return target(*args, **kargs) else: return result return secure
Method decorator to verify if the user have the right to do what is requiring Esta abordagem só serve no formulário
core/auth.py
verify_form_rights
aanacleto/ERP4R
0
python
def verify_form_rights(target): 'Method decorator to verify if the user have the right to do what is requiring\n Esta abordagem só serve no formulário' def secure(*args, **kargs): from utils import get_context, set_context import objs window_id = kargs.get('window_id') ctx_dict = get_context(window_id) model_name = ctx_dict.get('model_name') model = eval('objs.{model_name}()'.format(model_name=model_name)) result = verify_rights(model=model, action=target.__name__) if (result == True): return target(*args, **kargs) elif isinstance(result, list): ctx_dict['rights'] = result set_context(window_id, ctx_dict) return target(*args, **kargs) else: return result return secure
def verify_form_rights(target): 'Method decorator to verify if the user have the right to do what is requiring\n Esta abordagem só serve no formulário' def secure(*args, **kargs): from utils import get_context, set_context import objs window_id = kargs.get('window_id') ctx_dict = get_context(window_id) model_name = ctx_dict.get('model_name') model = eval('objs.{model_name}()'.format(model_name=model_name)) result = verify_rights(model=model, action=target.__name__) if (result == True): return target(*args, **kargs) elif isinstance(result, list): ctx_dict['rights'] = result set_context(window_id, ctx_dict) return target(*args, **kargs) else: return result return secure<|docstring|>Method decorator to verify if the user have the right to do what is requiring Esta abordagem só serve no formulário<|endoftext|>
583d963a8c7968052bcf3af52fd2b253a30b717a95d673e7ec0ffff5d28ff8ec
def paginate(query, page=None, per_page=None, error_out=True): "Clone from flask_sqlachemy.Pagination\n Returns `per_page` items from page `page`. By default it will\n abort with 404 if no items were found and the page was larger than\n 1. This behavor can be disabled by setting `error_out` to `False`.\n\n If page or per_page are None, they will be retrieved from the\n request query. If the values are not ints and ``error_out`` is\n true, it will abort with 404. If there is no request or they\n aren't in the query, they default to page 1 and 20\n respectively.\n\n Returns an :class:`Pagination` object.\n " if has_request_context(): if (page is None): try: page = int(request.args.get('page', 1)) except (TypeError, ValueError): if error_out: abort(404) page = 1 if (per_page is None): try: per_page = int(request.args.get('per_page', 20)) except (TypeError, ValueError): if error_out: abort(404) per_page = 20 else: if (page is None): page = 1 if (per_page is None): per_page = 20 if (error_out and (page < 1)): abort(404) items = query.limit(per_page).offset(((page - 1) * per_page)).all() if ((not items) and (page != 1) and error_out): abort(404) if ((page == 1) and (len(items) < per_page)): total = len(items) else: total = query.order_by(None).count() return Pagination(query, page, per_page, total, items)
Clone from flask_sqlachemy.Pagination Returns `per_page` items from page `page`. By default it will abort with 404 if no items were found and the page was larger than 1. This behavor can be disabled by setting `error_out` to `False`. If page or per_page are None, they will be retrieved from the request query. If the values are not ints and ``error_out`` is true, it will abort with 404. If there is no request or they aren't in the query, they default to page 1 and 20 respectively. Returns an :class:`Pagination` object.
flask_kits1/restful/pagination.py
paginate
by46/coffee
0
python
def paginate(query, page=None, per_page=None, error_out=True): "Clone from flask_sqlachemy.Pagination\n Returns `per_page` items from page `page`. By default it will\n abort with 404 if no items were found and the page was larger than\n 1. This behavor can be disabled by setting `error_out` to `False`.\n\n If page or per_page are None, they will be retrieved from the\n request query. If the values are not ints and ``error_out`` is\n true, it will abort with 404. If there is no request or they\n aren't in the query, they default to page 1 and 20\n respectively.\n\n Returns an :class:`Pagination` object.\n " if has_request_context(): if (page is None): try: page = int(request.args.get('page', 1)) except (TypeError, ValueError): if error_out: abort(404) page = 1 if (per_page is None): try: per_page = int(request.args.get('per_page', 20)) except (TypeError, ValueError): if error_out: abort(404) per_page = 20 else: if (page is None): page = 1 if (per_page is None): per_page = 20 if (error_out and (page < 1)): abort(404) items = query.limit(per_page).offset(((page - 1) * per_page)).all() if ((not items) and (page != 1) and error_out): abort(404) if ((page == 1) and (len(items) < per_page)): total = len(items) else: total = query.order_by(None).count() return Pagination(query, page, per_page, total, items)
def paginate(query, page=None, per_page=None, error_out=True): "Clone from flask_sqlachemy.Pagination\n Returns `per_page` items from page `page`. By default it will\n abort with 404 if no items were found and the page was larger than\n 1. This behavor can be disabled by setting `error_out` to `False`.\n\n If page or per_page are None, they will be retrieved from the\n request query. If the values are not ints and ``error_out`` is\n true, it will abort with 404. If there is no request or they\n aren't in the query, they default to page 1 and 20\n respectively.\n\n Returns an :class:`Pagination` object.\n " if has_request_context(): if (page is None): try: page = int(request.args.get('page', 1)) except (TypeError, ValueError): if error_out: abort(404) page = 1 if (per_page is None): try: per_page = int(request.args.get('per_page', 20)) except (TypeError, ValueError): if error_out: abort(404) per_page = 20 else: if (page is None): page = 1 if (per_page is None): per_page = 20 if (error_out and (page < 1)): abort(404) items = query.limit(per_page).offset(((page - 1) * per_page)).all() if ((not items) and (page != 1) and error_out): abort(404) if ((page == 1) and (len(items) < per_page)): total = len(items) else: total = query.order_by(None).count() return Pagination(query, page, per_page, total, items)<|docstring|>Clone from flask_sqlachemy.Pagination Returns `per_page` items from page `page`. By default it will abort with 404 if no items were found and the page was larger than 1. This behavor can be disabled by setting `error_out` to `False`. If page or per_page are None, they will be retrieved from the request query. If the values are not ints and ``error_out`` is true, it will abort with 404. If there is no request or they aren't in the query, they default to page 1 and 20 respectively. Returns an :class:`Pagination` object.<|endoftext|>
5501dc108b2a296f1cc7919d7a1f9910314c101f1dbacdbdd4e3e3466f4c27de
def __init__(self, serializer_fields, envelope=None, item_builder=None): 'decorator initialize for Paginate\n\n :param serializer_fields: a dict of whose keys will make up the final\n serialized response output\n :param envelope: optional key that will be used to envelop the serialized\n response\n :param item_builder: optional key that will be used to rebuild item for group_by\n :return:\n ' super(Paginate, self).__init__(self.rebuild_fields(serializer_fields), envelope=envelope) self.item_builder = item_builder
decorator initialize for Paginate :param serializer_fields: a dict of whose keys will make up the final serialized response output :param envelope: optional key that will be used to envelop the serialized response :param item_builder: optional key that will be used to rebuild item for group_by :return:
flask_kits1/restful/pagination.py
__init__
by46/coffee
0
python
def __init__(self, serializer_fields, envelope=None, item_builder=None): 'decorator initialize for Paginate\n\n :param serializer_fields: a dict of whose keys will make up the final\n serialized response output\n :param envelope: optional key that will be used to envelop the serialized\n response\n :param item_builder: optional key that will be used to rebuild item for group_by\n :return:\n ' super(Paginate, self).__init__(self.rebuild_fields(serializer_fields), envelope=envelope) self.item_builder = item_builder
def __init__(self, serializer_fields, envelope=None, item_builder=None): 'decorator initialize for Paginate\n\n :param serializer_fields: a dict of whose keys will make up the final\n serialized response output\n :param envelope: optional key that will be used to envelop the serialized\n response\n :param item_builder: optional key that will be used to rebuild item for group_by\n :return:\n ' super(Paginate, self).__init__(self.rebuild_fields(serializer_fields), envelope=envelope) self.item_builder = item_builder<|docstring|>decorator initialize for Paginate :param serializer_fields: a dict of whose keys will make up the final serialized response output :param envelope: optional key that will be used to envelop the serialized response :param item_builder: optional key that will be used to rebuild item for group_by :return:<|endoftext|>
fe5560212cf34178fc7746a28974f19dff953831b25187e0ba8c10120640aca2
def test_convert_token_and_id(self): 'Test ``_convert_token_to_id`` and ``_convert_id_to_token``.' token = '<pad>' token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
Test ``_convert_token_to_id`` and ``_convert_id_to_token``.
tests/test_tokenization_deberta_v2.py
test_convert_token_and_id
yjyin19/transformers
2
python
def test_convert_token_and_id(self): token = '<pad>' token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_convert_token_and_id(self): token = '<pad>' token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)<|docstring|>Test ``_convert_token_to_id`` and ``_convert_id_to_token``.<|endoftext|>
ed83189ce1ec875bfb139ef26a1aa0d59293d62248e2cbf72d5c8d983f9f6b38
def testIntotoLinkArtifact(self): 'Test IntotoLinkArtifact' pass
Test IntotoLinkArtifact
test/test_intoto_link_artifact.py
testIntotoLinkArtifact
hanyuwang1993/client-python
0
python
def testIntotoLinkArtifact(self): pass
def testIntotoLinkArtifact(self): pass<|docstring|>Test IntotoLinkArtifact<|endoftext|>
71b580dc48f64f48b1c5f5bf66e26f92d47811f0ffd01aaf4ec646b0e8fa7797
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): '\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n ' confusion_matrix_dir = './confusion_matrix_plots' if (not os.path.exists(confusion_matrix_dir)): os.mkdir(confusion_matrix_dir) plt.cla() plt.figure() plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = (cm.astype('float') / cm.sum(axis=1)[(:, np.newaxis)]) print('Normalized confusion matrix') else: print('Confusion matrix, without normalization') print(cm) thresh = (cm.max() / 2.0) for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('#BFD1D4' if (cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') if normalize: plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg')) else: plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
util.py
plot_confusion_matrix
oleksandrlazariev/keras-transfer-learning-for-oxford102
287
python
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): '\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n ' confusion_matrix_dir = './confusion_matrix_plots' if (not os.path.exists(confusion_matrix_dir)): os.mkdir(confusion_matrix_dir) plt.cla() plt.figure() plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = (cm.astype('float') / cm.sum(axis=1)[(:, np.newaxis)]) print('Normalized confusion matrix') else: print('Confusion matrix, without normalization') print(cm) thresh = (cm.max() / 2.0) for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('#BFD1D4' if (cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') if normalize: plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg')) else: plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): '\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n ' confusion_matrix_dir = './confusion_matrix_plots' if (not os.path.exists(confusion_matrix_dir)): os.mkdir(confusion_matrix_dir) plt.cla() plt.figure() plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) if normalize: cm = (cm.astype('float') / cm.sum(axis=1)[(:, np.newaxis)]) print('Normalized confusion matrix') else: print('Confusion matrix, without normalization') print(cm) thresh = (cm.max() / 2.0) for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('#BFD1D4' if (cm[(i, j)] > thresh) else 'black')) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') if normalize: plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg')) else: plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))<|docstring|>This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.<|endoftext|>
b685a974abd0688a0f7b49d09bba0c05b414da9fdbe78a45c401f159d0599ed0
def set_samples_info(): 'Walks through the train and valid directories\n and returns number of images' white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'} dirs_info = {config.train_dir: 0, config.validation_dir: 0} for d in dirs_info: iglob_iter = glob.iglob((d + '**/*.*')) for i in iglob_iter: (filename, file_extension) = os.path.splitext(i) if (file_extension[1:] in white_list_formats): dirs_info[d] += 1 config.nb_train_samples = dirs_info[config.train_dir] config.nb_validation_samples = dirs_info[config.validation_dir]
Walks through the train and valid directories and returns number of images
util.py
set_samples_info
oleksandrlazariev/keras-transfer-learning-for-oxford102
287
python
def set_samples_info(): 'Walks through the train and valid directories\n and returns number of images' white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'} dirs_info = {config.train_dir: 0, config.validation_dir: 0} for d in dirs_info: iglob_iter = glob.iglob((d + '**/*.*')) for i in iglob_iter: (filename, file_extension) = os.path.splitext(i) if (file_extension[1:] in white_list_formats): dirs_info[d] += 1 config.nb_train_samples = dirs_info[config.train_dir] config.nb_validation_samples = dirs_info[config.validation_dir]
def set_samples_info(): 'Walks through the train and valid directories\n and returns number of images' white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'} dirs_info = {config.train_dir: 0, config.validation_dir: 0} for d in dirs_info: iglob_iter = glob.iglob((d + '**/*.*')) for i in iglob_iter: (filename, file_extension) = os.path.splitext(i) if (file_extension[1:] in white_list_formats): dirs_info[d] += 1 config.nb_train_samples = dirs_info[config.train_dir] config.nb_validation_samples = dirs_info[config.validation_dir]<|docstring|>Walks through the train and valid directories and returns number of images<|endoftext|>
c19cfe0eeafa366c6e823da3f8a15684cb91257f52e601a25404356399bda2d0
def set_classes_from_train_dir(): 'Returns classes based on directories in train directory' d = config.train_dir config.classes = sorted([o for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))])
Returns classes based on directories in train directory
util.py
set_classes_from_train_dir
oleksandrlazariev/keras-transfer-learning-for-oxford102
287
python
def set_classes_from_train_dir(): d = config.train_dir config.classes = sorted([o for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))])
def set_classes_from_train_dir(): d = config.train_dir config.classes = sorted([o for o in os.listdir(d) if os.path.isdir(os.path.join(d, o))])<|docstring|>Returns classes based on directories in train directory<|endoftext|>
c947ac536b215cf1ac37d5dd8ad08b59c0562b509d563a1de52e806b8e5a7b67
def override_keras_directory_iterator_next(): 'Overrides .next method of DirectoryIterator in Keras\n to reorder color channels for images from RGB to BGR' from keras.preprocessing.image import DirectoryIterator original_next = DirectoryIterator.next if ('custom_next' in str(original_next)): return def custom_next(self): (batch_x, batch_y) = original_next(self) batch_x = batch_x[(:, ::(- 1), :, :)] return (batch_x, batch_y) DirectoryIterator.next = custom_next
Overrides .next method of DirectoryIterator in Keras to reorder color channels for images from RGB to BGR
util.py
override_keras_directory_iterator_next
oleksandrlazariev/keras-transfer-learning-for-oxford102
287
python
def override_keras_directory_iterator_next(): 'Overrides .next method of DirectoryIterator in Keras\n to reorder color channels for images from RGB to BGR' from keras.preprocessing.image import DirectoryIterator original_next = DirectoryIterator.next if ('custom_next' in str(original_next)): return def custom_next(self): (batch_x, batch_y) = original_next(self) batch_x = batch_x[(:, ::(- 1), :, :)] return (batch_x, batch_y) DirectoryIterator.next = custom_next
def override_keras_directory_iterator_next(): 'Overrides .next method of DirectoryIterator in Keras\n to reorder color channels for images from RGB to BGR' from keras.preprocessing.image import DirectoryIterator original_next = DirectoryIterator.next if ('custom_next' in str(original_next)): return def custom_next(self): (batch_x, batch_y) = original_next(self) batch_x = batch_x[(:, ::(- 1), :, :)] return (batch_x, batch_y) DirectoryIterator.next = custom_next<|docstring|>Overrides .next method of DirectoryIterator in Keras to reorder color channels for images from RGB to BGR<|endoftext|>
935d7d8840ee4a3d857848dea795ceffc7d0d2bec191f03b8c42685d8fd836f5
@staticmethod def get_best_type(entity_types, sep='.'): 'determines best entity type on a majority-win basis' if (len(entity_types) == 1): return list(entity_types)[0] entity_types = [t.split(sep) for t in sorted(list(entity_types))] basetype_to_count = defaultdict(int) for t in entity_types: basetype_to_count[t[0]] += 1 majority_basetype = max(list(basetype_to_count.keys()), key=(lambda x: basetype_to_count[x])) subtype_candidates = [t for t in entity_types if ((len(t) > 1) and (t[0] == majority_basetype))] if (len(subtype_candidates) == 0): return majority_basetype subtype_to_count = defaultdict(int) for t in subtype_candidates: subtype_to_count[t[1]] += 1 majority_subtype = max(list(subtype_to_count.keys()), key=(lambda x: subtype_to_count[x])) if (list(subtype_to_count.values()).count(subtype_to_count[majority_subtype]) > 1): return majority_basetype subsubtype_candidates = [t for t in entity_types if ((len(t) == 3) and (t[1] == majority_subtype))] if (len(subsubtype_candidates) == 0): return sep.join([majority_basetype, majority_subtype]) subsubtype_to_count = defaultdict(int) for t in subsubtype_candidates: subsubtype_to_count[t[2]] += 1 majority_subsubtype = max(list(subsubtype_to_count.keys()), key=(lambda x: subsubtype_to_count[x])) if (list(subsubtype_to_count.values()).count(subsubtype_to_count[majority_subtype]) > 1): return sep.join([majority_basetype, majority_subtype]) return sep.join([majority_basetype, majority_subtype, majority_subsubtype])
determines best entity type on a majority-win basis
serif/model/mention_coref_model.py
get_best_type
BBN-E/ZS4IE
7
python
@staticmethod def get_best_type(entity_types, sep='.'): if (len(entity_types) == 1): return list(entity_types)[0] entity_types = [t.split(sep) for t in sorted(list(entity_types))] basetype_to_count = defaultdict(int) for t in entity_types: basetype_to_count[t[0]] += 1 majority_basetype = max(list(basetype_to_count.keys()), key=(lambda x: basetype_to_count[x])) subtype_candidates = [t for t in entity_types if ((len(t) > 1) and (t[0] == majority_basetype))] if (len(subtype_candidates) == 0): return majority_basetype subtype_to_count = defaultdict(int) for t in subtype_candidates: subtype_to_count[t[1]] += 1 majority_subtype = max(list(subtype_to_count.keys()), key=(lambda x: subtype_to_count[x])) if (list(subtype_to_count.values()).count(subtype_to_count[majority_subtype]) > 1): return majority_basetype subsubtype_candidates = [t for t in entity_types if ((len(t) == 3) and (t[1] == majority_subtype))] if (len(subsubtype_candidates) == 0): return sep.join([majority_basetype, majority_subtype]) subsubtype_to_count = defaultdict(int) for t in subsubtype_candidates: subsubtype_to_count[t[2]] += 1 majority_subsubtype = max(list(subsubtype_to_count.keys()), key=(lambda x: subsubtype_to_count[x])) if (list(subsubtype_to_count.values()).count(subsubtype_to_count[majority_subtype]) > 1): return sep.join([majority_basetype, majority_subtype]) return sep.join([majority_basetype, majority_subtype, majority_subsubtype])
@staticmethod def get_best_type(entity_types, sep='.'): if (len(entity_types) == 1): return list(entity_types)[0] entity_types = [t.split(sep) for t in sorted(list(entity_types))] basetype_to_count = defaultdict(int) for t in entity_types: basetype_to_count[t[0]] += 1 majority_basetype = max(list(basetype_to_count.keys()), key=(lambda x: basetype_to_count[x])) subtype_candidates = [t for t in entity_types if ((len(t) > 1) and (t[0] == majority_basetype))] if (len(subtype_candidates) == 0): return majority_basetype subtype_to_count = defaultdict(int) for t in subtype_candidates: subtype_to_count[t[1]] += 1 majority_subtype = max(list(subtype_to_count.keys()), key=(lambda x: subtype_to_count[x])) if (list(subtype_to_count.values()).count(subtype_to_count[majority_subtype]) > 1): return majority_basetype subsubtype_candidates = [t for t in entity_types if ((len(t) == 3) and (t[1] == majority_subtype))] if (len(subsubtype_candidates) == 0): return sep.join([majority_basetype, majority_subtype]) subsubtype_to_count = defaultdict(int) for t in subsubtype_candidates: subsubtype_to_count[t[2]] += 1 majority_subsubtype = max(list(subsubtype_to_count.keys()), key=(lambda x: subsubtype_to_count[x])) if (list(subsubtype_to_count.values()).count(subsubtype_to_count[majority_subtype]) > 1): return sep.join([majority_basetype, majority_subtype]) return sep.join([majority_basetype, majority_subtype, majority_subsubtype])<|docstring|>determines best entity type on a majority-win basis<|endoftext|>
114fbf558a8e1ea3529ddc743fed7f53cad78edbbf076936a5b21ef2c85d1325
def vel_to_freq(vel_or_freq, rest_freq=(1.42040575177 * u.GHz), unit=u.Hz): '\n Using radio velocity here.\n ' equiv = u.doppler_radio(rest_freq) return vel_or_freq.to(unit, equiv)
Using radio velocity here.
15A-175/HI/imaging/match_and_split.py
vel_to_freq
Astroua/LocalGroup-VLA
1
python
def vel_to_freq(vel_or_freq, rest_freq=(1.42040575177 * u.GHz), unit=u.Hz): '\n \n ' equiv = u.doppler_radio(rest_freq) return vel_or_freq.to(unit, equiv)
def vel_to_freq(vel_or_freq, rest_freq=(1.42040575177 * u.GHz), unit=u.Hz): '\n \n ' equiv = u.doppler_radio(rest_freq) return vel_or_freq.to(unit, equiv)<|docstring|>Using radio velocity here.<|endoftext|>
17007bc830836899c95eca3cf3cc6387c67cd33bfe90975caa958579d13e5635
@_timing.time def eval_sequence(self, data): 'Calculates ID metrics for one sequence' res = {} for field in self.fields: res[field] = 0 if (data['num_tracker_dets'] == 0): res['IDFN'] = data['num_gt_dets'] return res if (data['num_gt_dets'] == 0): res['IDFP'] = data['num_tracker_dets'] return res potential_matches_count = np.zeros((data['num_gt_ids'], data['num_tracker_ids'])) gt_id_count = np.zeros(data['num_gt_ids']) tracker_id_count = np.zeros(data['num_tracker_ids']) for (t, (gt_ids_t, tracker_ids_t)) in enumerate(zip(data['gt_ids'], data['tracker_ids'])): matches_mask = np.greater_equal(data['similarity_scores'][t], self.threshold) (match_idx_gt, match_idx_tracker) = np.nonzero(matches_mask) potential_matches_count[(gt_ids_t[match_idx_gt], tracker_ids_t[match_idx_tracker])] += 1 gt_id_count[gt_ids_t] += 1 tracker_id_count[tracker_ids_t] += 1 num_gt_ids = data['num_gt_ids'] num_tracker_ids = data['num_tracker_ids'] fp_mat = np.zeros(((num_gt_ids + num_tracker_ids), (num_gt_ids + num_tracker_ids))) fn_mat = np.zeros(((num_gt_ids + num_tracker_ids), (num_gt_ids + num_tracker_ids))) fp_mat[(num_gt_ids:, :num_tracker_ids)] = 10000000000.0 fn_mat[(:num_gt_ids, num_tracker_ids:)] = 10000000000.0 for gt_id in range(num_gt_ids): fn_mat[(gt_id, :num_tracker_ids)] = gt_id_count[gt_id] fn_mat[(gt_id, (num_tracker_ids + gt_id))] = gt_id_count[gt_id] for tracker_id in range(num_tracker_ids): fp_mat[(:num_gt_ids, tracker_id)] = tracker_id_count[tracker_id] fp_mat[((tracker_id + num_gt_ids), tracker_id)] = tracker_id_count[tracker_id] fn_mat[(:num_gt_ids, :num_tracker_ids)] -= potential_matches_count fp_mat[(:num_gt_ids, :num_tracker_ids)] -= potential_matches_count (match_rows, match_cols) = linear_sum_assignment((fn_mat + fp_mat)) res['IDFN'] = fn_mat[(match_rows, match_cols)].sum().astype(np.int) res['IDFP'] = fp_mat[(match_rows, match_cols)].sum().astype(np.int) res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.int) res = self._compute_final_fields(res) return res
Calculates ID metrics for one sequence
eval/trackeval/metrics/identity.py
eval_sequence
philip-fu/UniTrack
240
python
@_timing.time def eval_sequence(self, data): res = {} for field in self.fields: res[field] = 0 if (data['num_tracker_dets'] == 0): res['IDFN'] = data['num_gt_dets'] return res if (data['num_gt_dets'] == 0): res['IDFP'] = data['num_tracker_dets'] return res potential_matches_count = np.zeros((data['num_gt_ids'], data['num_tracker_ids'])) gt_id_count = np.zeros(data['num_gt_ids']) tracker_id_count = np.zeros(data['num_tracker_ids']) for (t, (gt_ids_t, tracker_ids_t)) in enumerate(zip(data['gt_ids'], data['tracker_ids'])): matches_mask = np.greater_equal(data['similarity_scores'][t], self.threshold) (match_idx_gt, match_idx_tracker) = np.nonzero(matches_mask) potential_matches_count[(gt_ids_t[match_idx_gt], tracker_ids_t[match_idx_tracker])] += 1 gt_id_count[gt_ids_t] += 1 tracker_id_count[tracker_ids_t] += 1 num_gt_ids = data['num_gt_ids'] num_tracker_ids = data['num_tracker_ids'] fp_mat = np.zeros(((num_gt_ids + num_tracker_ids), (num_gt_ids + num_tracker_ids))) fn_mat = np.zeros(((num_gt_ids + num_tracker_ids), (num_gt_ids + num_tracker_ids))) fp_mat[(num_gt_ids:, :num_tracker_ids)] = 10000000000.0 fn_mat[(:num_gt_ids, num_tracker_ids:)] = 10000000000.0 for gt_id in range(num_gt_ids): fn_mat[(gt_id, :num_tracker_ids)] = gt_id_count[gt_id] fn_mat[(gt_id, (num_tracker_ids + gt_id))] = gt_id_count[gt_id] for tracker_id in range(num_tracker_ids): fp_mat[(:num_gt_ids, tracker_id)] = tracker_id_count[tracker_id] fp_mat[((tracker_id + num_gt_ids), tracker_id)] = tracker_id_count[tracker_id] fn_mat[(:num_gt_ids, :num_tracker_ids)] -= potential_matches_count fp_mat[(:num_gt_ids, :num_tracker_ids)] -= potential_matches_count (match_rows, match_cols) = linear_sum_assignment((fn_mat + fp_mat)) res['IDFN'] = fn_mat[(match_rows, match_cols)].sum().astype(np.int) res['IDFP'] = fp_mat[(match_rows, match_cols)].sum().astype(np.int) res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.int) res = self._compute_final_fields(res) return res
@_timing.time def eval_sequence(self, data): res = {} for field in self.fields: res[field] = 0 if (data['num_tracker_dets'] == 0): res['IDFN'] = data['num_gt_dets'] return res if (data['num_gt_dets'] == 0): res['IDFP'] = data['num_tracker_dets'] return res potential_matches_count = np.zeros((data['num_gt_ids'], data['num_tracker_ids'])) gt_id_count = np.zeros(data['num_gt_ids']) tracker_id_count = np.zeros(data['num_tracker_ids']) for (t, (gt_ids_t, tracker_ids_t)) in enumerate(zip(data['gt_ids'], data['tracker_ids'])): matches_mask = np.greater_equal(data['similarity_scores'][t], self.threshold) (match_idx_gt, match_idx_tracker) = np.nonzero(matches_mask) potential_matches_count[(gt_ids_t[match_idx_gt], tracker_ids_t[match_idx_tracker])] += 1 gt_id_count[gt_ids_t] += 1 tracker_id_count[tracker_ids_t] += 1 num_gt_ids = data['num_gt_ids'] num_tracker_ids = data['num_tracker_ids'] fp_mat = np.zeros(((num_gt_ids + num_tracker_ids), (num_gt_ids + num_tracker_ids))) fn_mat = np.zeros(((num_gt_ids + num_tracker_ids), (num_gt_ids + num_tracker_ids))) fp_mat[(num_gt_ids:, :num_tracker_ids)] = 10000000000.0 fn_mat[(:num_gt_ids, num_tracker_ids:)] = 10000000000.0 for gt_id in range(num_gt_ids): fn_mat[(gt_id, :num_tracker_ids)] = gt_id_count[gt_id] fn_mat[(gt_id, (num_tracker_ids + gt_id))] = gt_id_count[gt_id] for tracker_id in range(num_tracker_ids): fp_mat[(:num_gt_ids, tracker_id)] = tracker_id_count[tracker_id] fp_mat[((tracker_id + num_gt_ids), tracker_id)] = tracker_id_count[tracker_id] fn_mat[(:num_gt_ids, :num_tracker_ids)] -= potential_matches_count fp_mat[(:num_gt_ids, :num_tracker_ids)] -= potential_matches_count (match_rows, match_cols) = linear_sum_assignment((fn_mat + fp_mat)) res['IDFN'] = fn_mat[(match_rows, match_cols)].sum().astype(np.int) res['IDFP'] = fp_mat[(match_rows, match_cols)].sum().astype(np.int) res['IDTP'] = (gt_id_count.sum() - res['IDFN']).astype(np.int) res = self._compute_final_fields(res) return res<|docstring|>Calculates ID metrics for 
one sequence<|endoftext|>
b541bc1593d77af84887e88063938fa26eea72a48f92c905a0f16ffac25f3ccb
def combine_classes_class_averaged(self, all_res): 'Combines metrics across all classes by averaging over the class values' res = {} for field in self.integer_fields: res[field] = self._combine_sum({k: v for (k, v) in all_res.items() if (((v['IDTP'] + v['IDFN']) + v['IDFP']) > (0 + np.finfo('float').eps))}, field) for field in self.float_fields: res[field] = np.mean([v[field] for v in all_res.values() if (((v['IDTP'] + v['IDFN']) + v['IDFP']) > (0 + np.finfo('float').eps))], axis=0) return res
Combines metrics across all classes by averaging over the class values
eval/trackeval/metrics/identity.py
combine_classes_class_averaged
philip-fu/UniTrack
240
python
def combine_classes_class_averaged(self, all_res): res = {} for field in self.integer_fields: res[field] = self._combine_sum({k: v for (k, v) in all_res.items() if (((v['IDTP'] + v['IDFN']) + v['IDFP']) > (0 + np.finfo('float').eps))}, field) for field in self.float_fields: res[field] = np.mean([v[field] for v in all_res.values() if (((v['IDTP'] + v['IDFN']) + v['IDFP']) > (0 + np.finfo('float').eps))], axis=0) return res
def combine_classes_class_averaged(self, all_res): res = {} for field in self.integer_fields: res[field] = self._combine_sum({k: v for (k, v) in all_res.items() if (((v['IDTP'] + v['IDFN']) + v['IDFP']) > (0 + np.finfo('float').eps))}, field) for field in self.float_fields: res[field] = np.mean([v[field] for v in all_res.values() if (((v['IDTP'] + v['IDFN']) + v['IDFP']) > (0 + np.finfo('float').eps))], axis=0) return res<|docstring|>Combines metrics across all classes by averaging over the class values<|endoftext|>
3033ff6f193eb090c4481f3ad24095923c1aa23b7c3f6c22836bd8487c044d4b
def combine_classes_det_averaged(self, all_res): 'Combines metrics across all classes by averaging over the detection values' res = {} for field in self.integer_fields: res[field] = self._combine_sum(all_res, field) res = self._compute_final_fields(res) return res
Combines metrics across all classes by averaging over the detection values
eval/trackeval/metrics/identity.py
combine_classes_det_averaged
philip-fu/UniTrack
240
python
def combine_classes_det_averaged(self, all_res): res = {} for field in self.integer_fields: res[field] = self._combine_sum(all_res, field) res = self._compute_final_fields(res) return res
def combine_classes_det_averaged(self, all_res): res = {} for field in self.integer_fields: res[field] = self._combine_sum(all_res, field) res = self._compute_final_fields(res) return res<|docstring|>Combines metrics across all classes by averaging over the detection values<|endoftext|>
fa80d2f3af3bba0febb7468ddcfeccecb938dda0e6bcadb228467c1cc49e6ea7
def combine_sequences(self, all_res): 'Combines metrics across all sequences' res = {} for field in self.integer_fields: res[field] = self._combine_sum(all_res, field) res = self._compute_final_fields(res) return res
Combines metrics across all sequences
eval/trackeval/metrics/identity.py
combine_sequences
philip-fu/UniTrack
240
python
def combine_sequences(self, all_res): res = {} for field in self.integer_fields: res[field] = self._combine_sum(all_res, field) res = self._compute_final_fields(res) return res
def combine_sequences(self, all_res): res = {} for field in self.integer_fields: res[field] = self._combine_sum(all_res, field) res = self._compute_final_fields(res) return res<|docstring|>Combines metrics across all sequences<|endoftext|>
dcad25f32b21ca658004843da59d716b5a0394784fb538a1c8ca8119034f7c51
@staticmethod def _compute_final_fields(res): "Calculate sub-metric ('field') values which only depend on other sub-metric values.\n This function is used both for both per-sequence calculation, and in combining values across sequences.\n " res['IDR'] = (res['IDTP'] / np.maximum(1.0, (res['IDTP'] + res['IDFN']))) res['IDP'] = (res['IDTP'] / np.maximum(1.0, (res['IDTP'] + res['IDFP']))) res['IDF1'] = (res['IDTP'] / np.maximum(1.0, ((res['IDTP'] + (0.5 * res['IDFP'])) + (0.5 * res['IDFN'])))) return res
Calculate sub-metric ('field') values which only depend on other sub-metric values. This function is used both for both per-sequence calculation, and in combining values across sequences.
eval/trackeval/metrics/identity.py
_compute_final_fields
philip-fu/UniTrack
240
python
@staticmethod def _compute_final_fields(res): "Calculate sub-metric ('field') values which only depend on other sub-metric values.\n This function is used both for both per-sequence calculation, and in combining values across sequences.\n " res['IDR'] = (res['IDTP'] / np.maximum(1.0, (res['IDTP'] + res['IDFN']))) res['IDP'] = (res['IDTP'] / np.maximum(1.0, (res['IDTP'] + res['IDFP']))) res['IDF1'] = (res['IDTP'] / np.maximum(1.0, ((res['IDTP'] + (0.5 * res['IDFP'])) + (0.5 * res['IDFN'])))) return res
@staticmethod def _compute_final_fields(res): "Calculate sub-metric ('field') values which only depend on other sub-metric values.\n This function is used both for both per-sequence calculation, and in combining values across sequences.\n " res['IDR'] = (res['IDTP'] / np.maximum(1.0, (res['IDTP'] + res['IDFN']))) res['IDP'] = (res['IDTP'] / np.maximum(1.0, (res['IDTP'] + res['IDFP']))) res['IDF1'] = (res['IDTP'] / np.maximum(1.0, ((res['IDTP'] + (0.5 * res['IDFP'])) + (0.5 * res['IDFN'])))) return res<|docstring|>Calculate sub-metric ('field') values which only depend on other sub-metric values. This function is used both for both per-sequence calculation, and in combining values across sequences.<|endoftext|>
957e24b18c9ab9ceec675f2e40623d24805634ea988a5caaa73bc36b95c9adeb
def _is_collinear(self, other): 'Test if a segment is collinear with another segment\n\n :param other: The other segment.\n :return: True if the segments are collinear else False.\n ' if (almostequal(other, self) or almostequal(other, (- self))): return True a = (self.p1 - other.p1) b = (self.p1 - other.p2) angle_between = a.cross(b) if almostequal(angle_between, Vector3D(0, 0, 0)): return True a = (self.p2 - other.p1) b = (self.p2 - other.p2) angle_between = a.cross(b) if almostequal(angle_between, Vector3D(0, 0, 0)): return True return False
Test if a segment is collinear with another segment :param other: The other segment. :return: True if the segments are collinear else False.
geomeppy/geom/segments.py
_is_collinear
katsuya0719/geomeppy
29
python
def _is_collinear(self, other): 'Test if a segment is collinear with another segment\n\n :param other: The other segment.\n :return: True if the segments are collinear else False.\n ' if (almostequal(other, self) or almostequal(other, (- self))): return True a = (self.p1 - other.p1) b = (self.p1 - other.p2) angle_between = a.cross(b) if almostequal(angle_between, Vector3D(0, 0, 0)): return True a = (self.p2 - other.p1) b = (self.p2 - other.p2) angle_between = a.cross(b) if almostequal(angle_between, Vector3D(0, 0, 0)): return True return False
def _is_collinear(self, other): 'Test if a segment is collinear with another segment\n\n :param other: The other segment.\n :return: True if the segments are collinear else False.\n ' if (almostequal(other, self) or almostequal(other, (- self))): return True a = (self.p1 - other.p1) b = (self.p1 - other.p2) angle_between = a.cross(b) if almostequal(angle_between, Vector3D(0, 0, 0)): return True a = (self.p2 - other.p1) b = (self.p2 - other.p2) angle_between = a.cross(b) if almostequal(angle_between, Vector3D(0, 0, 0)): return True return False<|docstring|>Test if a segment is collinear with another segment :param other: The other segment. :return: True if the segments are collinear else False.<|endoftext|>
fe4b6415f84cbf965e4f7d548120d33bd85c575ed2b7dfffb40a6c086e6bc05a
def _on_poly_edge(self, poly): 'Test if segment lies on any edge of a polygon\n\n :param poly: The polygon to test against.\n :returns: True if segment lies on any edge of the polygon, else False.\n ' for edge in poly.edges: if self._is_collinear(edge): return True return False
Test if segment lies on any edge of a polygon :param poly: The polygon to test against. :returns: True if segment lies on any edge of the polygon, else False.
geomeppy/geom/segments.py
_on_poly_edge
katsuya0719/geomeppy
29
python
def _on_poly_edge(self, poly): 'Test if segment lies on any edge of a polygon\n\n :param poly: The polygon to test against.\n :returns: True if segment lies on any edge of the polygon, else False.\n ' for edge in poly.edges: if self._is_collinear(edge): return True return False
def _on_poly_edge(self, poly): 'Test if segment lies on any edge of a polygon\n\n :param poly: The polygon to test against.\n :returns: True if segment lies on any edge of the polygon, else False.\n ' for edge in poly.edges: if self._is_collinear(edge): return True return False<|docstring|>Test if segment lies on any edge of a polygon :param poly: The polygon to test against. :returns: True if segment lies on any edge of the polygon, else False.<|endoftext|>
a083f233cd516f26303fe051fd0186701460a0faf8e5fadbf97841258cb3beaa
@staticmethod def default_opts(): 'Builds an HParam object with default workflow options.' return tf.contrib.training.HParams(summarize=True, evaluate=True, train=True, training_progress_interval=0, prime=None, prime_num_frames=None, num_testing_batches=None, testing_progress_interval=1, clear_before_test=True, frame_padding_size=0, frame_padding_value=0.0, profile_file=None)
Builds an HParam object with default workflow options.
rsm/workflows/video_workflow.py
default_opts
Cerenaut/rsm
0
python
@staticmethod def default_opts(): return tf.contrib.training.HParams(summarize=True, evaluate=True, train=True, training_progress_interval=0, prime=None, prime_num_frames=None, num_testing_batches=None, testing_progress_interval=1, clear_before_test=True, frame_padding_size=0, frame_padding_value=0.0, profile_file=None)
@staticmethod def default_opts(): return tf.contrib.training.HParams(summarize=True, evaluate=True, train=True, training_progress_interval=0, prime=None, prime_num_frames=None, num_testing_batches=None, testing_progress_interval=1, clear_before_test=True, frame_padding_size=0, frame_padding_value=0.0, profile_file=None)<|docstring|>Builds an HParam object with default workflow options.<|endoftext|>
21582e72d3697b49a4744b41d8fb45ecfa50523e8ef74339adb882033f8b10b2
def _setup_dataset(self): 'Setup the dataset and retrieve inputs, labels and initializers' with tf.variable_scope('dataset'): self._dataset = self._dataset_type(self._dataset_location) self._dataset.set_batch_size(self._hparams.batch_size) train_dataset = self._dataset.get_train(options=self._opts) train_dataset = train_dataset.batch(self._hparams.batch_size, drop_remainder=True) train_dataset = train_dataset.prefetch(1) train_dataset = train_dataset.repeat() test_dataset = self._dataset.get_test(options=self._opts) test_dataset = test_dataset.batch(self._hparams.batch_size, drop_remainder=True) test_dataset = test_dataset.prefetch(1) test_dataset = test_dataset.repeat() self._placeholders['dataset_handle'] = tf.placeholder(tf.string, shape=[], name='dataset_handle') with tf.variable_scope('dataset_iterators'): self._iterator = tf.data.Iterator.from_string_handle(self._placeholders['dataset_handle'], train_dataset.output_types, train_dataset.output_shapes) self._init_iterators() self._dataset_iterators = {} with tf.variable_scope('train_dataset'): self._dataset_iterators['training'] = train_dataset.make_initializable_iterator() with tf.variable_scope('test_dataset'): self._dataset_iterators['testing'] = test_dataset.make_initializable_iterator()
Setup the dataset and retrieve inputs, labels and initializers
rsm/workflows/video_workflow.py
_setup_dataset
Cerenaut/rsm
0
python
def _setup_dataset(self): with tf.variable_scope('dataset'): self._dataset = self._dataset_type(self._dataset_location) self._dataset.set_batch_size(self._hparams.batch_size) train_dataset = self._dataset.get_train(options=self._opts) train_dataset = train_dataset.batch(self._hparams.batch_size, drop_remainder=True) train_dataset = train_dataset.prefetch(1) train_dataset = train_dataset.repeat() test_dataset = self._dataset.get_test(options=self._opts) test_dataset = test_dataset.batch(self._hparams.batch_size, drop_remainder=True) test_dataset = test_dataset.prefetch(1) test_dataset = test_dataset.repeat() self._placeholders['dataset_handle'] = tf.placeholder(tf.string, shape=[], name='dataset_handle') with tf.variable_scope('dataset_iterators'): self._iterator = tf.data.Iterator.from_string_handle(self._placeholders['dataset_handle'], train_dataset.output_types, train_dataset.output_shapes) self._init_iterators() self._dataset_iterators = {} with tf.variable_scope('train_dataset'): self._dataset_iterators['training'] = train_dataset.make_initializable_iterator() with tf.variable_scope('test_dataset'): self._dataset_iterators['testing'] = test_dataset.make_initializable_iterator()
def _setup_dataset(self): with tf.variable_scope('dataset'): self._dataset = self._dataset_type(self._dataset_location) self._dataset.set_batch_size(self._hparams.batch_size) train_dataset = self._dataset.get_train(options=self._opts) train_dataset = train_dataset.batch(self._hparams.batch_size, drop_remainder=True) train_dataset = train_dataset.prefetch(1) train_dataset = train_dataset.repeat() test_dataset = self._dataset.get_test(options=self._opts) test_dataset = test_dataset.batch(self._hparams.batch_size, drop_remainder=True) test_dataset = test_dataset.prefetch(1) test_dataset = test_dataset.repeat() self._placeholders['dataset_handle'] = tf.placeholder(tf.string, shape=[], name='dataset_handle') with tf.variable_scope('dataset_iterators'): self._iterator = tf.data.Iterator.from_string_handle(self._placeholders['dataset_handle'], train_dataset.output_types, train_dataset.output_shapes) self._init_iterators() self._dataset_iterators = {} with tf.variable_scope('train_dataset'): self._dataset_iterators['training'] = train_dataset.make_initializable_iterator() with tf.variable_scope('test_dataset'): self._dataset_iterators['testing'] = test_dataset.make_initializable_iterator()<|docstring|>Setup the dataset and retrieve inputs, labels and initializers<|endoftext|>
fe6c778336dbe5c57cae0db9c1b2aeafeb885d9e5d916a5928ad2b5e99596376
def _setup_component(self): 'Setup the component' labels_one_hot = tf.one_hot(self._labels, self._dataset.num_classes) labels_one_hot_shape = labels_one_hot.get_shape().as_list() self._component = self._component_type() self._component.build(self._inputs, self._dataset.shape, labels_one_hot, labels_one_hot_shape, self._hparams) if self._summarize: self._build_summaries()
Setup the component
rsm/workflows/video_workflow.py
_setup_component
Cerenaut/rsm
0
python
def _setup_component(self): labels_one_hot = tf.one_hot(self._labels, self._dataset.num_classes) labels_one_hot_shape = labels_one_hot.get_shape().as_list() self._component = self._component_type() self._component.build(self._inputs, self._dataset.shape, labels_one_hot, labels_one_hot_shape, self._hparams) if self._summarize: self._build_summaries()
def _setup_component(self): labels_one_hot = tf.one_hot(self._labels, self._dataset.num_classes) labels_one_hot_shape = labels_one_hot.get_shape().as_list() self._component = self._component_type() self._component.build(self._inputs, self._dataset.shape, labels_one_hot, labels_one_hot_shape, self._hparams) if self._summarize: self._build_summaries()<|docstring|>Setup the component<|endoftext|>
f0a22a4f2b7d0e59dc7ea3415b3b68b35e4dd66510cb9418f52e675759baa878
def _build_summaries(self): 'Build TensorBoard summaries for multiple modes.' batch_types = ['training', 'encoding'] if self._freeze_training: batch_types.remove('training') self._component.build_summaries(batch_types)
Build TensorBoard summaries for multiple modes.
rsm/workflows/video_workflow.py
_build_summaries
Cerenaut/rsm
0
python
def _build_summaries(self): batch_types = ['training', 'encoding'] if self._freeze_training: batch_types.remove('training') self._component.build_summaries(batch_types)
def _build_summaries(self): batch_types = ['training', 'encoding'] if self._freeze_training: batch_types.remove('training') self._component.build_summaries(batch_types)<|docstring|>Build TensorBoard summaries for multiple modes.<|endoftext|>
ffaec9e18232a39c11a12c0d599e2dedd86130e9e56da50f42ea78ba039127bb
def _get_status(self): 'Return some string proxy for the losses or errors being optimized' loss = self._component.get_values(SequenceMemoryStack.prediction_loss) return loss
Return some string proxy for the losses or errors being optimized
rsm/workflows/video_workflow.py
_get_status
Cerenaut/rsm
0
python
def _get_status(self): loss = self._component.get_values(SequenceMemoryStack.prediction_loss) return loss
def _get_status(self): loss = self._component.get_values(SequenceMemoryStack.prediction_loss) return loss<|docstring|>Return some string proxy for the losses or errors being optimized<|endoftext|>
f828a060d0a2204908f413cfdb62997298c585ae521766aace0749369b8c9543
def _compute_end_state_mask(self, states): "Detect end of sequence and clear the component's history." history_mask = np.ones(self._hparams.batch_size) for b in range(self._hparams.batch_size): end_state = states[b] if end_state: history_mask[b] = 0.0 self._component.update_history(self._session, history_mask)
Detect end of sequence and clear the component's history.
rsm/workflows/video_workflow.py
_compute_end_state_mask
Cerenaut/rsm
0
python
def _compute_end_state_mask(self, states): history_mask = np.ones(self._hparams.batch_size) for b in range(self._hparams.batch_size): end_state = states[b] if end_state: history_mask[b] = 0.0 self._component.update_history(self._session, history_mask)
def _compute_end_state_mask(self, states): history_mask = np.ones(self._hparams.batch_size) for b in range(self._hparams.batch_size): end_state = states[b] if end_state: history_mask[b] = 0.0 self._component.update_history(self._session, history_mask)<|docstring|>Detect end of sequence and clear the component's history.<|endoftext|>
2282fc4032bcd56d3625cd2d9d9cf8f62870ac4d952d14ddf98ab540fc9c572a
def training_step(self, dataset_handle, global_step, phase_change=None): 'The training procedure within the batch loop' del phase_change if self._freeze_training: batch_type = 'encoding' else: batch_type = 'training' data_subset = 'train' self._do_batch(dataset_handle, batch_type, data_subset, global_step)
The training procedure within the batch loop
rsm/workflows/video_workflow.py
training_step
Cerenaut/rsm
0
python
def training_step(self, dataset_handle, global_step, phase_change=None): del phase_change if self._freeze_training: batch_type = 'encoding' else: batch_type = 'training' data_subset = 'train' self._do_batch(dataset_handle, batch_type, data_subset, global_step)
def training_step(self, dataset_handle, global_step, phase_change=None): del phase_change if self._freeze_training: batch_type = 'encoding' else: batch_type = 'training' data_subset = 'train' self._do_batch(dataset_handle, batch_type, data_subset, global_step)<|docstring|>The training procedure within the batch loop<|endoftext|>
65b8b0b100cb07cf7e0a110032cc50a870831aea9092ea0a5488621a692f4260
def testing(self, dataset_handle, global_step): 'The testing procedure within the batch loop' batch_type = 'encoding' data_subset = 'test' self._do_batch(dataset_handle, batch_type, data_subset, global_step)
The testing procedure within the batch loop
rsm/workflows/video_workflow.py
testing
Cerenaut/rsm
0
python
def testing(self, dataset_handle, global_step): batch_type = 'encoding' data_subset = 'test' self._do_batch(dataset_handle, batch_type, data_subset, global_step)
def testing(self, dataset_handle, global_step): batch_type = 'encoding' data_subset = 'test' self._do_batch(dataset_handle, batch_type, data_subset, global_step)<|docstring|>The testing procedure within the batch loop<|endoftext|>
ad28dd446fe506a0be88c0fc0544c10242cc05ec9fd00bfd86ac2e69f2cfd1ba
def _compute_prime_end(self, states): 'Start self-looping when model is primed.' for b in range(self._hparams.batch_size): state = states[b] decoding = self.get_decoded_frame() previous = self.get_previous_frame() if (state >= (self._opts['prime_num_frames'] - 1)): previous[b] = decoding[b] self.set_previous_frame(previous)
Start self-looping when model is primed.
rsm/workflows/video_workflow.py
_compute_prime_end
Cerenaut/rsm
0
python
def _compute_prime_end(self, states): for b in range(self._hparams.batch_size): state = states[b] decoding = self.get_decoded_frame() previous = self.get_previous_frame() if (state >= (self._opts['prime_num_frames'] - 1)): previous[b] = decoding[b] self.set_previous_frame(previous)
def _compute_prime_end(self, states): for b in range(self._hparams.batch_size): state = states[b] decoding = self.get_decoded_frame() previous = self.get_previous_frame() if (state >= (self._opts['prime_num_frames'] - 1)): previous[b] = decoding[b] self.set_previous_frame(previous)<|docstring|>Start self-looping when model is primed.<|endoftext|>
a57e53000a903ade02ee4c4d553841aa22387d19542c2e29f826c862a52eddce
def _do_batch(self, dataset_handle, batch_type, data_subset, global_step): 'The training procedure within the batch loop' feed_dict = {self._placeholders['dataset_handle']: dataset_handle} self._component.update_feed_dict(feed_dict, batch_type) fetches = {'inputs': self._inputs, 'states': self._states, 'end_states': self._end_states} self._component.add_fetches(fetches, batch_type) if self.do_profile(): logging.info('Running batch with profile') fetched = self.session_run(fetches, feed_dict=feed_dict) self._component.set_fetches(fetched, batch_type) self._component.write_summaries(global_step, self._writer, batch_type=batch_type) self._inputs_vals = fetched['inputs'] self._states_vals = fetched['states'] self._end_states_vals = fetched['end_states'] self._do_batch_after_hook(global_step, batch_type, fetched, feed_dict, data_subset) return feed_dict
The training procedure within the batch loop
rsm/workflows/video_workflow.py
_do_batch
Cerenaut/rsm
0
python
def _do_batch(self, dataset_handle, batch_type, data_subset, global_step): feed_dict = {self._placeholders['dataset_handle']: dataset_handle} self._component.update_feed_dict(feed_dict, batch_type) fetches = {'inputs': self._inputs, 'states': self._states, 'end_states': self._end_states} self._component.add_fetches(fetches, batch_type) if self.do_profile(): logging.info('Running batch with profile') fetched = self.session_run(fetches, feed_dict=feed_dict) self._component.set_fetches(fetched, batch_type) self._component.write_summaries(global_step, self._writer, batch_type=batch_type) self._inputs_vals = fetched['inputs'] self._states_vals = fetched['states'] self._end_states_vals = fetched['end_states'] self._do_batch_after_hook(global_step, batch_type, fetched, feed_dict, data_subset) return feed_dict
def _do_batch(self, dataset_handle, batch_type, data_subset, global_step): feed_dict = {self._placeholders['dataset_handle']: dataset_handle} self._component.update_feed_dict(feed_dict, batch_type) fetches = {'inputs': self._inputs, 'states': self._states, 'end_states': self._end_states} self._component.add_fetches(fetches, batch_type) if self.do_profile(): logging.info('Running batch with profile') fetched = self.session_run(fetches, feed_dict=feed_dict) self._component.set_fetches(fetched, batch_type) self._component.write_summaries(global_step, self._writer, batch_type=batch_type) self._inputs_vals = fetched['inputs'] self._states_vals = fetched['states'] self._end_states_vals = fetched['end_states'] self._do_batch_after_hook(global_step, batch_type, fetched, feed_dict, data_subset) return feed_dict<|docstring|>The training procedure within the batch loop<|endoftext|>
56e4502894ead3336bf5e8b1577d22d89544d94f34ff6773149de0ad3232b8e7
def _do_batch_after_hook(self, global_step, batch_type, fetched, feed_dict, data_subset): 'Things to do after a batch is completed.' del feed_dict, fetched if ((batch_type == 'encoding') and (data_subset == 'test')): decoding = self.get_decoded_frame() if self._opts['prime']: self._compute_prime_end(self._states_vals) self._output_frames.append(decoding) self._groundtruth_frames.append(self._inputs_vals) if (self.previous_frame is None): self.previous_frame = np.zeros_like(self._inputs_vals) def denormalize(arr): eps = 1e-08 return ((arr - arr.min()) * ((1 / ((arr.max() - arr.min()) + eps)) * 255)).astype('uint8') def unpad(x, pad_width): slices = [] for c in pad_width: e = (None if (c[1] == 0) else (- c[1])) slices.append(slice(c[0], e)) return x[tuple(slices)] A = self._inputs_vals B = decoding C = self.previous_frame D = self._component.get_gan_inputs() padding_size = self._opts['frame_padding_size'] if (padding_size > 0): pad_h = ([padding_size] * 2) pad_w = ([padding_size] * 2) pad_width = [[0, 0], pad_h, pad_w, [0, 0]] A = unpad(A, pad_width) B = unpad(B, pad_width) C = unpad(C, pad_width) D = unpad(D, pad_width) num_features = np.prod(A.shape[1:]) mse_gan = ((A - B) ** 2).mean(axis=None) mse_rsm = ((A - D) ** 2).mean(axis=None) mse_prev = ((A - C) ** 2).mean(axis=None) self.all_mse_gan.append((mse_gan * num_features)) self.all_mse_rsm.append((mse_rsm * num_features)) self.all_mse_prev.append((mse_prev * num_features)) avg_mse_gan = np.average(self.all_mse_gan) avg_mse_rsm = np.average(self.all_mse_rsm) avg_mse_prev = np.average(self.all_mse_prev) summary = tf.Summary() summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_gan'), simple_value=mse_gan) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_rsm'), simple_value=mse_rsm) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_prev'), simple_value=mse_prev) summary.value.add(tag=(((self._component.name + 
'/summaries/') + batch_type) + '/avg_mse_gan'), simple_value=avg_mse_gan) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_rsm'), simple_value=avg_mse_rsm) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_prev'), simple_value=avg_mse_prev) self._writer.add_summary(summary, global_step) self._writer.flush() self.previous_frame = self._inputs_vals self._component.update_recurrent_and_feedback() self._component.update_statistics(batch_type, self._session) self._compute_end_state_mask(self._end_states_vals)
Things to do after a batch is completed.
rsm/workflows/video_workflow.py
_do_batch_after_hook
Cerenaut/rsm
0
python
def _do_batch_after_hook(self, global_step, batch_type, fetched, feed_dict, data_subset): del feed_dict, fetched if ((batch_type == 'encoding') and (data_subset == 'test')): decoding = self.get_decoded_frame() if self._opts['prime']: self._compute_prime_end(self._states_vals) self._output_frames.append(decoding) self._groundtruth_frames.append(self._inputs_vals) if (self.previous_frame is None): self.previous_frame = np.zeros_like(self._inputs_vals) def denormalize(arr): eps = 1e-08 return ((arr - arr.min()) * ((1 / ((arr.max() - arr.min()) + eps)) * 255)).astype('uint8') def unpad(x, pad_width): slices = [] for c in pad_width: e = (None if (c[1] == 0) else (- c[1])) slices.append(slice(c[0], e)) return x[tuple(slices)] A = self._inputs_vals B = decoding C = self.previous_frame D = self._component.get_gan_inputs() padding_size = self._opts['frame_padding_size'] if (padding_size > 0): pad_h = ([padding_size] * 2) pad_w = ([padding_size] * 2) pad_width = [[0, 0], pad_h, pad_w, [0, 0]] A = unpad(A, pad_width) B = unpad(B, pad_width) C = unpad(C, pad_width) D = unpad(D, pad_width) num_features = np.prod(A.shape[1:]) mse_gan = ((A - B) ** 2).mean(axis=None) mse_rsm = ((A - D) ** 2).mean(axis=None) mse_prev = ((A - C) ** 2).mean(axis=None) self.all_mse_gan.append((mse_gan * num_features)) self.all_mse_rsm.append((mse_rsm * num_features)) self.all_mse_prev.append((mse_prev * num_features)) avg_mse_gan = np.average(self.all_mse_gan) avg_mse_rsm = np.average(self.all_mse_rsm) avg_mse_prev = np.average(self.all_mse_prev) summary = tf.Summary() summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_gan'), simple_value=mse_gan) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_rsm'), simple_value=mse_rsm) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_prev'), simple_value=mse_prev) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_gan'), 
simple_value=avg_mse_gan) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_rsm'), simple_value=avg_mse_rsm) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_prev'), simple_value=avg_mse_prev) self._writer.add_summary(summary, global_step) self._writer.flush() self.previous_frame = self._inputs_vals self._component.update_recurrent_and_feedback() self._component.update_statistics(batch_type, self._session) self._compute_end_state_mask(self._end_states_vals)
def _do_batch_after_hook(self, global_step, batch_type, fetched, feed_dict, data_subset): del feed_dict, fetched if ((batch_type == 'encoding') and (data_subset == 'test')): decoding = self.get_decoded_frame() if self._opts['prime']: self._compute_prime_end(self._states_vals) self._output_frames.append(decoding) self._groundtruth_frames.append(self._inputs_vals) if (self.previous_frame is None): self.previous_frame = np.zeros_like(self._inputs_vals) def denormalize(arr): eps = 1e-08 return ((arr - arr.min()) * ((1 / ((arr.max() - arr.min()) + eps)) * 255)).astype('uint8') def unpad(x, pad_width): slices = [] for c in pad_width: e = (None if (c[1] == 0) else (- c[1])) slices.append(slice(c[0], e)) return x[tuple(slices)] A = self._inputs_vals B = decoding C = self.previous_frame D = self._component.get_gan_inputs() padding_size = self._opts['frame_padding_size'] if (padding_size > 0): pad_h = ([padding_size] * 2) pad_w = ([padding_size] * 2) pad_width = [[0, 0], pad_h, pad_w, [0, 0]] A = unpad(A, pad_width) B = unpad(B, pad_width) C = unpad(C, pad_width) D = unpad(D, pad_width) num_features = np.prod(A.shape[1:]) mse_gan = ((A - B) ** 2).mean(axis=None) mse_rsm = ((A - D) ** 2).mean(axis=None) mse_prev = ((A - C) ** 2).mean(axis=None) self.all_mse_gan.append((mse_gan * num_features)) self.all_mse_rsm.append((mse_rsm * num_features)) self.all_mse_prev.append((mse_prev * num_features)) avg_mse_gan = np.average(self.all_mse_gan) avg_mse_rsm = np.average(self.all_mse_rsm) avg_mse_prev = np.average(self.all_mse_prev) summary = tf.Summary() summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_gan'), simple_value=mse_gan) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_rsm'), simple_value=mse_rsm) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/mse_prev'), simple_value=mse_prev) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_gan'), 
simple_value=avg_mse_gan) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_rsm'), simple_value=avg_mse_rsm) summary.value.add(tag=(((self._component.name + '/summaries/') + batch_type) + '/avg_mse_prev'), simple_value=avg_mse_prev) self._writer.add_summary(summary, global_step) self._writer.flush() self.previous_frame = self._inputs_vals self._component.update_recurrent_and_feedback() self._component.update_statistics(batch_type, self._session) self._compute_end_state_mask(self._end_states_vals)<|docstring|>Things to do after a batch is completed.<|endoftext|>
96dde5210c5403829caa9ec63cc27de814e4dadf8f28cc05bdc4ce8372ea3d1b
def frames_to_video(self, input_frames, filename=None): 'Convert given frames to video format, and export it to disk.' plt.switch_backend('agg') if (filename is None): filename = 'video' def chunks(l, n): 'Yield successive n-sized chunks from l.' for i in range(0, len(l), n): (yield l[i:(i + n)]) sequence_chunks = list(chunks(input_frames, self._sequence_length)) for (i, sequence) in enumerate(sequence_chunks): output_frames = [] sequence_filename = ((filename + '.') + str(i)) filepath = os.path.join(self._summary_dir, sequence_filename) for sample in sequence: cmap = None frame = sample[0] if (frame.shape[2] == 1): frame = frame.reshape(frame.shape[0], frame.shape[1]) cmap = 'gray' output_frames.append((frame, cmap)) fig1 = plt.figure(1) video_frames = [] for (j, (frame, cmap)) in enumerate(output_frames): title = 'Priming' if (j >= self._opts['prime_num_frames']): title = 'Self-looping' ttl = plt.text(4, 1.2, title, horizontalalignment='center', verticalalignment='bottom') video_frames.append([plt.imshow(frame, cmap=cmap, animated=True), ttl]) ani = animation.ArtistAnimation(fig1, video_frames, interval=50, blit=True, repeat_delay=1000) ani.save((filepath + '.mp4')) plt.close(fig1) num_frames = len(output_frames) fig2 = plt.figure(figsize=((num_frames + 1), 2)) gs1 = gridspec.GridSpec(1, (num_frames + 1)) gs1.update(wspace=0.025, hspace=0.05) plt.tight_layout() frame_idx = 0 for j in range((num_frames + 1)): if (j == self._opts['prime_num_frames']): (frame, cmap) = output_frames[0] frame = np.zeros_like(frame) else: (frame, cmap) = output_frames[frame_idx] frame_idx += 1 ax = plt.subplot(gs1[j]) ax.axis('off') ax.set_aspect('equal') ax.imshow(frame, cmap=cmap) fig2.savefig((filepath + '.png'), bbox_inches='tight') plt.close(fig2)
Convert given frames to video format, and export it to disk.
rsm/workflows/video_workflow.py
frames_to_video
Cerenaut/rsm
0
python
def frames_to_video(self, input_frames, filename=None): plt.switch_backend('agg') if (filename is None): filename = 'video' def chunks(l, n): 'Yield successive n-sized chunks from l.' for i in range(0, len(l), n): (yield l[i:(i + n)]) sequence_chunks = list(chunks(input_frames, self._sequence_length)) for (i, sequence) in enumerate(sequence_chunks): output_frames = [] sequence_filename = ((filename + '.') + str(i)) filepath = os.path.join(self._summary_dir, sequence_filename) for sample in sequence: cmap = None frame = sample[0] if (frame.shape[2] == 1): frame = frame.reshape(frame.shape[0], frame.shape[1]) cmap = 'gray' output_frames.append((frame, cmap)) fig1 = plt.figure(1) video_frames = [] for (j, (frame, cmap)) in enumerate(output_frames): title = 'Priming' if (j >= self._opts['prime_num_frames']): title = 'Self-looping' ttl = plt.text(4, 1.2, title, horizontalalignment='center', verticalalignment='bottom') video_frames.append([plt.imshow(frame, cmap=cmap, animated=True), ttl]) ani = animation.ArtistAnimation(fig1, video_frames, interval=50, blit=True, repeat_delay=1000) ani.save((filepath + '.mp4')) plt.close(fig1) num_frames = len(output_frames) fig2 = plt.figure(figsize=((num_frames + 1), 2)) gs1 = gridspec.GridSpec(1, (num_frames + 1)) gs1.update(wspace=0.025, hspace=0.05) plt.tight_layout() frame_idx = 0 for j in range((num_frames + 1)): if (j == self._opts['prime_num_frames']): (frame, cmap) = output_frames[0] frame = np.zeros_like(frame) else: (frame, cmap) = output_frames[frame_idx] frame_idx += 1 ax = plt.subplot(gs1[j]) ax.axis('off') ax.set_aspect('equal') ax.imshow(frame, cmap=cmap) fig2.savefig((filepath + '.png'), bbox_inches='tight') plt.close(fig2)
def frames_to_video(self, input_frames, filename=None): plt.switch_backend('agg') if (filename is None): filename = 'video' def chunks(l, n): 'Yield successive n-sized chunks from l.' for i in range(0, len(l), n): (yield l[i:(i + n)]) sequence_chunks = list(chunks(input_frames, self._sequence_length)) for (i, sequence) in enumerate(sequence_chunks): output_frames = [] sequence_filename = ((filename + '.') + str(i)) filepath = os.path.join(self._summary_dir, sequence_filename) for sample in sequence: cmap = None frame = sample[0] if (frame.shape[2] == 1): frame = frame.reshape(frame.shape[0], frame.shape[1]) cmap = 'gray' output_frames.append((frame, cmap)) fig1 = plt.figure(1) video_frames = [] for (j, (frame, cmap)) in enumerate(output_frames): title = 'Priming' if (j >= self._opts['prime_num_frames']): title = 'Self-looping' ttl = plt.text(4, 1.2, title, horizontalalignment='center', verticalalignment='bottom') video_frames.append([plt.imshow(frame, cmap=cmap, animated=True), ttl]) ani = animation.ArtistAnimation(fig1, video_frames, interval=50, blit=True, repeat_delay=1000) ani.save((filepath + '.mp4')) plt.close(fig1) num_frames = len(output_frames) fig2 = plt.figure(figsize=((num_frames + 1), 2)) gs1 = gridspec.GridSpec(1, (num_frames + 1)) gs1.update(wspace=0.025, hspace=0.05) plt.tight_layout() frame_idx = 0 for j in range((num_frames + 1)): if (j == self._opts['prime_num_frames']): (frame, cmap) = output_frames[0] frame = np.zeros_like(frame) else: (frame, cmap) = output_frames[frame_idx] frame_idx += 1 ax = plt.subplot(gs1[j]) ax.axis('off') ax.set_aspect('equal') ax.imshow(frame, cmap=cmap) fig2.savefig((filepath + '.png'), bbox_inches='tight') plt.close(fig2)<|docstring|>Convert given frames to video format, and export it to disk.<|endoftext|>
55e37ab9a5d092d4c1ecfeff4f8851834aa7d259ef18ca4a114b1de19904e69b
def helper_evaluate(self, batch): 'Evaluation method.' logging.info('Evaluate starting...') self._test_on_training_set = False if (self._test_on_training_set is True): testing_handle = self._session.run(self._dataset_iterators['training'].string_handle()) else: testing_handle = self._session.run(self._dataset_iterators['testing'].string_handle()) self._session.run(self._dataset_iterators['testing'].initializer) clear_before_test = self._opts['clear_before_test'] if clear_before_test: logging.info('Clearing memory history before testing set...') history_mask = np.zeros(self._hparams.batch_size) self._component.update_history(self._session, history_mask) num_testing_batches = self._opts['num_testing_batches'] for test_batch in range(0, num_testing_batches): do_print = True testing_progress_interval = self._opts['testing_progress_interval'] if (testing_progress_interval > 0): if ((test_batch % testing_progress_interval) != 0): do_print = False if do_print: global_step = test_batch logging.info('Test batch %d of %d, global step: %d', global_step, num_testing_batches, batch) self.testing(testing_handle, test_batch) export_videos = True if export_videos: if self._output_frames: self.frames_to_video(self._output_frames, filename='output') if self._groundtruth_frames: self.frames_to_video(self._groundtruth_frames, filename='groundtruth') if (self._output_frames and self._groundtruth_frames): stacked_frames = np.concatenate([self._output_frames, np.zeros_like(self._output_frames), self._groundtruth_frames], axis=4) self.frames_to_video(stacked_frames, filename='output_groundtruth')
Evaluation method.
rsm/workflows/video_workflow.py
helper_evaluate
Cerenaut/rsm
0
python
def helper_evaluate(self, batch): logging.info('Evaluate starting...') self._test_on_training_set = False if (self._test_on_training_set is True): testing_handle = self._session.run(self._dataset_iterators['training'].string_handle()) else: testing_handle = self._session.run(self._dataset_iterators['testing'].string_handle()) self._session.run(self._dataset_iterators['testing'].initializer) clear_before_test = self._opts['clear_before_test'] if clear_before_test: logging.info('Clearing memory history before testing set...') history_mask = np.zeros(self._hparams.batch_size) self._component.update_history(self._session, history_mask) num_testing_batches = self._opts['num_testing_batches'] for test_batch in range(0, num_testing_batches): do_print = True testing_progress_interval = self._opts['testing_progress_interval'] if (testing_progress_interval > 0): if ((test_batch % testing_progress_interval) != 0): do_print = False if do_print: global_step = test_batch logging.info('Test batch %d of %d, global step: %d', global_step, num_testing_batches, batch) self.testing(testing_handle, test_batch) export_videos = True if export_videos: if self._output_frames: self.frames_to_video(self._output_frames, filename='output') if self._groundtruth_frames: self.frames_to_video(self._groundtruth_frames, filename='groundtruth') if (self._output_frames and self._groundtruth_frames): stacked_frames = np.concatenate([self._output_frames, np.zeros_like(self._output_frames), self._groundtruth_frames], axis=4) self.frames_to_video(stacked_frames, filename='output_groundtruth')
def helper_evaluate(self, batch): logging.info('Evaluate starting...') self._test_on_training_set = False if (self._test_on_training_set is True): testing_handle = self._session.run(self._dataset_iterators['training'].string_handle()) else: testing_handle = self._session.run(self._dataset_iterators['testing'].string_handle()) self._session.run(self._dataset_iterators['testing'].initializer) clear_before_test = self._opts['clear_before_test'] if clear_before_test: logging.info('Clearing memory history before testing set...') history_mask = np.zeros(self._hparams.batch_size) self._component.update_history(self._session, history_mask) num_testing_batches = self._opts['num_testing_batches'] for test_batch in range(0, num_testing_batches): do_print = True testing_progress_interval = self._opts['testing_progress_interval'] if (testing_progress_interval > 0): if ((test_batch % testing_progress_interval) != 0): do_print = False if do_print: global_step = test_batch logging.info('Test batch %d of %d, global step: %d', global_step, num_testing_batches, batch) self.testing(testing_handle, test_batch) export_videos = True if export_videos: if self._output_frames: self.frames_to_video(self._output_frames, filename='output') if self._groundtruth_frames: self.frames_to_video(self._groundtruth_frames, filename='groundtruth') if (self._output_frames and self._groundtruth_frames): stacked_frames = np.concatenate([self._output_frames, np.zeros_like(self._output_frames), self._groundtruth_frames], axis=4) self.frames_to_video(stacked_frames, filename='output_groundtruth')<|docstring|>Evaluation method.<|endoftext|>
f508c3e71ab5a335bad88831d1cd2254ba50f46914b3ab09e497f79307e5be27
def chunks(l, n): 'Yield successive n-sized chunks from l.' for i in range(0, len(l), n): (yield l[i:(i + n)])
Yield successive n-sized chunks from l.
rsm/workflows/video_workflow.py
chunks
Cerenaut/rsm
0
python
def chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])
def chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])<|docstring|>Yield successive n-sized chunks from l.<|endoftext|>
6f21eec0bbed1c66b72639d2bf05ef9328a4ca67386ca543c792547e14d2823c
@e.workflow async def check(*vals: int) -> list[bool]: 'Check for prime numbers in a list of values' res = [] futures = [defer(is_prime, x) for x in vals] while futures: elt = (await futures.pop(0)) res.append(elt) del elt return res
Check for prime numbers in a list of values
tests/test_local_scheduler.py
check
till-varoquaux/otf
0
python
@e.workflow async def check(*vals: int) -> list[bool]: res = [] futures = [defer(is_prime, x) for x in vals] while futures: elt = (await futures.pop(0)) res.append(elt) del elt return res
@e.workflow async def check(*vals: int) -> list[bool]: res = [] futures = [defer(is_prime, x) for x in vals] while futures: elt = (await futures.pop(0)) res.append(elt) del elt return res<|docstring|>Check for prime numbers in a list of values<|endoftext|>
9c6058ec3bf0f8ab5a43f0301f17cc2d34386fb07e61df566d42a6ffb9ac4d45
def new_action(parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True): 'Create a new action and assign callbacks, shortcuts, etc.' a = QAction(text, parent) if (icon is not None): a.setIcon(new_icon(icon)) if (shortcut is not None): if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if (tip is not None): a.setToolTip(tip) a.setStatusTip(tip) if (slot is not None): a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) return a
Create a new action and assign callbacks, shortcuts, etc.
libs/utils.py
new_action
ahe7272/lableImg_auto_detection
17,641
python
def new_action(parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True): a = QAction(text, parent) if (icon is not None): a.setIcon(new_icon(icon)) if (shortcut is not None): if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if (tip is not None): a.setToolTip(tip) a.setStatusTip(tip) if (slot is not None): a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) return a
def new_action(parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True): a = QAction(text, parent) if (icon is not None): a.setIcon(new_icon(icon)) if (shortcut is not None): if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if (tip is not None): a.setToolTip(tip) a.setStatusTip(tip) if (slot is not None): a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) return a<|docstring|>Create a new action and assign callbacks, shortcuts, etc.<|endoftext|>
05f9c96a6702933b7e42ec723e510ca60733f9608fbd893c4e815a557eda7e83
def have_qstring(): 'p3/qt5 get rid of QString wrapper as py3 has native unicode str type' return (not ((sys.version_info.major >= 3) or QT_VERSION_STR.startswith('5.')))
p3/qt5 get rid of QString wrapper as py3 has native unicode str type
libs/utils.py
have_qstring
ahe7272/lableImg_auto_detection
17,641
python
def have_qstring(): return (not ((sys.version_info.major >= 3) or QT_VERSION_STR.startswith('5.')))
def have_qstring(): return (not ((sys.version_info.major >= 3) or QT_VERSION_STR.startswith('5.')))<|docstring|>p3/qt5 get rid of QString wrapper as py3 has native unicode str type<|endoftext|>
8d4dedfb46449da1587c976f76ab9b04173ebb9340aa2d7a7b7b03024601d145
def natural_sort(list, key=(lambda s: s)): '\n Sort the list into natural alphanumeric order.\n ' def get_alphanum_key_func(key): convert = (lambda text: (int(text) if text.isdigit() else text)) return (lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]) sort_key = get_alphanum_key_func(key) list.sort(key=sort_key)
Sort the list into natural alphanumeric order.
libs/utils.py
natural_sort
ahe7272/lableImg_auto_detection
17,641
python
def natural_sort(list, key=(lambda s: s)): '\n \n ' def get_alphanum_key_func(key): convert = (lambda text: (int(text) if text.isdigit() else text)) return (lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]) sort_key = get_alphanum_key_func(key) list.sort(key=sort_key)
def natural_sort(list, key=(lambda s: s)): '\n \n ' def get_alphanum_key_func(key): convert = (lambda text: (int(text) if text.isdigit() else text)) return (lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]) sort_key = get_alphanum_key_func(key) list.sort(key=sort_key)<|docstring|>Sort the list into natural alphanumeric order.<|endoftext|>
c60640ae3b67539414cdc56eff2b4ca4f08d78781dbcaf6ae338d94df904363e
def get_chart_data(self, date_from, date_to, region, tz): '\n Arguments\n ---------\n date_from (string) : YYYY-MM-DD\n date_to (string) : YYYY-MM-DD\n region (int) : region number as specified by Elia\n tz (string) : timezone data will be converted to (pytz format)\n\n Returns\n -------\n data (defaultdict) : {datetime (timezone aware) : value}\n ' if self.verbose: print('Getting prediction data... ', end='') method = 'GetChartDataForZoneXml' parameters = ((((('dateFrom=' + date_from) + '&dateTo=') + date_to) + '&sourceId=') + str(region)) url = (((self.root + method) + '?') + parameters) response = requests.get(url, verify=False) json_data = xmltodict.parse(response.text) time = [] data = defaultdict(list) for entry in json_data['SolarForecastingChartDataForZone']['SolarForecastingChartDataForZoneItems']['SolarForecastingChartDataForZoneItem']: time_data = entry['StartsOn']['a:DateTime'] format = '%Y-%m-%dT%H:%M:%SZ' time_datetime = datetime.datetime.strptime(time_data, format) time_datetime_utc = pytz.utc.localize(time_datetime) local_tz = pytz.timezone(tz) time_datetime_bru = time_datetime_utc.astimezone(local_tz) data['time'].append(time_datetime_bru) data['MostRecentForecast'].append(float(entry['MostRecentForecast'])) data['MonitoredCapacity'].append(float(entry['MonitoredCapacity'])) if self.info: df = pd.DataFrame(data) df.set_index('time', inplace=True) print((('\n' + BLUE) + 'Elia Data')) print(df.to_string()) if self.verbose: print((GREEN + 'Done')) return data
Arguments --------- date_from (string) : YYYY-MM-DD date_to (string) : YYYY-MM-DD region (int) : region number as specified by Elia tz (string) : timezone data will be converted to (pytz format) Returns ------- data (defaultdict) : {datetime (timezone aware) : value}
elia.py
get_chart_data
tomsaenen/solar_predictor
0
python
def get_chart_data(self, date_from, date_to, region, tz): '\n Arguments\n ---------\n date_from (string) : YYYY-MM-DD\n date_to (string) : YYYY-MM-DD\n region (int) : region number as specified by Elia\n tz (string) : timezone data will be converted to (pytz format)\n\n Returns\n -------\n data (defaultdict) : {datetime (timezone aware) : value}\n ' if self.verbose: print('Getting prediction data... ', end=) method = 'GetChartDataForZoneXml' parameters = ((((('dateFrom=' + date_from) + '&dateTo=') + date_to) + '&sourceId=') + str(region)) url = (((self.root + method) + '?') + parameters) response = requests.get(url, verify=False) json_data = xmltodict.parse(response.text) time = [] data = defaultdict(list) for entry in json_data['SolarForecastingChartDataForZone']['SolarForecastingChartDataForZoneItems']['SolarForecastingChartDataForZoneItem']: time_data = entry['StartsOn']['a:DateTime'] format = '%Y-%m-%dT%H:%M:%SZ' time_datetime = datetime.datetime.strptime(time_data, format) time_datetime_utc = pytz.utc.localize(time_datetime) local_tz = pytz.timezone(tz) time_datetime_bru = time_datetime_utc.astimezone(local_tz) data['time'].append(time_datetime_bru) data['MostRecentForecast'].append(float(entry['MostRecentForecast'])) data['MonitoredCapacity'].append(float(entry['MonitoredCapacity'])) if self.info: df = pd.DataFrame(data) df.set_index('time', inplace=True) print((('\n' + BLUE) + 'Elia Data')) print(df.to_string()) if self.verbose: print((GREEN + 'Done')) return data
def get_chart_data(self, date_from, date_to, region, tz): '\n Arguments\n ---------\n date_from (string) : YYYY-MM-DD\n date_to (string) : YYYY-MM-DD\n region (int) : region number as specified by Elia\n tz (string) : timezone data will be converted to (pytz format)\n\n Returns\n -------\n data (defaultdict) : {datetime (timezone aware) : value}\n ' if self.verbose: print('Getting prediction data... ', end=) method = 'GetChartDataForZoneXml' parameters = ((((('dateFrom=' + date_from) + '&dateTo=') + date_to) + '&sourceId=') + str(region)) url = (((self.root + method) + '?') + parameters) response = requests.get(url, verify=False) json_data = xmltodict.parse(response.text) time = [] data = defaultdict(list) for entry in json_data['SolarForecastingChartDataForZone']['SolarForecastingChartDataForZoneItems']['SolarForecastingChartDataForZoneItem']: time_data = entry['StartsOn']['a:DateTime'] format = '%Y-%m-%dT%H:%M:%SZ' time_datetime = datetime.datetime.strptime(time_data, format) time_datetime_utc = pytz.utc.localize(time_datetime) local_tz = pytz.timezone(tz) time_datetime_bru = time_datetime_utc.astimezone(local_tz) data['time'].append(time_datetime_bru) data['MostRecentForecast'].append(float(entry['MostRecentForecast'])) data['MonitoredCapacity'].append(float(entry['MonitoredCapacity'])) if self.info: df = pd.DataFrame(data) df.set_index('time', inplace=True) print((('\n' + BLUE) + 'Elia Data')) print(df.to_string()) if self.verbose: print((GREEN + 'Done')) return data<|docstring|>Arguments --------- date_from (string) : YYYY-MM-DD date_to (string) : YYYY-MM-DD region (int) : region number as specified by Elia tz (string) : timezone data will be converted to (pytz format) Returns ------- data (defaultdict) : {datetime (timezone aware) : value}<|endoftext|>
cc0ffdddb836c2c43c121af2093b96f4d5b0bd343af5457c574cc00e1955b1bf
def __init__(self, env, pixels_only=True, render_kwargs=None, pixel_keys=('pixels',), boxify=True): "Initializes a new pixel Wrapper.\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n boxify: (fastrl change) Default to True where instead of a Dict, return a Box\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n " super(PixelObservationWrapper, self).__init__(env) if (render_kwargs is None): render_kwargs = {} for key in pixel_keys: render_kwargs.setdefault(key, {}) render_mode = render_kwargs[key].pop('mode', 'rgb_array') assert (render_mode == 'rgb_array'), render_mode render_kwargs[key]['mode'] = 'rgb_array' wrapped_observation_space = env.observation_space if isinstance(wrapped_observation_space, spaces.Box): self._observation_is_dict = False invalid_keys = set([STATE_KEY]) elif isinstance(wrapped_observation_space, (spaces.Dict, collections.MutableMapping)): self._observation_is_dict = True invalid_keys = set(wrapped_observation_space.spaces.keys()) else: raise ValueError('Unsupported observation space structure.') if (not pixels_only): overlapping_keys = (set(pixel_keys) & set(invalid_keys)) if overlapping_keys: raise ValueError('Duplicate or reserved pixel keys {!r}.'.format(overlapping_keys)) if boxify: raise ValueError('boxify cannot be True of pixels_only 
is False.') if pixels_only: self.observation_space = spaces.Dict() elif self._observation_is_dict: self.observation_space = copy.deepcopy(wrapped_observation_space) else: self.observation_space = spaces.Dict() self.observation_space.spaces[STATE_KEY] = wrapped_observation_space pixels_spaces = {} for pixel_key in pixel_keys: pixels = self.env.render(**render_kwargs[pixel_key]) if np.issubdtype(pixels.dtype, np.integer): (low, high) = (0, 255) elif np.issubdtype(pixels.dtype, np.float): (low, high) = ((- float('inf')), float('inf')) else: raise TypeError(pixels.dtype) pixels_space = spaces.Box(shape=pixels.shape, low=low, high=high, dtype=pixels.dtype) if boxify: self.observation_space = pixels_space break pixels_spaces[pixel_key] = pixels_space if (not boxify): self.observation_space.spaces.update(pixels_spaces) self._env = env self._pixels_only = pixels_only self._render_kwargs = render_kwargs self._pixel_keys = pixel_keys self._boxify = boxify
Initializes a new pixel Wrapper. Args: env: The environment to wrap. pixels_only: If `True` (default), the original observation returned by the wrapped environment will be discarded, and a dictionary observation will only include pixels. If `False`, the observation dictionary will contain both the original observations and the pixel observations. render_kwargs: Optional `dict` containing keyword arguments passed to the `self.render` method. pixel_keys: Optional custom string specifying the pixel observation's key in the `OrderedDict` of observations. Defaults to 'pixels'. boxify: (fastrl change) Default to True where instead of a Dict, return a Box Raises: ValueError: If `env`'s observation spec is not compatible with the wrapper. Supported formats are a single array, or a dict of arrays. ValueError: If `env`'s observation already contains any of the specified `pixel_keys`.
fastrl/wrappers.py
__init__
tyoc213-contrib/fast-reinforcement-learning-2
0
python
def __init__(self, env, pixels_only=True, render_kwargs=None, pixel_keys=('pixels',), boxify=True): "Initializes a new pixel Wrapper.\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n boxify: (fastrl change) Default to True where instead of a Dict, return a Box\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n " super(PixelObservationWrapper, self).__init__(env) if (render_kwargs is None): render_kwargs = {} for key in pixel_keys: render_kwargs.setdefault(key, {}) render_mode = render_kwargs[key].pop('mode', 'rgb_array') assert (render_mode == 'rgb_array'), render_mode render_kwargs[key]['mode'] = 'rgb_array' wrapped_observation_space = env.observation_space if isinstance(wrapped_observation_space, spaces.Box): self._observation_is_dict = False invalid_keys = set([STATE_KEY]) elif isinstance(wrapped_observation_space, (spaces.Dict, collections.MutableMapping)): self._observation_is_dict = True invalid_keys = set(wrapped_observation_space.spaces.keys()) else: raise ValueError('Unsupported observation space structure.') if (not pixels_only): overlapping_keys = (set(pixel_keys) & set(invalid_keys)) if overlapping_keys: raise ValueError('Duplicate or reserved pixel keys {!r}.'.format(overlapping_keys)) if boxify: raise ValueError('boxify cannot be True of pixels_only 
is False.') if pixels_only: self.observation_space = spaces.Dict() elif self._observation_is_dict: self.observation_space = copy.deepcopy(wrapped_observation_space) else: self.observation_space = spaces.Dict() self.observation_space.spaces[STATE_KEY] = wrapped_observation_space pixels_spaces = {} for pixel_key in pixel_keys: pixels = self.env.render(**render_kwargs[pixel_key]) if np.issubdtype(pixels.dtype, np.integer): (low, high) = (0, 255) elif np.issubdtype(pixels.dtype, np.float): (low, high) = ((- float('inf')), float('inf')) else: raise TypeError(pixels.dtype) pixels_space = spaces.Box(shape=pixels.shape, low=low, high=high, dtype=pixels.dtype) if boxify: self.observation_space = pixels_space break pixels_spaces[pixel_key] = pixels_space if (not boxify): self.observation_space.spaces.update(pixels_spaces) self._env = env self._pixels_only = pixels_only self._render_kwargs = render_kwargs self._pixel_keys = pixel_keys self._boxify = boxify
def __init__(self, env, pixels_only=True, render_kwargs=None, pixel_keys=('pixels',), boxify=True): "Initializes a new pixel Wrapper.\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n boxify: (fastrl change) Default to True where instead of a Dict, return a Box\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n " super(PixelObservationWrapper, self).__init__(env) if (render_kwargs is None): render_kwargs = {} for key in pixel_keys: render_kwargs.setdefault(key, {}) render_mode = render_kwargs[key].pop('mode', 'rgb_array') assert (render_mode == 'rgb_array'), render_mode render_kwargs[key]['mode'] = 'rgb_array' wrapped_observation_space = env.observation_space if isinstance(wrapped_observation_space, spaces.Box): self._observation_is_dict = False invalid_keys = set([STATE_KEY]) elif isinstance(wrapped_observation_space, (spaces.Dict, collections.MutableMapping)): self._observation_is_dict = True invalid_keys = set(wrapped_observation_space.spaces.keys()) else: raise ValueError('Unsupported observation space structure.') if (not pixels_only): overlapping_keys = (set(pixel_keys) & set(invalid_keys)) if overlapping_keys: raise ValueError('Duplicate or reserved pixel keys {!r}.'.format(overlapping_keys)) if boxify: raise ValueError('boxify cannot be True of pixels_only 
is False.') if pixels_only: self.observation_space = spaces.Dict() elif self._observation_is_dict: self.observation_space = copy.deepcopy(wrapped_observation_space) else: self.observation_space = spaces.Dict() self.observation_space.spaces[STATE_KEY] = wrapped_observation_space pixels_spaces = {} for pixel_key in pixel_keys: pixels = self.env.render(**render_kwargs[pixel_key]) if np.issubdtype(pixels.dtype, np.integer): (low, high) = (0, 255) elif np.issubdtype(pixels.dtype, np.float): (low, high) = ((- float('inf')), float('inf')) else: raise TypeError(pixels.dtype) pixels_space = spaces.Box(shape=pixels.shape, low=low, high=high, dtype=pixels.dtype) if boxify: self.observation_space = pixels_space break pixels_spaces[pixel_key] = pixels_space if (not boxify): self.observation_space.spaces.update(pixels_spaces) self._env = env self._pixels_only = pixels_only self._render_kwargs = render_kwargs self._pixel_keys = pixel_keys self._boxify = boxify<|docstring|>Initializes a new pixel Wrapper. Args: env: The environment to wrap. pixels_only: If `True` (default), the original observation returned by the wrapped environment will be discarded, and a dictionary observation will only include pixels. If `False`, the observation dictionary will contain both the original observations and the pixel observations. render_kwargs: Optional `dict` containing keyword arguments passed to the `self.render` method. pixel_keys: Optional custom string specifying the pixel observation's key in the `OrderedDict` of observations. Defaults to 'pixels'. boxify: (fastrl change) Default to True where instead of a Dict, return a Box Raises: ValueError: If `env`'s observation spec is not compatible with the wrapper. Supported formats are a single array, or a dict of arrays. ValueError: If `env`'s observation already contains any of the specified `pixel_keys`.<|endoftext|>
702f68b474c50ef74ae9086eb8c9dd4b57694c0025e52621c6efcaa18f192371
def bspline_grid(img, control_zooms_mm=DEFAULT_ZOOMS_MM): 'Create a :obj:`~nibabel.nifti1.Nifti1Image` embedding the location of control points.' if isinstance(img, (str, Path)): img = nb.load(img) im_zooms = np.array(img.header.get_zooms()) im_shape = np.array(img.shape[:3]) dir_cos = (img.affine[(:3, :3)] / im_zooms) bs_affine = np.eye(4) bs_affine[(:3, :3)] = (np.array(control_zooms_mm) * dir_cos) bs_zooms = nb.affines.voxel_sizes(bs_affine) im_extent = (im_zooms * (im_shape - 1)) bs_shape = ((im_extent // bs_zooms) + 3).astype(int) bs_affine[(:3, 3)] = (apply_affine(img.affine, (0.5 * (im_shape - 1))) - apply_affine(bs_affine, (0.5 * (bs_shape - 1)))) return img.__class__(np.zeros(bs_shape, dtype='float32'), bs_affine)
Create a :obj:`~nibabel.nifti1.Nifti1Image` embedding the location of control points.
sdcflows/interfaces/bspline.py
bspline_grid
nipreps/sdcflows
16
python
def bspline_grid(img, control_zooms_mm=DEFAULT_ZOOMS_MM): if isinstance(img, (str, Path)): img = nb.load(img) im_zooms = np.array(img.header.get_zooms()) im_shape = np.array(img.shape[:3]) dir_cos = (img.affine[(:3, :3)] / im_zooms) bs_affine = np.eye(4) bs_affine[(:3, :3)] = (np.array(control_zooms_mm) * dir_cos) bs_zooms = nb.affines.voxel_sizes(bs_affine) im_extent = (im_zooms * (im_shape - 1)) bs_shape = ((im_extent // bs_zooms) + 3).astype(int) bs_affine[(:3, 3)] = (apply_affine(img.affine, (0.5 * (im_shape - 1))) - apply_affine(bs_affine, (0.5 * (bs_shape - 1)))) return img.__class__(np.zeros(bs_shape, dtype='float32'), bs_affine)
def bspline_grid(img, control_zooms_mm=DEFAULT_ZOOMS_MM): if isinstance(img, (str, Path)): img = nb.load(img) im_zooms = np.array(img.header.get_zooms()) im_shape = np.array(img.shape[:3]) dir_cos = (img.affine[(:3, :3)] / im_zooms) bs_affine = np.eye(4) bs_affine[(:3, :3)] = (np.array(control_zooms_mm) * dir_cos) bs_zooms = nb.affines.voxel_sizes(bs_affine) im_extent = (im_zooms * (im_shape - 1)) bs_shape = ((im_extent // bs_zooms) + 3).astype(int) bs_affine[(:3, 3)] = (apply_affine(img.affine, (0.5 * (im_shape - 1))) - apply_affine(bs_affine, (0.5 * (bs_shape - 1)))) return img.__class__(np.zeros(bs_shape, dtype='float32'), bs_affine)<|docstring|>Create a :obj:`~nibabel.nifti1.Nifti1Image` embedding the location of control points.<|endoftext|>
1b04f918a192273312b8f5e2bb3ba36c0a61d9930f8efef777b02ee371caeae7
def _fix_topup_fieldcoeff(in_coeff, fmap_ref, refpe_reversed=False, out_file=None): 'Read in a coefficients file generated by TOPUP and fix x-form headers.' from pathlib import Path import numpy as np import nibabel as nb if (out_file is None): out_file = Path('coefficients.nii.gz').absolute() coeffnii = nb.load(in_coeff) refnii = nb.load(fmap_ref) coeff_shape = np.array(coeffnii.shape[:3]) factors = np.array(coeffnii.header.get_zooms()[:3]) ref_shape = np.array(refnii.shape[:3]) exp_shape = ((ref_shape // factors) + (3 * (factors > 1))) if (not np.all((coeff_shape == exp_shape))): raise ValueError(f'Shape of coefficients file {coeff_shape} does not meet the expected shape {exp_shape} (toupup factors are {factors}).') newaff = np.eye(4) newaff[(:3, :3)] = (refnii.affine[(:3, :3)] * factors) c_ref = nb.affines.apply_affine(refnii.affine, (0.5 * (ref_shape - 1))) c_coeff = nb.affines.apply_affine(newaff, (0.5 * (coeff_shape - 1))) newaff[(:3, 3)] = (c_ref - c_coeff) header = coeffnii.header.copy() coeffnii.header.set_qform(coeffnii.header.get_qform(coded=False), code=0) coeffnii.header.set_sform(newaff, code=1) coeffnii.__class__(coeffnii.dataobj, newaff, header).to_filename(out_file) return out_file
Read in a coefficients file generated by TOPUP and fix x-form headers.
sdcflows/interfaces/bspline.py
_fix_topup_fieldcoeff
nipreps/sdcflows
16
python
def _fix_topup_fieldcoeff(in_coeff, fmap_ref, refpe_reversed=False, out_file=None): from pathlib import Path import numpy as np import nibabel as nb if (out_file is None): out_file = Path('coefficients.nii.gz').absolute() coeffnii = nb.load(in_coeff) refnii = nb.load(fmap_ref) coeff_shape = np.array(coeffnii.shape[:3]) factors = np.array(coeffnii.header.get_zooms()[:3]) ref_shape = np.array(refnii.shape[:3]) exp_shape = ((ref_shape // factors) + (3 * (factors > 1))) if (not np.all((coeff_shape == exp_shape))): raise ValueError(f'Shape of coefficients file {coeff_shape} does not meet the expected shape {exp_shape} (toupup factors are {factors}).') newaff = np.eye(4) newaff[(:3, :3)] = (refnii.affine[(:3, :3)] * factors) c_ref = nb.affines.apply_affine(refnii.affine, (0.5 * (ref_shape - 1))) c_coeff = nb.affines.apply_affine(newaff, (0.5 * (coeff_shape - 1))) newaff[(:3, 3)] = (c_ref - c_coeff) header = coeffnii.header.copy() coeffnii.header.set_qform(coeffnii.header.get_qform(coded=False), code=0) coeffnii.header.set_sform(newaff, code=1) coeffnii.__class__(coeffnii.dataobj, newaff, header).to_filename(out_file) return out_file
def _fix_topup_fieldcoeff(in_coeff, fmap_ref, refpe_reversed=False, out_file=None): from pathlib import Path import numpy as np import nibabel as nb if (out_file is None): out_file = Path('coefficients.nii.gz').absolute() coeffnii = nb.load(in_coeff) refnii = nb.load(fmap_ref) coeff_shape = np.array(coeffnii.shape[:3]) factors = np.array(coeffnii.header.get_zooms()[:3]) ref_shape = np.array(refnii.shape[:3]) exp_shape = ((ref_shape // factors) + (3 * (factors > 1))) if (not np.all((coeff_shape == exp_shape))): raise ValueError(f'Shape of coefficients file {coeff_shape} does not meet the expected shape {exp_shape} (toupup factors are {factors}).') newaff = np.eye(4) newaff[(:3, :3)] = (refnii.affine[(:3, :3)] * factors) c_ref = nb.affines.apply_affine(refnii.affine, (0.5 * (ref_shape - 1))) c_coeff = nb.affines.apply_affine(newaff, (0.5 * (coeff_shape - 1))) newaff[(:3, 3)] = (c_ref - c_coeff) header = coeffnii.header.copy() coeffnii.header.set_qform(coeffnii.header.get_qform(coded=False), code=0) coeffnii.header.set_sform(newaff, code=1) coeffnii.__class__(coeffnii.dataobj, newaff, header).to_filename(out_file) return out_file<|docstring|>Read in a coefficients file generated by TOPUP and fix x-form headers.<|endoftext|>
86977b2305d4156fa584659cec20f06701d10d50464616bbd7d075eeaec4164f
def _b0_resampler(data, coeffs, pe, ro, hmc_xfm=None, unwarp=None, newpath=None): 'Outsource the resampler into a separate callable function to allow parallelization.' from functools import partial filename = partial(fname_presuffix, newpath=newpath) retval = [filename(data, suffix=s) for s in ('_unwarped', '_xfm', '_field')] if (unwarp is None): from sdcflows.transform import B0FieldTransform unwarp = B0FieldTransform(coeffs=[nb.load(cname) for cname in coeffs]) if (hmc_xfm is not None): from nitransforms.linear import Affine from nitransforms.io.itk import ITKLinearTransform as XFMLoader unwarp.xfm = Affine(XFMLoader.from_filename(hmc_xfm).to_ras()) if unwarp.fit(data): unwarp.shifts.to_filename(retval[2]) else: retval[2] = None unwarp.apply(nb.load(data), ro_time=ro, pe_dir=pe).to_filename(retval[0]) unwarp.to_displacements(ro_time=ro, pe_dir=pe).to_filename(retval[1]) return retval
Outsource the resampler into a separate callable function to allow parallelization.
sdcflows/interfaces/bspline.py
_b0_resampler
nipreps/sdcflows
16
python
def _b0_resampler(data, coeffs, pe, ro, hmc_xfm=None, unwarp=None, newpath=None): from functools import partial filename = partial(fname_presuffix, newpath=newpath) retval = [filename(data, suffix=s) for s in ('_unwarped', '_xfm', '_field')] if (unwarp is None): from sdcflows.transform import B0FieldTransform unwarp = B0FieldTransform(coeffs=[nb.load(cname) for cname in coeffs]) if (hmc_xfm is not None): from nitransforms.linear import Affine from nitransforms.io.itk import ITKLinearTransform as XFMLoader unwarp.xfm = Affine(XFMLoader.from_filename(hmc_xfm).to_ras()) if unwarp.fit(data): unwarp.shifts.to_filename(retval[2]) else: retval[2] = None unwarp.apply(nb.load(data), ro_time=ro, pe_dir=pe).to_filename(retval[0]) unwarp.to_displacements(ro_time=ro, pe_dir=pe).to_filename(retval[1]) return retval
def _b0_resampler(data, coeffs, pe, ro, hmc_xfm=None, unwarp=None, newpath=None): from functools import partial filename = partial(fname_presuffix, newpath=newpath) retval = [filename(data, suffix=s) for s in ('_unwarped', '_xfm', '_field')] if (unwarp is None): from sdcflows.transform import B0FieldTransform unwarp = B0FieldTransform(coeffs=[nb.load(cname) for cname in coeffs]) if (hmc_xfm is not None): from nitransforms.linear import Affine from nitransforms.io.itk import ITKLinearTransform as XFMLoader unwarp.xfm = Affine(XFMLoader.from_filename(hmc_xfm).to_ras()) if unwarp.fit(data): unwarp.shifts.to_filename(retval[2]) else: retval[2] = None unwarp.apply(nb.load(data), ro_time=ro, pe_dir=pe).to_filename(retval[0]) unwarp.to_displacements(ro_time=ro, pe_dir=pe).to_filename(retval[1]) return retval<|docstring|>Outsource the resampler into a separate callable function to allow parallelization.<|endoftext|>
2e98b91cccd2e126628a36cbec5df920afb953a491a4eb13465ec8d785b69cb6
def threads_to_run(emulator: Emulator, apk: Apk, fuzz: Fuzzer) -> List: '\n runs the threads after checking permissions.\n ' threads = [] emulator_name = ('emulator-' + emulator.port) if (('android.permission.INTERNET' in apk.permissions) or ('android.permission.ACCESS_NETWORK_STATE' in apk.permissions)): if (config.GUIDED_APPROACH == 1): network_delay_interval_events = interval_event.read_interval_event_from_file('test/networkdelay.txt') else: network_delay_interval_events = fuzz.generate_step_interval_event(NetworkDelay) threads.append(Thread(target=fuzz.random_network_delay, args=(config.LOCALHOST, emulator, network_delay_interval_events))) if (config.GUIDED_APPROACH == 1): network_speed_interval_event = interval_event.read_interval_event_from_file('test/networkstatus.txt') else: network_speed_interval_event = fuzz.generate_step_interval_event(NetworkStatus) threads.append(Thread(target=fuzz.random_network_speed, args=(config.LOCALHOST, emulator, network_speed_interval_event))) airplane_mode_interval_events = fuzz.generate_step_interval_event(Airplane) threads.append(Thread(target=fuzz.random_airplane_mode_call, args=(emulator_name, airplane_mode_interval_events))) if ('android.permission.ACCESS_NETWORK_STATE' in apk.permissions): if (config.GUIDED_APPROACH == 1): gsm_profile_interval_events = interval_event.read_interval_event_from_file('test/gsmprofile.txt') else: gsm_profile_interval_events = fuzz.generate_step_interval_event(GsmProfile) threads.append(Thread(target=fuzz.random_gsm_profile, args=(config.LOCALHOST, emulator, config.UNIFORM_INTERVAL, gsm_profile_interval_events))) return threads
runs the threads after checking permissions.
monkey.py
threads_to_run
mehedi-iitdu/MobiCoMonkey
2
python
def threads_to_run(emulator: Emulator, apk: Apk, fuzz: Fuzzer) -> List: '\n \n ' threads = [] emulator_name = ('emulator-' + emulator.port) if (('android.permission.INTERNET' in apk.permissions) or ('android.permission.ACCESS_NETWORK_STATE' in apk.permissions)): if (config.GUIDED_APPROACH == 1): network_delay_interval_events = interval_event.read_interval_event_from_file('test/networkdelay.txt') else: network_delay_interval_events = fuzz.generate_step_interval_event(NetworkDelay) threads.append(Thread(target=fuzz.random_network_delay, args=(config.LOCALHOST, emulator, network_delay_interval_events))) if (config.GUIDED_APPROACH == 1): network_speed_interval_event = interval_event.read_interval_event_from_file('test/networkstatus.txt') else: network_speed_interval_event = fuzz.generate_step_interval_event(NetworkStatus) threads.append(Thread(target=fuzz.random_network_speed, args=(config.LOCALHOST, emulator, network_speed_interval_event))) airplane_mode_interval_events = fuzz.generate_step_interval_event(Airplane) threads.append(Thread(target=fuzz.random_airplane_mode_call, args=(emulator_name, airplane_mode_interval_events))) if ('android.permission.ACCESS_NETWORK_STATE' in apk.permissions): if (config.GUIDED_APPROACH == 1): gsm_profile_interval_events = interval_event.read_interval_event_from_file('test/gsmprofile.txt') else: gsm_profile_interval_events = fuzz.generate_step_interval_event(GsmProfile) threads.append(Thread(target=fuzz.random_gsm_profile, args=(config.LOCALHOST, emulator, config.UNIFORM_INTERVAL, gsm_profile_interval_events))) return threads
def threads_to_run(emulator: Emulator, apk: Apk, fuzz: Fuzzer) -> List: '\n \n ' threads = [] emulator_name = ('emulator-' + emulator.port) if (('android.permission.INTERNET' in apk.permissions) or ('android.permission.ACCESS_NETWORK_STATE' in apk.permissions)): if (config.GUIDED_APPROACH == 1): network_delay_interval_events = interval_event.read_interval_event_from_file('test/networkdelay.txt') else: network_delay_interval_events = fuzz.generate_step_interval_event(NetworkDelay) threads.append(Thread(target=fuzz.random_network_delay, args=(config.LOCALHOST, emulator, network_delay_interval_events))) if (config.GUIDED_APPROACH == 1): network_speed_interval_event = interval_event.read_interval_event_from_file('test/networkstatus.txt') else: network_speed_interval_event = fuzz.generate_step_interval_event(NetworkStatus) threads.append(Thread(target=fuzz.random_network_speed, args=(config.LOCALHOST, emulator, network_speed_interval_event))) airplane_mode_interval_events = fuzz.generate_step_interval_event(Airplane) threads.append(Thread(target=fuzz.random_airplane_mode_call, args=(emulator_name, airplane_mode_interval_events))) if ('android.permission.ACCESS_NETWORK_STATE' in apk.permissions): if (config.GUIDED_APPROACH == 1): gsm_profile_interval_events = interval_event.read_interval_event_from_file('test/gsmprofile.txt') else: gsm_profile_interval_events = fuzz.generate_step_interval_event(GsmProfile) threads.append(Thread(target=fuzz.random_gsm_profile, args=(config.LOCALHOST, emulator, config.UNIFORM_INTERVAL, gsm_profile_interval_events))) return threads<|docstring|>runs the threads after checking permissions.<|endoftext|>
8a3eabb44e8155d3ba96f05cef6992cd7cade315d250aada29eee69bf5c45feb
def run(emulator: Emulator, apk: Apk, emulator_name: str, emulator_port: int, seed: int, log: Logcat):
    """Create the fuzzer for this run and execute all permission-gated threads.

    :param emulator: emulator under test
    :param apk: APK under test
    :param emulator_name: unused here; kept for interface compatibility
    :param emulator_port: unused here; kept for interface compatibility
    :param seed: RNG seed so a fuzzing run can be reproduced
    :param log: logcat handle whose file feeds the ``FatalWatcher``
    """
    fuzz = Fuzzer(config.MINIMUM_INTERVAL, config.MAXIMUM_INTERVAL, seed, config.DURATION, FatalWatcher(log.file_address))
    threads = threads_to_run(emulator, apk, fuzz)
    # Plain loops instead of list comprehensions: start()/join() are called
    # purely for their side effects, and the comprehensions built throwaway
    # lists of None.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
runs things
monkey.py
run
mehedi-iitdu/MobiCoMonkey
2
python
def run(emulator: Emulator, apk: Apk, emulator_name: str, emulator_port: int, seed: int, log: Logcat): '\n \n ' fuzz = Fuzzer(config.MINIMUM_INTERVAL, config.MAXIMUM_INTERVAL, seed, config.DURATION, FatalWatcher(log.file_address)) threads = threads_to_run(emulator, apk, fuzz) [thread.start() for thread in threads] [thread.join() for thread in threads]
def run(emulator: Emulator, apk: Apk, emulator_name: str, emulator_port: int, seed: int, log: Logcat): '\n \n ' fuzz = Fuzzer(config.MINIMUM_INTERVAL, config.MAXIMUM_INTERVAL, seed, config.DURATION, FatalWatcher(log.file_address)) threads = threads_to_run(emulator, apk, fuzz) [thread.start() for thread in threads] [thread.join() for thread in threads]<|docstring|>runs things<|endoftext|>
f0a708297891c1a75e56cab37ad8fefabfe6c96314dc63b6883cdd9f84f5356a
def check_auth(username, password):
    """Check whether a username/password combination is valid.

    The configured password is stored as a SHA-512 hex digest; the supplied
    password is hashed before comparison. The digest comparison uses
    ``hmac.compare_digest`` so it runs in constant time and cannot leak the
    stored digest through timing differences.

    NOTE(review): ``sha512`` requires ``bytes`` on Python 3 — callers
    presumably pass an already-encoded password (this reads like Python 2
    era code); confirm at the call site.

    :param username: username supplied by the client
    :param password: password supplied by the client
    :return: True iff both values match the app configuration
    """
    import hmac  # local import keeps the block self-contained

    if username != app.config['USERNAME']:
        return False
    supplied_digest = sha512(password).hexdigest()
    return hmac.compare_digest(supplied_digest, app.config['PASSWORD'])
This function is called to check if a username / password combination is valid.
packages/w3af/w3af/core/ui/api/utils/auth.py
check_auth
ZooAtmosphereGroup/HelloPackages
3
python
def check_auth(username, password): 'This function is called to check if a username /\n password combination is valid.\n ' return ((username == app.config['USERNAME']) and (sha512(password).hexdigest() == app.config['PASSWORD']))
def check_auth(username, password): 'This function is called to check if a username /\n password combination is valid.\n ' return ((username == app.config['USERNAME']) and (sha512(password).hexdigest() == app.config['PASSWORD']))<|docstring|>This function is called to check if a username / password combination is valid.<|endoftext|>
9f311ddfa4df93d62cfb64768b9ae74c0534ed7a7367550af9bda3b1f013eba8
def filter_events(record_data):
    """Filter events relevant for Epsagon.

    Keeps only the log events whose message matches ``REGEX``, tags the
    record with an Epsagon subscription filter, and re-packs it as a
    gzipped Kinesis record.

    :param record_data: decoded CloudWatch payload holding the events
    :return: dict with ``Data``/``PartitionKey`` keys, or None when the
        payload is not a data message or nothing matched
    """
    if record_data['messageType'] != 'DATA_MESSAGE':
        return None
    all_events = record_data['logEvents']
    stream_key = record_data['logStream']
    record_data['subscriptionFilters'] = [f"Epsagon#{record_data['owner']}#{CURRENT_REGION}"]
    epsagon_debug(f'Found total of {len(all_events)} events')
    epsagon_debug(f'Original events: {all_events}')
    matching = [evt for evt in all_events if REGEX.match(evt['message']) is not None]
    epsagon_debug(f'Filtered total of {len(matching)} events.')
    if not matching:
        return None
    record_data['logEvents'] = matching
    return {
        'Data': gzip.compress(json.dumps(record_data).encode('ascii')),
        'PartitionKey': stream_key,
    }
Filter events relevant for Epsagon. :param record_data: Record data that holds the vents. :return: dict / None.
cloudwatch_log_trigger/cloudwatch_log_sender.py
filter_events
epsagon/epsagon-logs-sender
1
python
def filter_events(record_data): '\n Filter events relevant for Epsagon.\n :param record_data: Record data that holds the vents.\n :return: dict / None.\n ' record = None if (record_data['messageType'] == 'DATA_MESSAGE'): original_events = record_data['logEvents'] partition_key = record_data['logStream'] record_data['subscriptionFilters'] = [f"Epsagon#{record_data['owner']}#{CURRENT_REGION}"] events = [] epsagon_debug(f'Found total of {len(original_events)} events') epsagon_debug(f'Original events: {original_events}') for event in original_events: if (REGEX.match(event['message']) is not None): events.append(event) epsagon_debug(f'Filtered total of {len(events)} events.') if events: record_data['logEvents'] = events record = {'Data': gzip.compress(json.dumps(record_data).encode('ascii')), 'PartitionKey': partition_key} return record
def filter_events(record_data): '\n Filter events relevant for Epsagon.\n :param record_data: Record data that holds the vents.\n :return: dict / None.\n ' record = None if (record_data['messageType'] == 'DATA_MESSAGE'): original_events = record_data['logEvents'] partition_key = record_data['logStream'] record_data['subscriptionFilters'] = [f"Epsagon#{record_data['owner']}#{CURRENT_REGION}"] events = [] epsagon_debug(f'Found total of {len(original_events)} events') epsagon_debug(f'Original events: {original_events}') for event in original_events: if (REGEX.match(event['message']) is not None): events.append(event) epsagon_debug(f'Filtered total of {len(events)} events.') if events: record_data['logEvents'] = events record = {'Data': gzip.compress(json.dumps(record_data).encode('ascii')), 'PartitionKey': partition_key} return record<|docstring|>Filter events relevant for Epsagon. :param record_data: Record data that holds the vents. :return: dict / None.<|endoftext|>
21768468145b9902341e92ca75cfb065328aa07e3d99bbb484d8397c60eb2d29
def forward_logs_to_epsagon(event):
    """Send filtered CloudWatch logs to the Epsagon Kinesis stream.

    Decodes and filters the CloudWatch payload, then puts the matching
    record on Kinesis using Epsagon's credentials, temporarily swapping
    them into the process environment and restoring the originals after.

    :param event: the event CloudWatch Logs delivers to the Lambda
    :return: False when no log line matched the filter, True otherwise
        (including on error — forwarding is best-effort)
    """
    try:
        # CloudWatch delivers the payload base64-encoded and gzipped.
        record_data = json.loads(gzip.decompress(base64.b64decode(event['awslogs']['data'])))
        filtered_event = filter_events(record_data)
        if (not filtered_event):
            epsagon_debug('No logs match')
            return False
        # Swap in Epsagon's AWS credentials for the Kinesis call; originals
        # are restored in the ``finally`` below.
        # NOTE(review): mutating os.environ only affects clients that read
        # credentials at call time — presumably ``kinesis`` does; boto3
        # clients normally capture credentials at construction. Confirm.
        original_access_key = os.environ.pop('AWS_ACCESS_KEY_ID')
        original_secret_key = os.environ.pop('AWS_SECRET_ACCESS_KEY')
        os.environ['AWS_ACCESS_KEY_ID'] = AWS_KEY
        os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET
        try:
            kinesis.put_record(StreamName=KINESIS_NAME, Data=filtered_event['Data'], PartitionKey=filtered_event['PartitionKey'])
        finally:
            os.environ['AWS_ACCESS_KEY_ID'] = original_access_key
            os.environ['AWS_SECRET_ACCESS_KEY'] = original_secret_key
    except Exception as err:
        # Best-effort: never let a forwarding failure crash the Lambda.
        epsagon_debug('Encountered error: {}'.format(err))
        epsagon_debug(traceback.format_exc())
    return True
Send filtered CloudWatch logs to Epsagon Kinesis. :param event: The triggered event from CloudWatch logs.
cloudwatch_log_trigger/cloudwatch_log_sender.py
forward_logs_to_epsagon
epsagon/epsagon-logs-sender
1
python
def forward_logs_to_epsagon(event): '\n Send filtered CloudWatch logs to Epsagon Kinesis.\n :param event: The triggered event from CloudWatch logs.\n ' try: record_data = json.loads(gzip.decompress(base64.b64decode(event['awslogs']['data']))) filtered_event = filter_events(record_data) if (not filtered_event): epsagon_debug('No logs match') return False original_access_key = os.environ.pop('AWS_ACCESS_KEY_ID') original_secret_key = os.environ.pop('AWS_SECRET_ACCESS_KEY') os.environ['AWS_ACCESS_KEY_ID'] = AWS_KEY os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET try: kinesis.put_record(StreamName=KINESIS_NAME, Data=filtered_event['Data'], PartitionKey=filtered_event['PartitionKey']) finally: os.environ['AWS_ACCESS_KEY_ID'] = original_access_key os.environ['AWS_SECRET_ACCESS_KEY'] = original_secret_key except Exception as err: epsagon_debug('Encountered error: {}'.format(err)) epsagon_debug(traceback.format_exc()) return True
def forward_logs_to_epsagon(event): '\n Send filtered CloudWatch logs to Epsagon Kinesis.\n :param event: The triggered event from CloudWatch logs.\n ' try: record_data = json.loads(gzip.decompress(base64.b64decode(event['awslogs']['data']))) filtered_event = filter_events(record_data) if (not filtered_event): epsagon_debug('No logs match') return False original_access_key = os.environ.pop('AWS_ACCESS_KEY_ID') original_secret_key = os.environ.pop('AWS_SECRET_ACCESS_KEY') os.environ['AWS_ACCESS_KEY_ID'] = AWS_KEY os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET try: kinesis.put_record(StreamName=KINESIS_NAME, Data=filtered_event['Data'], PartitionKey=filtered_event['PartitionKey']) finally: os.environ['AWS_ACCESS_KEY_ID'] = original_access_key os.environ['AWS_SECRET_ACCESS_KEY'] = original_secret_key except Exception as err: epsagon_debug('Encountered error: {}'.format(err)) epsagon_debug(traceback.format_exc()) return True<|docstring|>Send filtered CloudWatch logs to Epsagon Kinesis. :param event: The triggered event from CloudWatch logs.<|endoftext|>
2810fd9cfa15b2ca86e550f11057b321e348c2b049d800414a581711e1cd4638
async def test_proxy(self, proxy):
    """Probe *proxy* against the validator URL and adjust its score.

    A 200 response raises the proxy's score; any other status — or any
    request failure — lowers it.

    :param proxy: proxy URL, as ``str`` or ``bytes``
    """
    async with aiohttp.ClientSession() as session:
        usable = False
        try:
            if isinstance(proxy, bytes):
                proxy = proxy.decode('utf8')
            async with session.get(VALIDATOR_BASE_URL, proxy=proxy, timeout=REQUEST_TIMEOUT) as resp:
                usable = (resp.status == 200)
        # The original used a bare ``except:``, which inside a coroutine
        # also swallows asyncio.CancelledError (and KeyboardInterrupt),
        # preventing task cancellation; narrow it to Exception.
        except Exception:
            usable = False
        if usable:
            self.sqlite3.increase_proxy_score(proxy)
            logger.info('Validator √ {}'.format(proxy))
        else:
            self.sqlite3.reduce_proxy_score(proxy)
            logger.info('Validator × {}'.format(proxy))
测试代理 :param proxy: 指定代理
async_proxy_pool/validator.py
test_proxy
luorui110120/async-proxy-pool
0
python
async def test_proxy(self, proxy): '\n 测试代理\n\n :param proxy: 指定代理\n ' async with aiohttp.ClientSession() as session: try: if isinstance(proxy, bytes): proxy = proxy.decode('utf8') async with session.get(VALIDATOR_BASE_URL, proxy=proxy, timeout=REQUEST_TIMEOUT) as resp: if (resp.status == 200): self.sqlite3.increase_proxy_score(proxy) logger.info('Validator √ {}'.format(proxy)) else: self.sqlite3.reduce_proxy_score(proxy) logger.info('Validator × {}'.format(proxy)) except: self.sqlite3.reduce_proxy_score(proxy) logger.info('Validator × {}'.format(proxy))
async def test_proxy(self, proxy): '\n 测试代理\n\n :param proxy: 指定代理\n ' async with aiohttp.ClientSession() as session: try: if isinstance(proxy, bytes): proxy = proxy.decode('utf8') async with session.get(VALIDATOR_BASE_URL, proxy=proxy, timeout=REQUEST_TIMEOUT) as resp: if (resp.status == 200): self.sqlite3.increase_proxy_score(proxy) logger.info('Validator √ {}'.format(proxy)) else: self.sqlite3.reduce_proxy_score(proxy) logger.info('Validator × {}'.format(proxy)) except: self.sqlite3.reduce_proxy_score(proxy) logger.info('Validator × {}'.format(proxy))<|docstring|>测试代理 :param proxy: 指定代理<|endoftext|>
a70b1b68b4428157195123a584be38ae04c1963f8e2276797db111fc04f0bdea
def run(self):
    """Start the validator: test every stored proxy once, then rest."""
    logger.info('Validator working...')
    logger.info('Validator website is {}'.format(VALIDATOR_BASE_URL))
    for candidate in self.sqlite3.all_proxies():
        self.test_proxy_new(candidate)
    logger.info('Validator resting...')
启动校验器
async_proxy_pool/validator.py
run
luorui110120/async-proxy-pool
0
python
def run(self): '\n \n ' logger.info('Validator working...') logger.info('Validator website is {}'.format(VALIDATOR_BASE_URL)) proxies = self.sqlite3.all_proxies() for proxy in proxies: self.test_proxy_new(proxy) logger.info('Validator resting...')
def run(self): '\n \n ' logger.info('Validator working...') logger.info('Validator website is {}'.format(VALIDATOR_BASE_URL)) proxies = self.sqlite3.all_proxies() for proxy in proxies: self.test_proxy_new(proxy) logger.info('Validator resting...')<|docstring|>启动校验器<|endoftext|>
5656de65071748d3e3ce73026a9dd1434ee64ea5c7611ad6ad44d910ae96de28
def __getitem__(self, index):
    """Load one training sample: image plus its bbox/class annotations.

    NOTE(review): the extracted source used the invalid spelling
    ``arr[(:, :4)]`` throughout; it has been reconstructed here as standard
    NumPy slicing ``arr[:, :4]``. The nesting of the photometric
    augmentation under ``self.augFlag`` is inferred from the flattened
    source — confirm against the original file.

    :param index: index into ``self.annNames``
    :return: dict with ``images`` (CHW float tensor), ``bboxesGt``
        (N x 4, xywh), ``classes`` (N x 1) and ``annoName``
    """
    # Annotation file: one row per object, [x, y, w, h, class].
    txtPath = self.trainAnnoPath + self.annNames[index]
    infos = np.loadtxt(txtPath)
    infos = np.array(infos, dtype=np.float32).reshape(-1, 5)
    bboxes = infos[:, :4]
    classes = infos[:, 4:]
    img = cv2.imread(self.trainImgPath + self.annNames[index].split('.')[0] + '.jpg')
    img = img.astype(np.float32)
    winName = self.annNames[index]
    if self.showFlag:
        self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName, color=(0, 0, 255))
    # Uniform resize first so the network input size is fixed; the zero
    # padding it introduces gets perturbed by the augmentations below.
    img, infos, bboxes = resizeUniform(img, self.netInputSizehw, bboxes)
    if self.showFlag:
        self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName + '_resize', color=(0, 0, 255))
    if self.augFlag:
        # Geometric (shape-changing) augmentation must run before the
        # photometric ones. Boxes are converted xywh -> xyxy for the
        # affine ops and back afterwards.
        bboxes[:, 2:] = bboxes[:, :2] + bboxes[:, 2:]
        imgauger = ImgAugWithShape(img, bboxes)
        imgauger.shear(15)
        imgauger.translate(translate=[-0.2, 0.2])
        img, bboxes = imgauger.img, imgauger.boxes
        bboxes[:, 2:] = bboxes[:, 2:] - bboxes[:, :2]
        if self.showFlag:
            self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName + '_augshape', color=(0, 0, 255))
        # Photometric augmentation last; the normalized image is not
        # clipped back to (0, 1).
        imgauger = ImgAugWithoutShape(img)
        imgauger.brightness()
        imgauger.constrast()
        imgauger.saturation()
        imgauger.normalize1(mean=self.normalize[0], std=self.normalize[1])
        img = imgauger.img
        if self.showFlag:
            self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName + '_augcolor', color=(0, 0, 255))
    if self.showFlag:
        outwh = (80, 80)
        self.__show(np.copy(cv2.resize(img, (outwh[0], outwh[1]))).astype(np.uint8), bboxes, classes, winName + '_augoutlayer', color=(0, 0, 255))
    if self.showFlag:
        cv2.waitKey()
    # HWC -> CHW for the network.
    img = img.transpose(2, 0, 1)
    meta = dict(images=torch.from_numpy(img.astype(np.float32)),
                bboxesGt=bboxes,
                classes=classes,
                annoName=self.annNames[index])
    return meta
bbox img org
scripts_torch/utils_frequent/dataload/dataloader_detection.py
__getitem__
yunshangyue71/mycodes
0
python
def __getitem__(self, index): txtPath = (self.trainAnnoPath + self.annNames[index]) infos = np.loadtxt(txtPath) infos = np.array(infos, dtype=np.float32).reshape((- 1), 5) bboxes = infos[(:, :4)] classes = infos[(:, 4:)] img = cv2.imread(((self.trainImgPath + self.annNames[index].split('.')[0]) + '.jpg')) img = img.astype(np.float32) winName = self.annNames[index] if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName, color=(0, 0, 255)) 'unifor resize 放在最后,输入网络的图片会有很多的0, 经过imgaug这些将会变为非0有利于学习' (img, infos, bboxes) = resizeUniform(img, self.netInputSizehw, bboxes) if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, (winName + '_resize'), color=(0, 0, 255)) if self.augFlag: 'Img Aug With Shape, 放射变换的增强一定要放在前面,主要是0的情况' bboxes[(:, 2:)] = (bboxes[(:, :2)] + bboxes[(:, 2:)]) imgauger = ImgAugWithShape(img, bboxes) imgauger.shear(15) imgauger.translate(translate=[(- 0.2), 0.2]) (img, bboxes) = (imgauger.img, imgauger.boxes) bboxes[(:, 2:)] = (bboxes[(:, 2:)] - bboxes[(:, :2)]) if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, (winName + '_augshape'), color=(0, 0, 255)) '非放射变换,放在最后, 最后的img 不用clip到(0,1)之间' imgauger = ImgAugWithoutShape(img) imgauger.brightness() imgauger.constrast() imgauger.saturation() imgauger.normalize1(mean=self.normalize[0], std=self.normalize[1]) img = imgauger.img if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, (winName + '_augcolor'), color=(0, 0, 255)) if self.showFlag: outwh = (80, 80) self.__show(np.copy(cv2.resize(img, (outwh[0], outwh[1]))).astype(np.uint8), bboxes, classes, (winName + '_augoutlayer'), color=(0, 0, 255)) if self.showFlag: cv2.waitKey() 'return 两种return可供选择' img = img.transpose(2, 0, 1) meta = dict(images=torch.from_numpy(img.astype(np.float32)), bboxesGt=bboxes, classes=classes, annoName=self.annNames[index]) return meta
def __getitem__(self, index): txtPath = (self.trainAnnoPath + self.annNames[index]) infos = np.loadtxt(txtPath) infos = np.array(infos, dtype=np.float32).reshape((- 1), 5) bboxes = infos[(:, :4)] classes = infos[(:, 4:)] img = cv2.imread(((self.trainImgPath + self.annNames[index].split('.')[0]) + '.jpg')) img = img.astype(np.float32) winName = self.annNames[index] if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName, color=(0, 0, 255)) 'unifor resize 放在最后,输入网络的图片会有很多的0, 经过imgaug这些将会变为非0有利于学习' (img, infos, bboxes) = resizeUniform(img, self.netInputSizehw, bboxes) if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, (winName + '_resize'), color=(0, 0, 255)) if self.augFlag: 'Img Aug With Shape, 放射变换的增强一定要放在前面,主要是0的情况' bboxes[(:, 2:)] = (bboxes[(:, :2)] + bboxes[(:, 2:)]) imgauger = ImgAugWithShape(img, bboxes) imgauger.shear(15) imgauger.translate(translate=[(- 0.2), 0.2]) (img, bboxes) = (imgauger.img, imgauger.boxes) bboxes[(:, 2:)] = (bboxes[(:, 2:)] - bboxes[(:, :2)]) if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, (winName + '_augshape'), color=(0, 0, 255)) '非放射变换,放在最后, 最后的img 不用clip到(0,1)之间' imgauger = ImgAugWithoutShape(img) imgauger.brightness() imgauger.constrast() imgauger.saturation() imgauger.normalize1(mean=self.normalize[0], std=self.normalize[1]) img = imgauger.img if self.showFlag: self.__show(np.copy(img).astype(np.uint8), bboxes, classes, (winName + '_augcolor'), color=(0, 0, 255)) if self.showFlag: outwh = (80, 80) self.__show(np.copy(cv2.resize(img, (outwh[0], outwh[1]))).astype(np.uint8), bboxes, classes, (winName + '_augoutlayer'), color=(0, 0, 255)) if self.showFlag: cv2.waitKey() 'return 两种return可供选择' img = img.transpose(2, 0, 1) meta = dict(images=torch.from_numpy(img.astype(np.float32)), bboxesGt=bboxes, classes=classes, annoName=self.annNames[index]) return meta<|docstring|>bbox img org<|endoftext|>
db8ebdb14395550a8293e78e33922ed0102682fb8e8741e6bd725d750af94430
def test_users_listed(self):
    """The created user's name and email appear on the admin user changelist."""
    changelist_url = reverse('admin:core_user_changelist')
    response = self.client.get(changelist_url)
    self.assertContains(response, self.user.name)
    self.assertContains(response, self.user.email)
Test that users are listed on a user page
app/core/tests/test_admin.py
test_users_listed
harshakakumanu/recipe-app-api
0
python
def test_users_listed(self): url = reverse('admin:core_user_changelist') res = self.client.get(url) self.assertContains(res, self.user.name) self.assertContains(res, self.user.email)
def test_users_listed(self): url = reverse('admin:core_user_changelist') res = self.client.get(url) self.assertContains(res, self.user.name) self.assertContains(res, self.user.email)<|docstring|>Test that users are listed on a user page<|endoftext|>
2f91609d818b8788e9e3901a2dbaf63075cd8a4bd6059a9a761e5c62e6429db9
@app.task(bind=True, base=AbortableTask, name='celery_tasks.tasks.run_inference')
def run_inference(self, docker_name, package: str, docker_args: List[str], fuzzer_image: str, build_file: str, inference_command_args: List[str], timeout_per_package: float, qemu: bool=False):
    """Run the input-inference step for *package* inside a Docker container.

    Builds (or reuses) the package's Docker image, keeps the ``-Q`` flag in
    sync with how the image was built, then runs the inference command with
    a hard per-package timeout.

    :param self: bound Celery task instance
    :param docker_name: container name used when stopping on SIGINT
    :param package: package to infer inputs for
    :param docker_args: extra ``docker run`` arguments (mutated in place)
    :param fuzzer_image: base fuzzer image the package image derives from
    :param build_file: JSON file caching the package's build metadata
    :param inference_command_args: in-container command arguments (mutated in place)
    :param timeout_per_package: seconds before the run is aborted
    :param qemu: build/run the package in QEMU (binary-only) mode
    :return: True on success or deliberate timeout/kill, False on failure
    """
    inference_command = None
    from celery.platforms import signals

    def int_handler(signum, frame):
        # SIGINT: best-effort stop of the running container; always reports
        # handled. Reads ``inference_command`` from the enclosing scope.
        print('Int handler!')
        if (inference_command is not None):
            try:
                docker_command.stop(docker_name, _timeout=120)
            except sh.ErrorReturnCode:
                return True
            except sh.TimeoutException:
                return True
            return True
        else:
            return True
    signals['INT'] = int_handler
    print('Now working on {0}'.format(package))
    try:
        if os.path.exists(build_file):
            # Reuse the image recorded by a previous build.
            with open(build_file, 'r') as jsonfp:
                build_dict = json.load(jsonfp)
            package_image_name = build_dict['docker_image_name']
        else:
            # First build: random suffix avoids image-name collisions.
            package_image_name = ((package + '_') + str(uuid.uuid4())[:8])
            if (not os.path.exists(os.path.dirname(build_file))):
                os.mkdir(os.path.dirname(build_file))
            package_image_name = helpers.docker_builder.return_current_package_image(package=package, fuzzer_image=fuzzer_image, package_image=package_image_name, json_output_path=build_file, qemu=qemu)
        print('docker run', ' '.join(docker_args), package_image_name, ' '.join(map((lambda x: str(x)), inference_command_args)))
        build_dict = {}
        with open(build_file) as build_filefp:
            build_dict = json.load(build_filefp)
        # Sync the -Q (QEMU) flag with the mode the image was built in.
        if (build_dict['qemu'] and ('-Q' not in inference_command_args)):
            inference_command_args.append('-Q')
        elif ((not build_dict['qemu']) and ('-Q' in inference_command_args)):
            inference_command_args.remove('-Q')
        docker_args.insert(0, '--cpus=1.0')
        inference_command = docker_command.run(docker_args, package_image_name, inference_command_args, _out=sys.stdout, _timeout=timeout_per_package)
        if (inference_command.exit_code != 0):
            # NOTE(review): missing .format() — this prints the "{0}"
            # placeholder literally, followed by the package name.
            print('Some went wrong for package {0}', package)
            return False
        if (not KEEP_IMAGES):
            docker_command.rmi('-f', package_image_name)
        print('Done! Returning True')
        return True
    except sh.ErrorReturnCode as e:
        print('Inference error:')
        print('STDOUT:\n', e.stdout.decode('utf-8'))
        print('STDERR:\n', e.stderr.decode('utf-8'))
        print('command line: {0}'.format(e.full_cmd))
        logger.error('Inference error:')
        logger.error('STDOUT:\n', e.stdout.decode('utf-8'))
        logger.error('STDERR:\n', e.stderr.decode('utf-8'))
        logger.error('command line: {0}'.format(e.full_cmd))
        return False
    except sh.TimeoutException as e:
        # Timeouts are expected; move on to the next package.
        print('Inferring {0} timed out... Next one!'.format(package))
        return True
    except sh.SignalException_SIGKILL as e:
        print('Killed')
        return True
    return True
:param self: :param docker_name: :param package: :param docker_args: :param fuzzer_image: :param build_file: :param inference_command_args: :param timeout_per_package: :type inference_command_args: List :return:
fexm/celery_tasks/tasks.py
run_inference
fgsect/fexm
105
python
@app.task(bind=True, base=AbortableTask, name='celery_tasks.tasks.run_inference') def run_inference(self, docker_name, package: str, docker_args: [str], fuzzer_image: str, build_file: str, inference_command_args: List[str], timeout_per_package: float, qemu: bool=False): '\n :param self: \n :param docker_name: \n :param package: \n :param docker_args: \n :param fuzzer_image: \n :param build_file: \n :param inference_command_args: \n :param timeout_per_package: \n :type inference_command_args: List\n :return: \n ' inference_command = None from celery.platforms import signals def int_handler(signum, frame): print('Int handler!') if (inference_command is not None): try: docker_command.stop(docker_name, _timeout=120) except sh.ErrorReturnCode: return True except sh.TimeoutException: return True return True else: return True signals['INT'] = int_handler print('Now working on {0}'.format(package)) try: if os.path.exists(build_file): with open(build_file, 'r') as jsonfp: build_dict = json.load(jsonfp) package_image_name = build_dict['docker_image_name'] else: package_image_name = ((package + '_') + str(uuid.uuid4())[:8]) if (not os.path.exists(os.path.dirname(build_file))): os.mkdir(os.path.dirname(build_file)) package_image_name = helpers.docker_builder.return_current_package_image(package=package, fuzzer_image=fuzzer_image, package_image=package_image_name, json_output_path=build_file, qemu=qemu) print('docker run', ' '.join(docker_args), package_image_name, ' '.join(map((lambda x: str(x)), inference_command_args))) build_dict = {} with open(build_file) as build_filefp: build_dict = json.load(build_filefp) if (build_dict['qemu'] and ('-Q' not in inference_command_args)): inference_command_args.append('-Q') elif ((not build_dict['qemu']) and ('-Q' in inference_command_args)): inference_command_args.remove('-Q') docker_args.insert(0, '--cpus=1.0') inference_command = docker_command.run(docker_args, package_image_name, inference_command_args, _out=sys.stdout, 
_timeout=timeout_per_package) if (inference_command.exit_code != 0): print('Some went wrong for package {0}', package) return False if (not KEEP_IMAGES): docker_command.rmi('-f', package_image_name) print('Done! Returning True') return True except sh.ErrorReturnCode as e: print('Inference error:') print('STDOUT:\n', e.stdout.decode('utf-8')) print('STDERR:\n', e.stderr.decode('utf-8')) print('command line: {0}'.format(e.full_cmd)) logger.error('Inference error:') logger.error('STDOUT:\n', e.stdout.decode('utf-8')) logger.error('STDERR:\n', e.stderr.decode('utf-8')) logger.error('command line: {0}'.format(e.full_cmd)) return False except sh.TimeoutException as e: print('Inferring {0} timed out... Next one!'.format(package)) return True except sh.SignalException_SIGKILL as e: print('Killed') return True return True
@app.task(bind=True, base=AbortableTask, name='celery_tasks.tasks.run_inference') def run_inference(self, docker_name, package: str, docker_args: [str], fuzzer_image: str, build_file: str, inference_command_args: List[str], timeout_per_package: float, qemu: bool=False): '\n :param self: \n :param docker_name: \n :param package: \n :param docker_args: \n :param fuzzer_image: \n :param build_file: \n :param inference_command_args: \n :param timeout_per_package: \n :type inference_command_args: List\n :return: \n ' inference_command = None from celery.platforms import signals def int_handler(signum, frame): print('Int handler!') if (inference_command is not None): try: docker_command.stop(docker_name, _timeout=120) except sh.ErrorReturnCode: return True except sh.TimeoutException: return True return True else: return True signals['INT'] = int_handler print('Now working on {0}'.format(package)) try: if os.path.exists(build_file): with open(build_file, 'r') as jsonfp: build_dict = json.load(jsonfp) package_image_name = build_dict['docker_image_name'] else: package_image_name = ((package + '_') + str(uuid.uuid4())[:8]) if (not os.path.exists(os.path.dirname(build_file))): os.mkdir(os.path.dirname(build_file)) package_image_name = helpers.docker_builder.return_current_package_image(package=package, fuzzer_image=fuzzer_image, package_image=package_image_name, json_output_path=build_file, qemu=qemu) print('docker run', ' '.join(docker_args), package_image_name, ' '.join(map((lambda x: str(x)), inference_command_args))) build_dict = {} with open(build_file) as build_filefp: build_dict = json.load(build_filefp) if (build_dict['qemu'] and ('-Q' not in inference_command_args)): inference_command_args.append('-Q') elif ((not build_dict['qemu']) and ('-Q' in inference_command_args)): inference_command_args.remove('-Q') docker_args.insert(0, '--cpus=1.0') inference_command = docker_command.run(docker_args, package_image_name, inference_command_args, _out=sys.stdout, 
_timeout=timeout_per_package) if (inference_command.exit_code != 0): print('Some went wrong for package {0}', package) return False if (not KEEP_IMAGES): docker_command.rmi('-f', package_image_name) print('Done! Returning True') return True except sh.ErrorReturnCode as e: print('Inference error:') print('STDOUT:\n', e.stdout.decode('utf-8')) print('STDERR:\n', e.stderr.decode('utf-8')) print('command line: {0}'.format(e.full_cmd)) logger.error('Inference error:') logger.error('STDOUT:\n', e.stdout.decode('utf-8')) logger.error('STDERR:\n', e.stderr.decode('utf-8')) logger.error('command line: {0}'.format(e.full_cmd)) return False except sh.TimeoutException as e: print('Inferring {0} timed out... Next one!'.format(package)) return True except sh.SignalException_SIGKILL as e: print('Killed') return True return True<|docstring|>:param self: :param docker_name: :param package: :param docker_args: :param fuzzer_image: :param build_file: :param inference_command_args: :param timeout_per_package: :type inference_command_args: List :return:<|endoftext|>
de00a13ce917e47ed5fe21d220b71b1b750808fdd64e62ad1f26deaddb537287
@app.task(bind=True, name='celery_tasks.tasks.run_eval')
def run_eval(self, package: str, fuzzer_image: str, volume_path: str, seeds_path: str, fuzz_duration: int=(45 * 60), use_asan: bool=False, exec_timeout: int=None, qemu: bool=False, config_dict: typing.Dict[(str, object)]=None):
    """Fuzz/evaluate *package* inside the fuzzer Docker image.

    Writes a per-package run-configuration JSON under
    ``<volume_path>/run_configurations``, then runs ``eval_package.py`` in
    a container with results, build artefacts and seeds mounted.

    :param self: bound Celery task instance
    :param package: package to evaluate
    :param fuzzer_image: Docker image containing the fuzzing toolchain
    :param volume_path: host dir holding fuzz_data/build_data/run_configurations
    :param seeds_path: host directory with seed inputs (mounted read-only)
    :param fuzz_duration: fuzzing time in seconds (default 45 minutes)
    :param use_asan: fuzz with AddressSanitizer (sets AFL_USE_ASAN=1)
    :param exec_timeout: per-execution timeout forwarded to the fuzzer
    :param qemu: fuzz in QEMU (binary-only) mode
    :param config_dict: extra configuration (``fuzzing_cores_per_binary``)
    :return: True if the container exited 0, else False
    """
    print('Got eval task for package {0}'.format(package))
    logger.info('Got eval task for package {0}'.format(package))
    # Host-path -> container-path mounts.
    volumes_dict = {os.path.join(volume_path, 'fuzz_data'): {'bind': '/results', 'mode': 'rw'}, os.path.join(volume_path, 'build_data'): {'bind': '/build', 'mode': 'rw'}, os.path.join(volume_path, 'run_configurations'): {'bind': '/run_configurations', 'mode': 'ro'}, seeds_path: {'bind': '/fuzz/seeds', 'mode': 'ro'}}
    additional_env_variables = {}
    if use_asan:
        additional_env_variables['AFL_USE_ASAN'] = '1'
    # Paths in this dict are container-side paths.
    # NOTE(review): ``config_dict`` defaults to None but ``.get`` is called
    # on it unconditionally — raises AttributeError unless every caller
    # passes a dict; confirm call sites.
    eval_package_dict = {'package': package, 'volume': '/results', 'fuzz_duration': int(fuzz_duration), 'exec_timeout': exec_timeout, 'qemu': qemu, 'seeds': '/fuzz/seeds', 'fuzzing_cores_per_binary': config_dict.get('fuzzing_cores_per_binary'), 'asan': use_asan}
    os.makedirs(os.path.join(volume_path, 'run_configurations'), exist_ok=True)
    with open(os.path.join(volume_path, 'run_configurations', (package + '.json')), 'w') as fp:
        json.dump(eval_package_dict, fp, indent=4, sort_keys=True)
    eval_args = ['/inputinferer/configfinder/eval_package.py', (('/run_configurations/' + package) + '.json')]
    container = docker_client.containers.run(image=fuzzer_image, remove=True, cap_add=['SYS_PTRACE'], security_opt=['seccomp=unconfined'], entrypoint='python', volumes=volumes_dict, command=eval_args, detach=True, stream=True, stdout=True, stderr=True, name=((package + '_fuzz_') + str(uuid.uuid4())[:4]), environment=additional_env_variables)
    container_output = ''
    # Stream container logs into our logger while accumulating the full
    # output for error reporting.
    for line in container.logs(stream=True):
        logger.info(line.decode('utf-8').strip())
        container_output += line.decode('utf-8')
    status = container.wait()
    if (status['StatusCode'] != 0):
        logger.error('Error while running docker command. Docker Output:\n {0}. Return value {1}'.format(container_output, status['StatusCode']))
        return False
    return True
:param self: :param package: :param fuzzer_image: :param volume_path: :param seeds_path: :param fuzz_duration: :param use_asan: :param exec_timeout: :param qemu: :param config_dict: :return:
fexm/celery_tasks/tasks.py
run_eval
fgsect/fexm
105
python
@app.task(bind=True, name='celery_tasks.tasks.run_eval') def run_eval(self, package: str, fuzzer_image: str, volume_path: str, seeds_path: str, fuzz_duration: int=(45 * 60), use_asan: int=False, exec_timeout: int=None, qemu: bool=False, config_dict: typing.Dict[(str, object)]=None): '\n\n :param self:\n :param package:\n :param fuzzer_image:\n :param volume_path:\n :param seeds_path:\n :param fuzz_duration:\n :param use_asan:\n :param exec_timeout:\n :param qemu:\n :param config_dict:\n :return:\n ' print('Got eval task for package {0}'.format(package)) logger.info('Got eval task for package {0}'.format(package)) volumes_dict = {os.path.join(volume_path, 'fuzz_data'): {'bind': '/results', 'mode': 'rw'}, os.path.join(volume_path, 'build_data'): {'bind': '/build', 'mode': 'rw'}, os.path.join(volume_path, 'run_configurations'): {'bind': '/run_configurations', 'mode': 'ro'}, seeds_path: {'bind': '/fuzz/seeds', 'mode': 'ro'}} additional_env_variables = {} if use_asan: additional_env_variables['AFL_USE_ASAN'] = '1' eval_package_dict = {'package': package, 'volume': '/results', 'fuzz_duration': int(fuzz_duration), 'exec_timeout': exec_timeout, 'qemu': qemu, 'seeds': '/fuzz/seeds', 'fuzzing_cores_per_binary': config_dict.get('fuzzing_cores_per_binary'), 'asan': use_asan} os.makedirs(os.path.join(volume_path, 'run_configurations'), exist_ok=True) with open(os.path.join(volume_path, 'run_configurations', (package + '.json')), 'w') as fp: json.dump(eval_package_dict, fp, indent=4, sort_keys=True) eval_args = ['/inputinferer/configfinder/eval_package.py', (('/run_configurations/' + package) + '.json')] container = docker_client.containers.run(image=fuzzer_image, remove=True, cap_add=['SYS_PTRACE'], security_opt=['seccomp=unconfined'], entrypoint='python', volumes=volumes_dict, command=eval_args, detach=True, stream=True, stdout=True, stderr=True, name=((package + '_fuzz_') + str(uuid.uuid4())[:4]), environment=additional_env_variables) container_output = for line in 
container.logs(stream=True): logger.info(line.decode('utf-8').strip()) container_output += line.decode('utf-8') status = container.wait() if (status['StatusCode'] != 0): logger.error('Error while running docker command. Docker Output:\n {0}. Return value {1}'.format(container_output, status['StatusCode'])) return False return True
@app.task(bind=True, name='celery_tasks.tasks.run_eval') def run_eval(self, package: str, fuzzer_image: str, volume_path: str, seeds_path: str, fuzz_duration: int=(45 * 60), use_asan: int=False, exec_timeout: int=None, qemu: bool=False, config_dict: typing.Dict[(str, object)]=None): '\n\n :param self:\n :param package:\n :param fuzzer_image:\n :param volume_path:\n :param seeds_path:\n :param fuzz_duration:\n :param use_asan:\n :param exec_timeout:\n :param qemu:\n :param config_dict:\n :return:\n ' print('Got eval task for package {0}'.format(package)) logger.info('Got eval task for package {0}'.format(package)) volumes_dict = {os.path.join(volume_path, 'fuzz_data'): {'bind': '/results', 'mode': 'rw'}, os.path.join(volume_path, 'build_data'): {'bind': '/build', 'mode': 'rw'}, os.path.join(volume_path, 'run_configurations'): {'bind': '/run_configurations', 'mode': 'ro'}, seeds_path: {'bind': '/fuzz/seeds', 'mode': 'ro'}} additional_env_variables = {} if use_asan: additional_env_variables['AFL_USE_ASAN'] = '1' eval_package_dict = {'package': package, 'volume': '/results', 'fuzz_duration': int(fuzz_duration), 'exec_timeout': exec_timeout, 'qemu': qemu, 'seeds': '/fuzz/seeds', 'fuzzing_cores_per_binary': config_dict.get('fuzzing_cores_per_binary'), 'asan': use_asan} os.makedirs(os.path.join(volume_path, 'run_configurations'), exist_ok=True) with open(os.path.join(volume_path, 'run_configurations', (package + '.json')), 'w') as fp: json.dump(eval_package_dict, fp, indent=4, sort_keys=True) eval_args = ['/inputinferer/configfinder/eval_package.py', (('/run_configurations/' + package) + '.json')] container = docker_client.containers.run(image=fuzzer_image, remove=True, cap_add=['SYS_PTRACE'], security_opt=['seccomp=unconfined'], entrypoint='python', volumes=volumes_dict, command=eval_args, detach=True, stream=True, stdout=True, stderr=True, name=((package + '_fuzz_') + str(uuid.uuid4())[:4]), environment=additional_env_variables) container_output = for line in 
container.logs(stream=True): logger.info(line.decode('utf-8').strip()) container_output += line.decode('utf-8') status = container.wait() if (status['StatusCode'] != 0): logger.error('Error while running docker command. Docker Output:\n {0}. Return value {1}'.format(container_output, status['StatusCode'])) return False return True<|docstring|>:param self: :param package: :param fuzzer_image: :param volume_path: :param seeds_path: :param fuzz_duration: :param use_asan: :param exec_timeout: :param qemu: :param config_dict: :return:<|endoftext|>