repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
tanghaibao/goatools
goatools/grouper/plotobj.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/plotobj.py#L103-L108
def _get_plt_data(self, hdrgos_usr): """Given User GO IDs, return their GO headers and other GO info.""" hdrgo2usrgos = self.grprobj.get_hdrgo2usrgos(hdrgos_usr) usrgos_actual = set([u for us in hdrgo2usrgos.values() for u in us]) go2obj = self.gosubdag.get_go2obj(usrgos_actual.union(hdrgo2usrgos.keys())) return hdrgo2usrgos, go2obj
[ "def", "_get_plt_data", "(", "self", ",", "hdrgos_usr", ")", ":", "hdrgo2usrgos", "=", "self", ".", "grprobj", ".", "get_hdrgo2usrgos", "(", "hdrgos_usr", ")", "usrgos_actual", "=", "set", "(", "[", "u", "for", "us", "in", "hdrgo2usrgos", ".", "values", "(...
Given User GO IDs, return their GO headers and other GO info.
[ "Given", "User", "GO", "IDs", "return", "their", "GO", "headers", "and", "other", "GO", "info", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/updatehdr.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/updatehdr.py#L523-L561
def create_unique_wcsname(fimg, extnum, wcsname):
    """
    This function evaluates whether the specified wcsname value has
    already been used in this image.  If so, it automatically modifies
    the name with a simple version ID using wcsname_NNN format.

    Parameters
    ----------
    fimg : obj
        PyFITS object of image with WCS information to be updated

    extnum : int
        Index of extension with WCS information to be updated

    wcsname : str
        Value of WCSNAME specified by user for labelling the new WCS

    Returns
    -------
    uniqname : str
        Unique WCSNAME value
    """
    wnames = list(wcsutil.altwcs.wcsnames(fimg, ext=extnum).values())
    if wcsname not in wnames:
        uniqname = wcsname
    else:
        # setup pattern to match names of the form "<wcsname>_<number>";
        # re.escape guards against regex metacharacters in wcsname and the
        # raw string plus anchor reject names like "<wcsname>_1_extra",
        # which previously crashed the int() parse below.
        rpatt = re.compile(re.escape(wcsname) + r'_(\d+)$')
        index = 0
        for wname in wnames:
            rmatch = rpatt.match(wname)
            if rmatch:
                # track the highest version number already in use
                n = int(rmatch.group(1))
                if n > index:
                    # BUGFIX: was "index = 1", which could generate a
                    # non-unique name when versions >= 2 already existed.
                    index = n
        index += 1  # next free version number
        uniqname = "%s_%d" % (wcsname, index)
    return uniqname
[ "def", "create_unique_wcsname", "(", "fimg", ",", "extnum", ",", "wcsname", ")", ":", "wnames", "=", "list", "(", "wcsutil", ".", "altwcs", ".", "wcsnames", "(", "fimg", ",", "ext", "=", "extnum", ")", ".", "values", "(", ")", ")", "if", "wcsname", "...
This function evaluates whether the specified wcsname value has already been used in this image. If so, it automatically modifies the name with a simple version ID using wcsname_NNN format. Parameters ---------- fimg : obj PyFITS object of image with WCS information to be updated extnum : int Index of extension with WCS information to be updated wcsname : str Value of WCSNAME specified by user for labelling the new WCS Returns ------- uniqname : str Unique WCSNAME value
[ "This", "function", "evaluates", "whether", "the", "specified", "wcsname", "value", "has", "already", "been", "used", "in", "this", "image", ".", "If", "so", "it", "automatically", "modifies", "the", "name", "with", "a", "simple", "version", "ID", "using", "...
python
train
blueset/ehForwarderBot
ehforwarderbot/coordinator.py
https://github.com/blueset/ehForwarderBot/blob/62e8fcfe77b2993aba91623f538f404a90f59f1d/ehforwarderbot/coordinator.py#L70-L81
def add_middleware(middleware: EFBMiddleware):
    """
    Register a middleware with the coordinator.

    Args:
        middleware (EFBMiddleware): Middleware to register
    """
    global middlewares
    # Reject anything that is not an EFBMiddleware before touching the registry.
    if not isinstance(middleware, EFBMiddleware):
        raise TypeError("Middleware instance is expected")
    middlewares.append(middleware)
[ "def", "add_middleware", "(", "middleware", ":", "EFBMiddleware", ")", ":", "global", "middlewares", "if", "isinstance", "(", "middleware", ",", "EFBMiddleware", ")", ":", "middlewares", ".", "append", "(", "middleware", ")", "else", ":", "raise", "TypeError", ...
Register a middleware with the coordinator. Args: middleware (EFBMiddleware): Middleware to register
[ "Register", "a", "middleware", "with", "the", "coordinator", "." ]
python
train
serge-sans-paille/pythran
pythran/syntax.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/syntax.py#L142-L176
def visit_ImportFrom(self, node):
    """
        Check validity of imported functions.

        Check:
            - no level specific value are provided.
            - a module is provided
            - module/submodule exists in MODULES
            - imported function exists in the given module/submodule
    """
    if node.level:
        raise PythranSyntaxError("Relative import not supported", node)
    if not node.module:
        raise PythranSyntaxError("import from without module", node)
    module = node.module

    # Walk down the MODULES table one path component at a time to verify
    # that the module (and every submodule) is known to Pythran.
    current_module = MODULES
    for path in module.split('.'):
        if path not in current_module:
            raise PythranSyntaxError(
                "Module '{0}' unknown.".format(module), node)
        current_module = current_module[path]

    # Every imported identifier (wildcard excepted) must exist in the
    # resolved module table.
    for alias in node.names:
        if alias.name == '*':
            continue
        if alias.name not in current_module:
            raise PythranSyntaxError(
                "identifier '{0}' not found in module '{1}'".format(
                    alias.name, module), node)
[ "def", "visit_ImportFrom", "(", "self", ",", "node", ")", ":", "if", "node", ".", "level", ":", "raise", "PythranSyntaxError", "(", "\"Relative import not supported\"", ",", "node", ")", "if", "not", "node", ".", "module", ":", "raise", "PythranSyntaxError", "...
Check validity of imported functions. Check: - no level specific value are provided. - a module is provided - module/submodule exists in MODULES - imported function exists in the given module/submodule
[ "Check", "validity", "of", "imported", "functions", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1864-L1882
def close(self):
    """Close the TarFile. In write-mode, two finishing zero blocks are
       appended to the archive.
    """
    # Idempotent: closing an already-closed archive is a no-op.
    if self.closed:
        return

    # Append ('a') and write ('w') modes must finalize the archive: two
    # zero-filled blocks form the tar end-of-archive marker.
    if self.mode in "aw":
        self.fileobj.write(NUL * (BLOCKSIZE * 2))
        self.offset += (BLOCKSIZE * 2)
        # fill up the end with zero-blocks
        # (like option -b20 for tar does)
        blocks, remainder = divmod(self.offset, RECORDSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (RECORDSIZE - remainder))

    # Only close the underlying file object if we opened it ourselves;
    # a caller-supplied file object is left open for the caller.
    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "closed", ":", "return", "if", "self", ".", "mode", "in", "\"aw\"", ":", "self", ".", "fileobj", ".", "write", "(", "NUL", "*", "(", "BLOCKSIZE", "*", "2", ")", ")", "self", ".", "offset",...
Close the TarFile. In write-mode, two finishing zero blocks are appended to the archive.
[ "Close", "the", "TarFile", ".", "In", "write", "-", "mode", "two", "finishing", "zero", "blocks", "are", "appended", "to", "the", "archive", "." ]
python
train
rmorshea/spectate
spectate/core.py
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L250-L273
def expose(*methods):
    """A decorator for exposing the methods of a class.

    Parameters
    ----------
    *methods : str
        A str representation of the methods that should be exposed to callbacks.

    Returns
    -------
    decorator : function
        A function accepting one argument - the class whose methods will be
        exposed - and which returns a new :class:`Watchable` that will notify
        a :class:`Spectator` when those methods are called.

    Notes
    -----
    This is essentially a decorator version of :func:`expose_as`
    """
    def decorator(base):
        # Reuse the decorated class's own name for the generated Watchable.
        return expose_as(base.__name__, base, *methods)
    return decorator
[ "def", "expose", "(", "*", "methods", ")", ":", "def", "setup", "(", "base", ")", ":", "return", "expose_as", "(", "base", ".", "__name__", ",", "base", ",", "*", "methods", ")", "return", "setup" ]
A decorator for exposing the methods of a class. Parameters ---------- *methods : str A str representation of the methods that should be exposed to callbacks. Returns ------- decorator : function A function accepting one argument - the class whose methods will be exposed - and which returns a new :class:`Watchable` that will notify a :class:`Spectator` when those methods are called. Notes ----- This is essentially a decorator version of :func:`expose_as`
[ "A", "decorator", "for", "exposing", "the", "methods", "of", "a", "class", "." ]
python
train
thunder-project/thunder
thunder/images/images.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L275-L292
def max_min_projection(self, axis=2):
    """
    Compute maximum-minimum projection along a dimension.

    This computes the sum of the maximum and minimum values.

    Parameters
    ----------
    axis : int, optional, default = 2
        Which axis to compute projection along.
    """
    ndims = size(self.value_shape)
    if axis >= ndims:
        raise Exception('Axis for projection (%s) exceeds '
                        'image dimensions (%s-%s)' % (axis, 0, ndims - 1))

    # The projected axis disappears from the output value shape.
    remaining_shape = list(self.value_shape)
    del remaining_shape[axis]
    return self.map(lambda x: amax(x, axis) + amin(x, axis),
                    value_shape=remaining_shape)
[ "def", "max_min_projection", "(", "self", ",", "axis", "=", "2", ")", ":", "if", "axis", ">=", "size", "(", "self", ".", "value_shape", ")", ":", "raise", "Exception", "(", "'Axis for projection (%s) exceeds '", "'image dimensions (%s-%s)'", "%", "(", "axis", ...
Compute maximum-minimum projection along a dimension. This computes the sum of the maximum and minimum values. Parameters ---------- axis : int, optional, default = 2 Which axis to compute projection along.
[ "Compute", "maximum", "-", "minimum", "projection", "along", "a", "dimension", "." ]
python
train
numberoverzero/bottom
bottom/client.py
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/client.py#L110-L142
def on(self, event: str, func: Optional[Callable] = None) -> Callable:
    """
    Decorate a function to be invoked when the given event occurs.
    The function may be a coroutine.  Your function should accept
    **kwargs in case an event is triggered with unexpected kwargs.

    Example
    -------
    import asyncio
    import bottom

    client = bottom.Client(...)
    @client.on("test")
    async def func(one, two, **kwargs):
        print(one)
        print(two)
        print(kwargs)

    events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"})
    loop = asyncio.get_event_loop()
    # Run all queued events
    loop.stop()
    loop.run_forever()
    """
    if func is None:
        # Called as a decorator factory: @client.on("event")
        return functools.partial(self.on, event)  # type: ignore
    wrapped = func
    if not asyncio.iscoroutinefunction(wrapped):
        # BUGFIX: asyncio.coroutine was deprecated in 3.8 and removed in
        # 3.11; wrap plain callables in a real coroutine function instead
        # so every registered handler can be awaited uniformly.
        @functools.wraps(func)
        async def wrapped(*args, **kwargs):
            return func(*args, **kwargs)
    self._event_handlers[event.upper()].append(wrapped)
    # Always return original
    return func
[ "def", "on", "(", "self", ",", "event", ":", "str", ",", "func", ":", "Optional", "[", "Callable", "]", "=", "None", ")", "->", "Callable", ":", "if", "func", "is", "None", ":", "return", "functools", ".", "partial", "(", "self", ".", "on", ",", ...
Decorate a function to be invoked when the given event occurs. The function may be a coroutine. Your function should accept **kwargs in case an event is triggered with unexpected kwargs. Example ------- import asyncio import bottom client = bottom.Client(...) @client.on("test") async def func(one, two, **kwargs): print(one) print(two) print(kwargs) events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"}) loop = asyncio.get_event_loop() # Run all queued events loop.stop() loop.run_forever()
[ "Decorate", "a", "function", "to", "be", "invoked", "when", "the", "given", "event", "occurs", "." ]
python
train
danilobellini/audiolazy
audiolazy/lazy_synth.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_synth.py#L38-L136
def modulo_counter(start=0., modulo=256., step=1.):
    """
    Creates a lazy endless counter stream with the given modulo, i.e., its
    values ranges from 0. to the given "modulo", somewhat equivalent to:\n
      Stream(itertools.count(start, step)) % modulo\n
    Yet the given step can be an iterable, and doesn't create unneeded big
    ints. All inputs can be float. Input order remembers slice/range inputs.

    All inputs can also be iterables. If any of them is an iterable, the end
    of this counter happen when there's no more data in one of those inputs.
    to continue iteration.
    """
    # BUGFIX: the abstract base classes were removed from the ``collections``
    # root namespace in Python 3.10; resolve Iterable from ``collections.abc``
    # when available, falling back for very old interpreters.
    Iterable = getattr(collections, "abc", collections).Iterable
    if isinstance(start, Iterable):
        lastp = 0.
        c = 0.
        if isinstance(step, Iterable):
            if isinstance(modulo, Iterable):
                for p, m, s in xzip(start, modulo, step):
                    c += p - lastp
                    c = c % m % m
                    yield c
                    c += s
                    lastp = p
            else:
                for p, s in xzip(start, step):
                    c += p - lastp
                    c = c % modulo % modulo
                    yield c
                    c += s
                    lastp = p
        else:
            if isinstance(modulo, Iterable):
                for p, m in xzip(start, modulo):
                    c += p - lastp
                    c = c % m % m
                    yield c
                    c += step
                    lastp = p
            else:
                # Only start is iterable. This should be optimized!
                if step == 0:
                    for p in start:
                        yield p % modulo % modulo
                else:
                    steps = int(modulo / step)
                    if steps > 1:
                        # Batch several additions before reducing, so the
                        # accumulator never grows without bound.
                        n = 0
                        for p in start:
                            c += p - lastp
                            yield (c + n * step) % modulo % modulo
                            lastp = p
                            n += 1
                            if n == steps:
                                n = 0
                                c = (c + steps * step) % modulo % modulo
                    else:
                        for p in start:
                            c += p - lastp
                            c = c % modulo % modulo
                            yield c
                            c += step
                            lastp = p
    else:
        c = start
        if isinstance(step, Iterable):
            if isinstance(modulo, Iterable):
                for m, s in xzip(modulo, step):
                    c = c % m % m
                    yield c
                    c += s
            else:
                # Only step is iterable. This should be optimized!
                for s in step:
                    c = c % modulo % modulo
                    yield c
                    c += s
        else:
            if isinstance(modulo, Iterable):
                for m in modulo:
                    c = c % m % m
                    yield c
                    c += step
            else:  # None is iterable
                if step == 0:
                    c = start % modulo % modulo
                    while True:
                        yield c
                else:
                    steps = int(modulo / step)
                    if steps > 1:
                        n = 0
                        while True:
                            yield (c + n * step) % modulo % modulo
                            n += 1
                            if n == steps:
                                n = 0
                                c = (c + steps * step) % modulo % modulo
                    else:
                        while True:
                            c = c % modulo % modulo
                            yield c
                            c += step
[ "def", "modulo_counter", "(", "start", "=", "0.", ",", "modulo", "=", "256.", ",", "step", "=", "1.", ")", ":", "if", "isinstance", "(", "start", ",", "collections", ".", "Iterable", ")", ":", "lastp", "=", "0.", "c", "=", "0.", "if", "isinstance", ...
Creates a lazy endless counter stream with the given modulo, i.e., its values ranges from 0. to the given "modulo", somewhat equivalent to:\n Stream(itertools.count(start, step)) % modulo\n Yet the given step can be an iterable, and doen't create unneeded big ints. All inputs can be float. Input order remembers slice/range inputs. All inputs can also be iterables. If any of them is an iterable, the end of this counter happen when there's no more data in one of those inputs. to continue iteration.
[ "Creates", "a", "lazy", "endless", "counter", "stream", "with", "the", "given", "modulo", "i", ".", "e", ".", "its", "values", "ranges", "from", "0", ".", "to", "the", "given", "modulo", "somewhat", "equivalent", "to", ":", "\\", "n", "Stream", "(", "i...
python
train
awslabs/sockeye
sockeye/train.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/train.py#L746-L754
def gradient_compression_params(args: argparse.Namespace) -> Optional[Dict[str, Any]]:
    """
    :param args: Arguments as returned by argparse.
    :return: Gradient compression parameters or None.
    """
    compression_type = args.gradient_compression_type
    # No compression configured -> nothing to pass on.
    if compression_type is None:
        return None
    return {'type': compression_type,
            'threshold': args.gradient_compression_threshold}
[ "def", "gradient_compression_params", "(", "args", ":", "argparse", ".", "Namespace", ")", "->", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "if", "args", ".", "gradient_compression_type", "is", "None", ":", "return", "None", "else", ":...
:param args: Arguments as returned by argparse. :return: Gradient compression parameters or None.
[ ":", "param", "args", ":", "Arguments", "as", "returned", "by", "argparse", ".", ":", "return", ":", "Gradient", "compression", "parameters", "or", "None", "." ]
python
train
ArabellaTech/aa-intercom
aa_intercom/models.py
https://github.com/ArabellaTech/aa-intercom/blob/f7e2ab63967529660f9c2fe4f1d0bf3cec1502c2/aa_intercom/models.py#L33-L42
def get_intercom_data(self):
    """Specify the data sent to Intercom API according to event type"""
    # Base payload: event type name, UTC creation timestamp, and metadata.
    payload = {
        "event_name": self.get_type_display(),  # event type
        "created_at": calendar.timegm(self.created.utctimetuple()),  # date
        "metadata": self.metadata,
    }
    # Attach the Intercom user id only when the event has a user.
    if self.user:
        payload["user_id"] = self.user.intercom_id
    return payload
[ "def", "get_intercom_data", "(", "self", ")", ":", "data", "=", "{", "\"event_name\"", ":", "self", ".", "get_type_display", "(", ")", ",", "# event type", "\"created_at\"", ":", "calendar", ".", "timegm", "(", "self", ".", "created", ".", "utctimetuple", "(...
Specify the data sent to Intercom API according to event type
[ "Specify", "the", "data", "sent", "to", "Intercom", "API", "according", "to", "event", "type" ]
python
train
totalgood/nlpia
src/nlpia/scripts/hunspell_to_json.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/hunspell_to_json.py#L65-L71
def generate_add_sub(self):
    '''Generate the affix in a short "<p|s>:[-strip]+affix" form to parse
    and remove some redundancy.'''
    # Prefix or Suffix
    if self.opt == "PFX":
        kind = 'p:'
    else:
        kind = 's:'
    stripped = '' if self.char_to_strip == '' else '-' + self.char_to_strip
    return ''.join((kind, stripped, '+', self.affix))
[ "def", "generate_add_sub", "(", "self", ")", ":", "# Prefix or Suffix", "affix_type", "=", "'p:'", "if", "self", ".", "opt", "==", "\"PFX\"", "else", "'s:'", "remove_char", "=", "'-'", "+", "self", ".", "char_to_strip", "if", "self", ".", "char_to_strip", "!...
Generates prefixes/suffixes in a short form to parse and remove some redundancy
[ "Generates", "prefixes", "/", "suffixes", "in", "a", "short", "form", "to", "parse", "and", "remove", "some", "redundancy" ]
python
train
moonso/query_phenomizer
query_phenomizer/utils.py
https://github.com/moonso/query_phenomizer/blob/19883ed125e224fc17cbb71240428fd60082e017/query_phenomizer/utils.py#L80-L104
def query_phenomizer(usr, pwd, *hpo_terms):
    """
    Query the phenomizer web tool

    Arguments:
        usr (str): A username for phenomizer
        pwd (str): A password for phenomizer
        hpo_terms (list): A list with hpo terms

    Returns:
        raw_answer : The raw result from phenomizer
    """
    # Endpoint and query parameters expected by the Phenomizer service.
    endpoint = 'http://compbio.charite.de/phenomizer/phenomizer/PhenomizerServiceURI'
    params = {
        'mobilequery': 'true',
        'terms': ','.join(hpo_terms),
        'username': usr,
        'password': pwd,
    }
    try:
        response = requests.get(endpoint, params=params, timeout=10)
    except requests.exceptions.Timeout:
        raise RuntimeError("The request timed out.")

    if response.status_code != requests.codes.ok:
        raise RuntimeError("Phenomizer returned a bad status code: %s" % response.status_code)

    response.encoding = 'utf-8'
    return response
[ "def", "query_phenomizer", "(", "usr", ",", "pwd", ",", "*", "hpo_terms", ")", ":", "base_string", "=", "'http://compbio.charite.de/phenomizer/phenomizer/PhenomizerServiceURI'", "questions", "=", "{", "'mobilequery'", ":", "'true'", ",", "'terms'", ":", "','", ".", ...
Query the phenomizer web tool Arguments: usr (str): A username for phenomizer pwd (str): A password for phenomizer hpo_terms (list): A list with hpo terms Returns: raw_answer : The raw result from phenomizer
[ "Query", "the", "phenomizer", "web", "tool", "Arguments", ":", "usr", "(", "str", ")", ":", "A", "username", "for", "phenomizer", "pwd", "(", "str", ")", ":", "A", "password", "for", "phenomizer", "hpo_terms", "(", "list", ")", ":", "A", "list", "with"...
python
train
prompt-toolkit/pymux
pymux/layout.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/layout.py#L358-L460
def _create_layout(self):
    """
    Generate the main prompt_toolkit layout.

    Builds a FloatContainer: the pane area plus status bar as the base
    content, with the message toolbar, command/prompt toolbars, keys
    pop-up and completions menu layered on top as floats.
    """
    waits_for_confirmation = WaitsForConfirmation(self.pymux)

    return FloatContainer(
        content=HSplit([
            # The main window.
            FloatContainer(
                Background(),
                floats=[
                    # Sized dynamically so it tracks the client terminal size.
                    Float(width=lambda: self.pymux.get_window_size().columns,
                          height=lambda: self.pymux.get_window_size().rows,
                          content=DynamicBody(self.pymux))
                ]),
            # Status bar.
            ConditionalContainer(
                content=VSplit([
                    # Left.
                    Window(
                        height=1,
                        width=(lambda: D(max=self.pymux.status_left_length)),
                        dont_extend_width=True,
                        content=FormattedTextControl(self._get_status_left_tokens)),
                    # List of windows in the middle.
                    Window(
                        height=1,
                        char=' ',
                        align=self._get_align,
                        content=FormattedTextControl(self._get_status_tokens)),
                    # Right.
                    Window(
                        height=1,
                        width=(lambda: D(max=self.pymux.status_right_length)),
                        dont_extend_width=True,
                        align=WindowAlign.RIGHT,
                        content=FormattedTextControl(self._get_status_right_tokens))
                ], z_index=Z_INDEX.STATUS_BAR, style='class:statusbar'),
                # Status bar can be toggled off (tmux "set status off").
                filter=Condition(lambda: self.pymux.enable_status),
            )
        ]),
        floats=[
            # Transient message line, shown just above the status bar.
            Float(bottom=1, left=0, z_index=Z_INDEX.MESSAGE_TOOLBAR,
                  content=MessageToolbar(self.client_state)),
            Float(left=0, right=0, bottom=0, content=HSplit([
                # Wait for confirmation toolbar.
                ConditionalContainer(
                    content=Window(
                        height=1,
                        content=ConfirmationToolbar(self.pymux, self.client_state),
                        z_index=Z_INDEX.COMMAND_LINE,
                    ),
                    filter=waits_for_confirmation,
                ),
                # ':' prompt toolbar.
                ConditionalContainer(
                    content=Window(
                        height=D(min=1),  # Can be more if the command is multiline.
                        style='class:commandline',
                        dont_extend_height=True,
                        content=BufferControl(
                            buffer=self.client_state.command_buffer,
                            preview_search=True,
                            input_processors=[
                                AppendAutoSuggestion(),
                                BeforeInput(':', style='class:commandline-prompt'),
                                ShowArg(),
                                HighlightSelectionProcessor(),
                            ]),
                        z_index=Z_INDEX.COMMAND_LINE,
                    ),
                    # Only visible while the ':' command buffer has focus.
                    filter=has_focus(self.client_state.command_buffer),
                ),
                # Other command-prompt commands toolbar.
                ConditionalContainer(
                    content=Window(
                        height=1,
                        style='class:commandline',
                        content=BufferControl(
                            buffer=self.client_state.prompt_buffer,
                            input_processors=[
                                BeforeInput(self._before_prompt_command_tokens),
                                AppendAutoSuggestion(),
                                HighlightSelectionProcessor(),
                            ]),
                        z_index=Z_INDEX.COMMAND_LINE,
                    ),
                    filter=has_focus(self.client_state.prompt_buffer),
                ),
            ])),
            # Keys pop-up.
            Float(
                content=ConditionalContainer(
                    content=self.popup_dialog,
                    filter=Condition(lambda: self.client_state.display_popup),
                ),
                left=3, right=3, top=5, bottom=5,
                z_index=Z_INDEX.POPUP,
            ),
            # Completions menu follows the cursor position.
            Float(xcursor=True,
                  ycursor=True,
                  content=CompletionsMenu(max_height=12)),
        ]
    )
[ "def", "_create_layout", "(", "self", ")", ":", "waits_for_confirmation", "=", "WaitsForConfirmation", "(", "self", ".", "pymux", ")", "return", "FloatContainer", "(", "content", "=", "HSplit", "(", "[", "# The main window.", "FloatContainer", "(", "Background", "...
Generate the main prompt_toolkit layout.
[ "Generate", "the", "main", "prompt_toolkit", "layout", "." ]
python
train
python-bonobo/bonobo
bonobo/structs/graphs.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/structs/graphs.py#L85-L91
def add_node(self, c):
    """
    Add a node without connections in this graph and returns its index.
    """
    node_index = len(self.nodes)
    # Register the node with an empty outgoing-edge set before appending,
    # so the edges table always has an entry for every index.
    self.edges[node_index] = set()
    self.nodes.append(c)
    return node_index
[ "def", "add_node", "(", "self", ",", "c", ")", ":", "idx", "=", "len", "(", "self", ".", "nodes", ")", "self", ".", "edges", "[", "idx", "]", "=", "set", "(", ")", "self", ".", "nodes", ".", "append", "(", "c", ")", "return", "idx" ]
Add a node without connections in this graph and returns its index.
[ "Add", "a", "node", "without", "connections", "in", "this", "graph", "and", "returns", "its", "index", "." ]
python
train
titusjan/argos
argos/inspector/abstract.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/abstract.py#L167-L222
def updateContents(self, reason=None, initiator=None):  # TODO: reason mandatory?
    """ Tries to draw the widget contents with the updated RTI.
        Shows the error page in case an exception is raised while drawing the contents.
        Descendants should override _drawContents, not updateContents.

        During the call of _drawContents, the updating of the configuration tree is blocked to
        avoid circular effects. After that, a call to self.config.refreshFromTarget() is made to
        refresh the configuration tree with possible new values from the inspector (the
        inspector is the configuration's target, hence the name).

        The reason parameter is a string (one of the UpdateReason values) that indicates why
        the inspector contents was updated. This can, for instance, be used to optimize
        drawing the inspector contents. Note that the reason may be undefined (None).

        The initiator may contain the object that initiated the updated. The type depends on
        the reason. At the moment the initiator is only implemented for the "config changed"
        reason. In this case the initiator will be the Config Tree Item (CTI) that has changed.
    """
    UpdateReason.checkValid(reason)
    logger.debug("---- Inspector updateContents, reason: {}, initiator: {}"
                 .format(reason, initiator))
    logger.debug("Inspector: {}".format(self))
    logger.debug("RTI: {}".format(self.collector.rti))
    try:
        # Show the normal contents page; on failure the except-handler
        # below switches to the error page instead.
        self.setCurrentIndex(self.CONTENTS_PAGE_IDX)
        # Block config-tree refreshes while drawing to avoid circular updates.
        wasBlocked = self.config.model.setRefreshBlocked(True)
        try:
            self._drawContents(reason=reason, initiator=initiator)
            logger.debug("_drawContents finished successfully")
            # Update the config tree from the (possibly) new state of the PgLinePlot1d
            # inspector, e.g. the axis range may have changed while drawing.
            # self.config.updateTarget()
            # TODO: enable this here (instead of doing it in the inspector._drawContents when needed)?
        finally:
            # Always restore the previous blocked state, even when drawing raised.
            self.config.model.setRefreshBlocked(wasBlocked)
        # Call refreshFromTarget in case the newly applied configuration resulted in a change
        # of the state of the configuration's target's (i.e. the inspector state)
        logger.debug("_drawContents finished successfully, calling refreshFromTarget...")
        self.config.refreshFromTarget()
        logger.debug("refreshFromTarget finished successfully")
    except InvalidDataError as ex:
        # Invalid data is an expected condition: log it and keep the contents page.
        logger.info("Unable to draw the inspector contents: {}".format(ex))
    except Exception as ex:
        if DEBUGGING:  # TODO: enable
            raise
        # Unexpected failure: clear whatever was partially drawn and
        # present the error page with the exception details.
        logger.error("Error while drawing the inspector: {} ----".format(ex))
        logger.exception(ex)
        self._clearContents()
        self.setCurrentIndex(self.ERROR_PAGE_IDX)
        self._showError(msg=str(ex), title=type_name(ex))
    else:
        logger.debug("---- updateContents finished successfully")
[ "def", "updateContents", "(", "self", ",", "reason", "=", "None", ",", "initiator", "=", "None", ")", ":", "# TODO: reason mandatory?", "UpdateReason", ".", "checkValid", "(", "reason", ")", "logger", ".", "debug", "(", "\"---- Inspector updateContents, reason: {}, ...
Tries to draw the widget contents with the updated RTI. Shows the error page in case an exception is raised while drawing the contents. Descendants should override _drawContents, not updateContents. During the call of _drawContents, the updating of the configuration tree is blocked to avoid circular effects. After that, a call to self.config.refreshFromTarget() is made to refresh the configuration tree with possible new values from the inspector (the inspector is the configuration's target, hence the name). The reason parameter is a string (one of the UpdateReason values) that indicates why the inspector contents whas updated. This can, for instance, be used to optimize drawing the inspector contents. Note that the reason may be undefined (None). The initiator may contain the object that initiated the updated. The type depends on the reason. At the moment the initiator is only implemented for the "config changed" reason. In this case the initiator will be the Config Tree Item (CTI that has changed).
[ "Tries", "to", "draw", "the", "widget", "contents", "with", "the", "updated", "RTI", ".", "Shows", "the", "error", "page", "in", "case", "an", "exception", "is", "raised", "while", "drawing", "the", "contents", ".", "Descendants", "should", "override", "_dra...
python
train
estnltk/estnltk
estnltk/disambiguator.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/disambiguator.py#L159-L178
def __create_proper_names_lexicon(self, docs):
    """Build a frequency lexicon of proper-name lemmas over the document
       collection (records how many times each proper-name lemma occurred).
    """
    lemma_freq = {}
    for doc in docs:
        for word in doc[WORDS]:
            # 1) Collect the unique proper-name lemmas attached to this
            #    word, if any (analyses POS-tagged 'H' are proper names).
            proper_lemmas = {analysis[ROOT]
                             for analysis in word[ANALYSIS]
                             if analysis[POSTAG] == 'H'}
            # 2) Record the frequency of each lemma.
            for lemma in proper_lemmas:
                lemma_freq[lemma] = lemma_freq.get(lemma, 0) + 1
    return lemma_freq
[ "def", "__create_proper_names_lexicon", "(", "self", ",", "docs", ")", ":", "lemmaFreq", "=", "dict", "(", ")", "for", "doc", "in", "docs", ":", "for", "word", "in", "doc", "[", "WORDS", "]", ":", "# 1) Leiame k6ik s6naga seotud unikaalsed pärisnimelemmad ", "# ...
Moodustab dokumendikollektsiooni põhjal pärisnimede sagedussõnastiku (mis kirjeldab, mitu korda iga pärisnimelemma esines);
[ "Moodustab", "dokumendikollektsiooni", "põhjal", "pärisnimede", "sagedussõnastiku", "(", "mis", "kirjeldab", "mitu", "korda", "iga", "pärisnimelemma", "esines", ")", ";" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/travasarou_2003.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/travasarou_2003.py#L95-L128
def _get_stddevs(self, rup, arias, stddev_types, sites):
    """
    Return standard deviations as defined in table 1, p. 200.

    :param rup: rupture object; only ``rup.mag`` is read here.
    :param arias: array of predicted Arias intensity values used to select
        the nonlinear intra-event sigma branch (Eq. 14).
    :param stddev_types: requested stddev types; each must be in
        ``DEFINED_FOR_STANDARD_DEVIATION_TYPES``.
    :param sites: site collection; ``sites.vs30`` sets the output length.
    :returns: list of stddev arrays, one per requested type.
    """
    stddevs = []
    # Magnitude dependent inter-event term (Eq. 13): constant plateaus
    # below M4.7 and above M7.6, linear interpolation in between.
    if rup.mag < 4.7:
        tau = 0.611
    elif rup.mag > 7.6:
        tau = 0.475
    else:
        tau = 0.611 - 0.047 * (rup.mag - 4.7)
    # Retrieve site-class dependent sigma
    sigma1, sigma2 = self._get_intra_event_sigmas(sites)
    sigma = np.copy(sigma1)
    # Implements the nonlinear intra-event sigma (Eq. 14)
    idx = arias >= 0.125
    sigma[idx] = sigma2[idx]
    # NOTE(review): the lower branch point here is 0.013 while the log
    # reference value below is 0.0132 -- presumably both should be the
    # paper's 0.0132; confirm against Travasarou et al. (2003) before
    # changing either constant.
    idx = np.logical_and(arias > 0.013, arias < 0.125)
    sigma[idx] = sigma1[idx] - 0.106 * (np.log(arias[idx]) -
                                        np.log(0.0132))
    # Total stddev combines inter- and intra-event terms in quadrature.
    sigma_total = np.sqrt(tau ** 2. + sigma ** 2.)

    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stddev_type == const.StdDev.TOTAL:
            stddevs.append(sigma_total)
        elif stddev_type == const.StdDev.INTRA_EVENT:
            stddevs.append(sigma)
        elif stddev_type == const.StdDev.INTER_EVENT:
            # tau is a scalar; broadcast it to a per-site array.
            stddevs.append(tau * np.ones_like(sites.vs30))
    return stddevs
[ "def", "_get_stddevs", "(", "self", ",", "rup", ",", "arias", ",", "stddev_types", ",", "sites", ")", ":", "stddevs", "=", "[", "]", "# Magnitude dependent inter-event term (Eq. 13)", "if", "rup", ".", "mag", "<", "4.7", ":", "tau", "=", "0.611", "elif", "...
Return standard deviations as defined in table 1, p. 200.
[ "Return", "standard", "deviations", "as", "defined", "in", "table", "1", "p", ".", "200", "." ]
python
train
guaix-ucm/numina
numina/array/interpolation.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L146-L153
def _eval(self, v, in_bounds, der): """Eval polynomial inside bounds.""" result = np.zeros_like(v, dtype='float') x_indices = np.searchsorted(self._x, v, side='rigth') ids = x_indices[in_bounds] - 1 u = v[in_bounds] - self._x[ids] result[in_bounds] = self._poly_eval(u, ids, der) return result
[ "def", "_eval", "(", "self", ",", "v", ",", "in_bounds", ",", "der", ")", ":", "result", "=", "np", ".", "zeros_like", "(", "v", ",", "dtype", "=", "'float'", ")", "x_indices", "=", "np", ".", "searchsorted", "(", "self", ".", "_x", ",", "v", ","...
Eval polynomial inside bounds.
[ "Eval", "polynomial", "inside", "bounds", "." ]
python
train
Miserlou/Zappa
zappa/cli.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L2691-L2697
def silence(self): """ Route all stdout to null. """ sys.stdout = open(os.devnull, 'w') sys.stderr = open(os.devnull, 'w')
[ "def", "silence", "(", "self", ")", ":", "sys", ".", "stdout", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "sys", ".", "stderr", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")" ]
Route all stdout to null.
[ "Route", "all", "stdout", "to", "null", "." ]
python
train
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L685-L694
def get_distbins(start=100, bins=2500, ratio=1.01): """ Get exponentially sized """ b = np.ones(bins, dtype="float64") b[0] = 100 for i in range(1, bins): b[i] = b[i - 1] * ratio bins = np.around(b).astype(dtype="int") binsizes = np.diff(bins) return bins, binsizes
[ "def", "get_distbins", "(", "start", "=", "100", ",", "bins", "=", "2500", ",", "ratio", "=", "1.01", ")", ":", "b", "=", "np", ".", "ones", "(", "bins", ",", "dtype", "=", "\"float64\"", ")", "b", "[", "0", "]", "=", "100", "for", "i", "in", ...
Get exponentially sized
[ "Get", "exponentially", "sized" ]
python
train
ranaroussi/ezibpy
ezibpy/ezibpy.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L372-L390
def handleConnectionState(self, msg): """:Return: True if IBPy message `msg` indicates the connection is unavailable for any reason, else False.""" self.connected = not (msg.typeName == "error" and msg.errorCode in dataTypes["DISCONNECT_ERROR_CODES"]) if self.connected: self.connection_tracking["errors"] = [] self.connection_tracking["disconnected"] = False if msg.typeName == dataTypes["MSG_CURRENT_TIME"] and not self.connection_tracking["connected"]: self.log.info("[CONNECTION TO IB ESTABLISHED]") self.connection_tracking["connected"] = True self.ibCallback(caller="handleConnectionOpened", msg="<connectionOpened>") else: self.connection_tracking["connected"] = False if not self.connection_tracking["disconnected"]: self.connection_tracking["disconnected"] = True self.log.info("[CONNECTION TO IB LOST]")
[ "def", "handleConnectionState", "(", "self", ",", "msg", ")", ":", "self", ".", "connected", "=", "not", "(", "msg", ".", "typeName", "==", "\"error\"", "and", "msg", ".", "errorCode", "in", "dataTypes", "[", "\"DISCONNECT_ERROR_CODES\"", "]", ")", "if", "...
:Return: True if IBPy message `msg` indicates the connection is unavailable for any reason, else False.
[ ":", "Return", ":", "True", "if", "IBPy", "message", "msg", "indicates", "the", "connection", "is", "unavailable", "for", "any", "reason", "else", "False", "." ]
python
train
gbowerman/azurerm
azurerm/networkrp.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/networkrp.py#L498-L515
def list_lb_nat_rules(access_token, subscription_id, resource_group, lb_name): '''List the inbound NAT rules for a load balancer. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. lb_name (str): Name of the load balancer. Returns: HTTP response. JSON body of load balancer NAT rules. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, 'inboundNatRules?api-version=', NETWORK_API]) return do_get(endpoint, access_token)
[ "def", "list_lb_nat_rules", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "lb_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourc...
List the inbound NAT rules for a load balancer. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. lb_name (str): Name of the load balancer. Returns: HTTP response. JSON body of load balancer NAT rules.
[ "List", "the", "inbound", "NAT", "rules", "for", "a", "load", "balancer", "." ]
python
train
shichao-an/115wangpan
u115/api.py
https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L585-L602
def mkdir(self, parent, name): """ Create a directory :param parent: the parent directory :param str name: the name of the new directory :return: the new directory :rtype: :class:`.Directory` """ pid = None cid = None if isinstance(parent, Directory): pid = parent.cid else: raise('Invalid Directory instance.') cid = self._req_files_add(pid, name)['cid'] return self._load_directory(cid)
[ "def", "mkdir", "(", "self", ",", "parent", ",", "name", ")", ":", "pid", "=", "None", "cid", "=", "None", "if", "isinstance", "(", "parent", ",", "Directory", ")", ":", "pid", "=", "parent", ".", "cid", "else", ":", "raise", "(", "'Invalid Directory...
Create a directory :param parent: the parent directory :param str name: the name of the new directory :return: the new directory :rtype: :class:`.Directory`
[ "Create", "a", "directory" ]
python
train
saltstack/salt
salt/states/grafana_datasource.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/grafana_datasource.py#L39-L116
def present(name, type, url, access='proxy', user='', password='', database='', basic_auth=False, basic_auth_user='', basic_auth_password='', is_default=False, json_data=None, profile='grafana'): ''' Ensure that a data source is present. name Name of the data source. type Which type of data source it is ('graphite', 'influxdb' etc.). url The URL to the data source API. user Optional - user to authenticate with the data source password Optional - password to authenticate with the data source basic_auth Optional - set to True to use HTTP basic auth to authenticate with the data source. basic_auth_user Optional - HTTP basic auth username. basic_auth_password Optional - HTTP basic auth password. is_default Default: False ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} datasource = _get_datasource(profile, name) data = _get_json_data(name, type, url, access, user, password, database, basic_auth, basic_auth_user, basic_auth_password, is_default, json_data) if datasource: requests.put( _get_url(profile, datasource['id']), data, headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) ret['result'] = True ret['changes'] = _diff(datasource, data) if ret['changes']['new'] or ret['changes']['old']: ret['comment'] = 'Data source {0} updated'.format(name) else: ret['changes'] = {} ret['comment'] = 'Data source {0} already up-to-date'.format(name) else: requests.post( '{0}/api/datasources'.format(profile['grafana_url']), data, headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) ret['result'] = True ret['comment'] = 'New data source {0} added'.format(name) ret['changes'] = data return ret
[ "def", "present", "(", "name", ",", "type", ",", "url", ",", "access", "=", "'proxy'", ",", "user", "=", "''", ",", "password", "=", "''", ",", "database", "=", "''", ",", "basic_auth", "=", "False", ",", "basic_auth_user", "=", "''", ",", "basic_aut...
Ensure that a data source is present. name Name of the data source. type Which type of data source it is ('graphite', 'influxdb' etc.). url The URL to the data source API. user Optional - user to authenticate with the data source password Optional - password to authenticate with the data source basic_auth Optional - set to True to use HTTP basic auth to authenticate with the data source. basic_auth_user Optional - HTTP basic auth username. basic_auth_password Optional - HTTP basic auth password. is_default Default: False
[ "Ensure", "that", "a", "data", "source", "is", "present", "." ]
python
train
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L188-L201
def skip(self, other): '''(<<) Ends with a specified parser, and at the end parser consumed the end flag.''' @Parser def ends_with_parser(text, index): res = self(text, index) if not res.status: return res end = other(text, res.index) if end.status: return Value.success(end.index, res.value) else: return Value.failure(end.index, 'ends with {}'.format(end.expected)) return ends_with_parser
[ "def", "skip", "(", "self", ",", "other", ")", ":", "@", "Parser", "def", "ends_with_parser", "(", "text", ",", "index", ")", ":", "res", "=", "self", "(", "text", ",", "index", ")", "if", "not", "res", ".", "status", ":", "return", "res", "end", ...
(<<) Ends with a specified parser, and at the end parser consumed the end flag.
[ "(", "<<", ")", "Ends", "with", "a", "specified", "parser", "and", "at", "the", "end", "parser", "consumed", "the", "end", "flag", "." ]
python
train
openstack/networking-cisco
networking_cisco/ml2_drivers/ucsm/ucsm_db.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/ucsm/ucsm_db.py#L122-L133
def add_vnic_template(self, vlan_id, ucsm_ip, vnic_template, physnet): """Adds an entry for a vlan_id on a SP template to the table.""" if not self.get_vnic_template_vlan_entry(vlan_id, vnic_template, ucsm_ip, physnet): vnic_t = ucsm_model.VnicTemplate(vlan_id=vlan_id, vnic_template=vnic_template, device_id=ucsm_ip, physnet=physnet, updated_on_ucs=False) with self.session.begin(subtransactions=True): self.session.add(vnic_t) return vnic_t
[ "def", "add_vnic_template", "(", "self", ",", "vlan_id", ",", "ucsm_ip", ",", "vnic_template", ",", "physnet", ")", ":", "if", "not", "self", ".", "get_vnic_template_vlan_entry", "(", "vlan_id", ",", "vnic_template", ",", "ucsm_ip", ",", "physnet", ")", ":", ...
Adds an entry for a vlan_id on a SP template to the table.
[ "Adds", "an", "entry", "for", "a", "vlan_id", "on", "a", "SP", "template", "to", "the", "table", "." ]
python
train
mlenzen/collections-extended
collections_extended/setlists.py
https://github.com/mlenzen/collections-extended/blob/ee9e86f6bbef442dbebcb3a5970642c5c969e2cf/collections_extended/setlists.py#L300-L304
def pop(self, index=-1): """Remove and return the item at index.""" value = self._list.pop(index) del self._dict[value] return value
[ "def", "pop", "(", "self", ",", "index", "=", "-", "1", ")", ":", "value", "=", "self", ".", "_list", ".", "pop", "(", "index", ")", "del", "self", ".", "_dict", "[", "value", "]", "return", "value" ]
Remove and return the item at index.
[ "Remove", "and", "return", "the", "item", "at", "index", "." ]
python
train
grampajoe/pymosh
pymosh/riff.py
https://github.com/grampajoe/pymosh/blob/2a17a0271fda939528edc31572940d3b676f8a47/pymosh/riff.py#L132-L136
def remove(self, child): """Remove a child element.""" for i in range(len(self)): if self[i] == child: del self[i]
[ "def", "remove", "(", "self", ",", "child", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ")", ")", ":", "if", "self", "[", "i", "]", "==", "child", ":", "del", "self", "[", "i", "]" ]
Remove a child element.
[ "Remove", "a", "child", "element", "." ]
python
train
nikcub/floyd
floyd/core/multiopt.py
https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/core/multiopt.py#L383-L441
def run(self): """ Run the multiopt parser """ self.parser = MultioptOptionParser( usage="%prog <command> [options] [args]", prog=self.clsname, version=self.version, option_list=self.global_options, description=self.desc_short, commands=self.command_set, epilog=self.footer ) try: self.options, self.args = self.parser.parse_args(self.argv) except Exception, e: print str(e) pass if len(self.args) < 1: self.parser.print_lax_help() return 2 self.command = self.args.pop(0) showHelp = False if self.command == 'help': if len(self.args) < 1: self.parser.print_lax_help() return 2 else: self.command = self.args.pop() showHelp = True if self.command not in self.valid_commands: self.parser.print_cmd_error(self.command) return 2 self.command_set[self.command].set_cmdname(self.command) subcmd_parser = self.command_set[self.command].get_parser(self.clsname, self.version, self.global_options) subcmd_options, subcmd_args = subcmd_parser.parse_args(self.args) if showHelp: subcmd_parser.print_help_long() return 1 try: self.command_set[self.command].func(subcmd_options, *subcmd_args) except (CommandError, TypeError), e: # self.parser.print_exec_error(self.command, str(e)) subcmd_parser.print_exec_error(self.command, str(e)) print # @TODO show command help # self.parser.print_lax_help() return 2 return 1
[ "def", "run", "(", "self", ")", ":", "self", ".", "parser", "=", "MultioptOptionParser", "(", "usage", "=", "\"%prog <command> [options] [args]\"", ",", "prog", "=", "self", ".", "clsname", ",", "version", "=", "self", ".", "version", ",", "option_list", "="...
Run the multiopt parser
[ "Run", "the", "multiopt", "parser" ]
python
train
gabstopper/smc-python
smc/actions/_search.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/actions/_search.py#L221-L231
def element_by_href_as_smcresult(href, params=None): """ Get specified element returned as an SMCResult object :param href: href direct link to object :return: :py:class:`smc.api.web.SMCResult` with etag, href and element field holding json, else None """ if href: element = fetch_json_by_href(href, params=params) if element: return element
[ "def", "element_by_href_as_smcresult", "(", "href", ",", "params", "=", "None", ")", ":", "if", "href", ":", "element", "=", "fetch_json_by_href", "(", "href", ",", "params", "=", "params", ")", "if", "element", ":", "return", "element" ]
Get specified element returned as an SMCResult object :param href: href direct link to object :return: :py:class:`smc.api.web.SMCResult` with etag, href and element field holding json, else None
[ "Get", "specified", "element", "returned", "as", "an", "SMCResult", "object" ]
python
train
Qiskit/qiskit-terra
qiskit/transpiler/passes/mapping/lookahead_swap.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/passes/mapping/lookahead_swap.py#L260-L274
def _copy_circuit_metadata(source_dag, coupling_map): """Return a copy of source_dag with metadata but empty. Generate only a single qreg in the output DAG, matching the size of the coupling_map.""" target_dag = DAGCircuit() target_dag.name = source_dag.name for creg in source_dag.cregs.values(): target_dag.add_creg(creg) device_qreg = QuantumRegister(len(coupling_map.physical_qubits), 'q') target_dag.add_qreg(device_qreg) return target_dag
[ "def", "_copy_circuit_metadata", "(", "source_dag", ",", "coupling_map", ")", ":", "target_dag", "=", "DAGCircuit", "(", ")", "target_dag", ".", "name", "=", "source_dag", ".", "name", "for", "creg", "in", "source_dag", ".", "cregs", ".", "values", "(", ")",...
Return a copy of source_dag with metadata but empty. Generate only a single qreg in the output DAG, matching the size of the coupling_map.
[ "Return", "a", "copy", "of", "source_dag", "with", "metadata", "but", "empty", ".", "Generate", "only", "a", "single", "qreg", "in", "the", "output", "DAG", "matching", "the", "size", "of", "the", "coupling_map", "." ]
python
test
not-na/peng3d
peng3d/peng.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/peng.py#L194-L229
def sendEvent(self,event,data=None): """ Sends an event with attached data. ``event`` should be a string of format ``<namespace>:<category1>.<subcategory2>.<name>``\ . There may be an arbitrary amount of subcategories. Also note that this format is not strictly enforced, but rather recommended by convention. ``data`` may be any Python Object, but it usually is a dictionary containing relevant parameters. For example, most built-in events use a dictionary containing at least the ``peng`` key set to an instance of this class. If there are no handlers for the event, a corresponding message will be printed to the log file. To prevent spam, the maximum amount of ignored messages can be configured via :confval:`events.maxignore` and defaults to 3. If the config value :confval:`debug.events.dumpfile` is a file path, the event type will be added to an internal list and be saved to the given file during program exit. """ if self.cfg["debug.events.dumpfile"]!="" and event not in self.event_list: self.event_list.add(event) if event not in self.eventHandlers: if event not in self.events_ignored or self.events_ignored[event]<=self.cfg["events.maxignore"]: # Prevents spamming logfile with ignored event messages # TODO: write to logfile # Needs a logging module first... self.events_ignored[event] = self.events_ignored.get(event,0)+1 return for handler in self.eventHandlers[event]: f = handler[0] try: f(event,data) except Exception: if not handler[1]: raise else: # TODO: write to logfile if self.cfg["events.removeonerror"]: self.delEventListener(event,f)
[ "def", "sendEvent", "(", "self", ",", "event", ",", "data", "=", "None", ")", ":", "if", "self", ".", "cfg", "[", "\"debug.events.dumpfile\"", "]", "!=", "\"\"", "and", "event", "not", "in", "self", ".", "event_list", ":", "self", ".", "event_list", "....
Sends an event with attached data. ``event`` should be a string of format ``<namespace>:<category1>.<subcategory2>.<name>``\ . There may be an arbitrary amount of subcategories. Also note that this format is not strictly enforced, but rather recommended by convention. ``data`` may be any Python Object, but it usually is a dictionary containing relevant parameters. For example, most built-in events use a dictionary containing at least the ``peng`` key set to an instance of this class. If there are no handlers for the event, a corresponding message will be printed to the log file. To prevent spam, the maximum amount of ignored messages can be configured via :confval:`events.maxignore` and defaults to 3. If the config value :confval:`debug.events.dumpfile` is a file path, the event type will be added to an internal list and be saved to the given file during program exit.
[ "Sends", "an", "event", "with", "attached", "data", ".", "event", "should", "be", "a", "string", "of", "format", "<namespace", ">", ":", "<category1", ">", ".", "<subcategory2", ">", ".", "<name", ">", "\\", ".", "There", "may", "be", "an", "arbitrary", ...
python
test
theosysbio/means
src/means/simulation/trajectory.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/simulation/trajectory.py#L322-L345
def _arithmetic_operation(self, other, operation): """ Applies an operation between the values of a trajectories and a scalar or between the respective values of two trajectories. In the latter case, trajectories should have equal descriptions and time points """ if isinstance(other, TrajectoryWithSensitivityData): if self.description != other.description: raise Exception("Cannot add trajectories with different descriptions") if not np.array_equal(self.timepoints, other.timepoints): raise Exception("Cannot add trajectories with different time points") new_values = operation(self.values, other.values) new_sensitivity_data = [operation(ssd, osd) for ssd, osd in zip(self.sensitivity_data, other.sensitivity_data)] elif isinstance(other, numbers.Real): new_values = operation(self.values, float(other)) new_sensitivity_data = [operation(ssd, float(other)) for ssd in self.sensitivity_data] else: raise Exception("Arithmetic operations is between two `TrajectoryWithSensitivityData`\ objects or a `TrajectoryWithSensitivityData` and a scalar.") return TrajectoryWithSensitivityData(self.timepoints, new_values, self.description, new_sensitivity_data )
[ "def", "_arithmetic_operation", "(", "self", ",", "other", ",", "operation", ")", ":", "if", "isinstance", "(", "other", ",", "TrajectoryWithSensitivityData", ")", ":", "if", "self", ".", "description", "!=", "other", ".", "description", ":", "raise", "Excepti...
Applies an operation between the values of a trajectories and a scalar or between the respective values of two trajectories. In the latter case, trajectories should have equal descriptions and time points
[ "Applies", "an", "operation", "between", "the", "values", "of", "a", "trajectories", "and", "a", "scalar", "or", "between", "the", "respective", "values", "of", "two", "trajectories", ".", "In", "the", "latter", "case", "trajectories", "should", "have", "equal...
python
train
Karaage-Cluster/python-tldap
tldap/fields.py
https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/fields.py#L195-L199
def value_to_db(self, value): """ Returns field's single value prepared for saving into a database. """ if isinstance(value, six.string_types): value = value.encode("utf_8") return value
[ "def", "value_to_db", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "value", "=", "value", ".", "encode", "(", "\"utf_8\"", ")", "return", "value" ]
Returns field's single value prepared for saving into a database.
[ "Returns", "field", "s", "single", "value", "prepared", "for", "saving", "into", "a", "database", "." ]
python
train
jjjake/internetarchive
internetarchive/iarequest.py
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/iarequest.py#L352-L463
def prepare_metadata(metadata, source_metadata=None, append=False, append_list=False): """Prepare a metadata dict for an :class:`S3PreparedRequest <S3PreparedRequest>` or :class:`MetadataPreparedRequest <MetadataPreparedRequest>` object. :type metadata: dict :param metadata: The metadata dict to be prepared. :type source_metadata: dict :param source_metadata: (optional) The source metadata for the item being modified. :rtype: dict :returns: A filtered metadata dict to be used for generating IA S3 and Metadata API requests. """ # Make a deepcopy of source_metadata if it exists. A deepcopy is # necessary to avoid modifying the original dict. source_metadata = {} if not source_metadata else copy.deepcopy(source_metadata) prepared_metadata = {} # Functions for dealing with metadata keys containing indexes. def get_index(key): match = re.search(r'(?<=\[)\d+(?=\])', key) if match is not None: return int(match.group()) def rm_index(key): return key.split('[')[0] # Create indexed_keys counter dict. i.e.: {'subject': 3} -- subject # (with the index removed) appears 3 times in the metadata dict. indexed_keys = {} for key in metadata: # Convert number values to strings! if isinstance(metadata[key], (six.integer_types, float, complex)): metadata[key] = str(metadata[key]) if get_index(key) is None: continue count = len([x for x in metadata if rm_index(x) == rm_index(key)]) indexed_keys[rm_index(key)] = count # Initialize the values for all indexed_keys. for key in indexed_keys: # Increment the counter so we know how many values the final # value in prepared_metadata should have. indexed_keys[key] += len(source_metadata.get(key, [])) # Intialize the value in the prepared_metadata dict. prepared_metadata[key] = source_metadata.get(key, []) if not isinstance(prepared_metadata[key], list): prepared_metadata[key] = [prepared_metadata[key]] # Fill the value of the prepared_metadata key with None values # so all indexed items can be indexed in order. 
while len(prepared_metadata[key]) < indexed_keys[key]: prepared_metadata[key].append(None) # Index all items which contain an index. for key in metadata: # Insert values from indexed keys into prepared_metadata dict. if (rm_index(key) in indexed_keys): try: prepared_metadata[rm_index(key)][get_index(key)] = metadata[key] except IndexError: prepared_metadata[rm_index(key)].append(metadata[key]) # If append is True, append value to source_metadata value. elif append_list and source_metadata.get(key): if not isinstance(metadata[key], list): metadata[key] = [metadata[key]] for v in metadata[key]: if not isinstance(source_metadata[key], list): if v in [source_metadata[key]]: continue else: if v in source_metadata[key]: continue if not isinstance(source_metadata[key], list): prepared_metadata[key] = [source_metadata[key]] else: prepared_metadata[key] = source_metadata[key] prepared_metadata[key].append(v) elif append and source_metadata.get(key): prepared_metadata[key] = '{0} {1}'.format( source_metadata[key], metadata[key]) else: prepared_metadata[key] = metadata[key] # Remove values from metadata if value is REMOVE_TAG. _done = [] for key in indexed_keys: # Filter None values from items with arrays as values prepared_metadata[key] = [v for v in prepared_metadata[key] if v] # Only filter the given indexed key if it has not already been # filtered. if key not in _done: indexes = [] for k in metadata: if not get_index(k): continue elif not rm_index(k) == key: continue elif not metadata[k] == 'REMOVE_TAG': continue else: indexes.append(get_index(k)) # Delete indexed values in reverse to not throw off the # subsequent indexes. for i in sorted(indexes, reverse=True): del prepared_metadata[key][i] _done.append(key) return prepared_metadata
[ "def", "prepare_metadata", "(", "metadata", ",", "source_metadata", "=", "None", ",", "append", "=", "False", ",", "append_list", "=", "False", ")", ":", "# Make a deepcopy of source_metadata if it exists. A deepcopy is", "# necessary to avoid modifying the original dict.", "...
Prepare a metadata dict for an :class:`S3PreparedRequest <S3PreparedRequest>` or :class:`MetadataPreparedRequest <MetadataPreparedRequest>` object. :type metadata: dict :param metadata: The metadata dict to be prepared. :type source_metadata: dict :param source_metadata: (optional) The source metadata for the item being modified. :rtype: dict :returns: A filtered metadata dict to be used for generating IA S3 and Metadata API requests.
[ "Prepare", "a", "metadata", "dict", "for", "an", ":", "class", ":", "S3PreparedRequest", "<S3PreparedRequest", ">", "or", ":", "class", ":", "MetadataPreparedRequest", "<MetadataPreparedRequest", ">", "object", "." ]
python
train
Phylliade/ikpy
src/ikpy/geometry_utils.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/geometry_utils.py#L51-L53
def symbolic_rotation_matrix(phi, theta, symbolic_psi): """Retourne une matrice de rotation où psi est symbolique""" return sympy.Matrix(Rz_matrix(phi)) * sympy.Matrix(Rx_matrix(theta)) * symbolic_Rz_matrix(symbolic_psi)
[ "def", "symbolic_rotation_matrix", "(", "phi", ",", "theta", ",", "symbolic_psi", ")", ":", "return", "sympy", ".", "Matrix", "(", "Rz_matrix", "(", "phi", ")", ")", "*", "sympy", ".", "Matrix", "(", "Rx_matrix", "(", "theta", ")", ")", "*", "symbolic_Rz...
Retourne une matrice de rotation où psi est symbolique
[ "Retourne", "une", "matrice", "de", "rotation", "où", "psi", "est", "symbolique" ]
python
train
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L604-L624
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path, res_x=150, res_y=150, extra_args=None): """Use the pdftoppm program to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Extra arguments can be passed as a list in extra_args. Return the command output.""" if extra_args is None: extra_args = [] if not pdftoppm_executable: init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True) if old_pdftoppm_version: # We only have -r, not -rx and -ry. command = [pdftoppm_executable] + extra_args + ["-r", res_x, pdf_file_name, root_output_file_path] else: command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y, pdf_file_name, root_output_file_path] comm_output = get_external_subprocess_output(command) return comm_output
[ "def", "render_pdf_file_to_image_files_pdftoppm_ppm", "(", "pdf_file_name", ",", "root_output_file_path", ",", "res_x", "=", "150", ",", "res_y", "=", "150", ",", "extra_args", "=", "None", ")", ":", "if", "extra_args", "is", "None", ":", "extra_args", "=", "[",...
Use the pdftoppm program to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Extra arguments can be passed as a list in extra_args. Return the command output.
[ "Use", "the", "pdftoppm", "program", "to", "render", "a", "PDF", "file", "to", ".", "png", "images", ".", "The", "root_output_file_path", "is", "prepended", "to", "all", "the", "output", "files", "which", "have", "numbers", "and", "extensions", "added", ".",...
python
train
fakedrake/overlay_parse
overlay_parse/matchers.py
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/matchers.py#L68-L82
def fit_overlays(self, text, start=None, end=None, **kw): """ Get an overlay thet fits the range [start, end). """ _text = text[start or 0:] if end: _text = _text[:end] m = self.regex.match(unicode(_text)) if m: yield Overlay(text, (start + m.start(), start + m.end()), props=self.props, value=self.value(rxmatch=m))
[ "def", "fit_overlays", "(", "self", ",", "text", ",", "start", "=", "None", ",", "end", "=", "None", ",", "*", "*", "kw", ")", ":", "_text", "=", "text", "[", "start", "or", "0", ":", "]", "if", "end", ":", "_text", "=", "_text", "[", ":", "e...
Get an overlay thet fits the range [start, end).
[ "Get", "an", "overlay", "thet", "fits", "the", "range", "[", "start", "end", ")", "." ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L883-L900
def z_axis_rotation(theta): """Generates a 3x3 rotation matrix for a rotation of angle theta about the z axis. Parameters ---------- theta : float amount to rotate, in radians Returns ------- :obj:`numpy.ndarray` of float A random 3x3 rotation matrix. """ R = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) return R
[ "def", "z_axis_rotation", "(", "theta", ")", ":", "R", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "-", "np", ".", "sin", "(", "theta", ")", ",", "0", "]", ",", "[", "np", ".", "sin", "(", "theta", ")",...
Generates a 3x3 rotation matrix for a rotation of angle theta about the z axis. Parameters ---------- theta : float amount to rotate, in radians Returns ------- :obj:`numpy.ndarray` of float A random 3x3 rotation matrix.
[ "Generates", "a", "3x3", "rotation", "matrix", "for", "a", "rotation", "of", "angle", "theta", "about", "the", "z", "axis", "." ]
python
train
inveniosoftware/invenio-files-rest
invenio_files_rest/alembic/2e97565eba72_create_files_rest_tables.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/alembic/2e97565eba72_create_files_rest_tables.py#L217-L226
def downgrade(): """Downgrade database.""" op.drop_table('files_multipartobject_part') op.drop_index(op.f('ix_files_object__mimetype'), table_name='files_object') op.drop_table('files_object') op.drop_table('files_multipartobject') op.drop_table('files_buckettags') op.drop_table('files_bucket') op.drop_table('files_location') op.drop_table('files_files')
[ "def", "downgrade", "(", ")", ":", "op", ".", "drop_table", "(", "'files_multipartobject_part'", ")", "op", ".", "drop_index", "(", "op", ".", "f", "(", "'ix_files_object__mimetype'", ")", ",", "table_name", "=", "'files_object'", ")", "op", ".", "drop_table",...
Downgrade database.
[ "Downgrade", "database", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xnavigationedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnavigationedit.py#L468-L476
def mouseDoubleClickEvent( self, event ): """ Overloads the system to enable editing when a user double clicks. :param event | <QMouseEvent> """ super(XNavigationEdit, self).mouseDoubleClickEvent(event) self.startEdit()
[ "def", "mouseDoubleClickEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XNavigationEdit", ",", "self", ")", ".", "mouseDoubleClickEvent", "(", "event", ")", "self", ".", "startEdit", "(", ")" ]
Overloads the system to enable editing when a user double clicks. :param event | <QMouseEvent>
[ "Overloads", "the", "system", "to", "enable", "editing", "when", "a", "user", "double", "clicks", ".", ":", "param", "event", "|", "<QMouseEvent", ">" ]
python
train
Yinzo/reprint
reprint/reprint.py
https://github.com/Yinzo/reprint/blob/5bf4129ad7da4086fdb07493e46afabb5fb08e93/reprint/reprint.py#L129-L180
def print_multi_line(content, force_single_line, sort_key): """ 'sort_key' 参数只在 dict 模式时有效 'sort_key' parameter only available in 'dict' mode """ global last_output_lines global overflow_flag global is_atty if not is_atty: if isinstance(content, list): for line in content: print(line) elif isinstance(content, dict): for k, v in sorted(content.items(), key=sort_key): print("{}: {}".format(k, v)) else: raise TypeError("Excepting types: list, dict. Got: {}".format(type(content))) return columns, rows = get_terminal_size() lines = lines_of_content(content, columns) if force_single_line is False and lines > rows: overflow_flag = True elif force_single_line is True and len(content) > rows: overflow_flag = True # 确保初始输出位置是位于最左处的 # to make sure the cursor is at the left most print("\b" * columns, end="") if isinstance(content, list): for line in content: _line = preprocess(line) print_line(_line, columns, force_single_line) elif isinstance(content, dict): for k, v in sorted(content.items(), key=sort_key): _k, _v = map(preprocess, (k, v)) print_line("{}: {}".format(_k, _v), columns, force_single_line) else: raise TypeError("Excepting types: list, dict. Got: {}".format(type(content))) # 输出额外的空行来清除上一次输出的剩余内容 # do extra blank lines to wipe the remaining of last output print(" " * columns * (last_output_lines - lines), end="") # 回到初始输出位置 # back to the origin pos print(magic_char * (max(last_output_lines, lines)-1), end="") sys.stdout.flush() last_output_lines = lines
[ "def", "print_multi_line", "(", "content", ",", "force_single_line", ",", "sort_key", ")", ":", "global", "last_output_lines", "global", "overflow_flag", "global", "is_atty", "if", "not", "is_atty", ":", "if", "isinstance", "(", "content", ",", "list", ")", ":",...
'sort_key' 参数只在 dict 模式时有效 'sort_key' parameter only available in 'dict' mode
[ "sort_key", "参数只在", "dict", "模式时有效", "sort_key", "parameter", "only", "available", "in", "dict", "mode" ]
python
train
sosreport/sos
example_plugins/example.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/example_plugins/example.py#L29-L50
def setup(self): ''' First phase - Collect all the information we need. Directories are copied recursively. arbitrary commands may be executed using the collectExtOutput() method. Information is automatically saved, and links are presented in the report to each file or directory which has been copied to the saved tree. Also, links are provided to the output from each command. ''' # Here's how to copy files and directory trees self.add_copy_spec("/etc/hosts") with open("/proc/cpuinfo") as f: for line in f: if "vendor_id" in line: self.add_alert("Vendor ID string is: %s <br>\n" % line) # Here's how to test your options and execute if enabled if self.option_enabled("init.d"): self.add_copy_spec("/etc/init.d") # copies a whole directory tree # Here's how to execute a command self.collectExtOutput("/bin/ps -ef")
[ "def", "setup", "(", "self", ")", ":", "# Here's how to copy files and directory trees", "self", ".", "add_copy_spec", "(", "\"/etc/hosts\"", ")", "with", "open", "(", "\"/proc/cpuinfo\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "if", "\"vendor_id\""...
First phase - Collect all the information we need. Directories are copied recursively. arbitrary commands may be executed using the collectExtOutput() method. Information is automatically saved, and links are presented in the report to each file or directory which has been copied to the saved tree. Also, links are provided to the output from each command.
[ "First", "phase", "-", "Collect", "all", "the", "information", "we", "need", ".", "Directories", "are", "copied", "recursively", ".", "arbitrary", "commands", "may", "be", "executed", "using", "the", "collectExtOutput", "()", "method", ".", "Information", "is", ...
python
train
nerdvegas/rez
src/rez/package_serialise.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/package_serialise.py#L97-L126
def dump_package_data(data, buf, format_=FileFormat.py, skip_attributes=None): """Write package data to `buf`. Args: data (dict): Data source - must conform to `package_serialise_schema`. buf (file-like object): Destination stream. format_ (`FileFormat`): Format to dump data in. skip_attributes (list of str): List of attributes to not print. """ if format_ == FileFormat.txt: raise ValueError("'txt' format not supported for packages.") data_ = dict((k, v) for k, v in data.iteritems() if v is not None) data_ = package_serialise_schema.validate(data_) skip = set(skip_attributes or []) items = [] for key in package_key_order: if key not in skip: value = data_.pop(key, None) if value is not None: items.append((key, value)) # remaining are arbitrary keys for key, value in data_.iteritems(): if key not in skip: items.append((key, value)) dump_func = dump_functions[format_] dump_func(items, buf)
[ "def", "dump_package_data", "(", "data", ",", "buf", ",", "format_", "=", "FileFormat", ".", "py", ",", "skip_attributes", "=", "None", ")", ":", "if", "format_", "==", "FileFormat", ".", "txt", ":", "raise", "ValueError", "(", "\"'txt' format not supported fo...
Write package data to `buf`. Args: data (dict): Data source - must conform to `package_serialise_schema`. buf (file-like object): Destination stream. format_ (`FileFormat`): Format to dump data in. skip_attributes (list of str): List of attributes to not print.
[ "Write", "package", "data", "to", "buf", "." ]
python
train
agile-geoscience/welly
welly/utils.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/utils.py#L512-L522
def text_colour_for_hex(hexx, percent=50, dark='#000000', light='#ffffff'): """ Function to decide what colour to use for a given hex colour. Args: hexx (str): A hexadecimal colour, starting with '#'. Returns: bool: The colour's brightness is less than the given percent. """ return light if hex_is_dark(hexx, percent=percent) else dark
[ "def", "text_colour_for_hex", "(", "hexx", ",", "percent", "=", "50", ",", "dark", "=", "'#000000'", ",", "light", "=", "'#ffffff'", ")", ":", "return", "light", "if", "hex_is_dark", "(", "hexx", ",", "percent", "=", "percent", ")", "else", "dark" ]
Function to decide what colour to use for a given hex colour. Args: hexx (str): A hexadecimal colour, starting with '#'. Returns: bool: The colour's brightness is less than the given percent.
[ "Function", "to", "decide", "what", "colour", "to", "use", "for", "a", "given", "hex", "colour", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/basicpar.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/basicpar.py#L1163-L1167
def _setChoiceDict(self): """Create min-match dictionary for choice list""" # value is full name of choice parameter self.choiceDict = minmatch.MinMatchDict() for c in self.choice: self.choiceDict.add(c, c)
[ "def", "_setChoiceDict", "(", "self", ")", ":", "# value is full name of choice parameter", "self", ".", "choiceDict", "=", "minmatch", ".", "MinMatchDict", "(", ")", "for", "c", "in", "self", ".", "choice", ":", "self", ".", "choiceDict", ".", "add", "(", "...
Create min-match dictionary for choice list
[ "Create", "min", "-", "match", "dictionary", "for", "choice", "list" ]
python
train
stevearc/dql
dql/grammar/query.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/grammar/query.py#L86-L97
def create_where(): """ Create a grammar for the 'where' clause used by 'select' """ conjunction = Forward().setResultsName("conjunction") nested = Group(Suppress("(") + conjunction + Suppress(")")).setResultsName( "conjunction" ) maybe_nested = nested | constraint inverted = Group(not_ + maybe_nested).setResultsName("not") full_constraint = maybe_nested | inverted conjunction <<= full_constraint + OneOrMore(and_or + full_constraint) return upkey("where") + Group(conjunction | full_constraint).setResultsName("where")
[ "def", "create_where", "(", ")", ":", "conjunction", "=", "Forward", "(", ")", ".", "setResultsName", "(", "\"conjunction\"", ")", "nested", "=", "Group", "(", "Suppress", "(", "\"(\"", ")", "+", "conjunction", "+", "Suppress", "(", "\")\"", ")", ")", "....
Create a grammar for the 'where' clause used by 'select'
[ "Create", "a", "grammar", "for", "the", "where", "clause", "used", "by", "select" ]
python
train
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L688-L690
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0): """Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)""" return orig_getaddrinfo(host, port, family, socktype, proto, flags)
[ "def", "getaddrinfo_wrapper", "(", "host", ",", "port", ",", "family", "=", "socket", ".", "AF_INET", ",", "socktype", "=", "0", ",", "proto", "=", "0", ",", "flags", "=", "0", ")", ":", "return", "orig_getaddrinfo", "(", "host", ",", "port", ",", "f...
Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)
[ "Patched", "getaddrinfo", "with", "default", "family", "IPv4", "(", "enabled", "by", "settings", "IPV4_ONLY", "=", "True", ")" ]
python
train
pycontribs/pyrax
pyrax/cloudcdn.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudcdn.py#L118-L127
def _configure_manager(self): """ Creates the Manager instances to handle monitoring. """ self._flavor_manager = CloudCDNFlavorManager(self, uri_base="flavors", resource_class=CloudCDNFlavor, response_key=None, plural_response_key="flavors") self._services_manager = CloudCDNServiceManager(self, uri_base="services", resource_class=CloudCDNService, response_key=None, plural_response_key="services")
[ "def", "_configure_manager", "(", "self", ")", ":", "self", ".", "_flavor_manager", "=", "CloudCDNFlavorManager", "(", "self", ",", "uri_base", "=", "\"flavors\"", ",", "resource_class", "=", "CloudCDNFlavor", ",", "response_key", "=", "None", ",", "plural_respons...
Creates the Manager instances to handle monitoring.
[ "Creates", "the", "Manager", "instances", "to", "handle", "monitoring", "." ]
python
train
saltstack/salt
salt/thorium/check.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/check.py#L277-L305
def event(name): ''' Chekcs for a specific event match and returns result True if the match happens USAGE: .. code-block:: yaml salt/foo/*/bar: check.event run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: salt/foo/*/bar ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} for event in __events__: if salt.utils.stringutils.expr_match(event['tag'], name): ret['result'] = True return ret
[ "def", "event", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", ",", "'result'", ":", "False", "}", "for", "event", "in", "__events__", ":", "if", "salt", ".", "utils", ...
Chekcs for a specific event match and returns result True if the match happens USAGE: .. code-block:: yaml salt/foo/*/bar: check.event run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: salt/foo/*/bar
[ "Chekcs", "for", "a", "specific", "event", "match", "and", "returns", "result", "True", "if", "the", "match", "happens" ]
python
train
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/manager.py
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L630-L634
def cancel(self, *args, **kwargs): """ Cancel all queue items - then attempt to cancel all in progress items """ self._cancel_called = True self.clear_waiting_coordinators(cancel=True) super(AsperaTransferCoordinatorController, self).cancel(*args, **kwargs)
[ "def", "cancel", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_cancel_called", "=", "True", "self", ".", "clear_waiting_coordinators", "(", "cancel", "=", "True", ")", "super", "(", "AsperaTransferCoordinatorController", ",...
Cancel all queue items - then attempt to cancel all in progress items
[ "Cancel", "all", "queue", "items", "-", "then", "attempt", "to", "cancel", "all", "in", "progress", "items" ]
python
train
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1577-L1596
def export(self, xformat='csv'): """action: export annotations to CSV.""" if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return if xformat == 'csv': filename = splitext(self.annot.xml_file)[0] + '.csv' filename, _ = QFileDialog.getSaveFileName(self, 'Export stages', filename, 'Sleep stages (*.csv)') if 'remlogic' in xformat: filename = splitext(self.annot.xml_file)[0] + '.txt' filename, _ = QFileDialog.getSaveFileName(self, 'Export stages', filename, 'Sleep stages (*.txt)') if filename == '': return self.annot.export(filename, xformat=xformat)
[ "def", "export", "(", "self", ",", "xformat", "=", "'csv'", ")", ":", "if", "self", ".", "annot", "is", "None", ":", "# remove if buttons are disabled", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "'No score file loaded'", ")"...
action: export annotations to CSV.
[ "action", ":", "export", "annotations", "to", "CSV", "." ]
python
train
fossasia/AYABInterface
AYABInterface/communication/states.py
https://github.com/fossasia/AYABInterface/blob/e2065eed8daf17b2936f6ca5e488c9bfb850914e/AYABInterface/communication/states.py#L418-L431
def receive_start_confirmation(self, message): """Receive a StartConfirmation message. :param message: a :class:`StartConfirmation <AYABInterface.communication.hardware_messages.StartConfirmation>` message If the message indicates success, the communication object transitions into :class:`KnittingStarted` or else, into :class:`StartingFailed`. """ if message.is_success(): self._next(KnittingStarted) else: self._next(StartingFailed)
[ "def", "receive_start_confirmation", "(", "self", ",", "message", ")", ":", "if", "message", ".", "is_success", "(", ")", ":", "self", ".", "_next", "(", "KnittingStarted", ")", "else", ":", "self", ".", "_next", "(", "StartingFailed", ")" ]
Receive a StartConfirmation message. :param message: a :class:`StartConfirmation <AYABInterface.communication.hardware_messages.StartConfirmation>` message If the message indicates success, the communication object transitions into :class:`KnittingStarted` or else, into :class:`StartingFailed`.
[ "Receive", "a", "StartConfirmation", "message", "." ]
python
train
bitesofcode/xqt
xqt/wrappers/pyside.py
https://github.com/bitesofcode/xqt/blob/befa649a2f2104a20d49c8c78ffdba5907fd94d2/xqt/wrappers/pyside.py#L109-L121
def createActionGroup(self, parent=None, name=''): """ Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str> """ actionGroup = super(UiLoader, self).createActionGroup(parent, name) if not actionGroup.parent(): actionGroup.setParent(self._baseinstance) setattr(self._baseinstance, name, actionGroup) return actionGroup
[ "def", "createActionGroup", "(", "self", ",", "parent", "=", "None", ",", "name", "=", "''", ")", ":", "actionGroup", "=", "super", "(", "UiLoader", ",", "self", ")", ".", "createActionGroup", "(", "parent", ",", "name", ")", "if", "not", "actionGroup", ...
Overloads teh create action method to handle the proper base instance information, similar to the PyQt4 loading system. :param parent | <QWidget> || None name | <str>
[ "Overloads", "teh", "create", "action", "method", "to", "handle", "the", "proper", "base", "instance", "information", "similar", "to", "the", "PyQt4", "loading", "system", ".", ":", "param", "parent", "|", "<QWidget", ">", "||", "None", "name", "|", "<str", ...
python
train
hyperledger/indy-plenum
plenum/server/replica.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L2589-L2599
def _request_prepare(self, three_pc_key: Tuple[int, int], recipients: List[str] = None, stash_data: Optional[Tuple[str, str, str]] = None) -> bool: """ Request preprepare """ if recipients is None: recipients = self.node.nodestack.connecteds.copy() primaryName = self.primaryName[:self.primaryName.rfind(":")] recipients.discard(primaryName) return self._request_three_phase_msg(three_pc_key, self.requested_prepares, PREPARE, recipients, stash_data)
[ "def", "_request_prepare", "(", "self", ",", "three_pc_key", ":", "Tuple", "[", "int", ",", "int", "]", ",", "recipients", ":", "List", "[", "str", "]", "=", "None", ",", "stash_data", ":", "Optional", "[", "Tuple", "[", "str", ",", "str", ",", "str"...
Request preprepare
[ "Request", "preprepare" ]
python
train
kenneth-reitz/args
args.py
https://github.com/kenneth-reitz/args/blob/9460f1a35eb3055e9e4de1f0a6932e0883c72d65/args.py#L181-L198
def start_with(self, x): """Returns all arguments beginning with given string (or list thereof). """ _args = [] for arg in self.all: if _is_collection(x): for _x in x: if arg.startswith(x): _args.append(arg) break else: if arg.startswith(x): _args.append(arg) return ArgsList(_args, no_argv=True)
[ "def", "start_with", "(", "self", ",", "x", ")", ":", "_args", "=", "[", "]", "for", "arg", "in", "self", ".", "all", ":", "if", "_is_collection", "(", "x", ")", ":", "for", "_x", "in", "x", ":", "if", "arg", ".", "startswith", "(", "x", ")", ...
Returns all arguments beginning with given string (or list thereof).
[ "Returns", "all", "arguments", "beginning", "with", "given", "string", "(", "or", "list", "thereof", ")", "." ]
python
train
blockstack-packages/blockstack-gpg
blockstack_gpg/gpg.py
https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L565-L606
def gpg_profile_delete_key( blockchain_id, key_id, proxy=None, wallet_keys=None ): """ Remove a GPG from a blockchain ID's global account. Do NOT remove it from the local keyring. Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed. Return {'error': ...} on error """ res = client.delete_account( blockchain_id, "pgp", key_id, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in res: return res removed_accounts = res['removed'] errors = [] # blow away all state for account in removed_accounts: if not account.has_key('contentUrl'): continue key_url = account['contentUrl'] if key_url.startswith("blockstack://"): # delete try: res = client.data_delete( key_url, proxy=proxy, wallet_keys=wallet_keys ) if 'error' in res: errors.append({'key_url': key_url, 'message': res['error']}) except AssertionError, e: log.exception(e) log.error("Failed to delete '%s'" % key_url) raise except Exception, e: log.exception(e) log.error("Failed to delete '%s'" % key_url) continue ret = {'status': True} if len(errors) > 0: ret['delete_errors'] = errors return ret
[ "def", "gpg_profile_delete_key", "(", "blockchain_id", ",", "key_id", ",", "proxy", "=", "None", ",", "wallet_keys", "=", "None", ")", ":", "res", "=", "client", ".", "delete_account", "(", "blockchain_id", ",", "\"pgp\"", ",", "key_id", ",", "proxy", "=", ...
Remove a GPG from a blockchain ID's global account. Do NOT remove it from the local keyring. Return {'status': True, ...} on success. May include 'delete_errors' if any specific keys couldn't be removed. Return {'error': ...} on error
[ "Remove", "a", "GPG", "from", "a", "blockchain", "ID", "s", "global", "account", ".", "Do", "NOT", "remove", "it", "from", "the", "local", "keyring", ".", "Return", "{", "status", ":", "True", "...", "}", "on", "success", ".", "May", "include", "delete...
python
train
jeffh/pyconstraints
pyconstraints/solvers.py
https://github.com/jeffh/pyconstraints/blob/923abce2f9ba484d1964165616a253bbccd1a630/pyconstraints/solvers.py#L161-L167
def satisfies_constraints(self, possible_solution): """Return True if the given solution is satisfied by all the constraints.""" for c in self._constraints: values = c.extract_values(possible_solution) if values is None or not c(*values): return False return True
[ "def", "satisfies_constraints", "(", "self", ",", "possible_solution", ")", ":", "for", "c", "in", "self", ".", "_constraints", ":", "values", "=", "c", ".", "extract_values", "(", "possible_solution", ")", "if", "values", "is", "None", "or", "not", "c", "...
Return True if the given solution is satisfied by all the constraints.
[ "Return", "True", "if", "the", "given", "solution", "is", "satisfied", "by", "all", "the", "constraints", "." ]
python
train
CZ-NIC/yangson
yangson/schemanode.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemanode.py#L653-L668
def _ascii_tree(self, indent: str, no_types: bool, val_count: bool) -> str: """Return the receiver's subtree as ASCII art.""" def suffix(sn): return f" {{{sn.val_count}}}\n" if val_count else "\n" if not self.children: return "" cs = [] for c in self.children: cs.extend(c._flatten()) cs.sort(key=lambda x: x.qual_name) res = "" for c in cs[:-1]: res += (indent + c._tree_line(no_types) + suffix(c) + c._ascii_tree(indent + "| ", no_types, val_count)) return (res + indent + cs[-1]._tree_line(no_types) + suffix(cs[-1]) + cs[-1]._ascii_tree(indent + " ", no_types, val_count))
[ "def", "_ascii_tree", "(", "self", ",", "indent", ":", "str", ",", "no_types", ":", "bool", ",", "val_count", ":", "bool", ")", "->", "str", ":", "def", "suffix", "(", "sn", ")", ":", "return", "f\" {{{sn.val_count}}}\\n\"", "if", "val_count", "else", "\...
Return the receiver's subtree as ASCII art.
[ "Return", "the", "receiver", "s", "subtree", "as", "ASCII", "art", "." ]
python
train
crackinglandia/pype32
pype32/pype32.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L1204-L1224
def _parseDebugDirectory(self, rva, size, magic = consts.PE32): """ Parses the C{IMAGE_DEBUG_DIRECTORY} directory. @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx} @type rva: int @param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageDebugDirectory} @return: A new L{ImageDebugDirectory} object. """ debugDirData = self.getDataAtRva(rva, size) numberOfEntries = size / consts.SIZEOF_IMAGE_DEBUG_ENTRY32 rd = utils.ReadData(debugDirData) return directories.ImageDebugDirectories.parse(rd, numberOfEntries)
[ "def", "_parseDebugDirectory", "(", "self", ",", "rva", ",", "size", ",", "magic", "=", "consts", ".", "PE32", ")", ":", "debugDirData", "=", "self", ".", "getDataAtRva", "(", "rva", ",", "size", ")", "numberOfEntries", "=", "size", "/", "consts", ".", ...
Parses the C{IMAGE_DEBUG_DIRECTORY} directory. @see: U{http://msdn.microsoft.com/es-es/library/windows/desktop/ms680307(v=vs.85).aspx} @type rva: int @param rva: The RVA where the C{IMAGE_DEBUG_DIRECTORY} directory starts. @type size: int @param size: The size of the C{IMAGE_DEBUG_DIRECTORY} directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageDebugDirectory} @return: A new L{ImageDebugDirectory} object.
[ "Parses", "the", "C", "{", "IMAGE_DEBUG_DIRECTORY", "}", "directory", "." ]
python
train
walkr/nanoservice
nanoservice/core.py
https://github.com/walkr/nanoservice/blob/e2098986b1baa5f283167ae487d14f3c6c21961a/nanoservice/core.py#L105-L111
def receive(self, decode=True): """ Receive from socket, authenticate and decode payload """ payload = self.socket.recv() payload = self.verify(payload) if decode: payload = self.decode(payload) return payload
[ "def", "receive", "(", "self", ",", "decode", "=", "True", ")", ":", "payload", "=", "self", ".", "socket", ".", "recv", "(", ")", "payload", "=", "self", ".", "verify", "(", "payload", ")", "if", "decode", ":", "payload", "=", "self", ".", "decode...
Receive from socket, authenticate and decode payload
[ "Receive", "from", "socket", "authenticate", "and", "decode", "payload" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/mcverry_2006.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/mcverry_2006.py#L179-L201
def _compute_mean_on_rock(self, C, mag, rrup, rvol, hypo_depth, CN, CR, f4HW): """ Compute mean value on site class A/B (equation 1 on page 22) """ lnSA_AB = ( # line 1 of equation 1 C['c1'] + C['c4as'] * (mag - 6) + # line 2 C['c3as'] * (8.5 - mag) ** 2 + # line 3 C['c5'] * rrup + # line 3 and 4 (C['c8'] + C['c6as'] * (mag - 6)) * np.log((rrup ** 2 + C['c10as'] ** 2) ** 0.5) + # line 5 C['c46'] * rvol + # line 6 C['c32'] * CN + C['c33as'] * CR + f4HW ) return lnSA_AB
[ "def", "_compute_mean_on_rock", "(", "self", ",", "C", ",", "mag", ",", "rrup", ",", "rvol", ",", "hypo_depth", ",", "CN", ",", "CR", ",", "f4HW", ")", ":", "lnSA_AB", "=", "(", "# line 1 of equation 1", "C", "[", "'c1'", "]", "+", "C", "[", "'c4as'"...
Compute mean value on site class A/B (equation 1 on page 22)
[ "Compute", "mean", "value", "on", "site", "class", "A", "/", "B", "(", "equation", "1", "on", "page", "22", ")" ]
python
train
jim-easterbrook/pyctools
src/pyctools/core/frame.py
https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/core/frame.py#L201-L229
def from_file(self, path): """Read metadata from an XMP sidecar file or, if there is no sidecar, from the image/video file (if it has metadata). Returns the :py:class:`Metadata` object, allowing convenient code like this:: md = Metadata().from_file(path) :param str path: The image/video file path name. :rtype: :py:class:`Metadata` """ for xmp_path in (path + '.xmp', path): md = GExiv2.Metadata() try: md.open_path(xmp_path) except GLib.GError: continue for tag in (md.get_exif_tags() + md.get_iptc_tags() + md.get_xmp_tags()): if md.get_tag_type(tag) in ('XmpBag', 'XmpSeq'): self.data[tag] = md.get_tag_multiple(tag) else: self.data[tag] = md.get_tag_string(tag) self.comment = md.get_comment() break return self
[ "def", "from_file", "(", "self", ",", "path", ")", ":", "for", "xmp_path", "in", "(", "path", "+", "'.xmp'", ",", "path", ")", ":", "md", "=", "GExiv2", ".", "Metadata", "(", ")", "try", ":", "md", ".", "open_path", "(", "xmp_path", ")", "except", ...
Read metadata from an XMP sidecar file or, if there is no sidecar, from the image/video file (if it has metadata). Returns the :py:class:`Metadata` object, allowing convenient code like this:: md = Metadata().from_file(path) :param str path: The image/video file path name. :rtype: :py:class:`Metadata`
[ "Read", "metadata", "from", "an", "XMP", "sidecar", "file", "or", "if", "there", "is", "no", "sidecar", "from", "the", "image", "/", "video", "file", "(", "if", "it", "has", "metadata", ")", "." ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/api/__init__.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/api/__init__.py#L88-L141
def rison(schema=None): """ Use this decorator to parse URI *Rison* arguments to a python data structure, your method gets the data structure on kwargs['rison']. Response is HTTP 400 if *Rison* is not correct:: class ExampleApi(BaseApi): @expose('/risonjson') @rison() def rison_json(self, **kwargs): return self.response(200, result=kwargs['rison']) You can additionally pass a JSON schema to validate Rison arguments:: schema = { "type": "object", "properties": { "arg1": { "type": "integer" } } } class ExampleApi(BaseApi): @expose('/risonjson') @rison(schema) def rison_json(self, **kwargs): return self.response(200, result=kwargs['rison']) """ def _rison(f): def wraps(self, *args, **kwargs): value = request.args.get(API_URI_RIS_KEY, None) kwargs["rison"] = dict() if value: try: kwargs["rison"] = prison.loads(value) except prison.decoder.ParserException: return self.response_400(message="Not a valid rison argument") if schema: try: jsonschema.validate(instance=kwargs["rison"], schema=schema) except jsonschema.ValidationError as e: return self.response_400( message="Not a valid rison schema {}".format(e) ) return f(self, *args, **kwargs) return functools.update_wrapper(wraps, f) return _rison
[ "def", "rison", "(", "schema", "=", "None", ")", ":", "def", "_rison", "(", "f", ")", ":", "def", "wraps", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "request", ".", "args", ".", "get", "(", "API_URI_RIS_KEY",...
Use this decorator to parse URI *Rison* arguments to a python data structure, your method gets the data structure on kwargs['rison']. Response is HTTP 400 if *Rison* is not correct:: class ExampleApi(BaseApi): @expose('/risonjson') @rison() def rison_json(self, **kwargs): return self.response(200, result=kwargs['rison']) You can additionally pass a JSON schema to validate Rison arguments:: schema = { "type": "object", "properties": { "arg1": { "type": "integer" } } } class ExampleApi(BaseApi): @expose('/risonjson') @rison(schema) def rison_json(self, **kwargs): return self.response(200, result=kwargs['rison'])
[ "Use", "this", "decorator", "to", "parse", "URI", "*", "Rison", "*", "arguments", "to", "a", "python", "data", "structure", "your", "method", "gets", "the", "data", "structure", "on", "kwargs", "[", "rison", "]", ".", "Response", "is", "HTTP", "400", "if...
python
train
amzn/ion-python
amazon/ion/writer_binary_raw_fields.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/writer_binary_raw_fields.py#L147-L185
def _write_base(buf, value, bits_per_octet, end_bit=0, sign_bit=0, is_signed=False): """Write a field to the provided buffer. Args: buf (Sequence): The buffer into which the UInt will be written in the form of integer octets. value (int): The value to write as a UInt. bits_per_octet (int): The number of value bits (i.e. exclusive of the end bit, but inclusive of the sign bit, if applicable) per octet. end_bit (Optional[int]): The end bit mask. sign_bit (Optional[int]): The sign bit mask. Returns: int: The number of octets written. """ if value == 0: buf.append(sign_bit | end_bit) return 1 num_bits = bit_length(value) num_octets = num_bits // bits_per_octet # 'remainder' is the number of value bits in the first octet. remainder = num_bits % bits_per_octet if remainder != 0 or is_signed: # If signed, the first octet has one fewer bit available, requiring another octet. num_octets += 1 else: # This ensures that unsigned values that fit exactly are not shifted too far. remainder = bits_per_octet for i in range(num_octets): octet = 0 if i == 0: octet |= sign_bit if i == num_octets - 1: octet |= end_bit # 'remainder' is used for alignment such that only the first octet # may contain insignificant zeros. octet |= ((value >> (num_bits - (remainder + bits_per_octet * i))) & _OCTET_MASKS[bits_per_octet]) buf.append(octet) return num_octets
[ "def", "_write_base", "(", "buf", ",", "value", ",", "bits_per_octet", ",", "end_bit", "=", "0", ",", "sign_bit", "=", "0", ",", "is_signed", "=", "False", ")", ":", "if", "value", "==", "0", ":", "buf", ".", "append", "(", "sign_bit", "|", "end_bit"...
Write a field to the provided buffer. Args: buf (Sequence): The buffer into which the UInt will be written in the form of integer octets. value (int): The value to write as a UInt. bits_per_octet (int): The number of value bits (i.e. exclusive of the end bit, but inclusive of the sign bit, if applicable) per octet. end_bit (Optional[int]): The end bit mask. sign_bit (Optional[int]): The sign bit mask. Returns: int: The number of octets written.
[ "Write", "a", "field", "to", "the", "provided", "buffer", "." ]
python
train
saltstack/salt
salt/modules/yumpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/yumpkg.py#L1949-L2033
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613 ''' .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any yum/dnf commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Remove packages name The name of the package to be removed Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [] for target in pkg_params: # Check if package version set to be removed is actually installed: # old[target] contains a comma-separated list of installed versions if target in old and not pkg_params[target]: targets.append(target) elif target in old and pkg_params[target] in old[target].split(','): arch = '' pkgname = target try: namepart, archpart = target.rsplit('.', 1) except ValueError: pass else: if archpart in salt.utils.pkg.rpm.ARCHES: arch = '.' 
+ archpart pkgname = namepart targets.append('{0}-{1}{2}'.format(pkgname, pkg_params[target], arch)) if not targets: return {} out = _call_yum(['-y', 'remove'] + targets) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Error occurred removing package(s)', info={'errors': errors, 'changes': ret} ) return ret
[ "def", "remove", "(", "name", "=", "None", ",", "pkgs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=W0613", "try", ":", "pkg_params", "=", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "name", ",", "pkgs", ")", "[", "0...
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any yum/dnf commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Remove packages name The name of the package to be removed Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
[ "..", "versionchanged", "::", "2015", ".", "8", ".", "12", "2016", ".", "3", ".", "3", "2016", ".", "11", ".", "0", "On", "minions", "running", "systemd", ">", "=", "205", "systemd", "-", "run", "(", "1", ")", "_", "is", "now", "used", "to", "i...
python
train
Becksteinlab/GromacsWrapper
gromacs/config.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/config.py#L551-L559
def getLogLevel(self, section, option): """Return the textual representation of logging level 'option' or the number. Note that option is always interpreted as an UPPERCASE string and hence integer log levels will not be recognized. .. SeeAlso: :mod:`logging` and :func:`logging.getLevelName` """ return logging.getLevelName(self.get(section, option).upper())
[ "def", "getLogLevel", "(", "self", ",", "section", ",", "option", ")", ":", "return", "logging", ".", "getLevelName", "(", "self", ".", "get", "(", "section", ",", "option", ")", ".", "upper", "(", ")", ")" ]
Return the textual representation of logging level 'option' or the number. Note that option is always interpreted as an UPPERCASE string and hence integer log levels will not be recognized. .. SeeAlso: :mod:`logging` and :func:`logging.getLevelName`
[ "Return", "the", "textual", "representation", "of", "logging", "level", "option", "or", "the", "number", "." ]
python
valid
tjcsl/cslbot
cslbot/commands/slogan.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/slogan.py#L23-L31
def cmd(send, msg, _): """Gets a slogan. Syntax: {command} [text] """ if not msg: msg = textutils.gen_word() send(textutils.gen_slogan(msg))
[ "def", "cmd", "(", "send", ",", "msg", ",", "_", ")", ":", "if", "not", "msg", ":", "msg", "=", "textutils", ".", "gen_word", "(", ")", "send", "(", "textutils", ".", "gen_slogan", "(", "msg", ")", ")" ]
Gets a slogan. Syntax: {command} [text]
[ "Gets", "a", "slogan", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/EnvironmentVIP.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/EnvironmentVIP.py#L62-L86
def list_all_available(self, id_vlan): """ List all environment vips availables :return: Following dictionary: :: {'environment_vip': [{'id': <id>, 'finalidade_txt': <finalidade_txt>, 'cliente_txt': <cliente_txt>, 'ambiente_p44_txt': <ambiente_p44_txt> } {... other environments vip ...}]} :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ url = 'environmentvip/search/' + str(id_vlan) code, xml = self.submit(None, 'GET', url) key = 'environment_vip' return get_list_map(self.response(code, xml, [key]), key)
[ "def", "list_all_available", "(", "self", ",", "id_vlan", ")", ":", "url", "=", "'environmentvip/search/'", "+", "str", "(", "id_vlan", ")", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'GET'", ",", "url", ")", "key", "=", "'envir...
List all environment vips availables :return: Following dictionary: :: {'environment_vip': [{'id': <id>, 'finalidade_txt': <finalidade_txt>, 'cliente_txt': <cliente_txt>, 'ambiente_p44_txt': <ambiente_p44_txt> } {... other environments vip ...}]} :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "List", "all", "environment", "vips", "availables" ]
python
train
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/element_methods.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/element_methods.py#L45-L70
def append_all_below(destination, source, join_str=None): """ Compared to xml.dom.minidom, lxml's treatment of text as .text and .tail attributes of elements is an oddity. It can even be a little frustrating when one is attempting to copy everything underneath some element to another element; one has to write in extra code to handle the text. This method provides the functionality of adding everything underneath the source element, in preserved order, to the destination element. """ if join_str is None: join_str = ' ' if source.text is not None: # If source has text if len(destination) == 0: # Destination has no children if destination.text is None: # Destination has no text destination.text = source.text else: # Destination has a text destination.text = join_str.join([destination.text, source.text]) else: # Destination has children #Select last child last = destination[-1] if last.tail is None: # Last child has no tail last.tail = source.text else: # Last child has a tail last.tail = join_str.join([last.tail, source.text]) for each_child in source: destination.append(deepcopy(each_child))
[ "def", "append_all_below", "(", "destination", ",", "source", ",", "join_str", "=", "None", ")", ":", "if", "join_str", "is", "None", ":", "join_str", "=", "' '", "if", "source", ".", "text", "is", "not", "None", ":", "# If source has text", "if", "len", ...
Compared to xml.dom.minidom, lxml's treatment of text as .text and .tail attributes of elements is an oddity. It can even be a little frustrating when one is attempting to copy everything underneath some element to another element; one has to write in extra code to handle the text. This method provides the functionality of adding everything underneath the source element, in preserved order, to the destination element.
[ "Compared", "to", "xml", ".", "dom", ".", "minidom", "lxml", "s", "treatment", "of", "text", "as", ".", "text", "and", ".", "tail", "attributes", "of", "elements", "is", "an", "oddity", ".", "It", "can", "even", "be", "a", "little", "frustrating", "whe...
python
train
chrippa/ds4drv
ds4drv/uinput.py
https://github.com/chrippa/ds4drv/blob/be7327fc3f5abb8717815f2a1a2ad3d335535d8a/ds4drv/uinput.py#L311-L344
def emit(self, report): """Writes axes, buttons and hats with values from the report to the device.""" for name, attr in self.layout.axes.items(): value = getattr(report, attr) self.write_event(ecodes.EV_ABS, name, value) for name, attr in self.layout.buttons.items(): attr, modifier = attr if attr in self.ignored_buttons: value = False else: value = getattr(report, attr) if modifier and "analog" in attr: if modifier == "+": value = value > (128 + DEFAULT_A2D_DEADZONE) elif modifier == "-": value = value < (128 - DEFAULT_A2D_DEADZONE) self.write_event(ecodes.EV_KEY, name, value) for name, attr in self.layout.hats.items(): if getattr(report, attr[0]): value = -1 elif getattr(report, attr[1]): value = 1 else: value = 0 self.write_event(ecodes.EV_ABS, name, value) self.device.syn()
[ "def", "emit", "(", "self", ",", "report", ")", ":", "for", "name", ",", "attr", "in", "self", ".", "layout", ".", "axes", ".", "items", "(", ")", ":", "value", "=", "getattr", "(", "report", ",", "attr", ")", "self", ".", "write_event", "(", "ec...
Writes axes, buttons and hats with values from the report to the device.
[ "Writes", "axes", "buttons", "and", "hats", "with", "values", "from", "the", "report", "to", "the", "device", "." ]
python
train
chrisspen/burlap
burlap/mysql.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L518-L541
def query(query, use_sudo=True, **kwargs): """ Run a MySQL query. """ func = use_sudo and run_as_root or run user = kwargs.get('mysql_user') or env.get('mysql_user') password = kwargs.get('mysql_password') or env.get('mysql_password') options = [ '--batch', '--raw', '--skip-column-names', ] if user: options.append('--user=%s' % quote(user)) if password: options.append('--password=%s' % quote(password)) options = ' '.join(options) return func('mysql %(options)s --execute=%(query)s' % { 'options': options, 'query': quote(query), })
[ "def", "query", "(", "query", ",", "use_sudo", "=", "True", ",", "*", "*", "kwargs", ")", ":", "func", "=", "use_sudo", "and", "run_as_root", "or", "run", "user", "=", "kwargs", ".", "get", "(", "'mysql_user'", ")", "or", "env", ".", "get", "(", "'...
Run a MySQL query.
[ "Run", "a", "MySQL", "query", "." ]
python
valid
log2timeline/plaso
plaso/parsers/winreg_plugins/msie_zones.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winreg_plugins/msie_zones.py#L158-L266
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): """Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. """ values_dict = {} if registry_key.number_of_values > 0: for registry_value in registry_key.GetValues(): value_name = registry_value.name or '(default)' if registry_value.DataIsString(): value_string = '[{0:s}] {1:s}'.format( registry_value.data_type_string, registry_value.GetDataAsObject()) elif registry_value.DataIsInteger(): value_string = '[{0:s}] {1:d}'.format( registry_value.data_type_string, registry_value.GetDataAsObject()) elif registry_value.DataIsMultiString(): value_string = '[{0:s}] {1:s}'.format( registry_value.data_type_string, ''.join( registry_value.GetDataAsObject())) else: value_string = '[{0:s}]'.format(registry_value.data_type_string) values_dict[value_name] = value_string # Generate at least one event object for the key. event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) if registry_key.number_of_subkeys == 0: error_string = 'Key: {0:s} missing subkeys.'.format(registry_key.path) parser_mediator.ProduceExtractionWarning(error_string) return for zone_key in registry_key.GetSubkeys(): # TODO: these values are stored in the Description value of the # zone key. This solution will break on zone values that are larger # than 5. 
path = '{0:s}\\{1:s}'.format( registry_key.path, self._ZONE_NAMES[zone_key.name]) values_dict = {} # TODO: this plugin currently just dumps the values and does not # distinguish between what is a feature control or not. for value in zone_key.GetValues(): # Ignore the default value. if not value.name: continue if value.DataIsString(): value_string = value.GetDataAsObject() elif value.DataIsInteger(): value_integer = value.GetDataAsObject() if value.name in self._KNOWN_PERMISSIONS_VALUE_NAMES: value_string = self._CONTROL_VALUES_PERMISSIONS.get( value_integer, 'UNKNOWN') elif value.name == '1A00': value_string = self._CONTROL_VALUES_1A00.get( value_integer, 'UNKNOWN') elif value.name == '1C00': value_string = self._CONTROL_VALUES_1C00.get( value_integer, 'UNKNOWN') elif value.name == '1E05': value_string = self._CONTROL_VALUES_SAFETY.get( value_integer, 'UNKNOWN') else: value_string = '{0:d}'.format(value_integer) else: value_string = '[{0:s}]'.format(value.data_type_string) if len(value.name) == 4 and value.name != 'Icon': value_description = self._FEATURE_CONTROLS.get(value.name, 'UNKNOWN') else: value_description = self._FEATURE_CONTROLS.get(value.name, '') if value_description: feature_control = '[{0:s}] {1:s}'.format( value.name, value_description) else: feature_control = '[{0:s}]'.format(value.name) values_dict[feature_control] = value_string event_data = windows_events.WindowsRegistryEventData() event_data.key_path = path event_data.offset = zone_key.offset event_data.regvalue = values_dict event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( zone_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "ExtractEvents", "(", "self", ",", "parser_mediator", ",", "registry_key", ",", "*", "*", "kwargs", ")", ":", "values_dict", "=", "{", "}", "if", "registry_key", ".", "number_of_values", ">", "0", ":", "for", "registry_value", "in", "registry_key", "....
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
[ "Extracts", "events", "from", "a", "Windows", "Registry", "key", "." ]
python
train
RedHatInsights/insights-core
insights/client/connection.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L565-L602
def _legacy_api_registration_check(self): ''' Check registration status through API ''' logger.debug('Checking registration status...') machine_id = generate_machine_id() try: url = self.api_url + '/v1/systems/' + machine_id net_logger.info("GET %s", url) res = self.session.get(url, timeout=self.config.http_timeout) except requests.ConnectionError: # can't connect, run connection test logger.error('Connection timed out. Running connection test...') self.test_connection() return False # had to do a quick bugfix changing this around, # which makes the None-False-True dichotomy seem weird # TODO: reconsider what gets returned, probably this: # True for registered # False for unregistered # None for system 404 try: # check the 'unregistered_at' key of the response unreg_status = json.loads(res.content).get('unregistered_at', 'undefined') # set the global account number self.config.account_number = json.loads(res.content).get('account_number', 'undefined') except ValueError: # bad response, no json object return False if unreg_status == 'undefined': # key not found, machine not yet registered return None elif unreg_status is None: # unregistered_at = null, means this machine IS registered return True else: # machine has been unregistered, this is a timestamp return unreg_status
[ "def", "_legacy_api_registration_check", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Checking registration status...'", ")", "machine_id", "=", "generate_machine_id", "(", ")", "try", ":", "url", "=", "self", ".", "api_url", "+", "'/v1/systems/'", "+", ...
Check registration status through API
[ "Check", "registration", "status", "through", "API" ]
python
train
Robpol86/libnl
libnl/attr.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/attr.py#L552-L566
def nla_get_u64(nla): """Return value of 64 bit integer attribute as an int(). https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L649 Positional arguments: nla -- 64 bit integer attribute (nlattr class instance). Returns: Payload as an int(). """ tmp = c_uint64(0) if nla and nla_len(nla) >= sizeof(tmp): tmp = c_uint64.from_buffer(nla_data(nla)[:SIZEOF_U64]) return int(tmp.value)
[ "def", "nla_get_u64", "(", "nla", ")", ":", "tmp", "=", "c_uint64", "(", "0", ")", "if", "nla", "and", "nla_len", "(", "nla", ")", ">=", "sizeof", "(", "tmp", ")", ":", "tmp", "=", "c_uint64", ".", "from_buffer", "(", "nla_data", "(", "nla", ")", ...
Return value of 64 bit integer attribute as an int(). https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L649 Positional arguments: nla -- 64 bit integer attribute (nlattr class instance). Returns: Payload as an int().
[ "Return", "value", "of", "64", "bit", "integer", "attribute", "as", "an", "int", "()", "." ]
python
train
oasis-open/cti-pattern-validator
stix2patterns/pattern.py
https://github.com/oasis-open/cti-pattern-validator/blob/753a6901120db25f0c8550607de1eab4440d59df/stix2patterns/pattern.py#L56-L117
def __do_parse(self, pattern_str): """ Parses the given pattern and returns the antlr parse tree. :param pattern_str: The STIX pattern :return: The parse tree :raises ParseException: If there is a parse error """ in_ = antlr4.InputStream(pattern_str) lexer = STIXPatternLexer(in_) lexer.removeErrorListeners() # remove the default "console" listener token_stream = antlr4.CommonTokenStream(lexer) parser = STIXPatternParser(token_stream) parser.removeErrorListeners() # remove the default "console" listener error_listener = ParserErrorListener() parser.addErrorListener(error_listener) # I found no public API for this... # The default error handler tries to keep parsing, and I don't # think that's appropriate here. (These error handlers are only for # handling the built-in RecognitionException errors.) parser._errHandler = antlr4.BailErrorStrategy() # To improve error messages, replace "<INVALID>" in the literal # names with symbolic names. This is a hack, but seemed like # the simplest workaround. for i, lit_name in enumerate(parser.literalNames): if lit_name == u"<INVALID>": parser.literalNames[i] = parser.symbolicNames[i] # parser.setTrace(True) try: tree = parser.pattern() # print(tree.toStringTree(recog=parser)) return tree except antlr4.error.Errors.ParseCancellationException as e: # The cancellation exception wraps the real RecognitionException # which caused the parser to bail. real_exc = e.args[0] # I want to bail when the first error is hit. But I also want # a decent error message. When an error is encountered in # Parser.match(), the BailErrorStrategy produces the # ParseCancellationException. It is not a subclass of # RecognitionException, so none of the 'except' clauses which would # normally report an error are invoked. # # Error message creation is buried in the ErrorStrategy, and I can # (ab)use the API to get a message: register an error listener with # the parser, force an error report, then get the message out of the # listener. 
Error listener registration is above; now we force its # invocation. Wish this could be cleaner... parser._errHandler.reportError(parser, real_exc) # should probably chain exceptions if we can... # Should I report the cancellation or recognition exception as the # cause...? six.raise_from(ParseException(error_listener.error_message), real_exc)
[ "def", "__do_parse", "(", "self", ",", "pattern_str", ")", ":", "in_", "=", "antlr4", ".", "InputStream", "(", "pattern_str", ")", "lexer", "=", "STIXPatternLexer", "(", "in_", ")", "lexer", ".", "removeErrorListeners", "(", ")", "# remove the default \"console\...
Parses the given pattern and returns the antlr parse tree. :param pattern_str: The STIX pattern :return: The parse tree :raises ParseException: If there is a parse error
[ "Parses", "the", "given", "pattern", "and", "returns", "the", "antlr", "parse", "tree", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L642-L646
def earth_accel(RAW_IMU,ATTITUDE): '''return earth frame acceleration vector''' r = rotation(ATTITUDE) accel = Vector3(RAW_IMU.xacc, RAW_IMU.yacc, RAW_IMU.zacc) * 9.81 * 0.001 return r * accel
[ "def", "earth_accel", "(", "RAW_IMU", ",", "ATTITUDE", ")", ":", "r", "=", "rotation", "(", "ATTITUDE", ")", "accel", "=", "Vector3", "(", "RAW_IMU", ".", "xacc", ",", "RAW_IMU", ".", "yacc", ",", "RAW_IMU", ".", "zacc", ")", "*", "9.81", "*", "0.001...
return earth frame acceleration vector
[ "return", "earth", "frame", "acceleration", "vector" ]
python
train
xav-b/pyconsul
pyconsul/iron.py
https://github.com/xav-b/pyconsul/blob/06ce3b921d01010c19643424486bea4b22196076/pyconsul/iron.py#L36-L40
def available(self): ''' Check if a related database exists ''' return self.db_name in map( lambda x: x['name'], self._db.get_database_list() )
[ "def", "available", "(", "self", ")", ":", "return", "self", ".", "db_name", "in", "map", "(", "lambda", "x", ":", "x", "[", "'name'", "]", ",", "self", ".", "_db", ".", "get_database_list", "(", ")", ")" ]
Check if a related database exists
[ "Check", "if", "a", "related", "database", "exists" ]
python
train
jf-parent/brome
brome/runner/localhost_instance.py
https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/runner/localhost_instance.py#L49-L67
def execute_command(self, command): """Execute a command Args: command (str) Returns: process (object) """ self.runner.info_log("Executing command: %s" % command) process = Popen( command, stdout=open(os.devnull, 'w'), stderr=open('runner.log', 'a'), ) return process
[ "def", "execute_command", "(", "self", ",", "command", ")", ":", "self", ".", "runner", ".", "info_log", "(", "\"Executing command: %s\"", "%", "command", ")", "process", "=", "Popen", "(", "command", ",", "stdout", "=", "open", "(", "os", ".", "devnull", ...
Execute a command Args: command (str) Returns: process (object)
[ "Execute", "a", "command" ]
python
train
wbond/asn1crypto
asn1crypto/x509.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L1139-L1147
def sha1(self): """ :return: The SHA1 hash of the DER-encoded bytes of this name """ if self._sha1 is None: self._sha1 = hashlib.sha1(self.dump()).digest() return self._sha1
[ "def", "sha1", "(", "self", ")", ":", "if", "self", ".", "_sha1", "is", "None", ":", "self", ".", "_sha1", "=", "hashlib", ".", "sha1", "(", "self", ".", "dump", "(", ")", ")", ".", "digest", "(", ")", "return", "self", ".", "_sha1" ]
:return: The SHA1 hash of the DER-encoded bytes of this name
[ ":", "return", ":", "The", "SHA1", "hash", "of", "the", "DER", "-", "encoded", "bytes", "of", "this", "name" ]
python
train
hsolbrig/pyjsg
pyjsg/parser_impl/jsg_valuetype_parser.py
https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_valuetype_parser.py#L131-L136
def visitValueType(self, ctx: jsgParser.ValueTypeContext): """ valueType: idref | nonRefValueType """ if ctx.idref(): self._typeid = as_token(ctx) else: self.visitChildren(ctx)
[ "def", "visitValueType", "(", "self", ",", "ctx", ":", "jsgParser", ".", "ValueTypeContext", ")", ":", "if", "ctx", ".", "idref", "(", ")", ":", "self", ".", "_typeid", "=", "as_token", "(", "ctx", ")", "else", ":", "self", ".", "visitChildren", "(", ...
valueType: idref | nonRefValueType
[ "valueType", ":", "idref", "|", "nonRefValueType" ]
python
train
apache/incubator-mxnet
example/ssd/evaluate/eval_metric.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/evaluate/eval_metric.py#L197-L212
def _update(self): """ update num_inst and sum_metric """ aps = [] for k, v in self.records.items(): recall, prec = self._recall_prec(v, self.counts[k]) ap = self._average_precision(recall, prec) aps.append(ap) if self.num is not None and k < (self.num - 1): self.sum_metric[k] = ap self.num_inst[k] = 1 if self.num is None: self.num_inst = 1 self.sum_metric = np.mean(aps) else: self.num_inst[-1] = 1 self.sum_metric[-1] = np.mean(aps)
[ "def", "_update", "(", "self", ")", ":", "aps", "=", "[", "]", "for", "k", ",", "v", "in", "self", ".", "records", ".", "items", "(", ")", ":", "recall", ",", "prec", "=", "self", ".", "_recall_prec", "(", "v", ",", "self", ".", "counts", "[", ...
update num_inst and sum_metric
[ "update", "num_inst", "and", "sum_metric" ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/type.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/type.py#L202-L211
def is_derived (type, base): """ Returns true if 'type' is 'base' or has 'base' as its direct or indirect base. """ assert isinstance(type, basestring) assert isinstance(base, basestring) # TODO: this isn't very efficient, especially for bases close to type if base in all_bases (type): return True else: return False
[ "def", "is_derived", "(", "type", ",", "base", ")", ":", "assert", "isinstance", "(", "type", ",", "basestring", ")", "assert", "isinstance", "(", "base", ",", "basestring", ")", "# TODO: this isn't very efficient, especially for bases close to type", "if", "base", ...
Returns true if 'type' is 'base' or has 'base' as its direct or indirect base.
[ "Returns", "true", "if", "type", "is", "base", "or", "has", "base", "as", "its", "direct", "or", "indirect", "base", "." ]
python
train
yamins81/tabular
tabular/spreadsheet.py
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/spreadsheet.py#L477-L502
def grayspec(k): """ List of gray-scale colors in HSV space as web hex triplets. For integer argument k, returns list of `k` gray-scale colors, increasingly light, linearly in the HSV color space, as web hex triplets. Technical dependency of :func:`tabular.spreadsheet.aggregate_in`. **Parameters** **k** : positive integer Number of gray-scale colors to return. **Returns** **glist** : list of strings List of `k` gray-scale colors. """ ll = .5 ul = .8 delta = (ul - ll) / k return [GrayScale(t) for t in np.arange(ll, ul, delta)]
[ "def", "grayspec", "(", "k", ")", ":", "ll", "=", ".5", "ul", "=", ".8", "delta", "=", "(", "ul", "-", "ll", ")", "/", "k", "return", "[", "GrayScale", "(", "t", ")", "for", "t", "in", "np", ".", "arange", "(", "ll", ",", "ul", ",", "delta"...
List of gray-scale colors in HSV space as web hex triplets. For integer argument k, returns list of `k` gray-scale colors, increasingly light, linearly in the HSV color space, as web hex triplets. Technical dependency of :func:`tabular.spreadsheet.aggregate_in`. **Parameters** **k** : positive integer Number of gray-scale colors to return. **Returns** **glist** : list of strings List of `k` gray-scale colors.
[ "List", "of", "gray", "-", "scale", "colors", "in", "HSV", "space", "as", "web", "hex", "triplets", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_hardware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_hardware.py#L109-L119
def hardware_connector_group_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware") connector_group = ET.SubElement(hardware, "connector-group") id = ET.SubElement(connector_group, "id") id.text = kwargs.pop('id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hardware_connector_group_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hardware", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hardware\"", ",", "xmlns", "=", "\"urn:broca...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
wummel/linkchecker
third_party/dnspython/dns/resolver.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/resolver.py#L655-L797
def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, tcp=False, source=None, raise_on_no_answer=True): """Query nameservers to find the answer to the question. The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects of the appropriate type, or strings that can be converted into objects of the appropriate type. E.g. For I{rdtype} the integer 2 and the the string 'NS' both mean to query for records with DNS rdata type NS. @param qname: the query name @type qname: dns.name.Name object or string @param rdtype: the query type @type rdtype: int or string @param rdclass: the query class @type rdclass: int or string @param tcp: use TCP to make the query (default is False). @type tcp: bool @param source: bind to this IP address (defaults to machine default IP). @type source: IP address in dotted quad notation @param raise_on_no_answer: raise NoAnswer if there's no answer (defaults is True). @type raise_on_no_answer: bool @rtype: dns.resolver.Answer instance @raises Timeout: no answers could be found in the specified lifetime @raises NXDOMAIN: the query name does not exist @raises NoAnswer: the response did not contain an answer and raise_on_no_answer is True. 
@raises NoNameservers: no non-broken nameservers are available to answer the question.""" if isinstance(qname, basestring): qname = dns.name.from_text(qname, None) if isinstance(rdtype, basestring): rdtype = dns.rdatatype.from_text(rdtype) if dns.rdatatype.is_metatype(rdtype): raise NoMetaqueries if isinstance(rdclass, basestring): rdclass = dns.rdataclass.from_text(rdclass) if dns.rdataclass.is_metaclass(rdclass): raise NoMetaqueries qnames_to_try = [] if qname.is_absolute(): qnames_to_try.append(qname) else: if len(qname) > 1: qnames_to_try.append(qname.concatenate(dns.name.root)) if self.search: for suffix in self.search: qnames_to_try.append(qname.concatenate(suffix)) else: qnames_to_try.append(qname.concatenate(self.domain)) all_nxdomain = True start = time.time() for qname in qnames_to_try: if self.cache: answer = self.cache.get((qname, rdtype, rdclass)) if answer: return answer request = dns.message.make_query(qname, rdtype, rdclass) if not self.keyname is None: request.use_tsig(self.keyring, self.keyname, algorithm=self.keyalgorithm) request.use_edns(self.edns, self.ednsflags, self.payload) response = None # make a copy of the servers list so we can alter it later. nameservers = self.nameservers[:] backoff = 0.10 while response is None: if len(nameservers) == 0: raise NoNameservers("No DNS servers %s could answer the query %s" % \ (str(self.nameservers), str(qname))) for nameserver in nameservers[:]: timeout = self._compute_timeout(start) try: if tcp: response = dns.query.tcp(request, nameserver, timeout, self.port, source=source) else: response = dns.query.udp(request, nameserver, timeout, self.port, source=source) except (socket.error, dns.exception.Timeout): # Communication failure or timeout. Go to the # next server response = None continue except dns.query.UnexpectedSource: # Who knows? Keep going. response = None continue except dns.exception.FormError: # We don't understand what this server is # saying. Take it out of the mix and # continue. 
nameservers.remove(nameserver) response = None continue rcode = response.rcode() if rcode == dns.rcode.NOERROR or \ rcode == dns.rcode.NXDOMAIN: break # We got a response, but we're not happy with the # rcode in it. Remove the server from the mix if # the rcode isn't SERVFAIL. if rcode != dns.rcode.SERVFAIL: nameservers.remove(nameserver) response = None if response is not None: break # All nameservers failed! if len(nameservers) > 0: # But we still have servers to try. Sleep a bit # so we don't pound them! timeout = self._compute_timeout(start) sleep_time = min(timeout, backoff) backoff *= 2 time.sleep(sleep_time) if response.rcode() == dns.rcode.NXDOMAIN: continue all_nxdomain = False break if all_nxdomain: raise NXDOMAIN("Domain does not exist") answer = Answer(qname, rdtype, rdclass, response, raise_on_no_answer) if self.cache: self.cache.put((qname, rdtype, rdclass), answer) return answer
[ "def", "query", "(", "self", ",", "qname", ",", "rdtype", "=", "dns", ".", "rdatatype", ".", "A", ",", "rdclass", "=", "dns", ".", "rdataclass", ".", "IN", ",", "tcp", "=", "False", ",", "source", "=", "None", ",", "raise_on_no_answer", "=", "True", ...
Query nameservers to find the answer to the question. The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects of the appropriate type, or strings that can be converted into objects of the appropriate type. E.g. For I{rdtype} the integer 2 and the the string 'NS' both mean to query for records with DNS rdata type NS. @param qname: the query name @type qname: dns.name.Name object or string @param rdtype: the query type @type rdtype: int or string @param rdclass: the query class @type rdclass: int or string @param tcp: use TCP to make the query (default is False). @type tcp: bool @param source: bind to this IP address (defaults to machine default IP). @type source: IP address in dotted quad notation @param raise_on_no_answer: raise NoAnswer if there's no answer (defaults is True). @type raise_on_no_answer: bool @rtype: dns.resolver.Answer instance @raises Timeout: no answers could be found in the specified lifetime @raises NXDOMAIN: the query name does not exist @raises NoAnswer: the response did not contain an answer and raise_on_no_answer is True. @raises NoNameservers: no non-broken nameservers are available to answer the question.
[ "Query", "nameservers", "to", "find", "the", "answer", "to", "the", "question", "." ]
python
train
gitpython-developers/GitPython
git/refs/symbolic.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/refs/symbolic.py#L141-L177
def _get_ref_info_helper(cls, repo, ref_path): """Return: (str(sha), str(target_ref_path)) if available, the sha the file at rela_path points to, or None. target_ref_path is the reference we point to, or None""" tokens = None repodir = _git_dir(repo, ref_path) try: with open(osp.join(repodir, ref_path), 'rt') as fp: value = fp.read().rstrip() # Don't only split on spaces, but on whitespace, which allows to parse lines like # 60b64ef992065e2600bfef6187a97f92398a9144 branch 'master' of git-server:/path/to/repo tokens = value.split() assert(len(tokens) != 0) except (OSError, IOError): # Probably we are just packed, find our entry in the packed refs file # NOTE: We are not a symbolic ref if we are in a packed file, as these # are excluded explicitly for sha, path in cls._iter_packed_refs(repo): if path != ref_path: continue # sha will be used tokens = sha, path break # END for each packed ref # END handle packed refs if tokens is None: raise ValueError("Reference at %r does not exist" % ref_path) # is it a reference ? if tokens[0] == 'ref:': return (None, tokens[1]) # its a commit if repo.re_hexsha_only.match(tokens[0]): return (tokens[0], None) raise ValueError("Failed to parse reference information from %r" % ref_path)
[ "def", "_get_ref_info_helper", "(", "cls", ",", "repo", ",", "ref_path", ")", ":", "tokens", "=", "None", "repodir", "=", "_git_dir", "(", "repo", ",", "ref_path", ")", "try", ":", "with", "open", "(", "osp", ".", "join", "(", "repodir", ",", "ref_path...
Return: (str(sha), str(target_ref_path)) if available, the sha the file at rela_path points to, or None. target_ref_path is the reference we point to, or None
[ "Return", ":", "(", "str", "(", "sha", ")", "str", "(", "target_ref_path", "))", "if", "available", "the", "sha", "the", "file", "at", "rela_path", "points", "to", "or", "None", ".", "target_ref_path", "is", "the", "reference", "we", "point", "to", "or",...
python
train
ontio/ontology-python-sdk
ontology/io/binary_reader.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L55-L69
def read_byte(self, do_ord=True) -> int: """ Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred. """ try: if do_ord: return ord(self.stream.read(1)) else: return self.stream.read(1) except Exception as e: raise SDKException(ErrorCode.read_byte_error(e.args[0]))
[ "def", "read_byte", "(", "self", ",", "do_ord", "=", "True", ")", "->", "int", ":", "try", ":", "if", "do_ord", ":", "return", "ord", "(", "self", ".", "stream", ".", "read", "(", "1", ")", ")", "else", ":", "return", "self", ".", "stream", ".", ...
Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred.
[ "Read", "a", "single", "byte", ".", "Args", ":", "do_ord", "(", "bool", ")", ":", "(", "default", "True", ")", "convert", "the", "byte", "to", "an", "ordinal", "first", ".", "Returns", ":", "bytes", ":", "a", "single", "byte", "if", "successful", "."...
python
train
Parallels/artifactory
artifactory.py
https://github.com/Parallels/artifactory/blob/09ddcc4ae15095eec2347d39774c3f8aca6c4654/artifactory.py#L649-L662
def open(self, pathobj): """ Opens the remote file and returns a file-like object HTTPResponse Given the nature of HTTP streaming, this object doesn't support seek() """ url = str(pathobj) raw, code = self.rest_get_stream(url, auth=pathobj.auth, verify=pathobj.verify, cert=pathobj.cert) if not code == 200: raise RuntimeError("%d" % code) return raw
[ "def", "open", "(", "self", ",", "pathobj", ")", ":", "url", "=", "str", "(", "pathobj", ")", "raw", ",", "code", "=", "self", ".", "rest_get_stream", "(", "url", ",", "auth", "=", "pathobj", ".", "auth", ",", "verify", "=", "pathobj", ".", "verify...
Opens the remote file and returns a file-like object HTTPResponse Given the nature of HTTP streaming, this object doesn't support seek()
[ "Opens", "the", "remote", "file", "and", "returns", "a", "file", "-", "like", "object", "HTTPResponse", "Given", "the", "nature", "of", "HTTP", "streaming", "this", "object", "doesn", "t", "support", "seek", "()" ]
python
train
StanfordVL/robosuite
robosuite/models/base.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/base.py#L60-L83
def merge(self, other, merge_body=True): """ Default merge method. Args: other: another MujocoXML instance raises XML error if @other is not a MujocoXML instance. merges <worldbody/>, <actuator/> and <asset/> of @other into @self merge_body: True if merging child bodies of @other. Defaults to True. """ if not isinstance(other, MujocoXML): raise XMLError("{} is not a MujocoXML instance.".format(type(other))) if merge_body: for body in other.worldbody: self.worldbody.append(body) self.merge_asset(other) for one_actuator in other.actuator: self.actuator.append(one_actuator) for one_equality in other.equality: self.equality.append(one_equality) for one_contact in other.contact: self.contact.append(one_contact) for one_default in other.default: self.default.append(one_default)
[ "def", "merge", "(", "self", ",", "other", ",", "merge_body", "=", "True", ")", ":", "if", "not", "isinstance", "(", "other", ",", "MujocoXML", ")", ":", "raise", "XMLError", "(", "\"{} is not a MujocoXML instance.\"", ".", "format", "(", "type", "(", "oth...
Default merge method. Args: other: another MujocoXML instance raises XML error if @other is not a MujocoXML instance. merges <worldbody/>, <actuator/> and <asset/> of @other into @self merge_body: True if merging child bodies of @other. Defaults to True.
[ "Default", "merge", "method", "." ]
python
train
fracpete/python-weka-wrapper3
python/weka/classifiers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/classifiers.py#L148-L156
def batch_size(self, size): """ Sets the batch size, in case this classifier is a batch predictor. :param size: the size of the batch :type size: str """ if self.is_batchpredictor: javabridge.call(self.jobject, "setBatchSize", "(Ljava/lang/String;)V", size)
[ "def", "batch_size", "(", "self", ",", "size", ")", ":", "if", "self", ".", "is_batchpredictor", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"setBatchSize\"", ",", "\"(Ljava/lang/String;)V\"", ",", "size", ")" ]
Sets the batch size, in case this classifier is a batch predictor. :param size: the size of the batch :type size: str
[ "Sets", "the", "batch", "size", "in", "case", "this", "classifier", "is", "a", "batch", "predictor", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/rdfvalues/structs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L1395-L1407
def ConvertToWireFormat(self, value): """Convert to the wire format. Args: value: is of type RepeatedFieldHelper. Returns: A wire format representation of the value. """ output = _SerializeEntries( (python_format, wire_format, value.type_descriptor) for (python_format, wire_format) in value.wrapped_list) return b"", b"", output
[ "def", "ConvertToWireFormat", "(", "self", ",", "value", ")", ":", "output", "=", "_SerializeEntries", "(", "(", "python_format", ",", "wire_format", ",", "value", ".", "type_descriptor", ")", "for", "(", "python_format", ",", "wire_format", ")", "in", "value"...
Convert to the wire format. Args: value: is of type RepeatedFieldHelper. Returns: A wire format representation of the value.
[ "Convert", "to", "the", "wire", "format", "." ]
python
train
9b/google-alerts
google_alerts/__init__.py
https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L58-L85
def obfuscate(p, action): """Obfuscate the auth details to avoid easy snatching. It's best to use a throw away account for these alerts to avoid having your authentication put at risk by storing it locally. """ key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH" s = list() if action == 'store': if PY2: for i in range(len(p)): kc = key[i % len(key)] ec = chr((ord(p[i]) + ord(kc)) % 256) s.append(ec) return base64.urlsafe_b64encode("".join(s)) else: return base64.urlsafe_b64encode(p.encode()).decode() else: if PY2: e = base64.urlsafe_b64decode(p) for i in range(len(e)): kc = key[i % len(key)] dc = chr((256 + ord(e[i]) - ord(kc)) % 256) s.append(dc) return "".join(s) else: e = base64.urlsafe_b64decode(p) return e.decode()
[ "def", "obfuscate", "(", "p", ",", "action", ")", ":", "key", "=", "\"ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH\"", "s", "=", "list", "(", ")", "if", "action", "==", "'store'", ":", "if", "PY2", ":", "for", "i", "in", "range", "(", "len", ...
Obfuscate the auth details to avoid easy snatching. It's best to use a throw away account for these alerts to avoid having your authentication put at risk by storing it locally.
[ "Obfuscate", "the", "auth", "details", "to", "avoid", "easy", "snatching", "." ]
python
train
openstack/quark
quark/plugin_modules/subnets.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L364-L388
def get_subnet(context, id, fields=None): """Retrieve a subnet. : param context: neutron api request context : param id: UUID representing the subnet to fetch. : param fields: a list of strings that are valid keys in a subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_subnet %s for tenant %s with fields %s" % (id, context.tenant_id, fields)) subnet = db_api.subnet_find(context=context, limit=None, page_reverse=False, sorts=['id'], marker_obj=None, fields=None, id=id, join_dns=True, join_routes=True, scope=db_api.ONE) if not subnet: raise n_exc.SubnetNotFound(subnet_id=id) cache = subnet.get("_allocation_pool_cache") if not cache: new_cache = subnet.allocation_pools db_api.subnet_update_set_alloc_pool_cache(context, subnet, new_cache) return v._make_subnet_dict(subnet)
[ "def", "get_subnet", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_subnet %s for tenant %s with fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "subnet", "=", "db_a...
Retrieve a subnet. : param context: neutron api request context : param id: UUID representing the subnet to fetch. : param fields: a list of strings that are valid keys in a subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
[ "Retrieve", "a", "subnet", "." ]
python
valid
gwpy/gwpy
gwpy/utils/lal.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/lal.py#L237-L251
def to_lal_ligotimegps(gps): """Convert the given GPS time to a `lal.LIGOTimeGPS` object Parameters ---------- gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str` input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps` Returns ------- ligotimegps : `lal.LIGOTimeGPS` a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time """ gps = to_gps(gps) return lal.LIGOTimeGPS(gps.gpsSeconds, gps.gpsNanoSeconds)
[ "def", "to_lal_ligotimegps", "(", "gps", ")", ":", "gps", "=", "to_gps", "(", "gps", ")", "return", "lal", ".", "LIGOTimeGPS", "(", "gps", ".", "gpsSeconds", ",", "gps", ".", "gpsNanoSeconds", ")" ]
Convert the given GPS time to a `lal.LIGOTimeGPS` object Parameters ---------- gps : `~gwpy.time.LIGOTimeGPS`, `float`, `str` input GPS time, can be anything parsable by :meth:`~gwpy.time.to_gps` Returns ------- ligotimegps : `lal.LIGOTimeGPS` a SWIG-LAL `~lal.LIGOTimeGPS` representation of the given GPS time
[ "Convert", "the", "given", "GPS", "time", "to", "a", "lal", ".", "LIGOTimeGPS", "object" ]
python
train
jeffbuttars/upkg
upkg/lib/search/Search.py
https://github.com/jeffbuttars/upkg/blob/7d65a0b2eb4469aac5856b963ef2d429f2920dae/upkg/lib/search/Search.py#L111-L131
def _fmt_tdelta(self, td): """todo: Docstring for _fmt_tdelta :param td: arg description :type td: type description :return: :rtype: """ rel = dateutil.relativedelta.relativedelta(datetime.datetime.utcnow(), td) if rel.years: return "{} Years and {} Months Ago".format(rel.years, rel.months) if rel.months: return "{} Months Ago".format(rel.months) if rel.days: return "{} Days Ago".format(rel.days) if rel.hours: return "{} Hours Ago".format(rel.hours) return "{} Minutes Ago".format(rel.minutes)
[ "def", "_fmt_tdelta", "(", "self", ",", "td", ")", ":", "rel", "=", "dateutil", ".", "relativedelta", ".", "relativedelta", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ",", "td", ")", "if", "rel", ".", "years", ":", "return", "\"{} Years...
todo: Docstring for _fmt_tdelta :param td: arg description :type td: type description :return: :rtype:
[ "todo", ":", "Docstring", "for", "_fmt_tdelta" ]
python
train
wilson-eft/wilson
wilson/translate/wet.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/wet.py#L417-L599
def _JMS_to_Fierz_III_IV_V(C, qqqq): """From JMS to 4-quark Fierz basis for Classes III, IV and V. `qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc.""" #case dduu classIII = ['sbuc', 'sbcu', 'dbuc', 'dbcu', 'dsuc', 'dscu'] classVdduu = ['sbuu' , 'dbuu', 'dsuu', 'sbcc' , 'dbcc', 'dscc'] if qqqq in classIII + classVdduu: f1 = dflav[qqqq[0]] f2 = dflav[qqqq[1]] f3 = uflav[qqqq[2]] f4 = uflav[qqqq[3]] return { 'F' + qqqq + '1' : C["V1udLL"][f3, f4, f1, f2] - C["V8udLL"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '2' : C["V8udLL"][f3, f4, f1, f2] / 2, 'F' + qqqq + '3' : C["V1duLR"][f1, f2, f3, f4] - C["V8duLR"][f1, f2, f3, f4] / (2 * Nc), 'F' + qqqq + '4' : C["V8duLR"][f1, f2, f3, f4] / 2, 'F' + qqqq + '5' : C["S1udRR"][f3, f4, f1, f2] - C["S8udduRR"][f3, f2, f1, f4] / 4 - C["S8udRR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '6' : -C["S1udduRR"][f3, f2, f1, f4] / 2 + C["S8udduRR"][f3, f2, f1, f4] /(4 * Nc) + C["S8udRR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '7' : -C["V8udduLR"][f4, f1, f2, f3].conj(), 'F' + qqqq + '8' : -2 * C["V1udduLR"][f4, f1, f2, f3].conj() + C["V8udduLR"][f4, f1, f2, f3].conj() / Nc, 'F' + qqqq + '9' : -C["S8udduRR"][f3, f2, f1, f4] / 16, 'F' + qqqq + '10' : -C["S1udduRR"][f3, f2, f1, f4] / 8 + C["S8udduRR"][f3, f2, f1, f4] / (16 * Nc), 'F' + qqqq + '1p' : C["V1udRR"][f3, f4, f1, f2] - C["V8udRR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '2p' : C["V8udRR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '3p' : C["V1udLR"][f3, f4, f1, f2] - C["V8udLR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '4p' : C["V8udLR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '5p' : C["S1udRR"][f4, f3, f2, f1].conj() - C["S8udduRR"][f4, f1, f2, f3].conj() / 4 - C["S8udRR"][f4, f3, f2, f1].conj() / (2 * Nc), 'F' + qqqq + '6p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 2 + C["S8udduRR"][f4, f1, f2, f3].conj()/(4 * Nc) + C["S8udRR"][f4, f3, f2, f1].conj() / 2, 'F' + qqqq + '7p' : -C["V8udduLR"][f3, f2, f1, f4], 'F' + qqqq + '8p' : - 2 * C["V1udduLR"][f3, f2, f1, f4] + C["V8udduLR"][f3, f2, f1, f4] 
/ Nc, 'F' + qqqq + '9p' : -C["S8udduRR"][f4, f1, f2, f3].conj() / 16, 'F' + qqqq + '10p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 8 + C["S8udduRR"][f4, f1, f2, f3].conj() / 16 / Nc } classVuudd = ['ucdd', 'ucss', 'ucbb'] if qqqq in classVuudd: f3 = uflav[qqqq[0]] f4 = uflav[qqqq[1]] f1 = dflav[qqqq[2]] f2 = dflav[qqqq[3]] return { 'F' + qqqq + '1' : C["V1udLL"][f3, f4, f1, f2] - C["V8udLL"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '2' : C["V8udLL"][f3, f4, f1, f2] / 2, 'F' + qqqq + '3p' : C["V1duLR"][f1, f2, f3, f4] - C["V8duLR"][f1, f2, f3, f4] / (2 * Nc), 'F' + qqqq + '4p' : C["V8duLR"][f1, f2, f3, f4] / 2, 'F' + qqqq + '5' : C["S1udRR"][f3, f4, f1, f2] - C["S8udduRR"][f3, f2, f1, f4] / 4 - C["S8udRR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '6' : -C["S1udduRR"][f3, f2, f1, f4] / 2 + C["S8udduRR"][f3, f2, f1, f4] /(4 * Nc) + C["S8udRR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '7p' : -C["V8udduLR"][f4, f1, f2, f3].conj(), 'F' + qqqq + '8p' : -2 * C["V1udduLR"][f4, f1, f2, f3].conj() + C["V8udduLR"][f4, f1, f2, f3].conj() / Nc, 'F' + qqqq + '9' : -C["S8udduRR"][f3, f2, f1, f4] / 16, 'F' + qqqq + '10' : -C["S1udduRR"][f3, f2, f1, f4] / 8 + C["S8udduRR"][f3, f2, f1, f4] / (16 * Nc), 'F' + qqqq + '1p' : C["V1udRR"][f3, f4, f1, f2] - C["V8udRR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '2p' : C["V8udRR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '3' : C["V1udLR"][f3, f4, f1, f2] - C["V8udLR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '4' : C["V8udLR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '5p' : C["S1udRR"][f4, f3, f2, f1].conj() - C["S8udduRR"][f4, f1, f2, f3].conj() / 4 - C["S8udRR"][f4, f3, f2, f1].conj() / (2 * Nc), 'F' + qqqq + '6p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 2 + C["S8udduRR"][f4, f1, f2, f3].conj()/(4 * Nc) + C["S8udRR"][f4, f3, f2, f1].conj() / 2, 'F' + qqqq + '7' : -C["V8udduLR"][f3, f2, f1, f4], 'F' + qqqq + '8' : - 2 * C["V1udduLR"][f3, f2, f1, f4] + C["V8udduLR"][f3, f2, f1, f4] / Nc, 'F' + qqqq + '9p' : -C["S8udduRR"][f4, f1, f2, f3].conj() / 16, 'F' + qqqq + 
'10p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 8 + C["S8udduRR"][f4, f1, f2, f3].conj() / 16 / Nc } #case dddd classIV = ['sbsd', 'dbds', 'bsbd'] classVdddd = ['sbss', 'dbdd', 'dsdd', 'sbbb', 'dbbb', 'dsss'] classVddddind = ['sbdd', 'dsbb', 'dbss'] if qqqq in classIV + classVdddd + classVddddind: f1 = dflav[qqqq[0]] f2 = dflav[qqqq[1]] f3 = dflav[qqqq[2]] f4 = dflav[qqqq[3]] return { 'F'+ qqqq +'1' : C["VddLL"][f3, f4, f1, f2], 'F'+ qqqq +'2' : C["VddLL"][f1, f4, f3, f2], 'F'+ qqqq +'3' : C["V1ddLR"][f1, f2, f3, f4] - C["V8ddLR"][f1, f2, f3, f4]/(2 * Nc), 'F'+ qqqq +'4' : C["V8ddLR"][f1, f2, f3, f4] / 2, 'F'+ qqqq +'5' : C["S1ddRR"][f3, f4, f1, f2] - C["S8ddRR"][f3, f2, f1,f4] / 4 - C["S8ddRR"][f3, f4, f1, f2] / (2 * Nc), 'F'+ qqqq +'6' : -C["S1ddRR"][f1, f4, f3, f2] / 2 + C["S8ddRR"][f3, f2, f1, f4] / (4 * Nc) + C["S8ddRR"][f3, f4, f1, f2] / 2, 'F'+ qqqq +'7' : -C["V8ddLR"][f1, f4, f3, f2], 'F'+ qqqq +'8' : -2 * C["V1ddLR"][f1, f4, f3, f2] + C["V8ddLR"][f1, f4, f3, f2] / Nc, 'F'+ qqqq +'9' : -C["S8ddRR"][f3, f2, f1, f4] / 16, 'F'+ qqqq +'10' : -C["S1ddRR"][f1, f4, f3, f2] / 8 + C["S8ddRR"][f3, f2, f1, f4] / (16 * Nc), 'F'+ qqqq +'1p' : C["VddRR"][f3, f4, f1, f2], 'F'+ qqqq +'2p' : C["VddRR"][f1, f4, f3, f2], 'F'+ qqqq +'3p' : C["V1ddLR"][f3, f4, f1, f2] - C["V8ddLR"][f3, f4, f1,f2] / (2 * Nc), 'F'+ qqqq +'4p' : C["V8ddLR"][f3, f4, f1, f2] / 2, 'F'+ qqqq +'5p' : C["S1ddRR"][f4, f3, f2, f1].conj() - C["S8ddRR"][f4, f1, f2, f3].conj() / 4 -C["S8ddRR"][f4, f3, f2, f1].conj() / 2 / Nc, 'F'+ qqqq +'6p' : -C["S1ddRR"][f4, f1, f2, f3].conj() / 2 + C["S8ddRR"][f4, f1, f2, f3].conj() / 4 / Nc + C["S8ddRR"][f4, f3, f2, f1].conj() / 2, 'F'+ qqqq +'7p' : -C["V8ddLR"][f3, f2, f1, f4], 'F'+ qqqq +'8p' : -2 * C["V1ddLR"][f3, f2, f1, f4] + C["V8ddLR"][f3, f2, f1, f4] / Nc, 'F'+ qqqq +'9p' : -C["S8ddRR"][f4, f1, f2, f3].conj() / 16, 'F'+ qqqq +'10p' : -C["S1ddRR"][f4, f1, f2, f3].conj() / 8 + C["S8ddRR"][f4, f1, f2, f3].conj() / 16 / Nc } #case uuuu classVuuuu = ['ucuu', 'cucc', 
'cuuu', 'uccc'] if qqqq in classVuuuu: f1 = uflav[qqqq[0]] f2 = uflav[qqqq[1]] f3 = uflav[qqqq[2]] f4 = uflav[qqqq[3]] return { 'F' + qqqq + '1' : C["VuuLL"][f3, f4, f1, f2], 'F' + qqqq + '2' : C["VuuLL"][f1, f4, f3, f2], 'F' + qqqq + '3' : C["V1uuLR"][f1, f2, f3, f4] - C["V8uuLR"][f1, f2, f3, f4] / (2 * Nc), 'F' + qqqq + '4' : C["V8uuLR"][f1, f2, f3, f4] / 2, 'F' + qqqq + '5' : C["S1uuRR"][f3, f4, f1, f2] - C["S8uuRR"][f3, f2, f1, f4] / 4 - C["S8uuRR"][f3, f4, f1, f2] / (2 * Nc), 'F' + qqqq + '6' : -C["S1uuRR"][f1, f4, f3, f2] / 2 + C["S8uuRR"][f3, f2, f1, f4] / (4 * Nc) + C["S8uuRR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '7' : -C["V8uuLR"][f1, f4, f3, f2], 'F' + qqqq + '8' : -2 * C["V1uuLR"][f1, f4, f3, f2] + C["V8uuLR"][f1, f4, f3, f2] / Nc, 'F' + qqqq + '9' : -C["S8uuRR"][f3, f2, f1, f4] / 16, 'F' + qqqq + '10' : -C["S1uuRR"][f1, f4, f3, f2] / 8 + C["S8uuRR"][f3, f2, f1, f4] / (16 * Nc), 'F'+ qqqq + '1p': C["VuuRR"][f3, f4, f1, f2], 'F' + qqqq + '2p': C["VuuRR"][f1, f3, f4, f2], 'F' + qqqq + '3p' : C["V1uuLR"][f3, f4, f1, f2] - C["V8uuLR"][f3, f4, f1,f2] / (2 * Nc), 'F' + qqqq + '4p' : C["V8uuLR"][f3, f4, f1, f2] / 2, 'F' + qqqq + '5p' : C["S1uuRR"][f4, f3, f2, f1].conj() - C["S8uuRR"][f4, f1, f2, f3].conj() / 4 - C["S8uuRR"][f4, f3, f2, f1].conj() / 2 / Nc, 'F' + qqqq + '6p' : -C["S1uuRR"][f4, f1, f2, f3].conj() / 2 + C["S8uuRR"][f4, f1, f2, f3].conj() / 4 / Nc + C["S8uuRR"][f4, f3, f2, f1].conj() / 2, 'F' + qqqq + '7p' : -C["V8uuLR"][f3, f2, f1, f4], 'F' + qqqq + '8p' : -2 * C["V1uuLR"][f3, f2, f1, f4] + C["V8uuLR"][f3, f2, f1, f4] / Nc, 'F' + qqqq + '9p' : -C["S8uuRR"][f4, f1, f2, f3].conj() / 16, 'F' + qqqq + '10p' : -C["S1uuRR"][f4, f1, f2, f3].conj() / 8 + C["S8uuRR"][f4, f1, f2, f3].conj() / 16 / Nc } else: raise ValueError("Case not implemented: {}".format(qqqq))
[ "def", "_JMS_to_Fierz_III_IV_V", "(", "C", ",", "qqqq", ")", ":", "#case dduu", "classIII", "=", "[", "'sbuc'", ",", "'sbcu'", ",", "'dbuc'", ",", "'dbcu'", ",", "'dsuc'", ",", "'dscu'", "]", "classVdduu", "=", "[", "'sbuu'", ",", "'dbuu'", ",", "'dsuu'"...
From JMS to 4-quark Fierz basis for Classes III, IV and V. `qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc.
[ "From", "JMS", "to", "4", "-", "quark", "Fierz", "basis", "for", "Classes", "III", "IV", "and", "V", ".", "qqqq", "should", "be", "of", "the", "form", "sbuc", "sdcc", "ucuu", "etc", "." ]
python
train
softlayer/softlayer-python
SoftLayer/managers/block.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/block.py#L465-L474
def disable_snapshots(self, volume_id, schedule_type): """Disables snapshots for a specific block volume at a given schedule :param integer volume_id: The id of the volume :param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY' :return: Returns whether successfully disabled or not """ return self.client.call('Network_Storage', 'disableSnapshots', schedule_type, id=volume_id)
[ "def", "disable_snapshots", "(", "self", ",", "volume_id", ",", "schedule_type", ")", ":", "return", "self", ".", "client", ".", "call", "(", "'Network_Storage'", ",", "'disableSnapshots'", ",", "schedule_type", ",", "id", "=", "volume_id", ")" ]
Disables snapshots for a specific block volume at a given schedule :param integer volume_id: The id of the volume :param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY' :return: Returns whether successfully disabled or not
[ "Disables", "snapshots", "for", "a", "specific", "block", "volume", "at", "a", "given", "schedule" ]
python
train
jbeluch/xbmcswift2
xbmcswift2/xbmcmixin.py
https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/xbmcmixin.py#L253-L262
def get_view_mode_id(self, view_mode): '''Attempts to return a view_mode_id for a given view_mode taking into account the current skin. If not view_mode_id can be found, None is returned. 'thumbnail' is currently the only suppported view_mode. ''' view_mode_ids = VIEW_MODES.get(view_mode.lower()) if view_mode_ids: return view_mode_ids.get(xbmc.getSkinDir()) return None
[ "def", "get_view_mode_id", "(", "self", ",", "view_mode", ")", ":", "view_mode_ids", "=", "VIEW_MODES", ".", "get", "(", "view_mode", ".", "lower", "(", ")", ")", "if", "view_mode_ids", ":", "return", "view_mode_ids", ".", "get", "(", "xbmc", ".", "getSkin...
Attempts to return a view_mode_id for a given view_mode taking into account the current skin. If not view_mode_id can be found, None is returned. 'thumbnail' is currently the only suppported view_mode.
[ "Attempts", "to", "return", "a", "view_mode_id", "for", "a", "given", "view_mode", "taking", "into", "account", "the", "current", "skin", ".", "If", "not", "view_mode_id", "can", "be", "found", "None", "is", "returned", ".", "thumbnail", "is", "currently", "...
python
train