Feed stub def _feed(self, cube, data_sources, data_sinks, global_iter_args): """ Feed stub """ try: self._feed_impl(cube, data_sources, data_sinks, global_iter_args) except Exception as e: montblanc.log.exception("Feed Exception") raise
Implementation of staging_area feeding def _feed_impl(self, cube, data_sources, data_sinks, global_iter_args): """ Implementation of staging_area feeding """ session = self._tf_session FD = self._tf_feed_data LSA = FD.local # Get source strides out before the local sizes are modified during # the source loops below src_types = LSA.sources.keys() src_strides = [int(i) for i in cube.dim_extent_size(*src_types)] src_staging_areas = [[LSA.sources[t][s] for t in src_types] for s in range(self._nr_of_shards)] compute_feed_dict = { ph: cube.dim_global_size(n) for n, ph in FD.src_ph_vars.iteritems() } compute_feed_dict.update({ ph: getattr(cube, n) for n, ph in FD.property_ph_vars.iteritems() }) chunks_fed = 0 which_shard = itertools.cycle([self._shard(d,s) for s in range(self._shards_per_device) for d, dev in enumerate(self._devices)]) while True: try: # Get the descriptor describing a portion of the RIME result = session.run(LSA.descriptor.get_op) descriptor = result['descriptor'] except tf.errors.OutOfRangeError as e: montblanc.log.exception("Descriptor reading exception") # The descriptor queue is closed and empty, so 'descriptor' # would be unbound below; stop feeding break # Quit if EOF if descriptor[0] == -1: break # Make it read-only so we can hash the contents descriptor.flags.writeable = False # Find indices of the emptiest staging_areas and, by implication # the shard with the least work assigned to it emptiest_staging_areas = np.argsort(self._inputs_waiting.get()) shard = emptiest_staging_areas[0] shard = which_shard.next() feed_f = self._feed_executors[shard].submit(self._feed_actual, data_sources.copy(), cube.copy(), descriptor, shard, src_types, src_strides, src_staging_areas[shard], global_iter_args) compute_f = self._compute_executors[shard].submit(self._compute, compute_feed_dict, shard) consume_f = self._consumer_executor.submit(self._consume, data_sinks.copy(), cube.copy(), global_iter_args) self._inputs_waiting.increment(shard) yield (feed_f, compute_f, consume_f) chunks_fed += 1 montblanc.log.info("Done feeding {n} chunks.".format(n=chunks_fed))
Call the tensorflow compute def _compute(self, feed_dict, shard): """ Call the tensorflow compute """ try: descriptor, enq = self._tfrun(self._tf_expr[shard], feed_dict=feed_dict) self._inputs_waiting.decrement(shard) except Exception as e: montblanc.log.exception("Compute Exception") raise
Consume stub def _consume(self, data_sinks, cube, global_iter_args): """ Consume stub """ try: return self._consume_impl(data_sinks, cube, global_iter_args) except Exception: montblanc.log.exception("Consumer Exception") raise
Consume def _consume_impl(self, data_sinks, cube, global_iter_args): """ Consume """ LSA = self._tf_feed_data.local output = self._tfrun(LSA.output.get_op) # Expect the descriptor in the first tuple position assert len(output) > 0 assert LSA.output.fed_arrays[0] == 'descriptor' descriptor = output['descriptor'] # Make it read-only so we can hash the contents descriptor.flags.writeable = False dims = self._transcoder.decode(descriptor) cube.update_dimensions(dims) # Obtain and remove input data from the source cache try: input_data = self._source_cache.pop(descriptor.data) except KeyError: raise ValueError("No input data cache available " "in source cache for descriptor {}!" .format(descriptor)) # For each array in our output, call the associated data sink gen = ((n, a) for n, a in output.iteritems() if not n == 'descriptor') for n, a in gen: sink_context = SinkContext(n, cube, self.config(), global_iter_args, cube.array(n) if n in cube.arrays() else {}, a, input_data) _supply_data(data_sinks[n], sink_context)
Produces a SolverConfiguration object, inherited from a simple python dict, and containing the options required to configure the RIME Solver. Keyword arguments ----------------- Any keyword arguments are inserted into the returned dict. Returns ------- A SolverConfiguration object. def rime_solver_cfg(**kwargs): """ Produces a SolverConfiguration object, inherited from a simple python dict, and containing the options required to configure the RIME Solver. Keyword arguments ----------------- Any keyword arguments are inserted into the returned dict. Returns ------- A SolverConfiguration object. """ from configuration import (load_config, config_validator, raise_validator_errors) def _merge_copy(d1, d2): return { k: _merge_copy(d1[k], d2[k]) if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict) else d2[k] for k in d2 } try: cfg_file = kwargs.pop('cfg_file') except KeyError as e: slvr_cfg = kwargs else: cfg = load_config(cfg_file) slvr_cfg = _merge_copy(cfg, kwargs) # Validate the configuration, raising any errors validator = config_validator() validator.validate(slvr_cfg) raise_validator_errors(validator) return validator.document
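A usage sketch of the merge behaviour described above; the option names here are illustrative rather than confirmed montblanc configuration keys, and keyword arguments win over file values because _merge_copy copies the kwargs dict over the loaded one:

import montblanc

# Keyword arguments only
slvr_cfg = montblanc.rime_solver_cfg(data_source='ms')

# Merge a configuration file with keyword overrides; the kwargs take
# precedence because _merge_copy copies d2 (kwargs) over d1 (the file)
slvr_cfg = montblanc.rime_solver_cfg(cfg_file='montblanc.cfg',
                                     data_source='ms')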
Returns a dictionary of beam filename pairs, keyed on correlation, from the Cartesian product of correlations and real, imaginary pairs Given 'beam_$(corr)_$(reim).fits' returns: { 'xx' : ('beam_xx_re.fits', 'beam_xx_im.fits'), 'xy' : ('beam_xy_re.fits', 'beam_xy_im.fits'), ... 'yy' : ('beam_yy_re.fits', 'beam_yy_im.fits'), } Given 'beam_$(CORR)_$(REIM).fits' returns: { 'xx' : ('beam_XX_RE.fits', 'beam_XX_IM.fits'), 'xy' : ('beam_XY_RE.fits', 'beam_XY_IM.fits'), ... 'yy' : ('beam_YY_RE.fits', 'beam_YY_IM.fits'), } def _create_filenames(filename_schema, feed_type): """ Returns a dictionary of beam filename pairs, keyed on correlation, from the Cartesian product of correlations and real, imaginary pairs Given 'beam_$(corr)_$(reim).fits' returns: { 'xx' : ('beam_xx_re.fits', 'beam_xx_im.fits'), 'xy' : ('beam_xy_re.fits', 'beam_xy_im.fits'), ... 'yy' : ('beam_yy_re.fits', 'beam_yy_im.fits'), } Given 'beam_$(CORR)_$(REIM).fits' returns: { 'xx' : ('beam_XX_RE.fits', 'beam_XX_IM.fits'), 'xy' : ('beam_XY_RE.fits', 'beam_XY_IM.fits'), ... 'yy' : ('beam_YY_RE.fits', 'beam_YY_IM.fits'), } """ template = FitsFilenameTemplate(filename_schema) def _re_im_filenames(corr, template): try: return tuple(template.substitute( corr=corr.lower(), CORR=corr.upper(), reim=ri.lower(), REIM=ri.upper()) for ri in REIM) except KeyError: raise ValueError("Invalid filename schema '%s'. " "FITS Beam filename schemas " "must follow forms such as " "'beam_$(corr)_$(reim).fits' or " "'beam_$(CORR)_$(REIM).fits'." % filename_schema) if feed_type == 'linear': CORRELATIONS = LINEAR_CORRELATIONS elif feed_type == 'circular': CORRELATIONS = CIRCULAR_CORRELATIONS else: raise ValueError("Invalid feed_type '{}'. " "Should be 'linear' or 'circular'" .format(feed_type)) return collections.OrderedDict( (c, _re_im_filenames(c, template)) for c in CORRELATIONS)
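FitsFilenameTemplate itself is not shown in this section. A minimal sketch of how such a template could work, assuming it merely swaps string.Template's ${name} delimiters for the $(name) form used in the schemas above:

import string

class FitsFilenameTemplate(string.Template):
    # Same grammar as string.Template, but with $(name) instead of ${name}
    pattern = r"""
    \$(?:
      (?P<escaped>\$)                   |   # $$ is an escaped dollar
      (?P<named>[_a-z][_a-z0-9]*)       |   # $name
      \((?P<braced>[_a-z][_a-z0-9]*)\)  |   # $(name)
      (?P<invalid>)                         # anything else is an error
    )
    """

template = FitsFilenameTemplate('beam_$(corr)_$(reim).fits')
assert template.substitute(corr='xx', reim='re') == 'beam_xx_re.fits'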
Given a {correlation: filename} mapping for filenames returns a {correlation: file handle} mapping def _open_fits_files(filenames): """ Given a {correlation: filename} mapping for filenames returns a {correlation: file handle} mapping """ kw = { 'mode' : 'update', 'memmap' : False } def _fh(fn): """ Returns a filehandle or None if file does not exist """ return fits.open(fn, **kw) if os.path.exists(fn) else None return collections.OrderedDict( (corr, tuple(_fh(fn) for fn in files)) for corr, files in filenames.iteritems() )
Create a FitsAxes object def _create_axes(filenames, file_dict): """ Create a FitsAxes object """ try: # Loop through the file dictionary, finding the # first open FITS file. f = iter(f for tup in file_dict.itervalues() for f in tup if f is not None).next() except StopIteration: raise ValueError("No FITS files were found. " "Searched filenames: '{f}'." .format(f=filenames.values())), None, sys.exc_info()[2] # Create a FitsAxes object axes = FitsAxes(f[0].header) # Scale any axes in degrees to radians for i, u in enumerate(axes.cunit): if u == 'DEG': axes.cunit[i] = 'RAD' axes.set_axis_scale(i, np.pi/180.0) return axes
Initialise the object by generating appropriate filenames, opening associated file handles and inspecting the FITS axes of these files. def _initialise(self, feed_type="linear"): """ Initialise the object by generating appropriate filenames, opening associated file handles and inspecting the FITS axes of these files. """ self._filenames = filenames = _create_filenames(self._filename_schema, feed_type) self._files = files = _open_fits_files(filenames) self._axes = axes = _create_axes(filenames, files) self._dim_indices = dim_indices = l_ax, m_ax, f_ax = tuple( axes.iaxis(d) for d in self._fits_dims) # Complain if we can't find required axes for i, ax in zip(dim_indices, self._fits_dims): if i == -1: raise ValueError("'%s' axis not found!" % ax) self._cube_extents = _cube_extents(axes, l_ax, m_ax, f_ax, self._l_sign, self._m_sign) self._shape = tuple(axes.naxis[d] for d in dim_indices) + (4,) self._beam_freq_map = axes.grid[f_ax] # Now describe our dimension sizes self._dim_updates = [(n, axes.naxis[i]) for n, i in zip(self._beam_dims, dim_indices)] self._initialised = True
ebeam cube data source def ebeam(self, context): """ ebeam cube data source """ if context.shape != self.shape: raise ValueError("Partial feeding of the " "beam cube is not yet supported %s %s." % (context.shape, self.shape)) ebeam = np.empty(context.shape, context.dtype) # Iterate through the correlations, # assigning real and imaginary data, if present, # otherwise zeroing the correlation for i, (re, im) in enumerate(self._files.itervalues()): ebeam[:,:,:,i].real[:] = 0 if re is None else re[0].data.T ebeam[:,:,:,i].imag[:] = 0 if im is None else im[0].data.T return ebeam
model visibility data sink def model_vis(self, context): """ model visibility data sink """ column = self._vis_column msshape = None # Do we have a column descriptor for the supplied column? try: coldesc = self._manager.column_descriptors[column] except KeyError as e: coldesc = None # Try to get the shape from the descriptor if coldesc is not None: try: msshape = [-1] + coldesc['shape'].tolist() except KeyError as e: msshape = None # Otherwise guess it and warn if msshape is None: guessed_shape = [self._manager._nchan, 4] montblanc.log.warn("Could not obtain 'shape' from the '{c}' " "column descriptor. Guessing it is '{gs}'.".format( c=column, gs=guessed_shape)) msshape = [-1] + guessed_shape lrow, urow = MS.row_extents(context) self._manager.ordered_main_table.putcol(column, context.data.reshape(msshape), startrow=lrow, nrow=urow-lrow)
Decorator for caching data source return values Create a key index for the proxied array in the context. Iterate over the array shape descriptor e.g. (ntime, nbl, 3) returning tuples containing the lower and upper extents of string dimensions. Takes (0, d) in the case of an integer dimensions. def _cache(method): """ Decorator for caching data source return values Create a key index for the proxied array in the context. Iterate over the array shape descriptor e.g. (ntime, nbl, 3) returning tuples containing the lower and upper extents of string dimensions. Takes (0, d) in the case of an integer dimensions. """ @functools.wraps(method) def memoizer(self, context): # Construct the key for the given index idx = context.array_extents(context.name) key = tuple(i for t in idx for i in t) with self._lock: # Access the sub-cache for this data source array_cache = self._cache[context.name] # Cache miss, call the data source if key not in array_cache: array_cache[key] = method(context) return array_cache[key] return memoizer
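For example, the key for a two-dimensional array with hypothetical extents ((0, 10), (3, 7)) flattens to a hashable tuple:

# Hypothetical extents returned by context.array_extents(context.name)
idx = ((0, 10), (3, 7))
key = tuple(i for t in idx for i in t)
assert key == (0, 10, 3, 7)   # hashable, so usable as a cache key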
Decorator returning a method that proxies a data source. def _proxy(method): """ Decorator returning a method that proxies a data source. """ @functools.wraps(method) def proxy(self, context): return method(context) return proxy
Perform any logic on solution start def start(self, start_context): """ Perform any logic on solution start """ for p in self._providers: p.start(start_context) if self._clear_start: self.clear_cache()
Perform any logic on solution stop def stop(self, stop_context): """ Perform any logic on solution stop """ for p in self._providers: p.stop(stop_context) if self._clear_stop: self.clear_cache()
Compute base antenna pairs def default_base_ant_pairs(self, context): """ Compute base antenna pairs """ k = 0 if context.cfg['auto_correlations'] else 1 na = context.dim_global_size('na') gen = (i.astype(context.dtype) for i in np.triu_indices(na, k)) # Cache np.triu_indices(na, k) as it's likely that (na, k) will # stay constant much of the time. Assumption here is that this # method will be grafted onto a DefaultsSourceProvider with # the appropriate members. if self._is_cached: array_cache = self._chunk_cache['default_base_ant_pairs'] key = (k, na) # Cache miss if key not in array_cache: array_cache[key] = tuple(gen) return array_cache[key] return tuple(gen)
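np.triu_indices supplies the upper-triangular (antenna1, antenna2) pairs; k=1 excludes the diagonal auto-correlations while k=0 keeps them. For example:

import numpy as np

ant1, ant2 = np.triu_indices(4, 1)
# ant1 == [0 0 0 1 1 2]
# ant2 == [1 2 3 2 3 3], i.e. baselines (0,1) (0,2) (0,3) (1,2) (1,3) (2,3)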
Default antenna1 values def default_antenna1(self, context): """ Default antenna1 values """ ant1, ant2 = default_base_ant_pairs(self, context) (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl') ant1_result = np.empty(context.shape, context.dtype) ant1_result[:,:] = ant1[np.newaxis,bl:bu] return ant1_result
Default antenna2 values def default_antenna2(self, context): """ Default antenna2 values """ ant1, ant2 = default_base_ant_pairs(self, context) (tl, tu), (bl, bu) = context.dim_extents('ntime', 'nbl') ant2_result = np.empty(context.shape, context.dtype) ant2_result[:,:] = ant2[np.newaxis,bl:bu] return ant2_result
Returns [[1, 0], [0, 1]] tiled up to other dimensions def identity_on_pols(self, context): """ Returns [[1, 0], [0, 1]] tiled up to other dimensions """ A = np.empty(context.shape, context.dtype) A[:,:,:] = [[[1,0,0,1]]] return A
Returns [[1, 0], [0, 0]] tiled up to other dimensions def default_stokes(self, context): """ Returns [[1, 0], [0, 0]] tiled up to other dimensions """ A = np.empty(context.shape, context.dtype) A[:,:,:] = [[[1,0,0,0]]] return A
Frequency data source def frequency(self, context): """ Frequency data source """ channels = self._manager.spectral_window_table.getcol(MS.CHAN_FREQ) return channels.reshape(context.shape).astype(context.dtype)
Reference frequency data source def ref_frequency(self, context): """ Reference frequency data source """ num_chans = self._manager.spectral_window_table.getcol(MS.NUM_CHAN) ref_freqs = self._manager.spectral_window_table.getcol(MS.REF_FREQUENCY) data = np.hstack([np.repeat(rf, bs) for bs, rf in zip(num_chans, ref_freqs)]) return data.reshape(context.shape).astype(context.dtype)
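A worked example of the per-band expansion: each spectral window's reference frequency is repeated once per channel in that window.

import numpy as np

num_chans = [2, 3]          # channels per spectral window
ref_freqs = [1.4e9, 1.6e9]  # one reference frequency per window
data = np.hstack([np.repeat(rf, bs) for bs, rf in zip(num_chans, ref_freqs)])
# array([1.4e9, 1.4e9, 1.6e9, 1.6e9, 1.6e9])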
Per-antenna UVW coordinate data source def uvw(self, context): """ Per-antenna UVW coordinate data source """ # Hacky access of private member cube = context._cube # Create antenna1 source context a1_actual = cube.array("antenna1", reify=True) a1_ctx = SourceContext("antenna1", cube, context.cfg, context.iter_args, cube.array("antenna1"), a1_actual.shape, a1_actual.dtype) # Create antenna2 source context a2_actual = cube.array("antenna2", reify=True) a2_ctx = SourceContext("antenna2", cube, context.cfg, context.iter_args, cube.array("antenna2"), a2_actual.shape, a2_actual.dtype) # Get antenna1 and antenna2 data ant1 = self.antenna1(a1_ctx).ravel() ant2 = self.antenna2(a2_ctx).ravel() # Obtain per baseline UVW data lrow, urow = MS.uvw_row_extents(context) uvw = self._manager.ordered_uvw_table.getcol(MS.UVW, startrow=lrow, nrow=urow-lrow) # Perform the per-antenna UVW decomposition ntime, nbl = context.dim_extent_size('ntime', 'nbl') na = context.dim_global_size('na') chunks = np.repeat(nbl, ntime).astype(ant1.dtype) auvw = mbu.antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=na) return auvw.reshape(context.shape).astype(context.dtype)
antenna1 data source def antenna1(self, context): """ antenna1 data source """ lrow, urow = MS.uvw_row_extents(context) antenna1 = self._manager.ordered_uvw_table.getcol( MS.ANTENNA1, startrow=lrow, nrow=urow-lrow) return antenna1.reshape(context.shape).astype(context.dtype)
antenna2 data source def antenna2(self, context): """ antenna2 data source """ lrow, urow = MS.uvw_row_extents(context) antenna2 = self._manager.ordered_uvw_table.getcol( MS.ANTENNA2, startrow=lrow, nrow=urow-lrow) return antenna2.reshape(context.shape).astype(context.dtype)
parallactic angle data source def parallactic_angles(self, context): """ parallactic angle data source """ # Time and antenna extents (lt, ut), (la, ua) = context.dim_extents('ntime', 'na') return (mbu.parallactic_angles(self._times[lt:ut], self._antenna_positions[la:ua], self._phase_dir) .reshape(context.shape) .astype(context.dtype))
Observed visibility data source def observed_vis(self, context): """ Observed visibility data source """ lrow, urow = MS.row_extents(context) data = self._manager.ordered_main_table.getcol( self._vis_column, startrow=lrow, nrow=urow-lrow) return data.reshape(context.shape).astype(context.dtype)
Flag data source def flag(self, context): """ Flag data source """ lrow, urow = MS.row_extents(context) flag = self._manager.ordered_main_table.getcol( MS.FLAG, startrow=lrow, nrow=urow-lrow) return flag.reshape(context.shape).astype(context.dtype)
Weight data source def weight(self, context): """ Weight data source """ lrow, urow = MS.row_extents(context) weight = self._manager.ordered_main_table.getcol( MS.WEIGHT, startrow=lrow, nrow=urow-lrow) # WEIGHT is applied across all channels weight = np.repeat(weight, self._manager.channels_per_band, 0) return weight.reshape(context.shape).astype(context.dtype)
Load the tensorflow library def load_tf_lib(): """ Load the tensorflow library """ from os.path import join as pjoin import pkg_resources import tensorflow as tf path = pjoin('ext', 'rime.so') rime_lib_path = pkg_resources.resource_filename("montblanc", path) return tf.load_op_library(rime_lib_path)
Raise any errors associated with the validator. Parameters ---------- validator : :class:`cerberus.Validator` Validator Raises ------ ValueError Raised if errors existed on `validator`. Message describing each error and information associated with the configuration option causing the error. def raise_validator_errors(validator): """ Raise any errors associated with the validator. Parameters ---------- validator : :class:`cerberus.Validator` Validator Raises ------ ValueError Raised if errors existed on `validator`. Message describing each error and information associated with the configuration option causing the error. """ if len(validator._errors) == 0: return def _path_str(path, name=None): """ String of the document/schema path. `cfg["foo"]["bar"]` """ L = [name] if name is not None else [] L.extend('["%s"]' % p for p in path) return "".join(L) def _path_leaf(path, dicts): """ Dictionary Leaf of the schema/document given the path """ for p in path: dicts = dicts[p] return dicts wrap = partial(textwrap.wrap, initial_indent=' '*4, subsequent_indent=' '*8) msg = ["There were configuration errors:"] for e in validator._errors: schema_leaf = _path_leaf(e.document_path, validator.schema) doc_str = _path_str(e.document_path, "cfg") msg.append("Invalid configuration option %s == '%s'." % (doc_str, e.value)) try: otype = schema_leaf["type"] msg.extend(wrap("Type must be '%s'." % otype)) except KeyError: pass try: allowed = schema_leaf["allowed"] msg.extend(wrap("Allowed values are '%s'." % allowed)) except KeyError: pass try: description = schema_leaf["__description__"] msg.extend(wrap("Description: %s" % description)) except KeyError: pass raise ValueError("\n".join(msg))
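A minimal sketch of the error flow, assuming a plain cerberus Validator with standard rules (the project's config_validator() presumably also registers the __description__ extension used above):

import cerberus

schema = {'data_source': {'type': 'string', 'allowed': ['ms', 'default']}}
validator = cerberus.Validator(schema)
validator.validate({'data_source': 'bad'})
raise_validator_errors(validator)
# ValueError: There were configuration errors:
# Invalid configuration option cfg["data_source"] == 'bad'.
#     Type must be 'string'.
#     Allowed values are '['ms', 'default']'.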
Take a multiline text and indent it as a block def indented(text, level, indent=2): """Take a multiline text and indent it as a block""" return "\n".join("%s%s" % (level * indent * " ", s) for s in text.splitlines())
Put curly brackets round an indented text def dumped(text, level, indent=2): """Put curly brackets round an indented text""" return indented("{\n%s\n}" % indented(text, level + 1, indent) or "None", level, indent) + "\n"
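For instance:

text = "a = 1\nb = 2"
print(indented(text, level=1))
#   a = 1
#   b = 2
print(dumped(text, level=0))
# {
#   a = 1
#   b = 2
# }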
Perform a shell-based file copy. Copying in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. def copy_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file copy. Copying in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the copy. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_COPY, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
Perform a shell-based file move. Moving in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the move. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. def move_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file move. Moving in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the move. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_MOVE, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
Perform a shell-based file rename. Renaming in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the rename. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. def rename_file( source_path, target_path, allow_undo=True, no_confirm=False, rename_on_collision=True, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file rename. Renaming in this way allows the possibility of undo, auto-renaming, and showing the "flying file" animation during the rename. The default options allow for undo, don't automatically clobber on a name clash, automatically rename on collision and display the animation. """ return _file_operation( shellcon.FO_RENAME, source_path, target_path, allow_undo, no_confirm, rename_on_collision, silent, extra_flags, hWnd )
Perform a shell-based file delete. Deleting in this way uses the system recycle bin, allows the possibility of undo, and showing the "flying file" animation during the delete. The default options allow for undo, don't automatically clobber on a name clash and display the animation. def delete_file( source_path, allow_undo=True, no_confirm=False, silent=False, extra_flags=0, hWnd=None ): """Perform a shell-based file delete. Deleting in this way uses the system recycle bin, allows the possibility of undo, and showing the "flying file" animation during the delete. The default options allow for undo, don't automatically clobber on a name clash and display the animation. """ return _file_operation( shellcon.FO_DELETE, source_path, None, allow_undo, no_confirm, False, silent, extra_flags, hWnd )
Pick out info from MS documents with embedded structured storage (typically MS Word docs etc.) Returns a dictionary of information found def structured_storage(filename): """Pick out info from MS documents with embedded structured storage (typically MS Word docs etc.) Returns a dictionary of information found """ if not pythoncom.StgIsStorageFile(filename): return {} flags = storagecon.STGM_READ | storagecon.STGM_SHARE_EXCLUSIVE storage = pythoncom.StgOpenStorage(filename, None, flags) try: properties_storage = storage.QueryInterface(pythoncom.IID_IPropertySetStorage) except pythoncom.com_error: return {} property_sheet = properties_storage.Open(FMTID_USER_DEFINED_PROPERTIES) try: data = property_sheet.ReadMultiple(PROPERTIES) finally: property_sheet = None title, subject, author, created_on, keywords, comments, template_used, \ updated_by, edited_on, printed_on, saved_on, \ n_pages, n_words, n_characters, \ application = data result = {} if title: result['title'] = title if subject: result['subject'] = subject if author: result['author'] = author if created_on: result['created_on'] = created_on if keywords: result['keywords'] = keywords if comments: result['comments'] = comments if template_used: result['template_used'] = template_used if updated_by: result['updated_by'] = updated_by if edited_on: result['edited_on'] = edited_on if printed_on: result['printed_on'] = printed_on if saved_on: result['saved_on'] = saved_on if n_pages: result['n_pages'] = n_pages if n_words: result['n_words'] = n_words if n_characters: result['n_characters'] = n_characters if application: result['application'] = application return result
Create a Windows shortcut: Path - As what file should the shortcut be created? Target - What command should the desktop use? Arguments - What arguments should be supplied to the command? StartIn - What folder should the command start in? Icon -(filename, index) What icon should be used for the shortcut? Description - What description should the shortcut be given? eg CreateShortcut( Path=os.path.join(desktop(), "PythonI.lnk"), Target=r"c:\python\python.exe", Icon=(r"c:\python\python.exe", 0), Description="Python Interpreter" ) def CreateShortcut(Path, Target, Arguments="", StartIn="", Icon=("", 0), Description=""): """Create a Windows shortcut: Path - As what file should the shortcut be created? Target - What command should the desktop use? Arguments - What arguments should be supplied to the command? StartIn - What folder should the command start in? Icon -(filename, index) What icon should be used for the shortcut? Description - What description should the shortcut be given? eg CreateShortcut( Path=os.path.join(desktop(), "PythonI.lnk"), Target=r"c:\python\python.exe", Icon=(r"c:\python\python.exe", 0), Description="Python Interpreter" ) """ lnk = shortcut(Target) lnk.arguments = Arguments lnk.working_directory = StartIn lnk.icon_location = Icon lnk.description = Description lnk.write(Path)
Restore the most recent version of a filepath, returning the filepath it was restored to (as rename-on-collision will apply if a file already exists at that path). def undelete(self, original_filepath): """Restore the most recent version of a filepath, returning the filepath it was restored to (as rename-on-collision will apply if a file already exists at that path). """ candidates = self.versions(original_filepath) if not candidates: raise x_not_found_in_recycle_bin("%s not found in the Recycle Bin" % original_filepath) # # NB Can't use max(key=...) until Python 2.6+ # newest = sorted(candidates, key=lambda entry: entry.recycle_date())[-1] return newest.undelete()
Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary def _get_queue_types(fed_arrays, data_sources): """ Given a list of arrays to feed in fed_arrays, return a list of associated queue types, obtained from tuples in the data_sources dictionary """ try: return [data_sources[n].dtype for n in fed_arrays] except KeyError as e: raise ValueError("Array '{k}' has no data source!" .format(k=e.message)), None, sys.exc_info()[2]
Arguments name: string Name of the queue queue_size: integer Size of the queue fed_arrays: list array names that will be fed by this queue data_sources: dict (lambda/method, dtype) tuples, keyed on array names def create_queue_wrapper(name, queue_size, fed_arrays, data_sources, *args, **kwargs): """ Arguments name: string Name of the queue queue_size: integer Size of the queue fed_arrays: list array names that will be fed by this queue data_sources: dict (lambda/method, dtype) tuples, keyed on array names """ qtype = SingleInputMultiQueueWrapper if 'count' in kwargs else QueueWrapper return qtype(name, queue_size, fed_arrays, data_sources, *args, **kwargs)
Parses a string containing assignment statements into a dictionary. .. code-block:: python h5 = katdal.open('123456789.h5') kwargs = parse_python_assigns("spw=3; scans=[1,2];" "targets='bpcal,radec';" "channels=slice(0,2048)") h5.select(**kwargs) Parameters ---------- assign_str: str Assignment string. Should only contain assignment statements assigning python literals or builtin function calls to variable names. Multiple assignment statements should be separated by semi-colons. Returns ------- dict Dictionary { name: value } containing assignment results. def parse_python_assigns(assign_str): """ Parses a string containing assignment statements into a dictionary. .. code-block:: python h5 = katdal.open('123456789.h5') kwargs = parse_python_assigns("spw=3; scans=[1,2];" "targets='bpcal,radec';" "channels=slice(0,2048)") h5.select(**kwargs) Parameters ---------- assign_str: str Assignment string. Should only contain assignment statements assigning python literals or builtin function calls to variable names. Multiple assignment statements should be separated by semi-colons. Returns ------- dict Dictionary { name: value } containing assignment results. """ if not assign_str: return {} def _eval_value(stmt_value): # If the statement value is a call to a builtin, try to evaluate it if isinstance(stmt_value, ast.Call): func_name = stmt_value.func.id if func_name not in _BUILTIN_WHITELIST: raise ValueError("Function '%s' in '%s' is not builtin. " "Available builtins: '%s'" % (func_name, assign_str, list(_BUILTIN_WHITELIST))) # Recursively pass arguments through this same function if stmt_value.args is not None: args = tuple(_eval_value(a) for a in stmt_value.args) else: args = () # Recursively pass keyword arguments through this same function if stmt_value.keywords is not None: kwargs = {kw.arg : _eval_value(kw.value) for kw in stmt_value.keywords} else: kwargs = {} return getattr(__builtin__, func_name)(*args, **kwargs) # Try a literal eval else: return ast.literal_eval(stmt_value) # Variable dictionary variables = {} # Parse the assignment string stmts = ast.parse(assign_str, mode='single').body for i, stmt in enumerate(stmts): if not isinstance(stmt, ast.Assign): raise ValueError("Statement %d in '%s' is not a " "variable assignment." % (i, assign_str)) # Evaluate assignment rhs values = _eval_value(stmt.value) # "a = b = c" => targets 'a' and 'b' with 'c' as result for target in stmt.targets: # a = 2 if isinstance(target, ast.Name): variables[target.id] = values # Tuple/List unpacking case # (a, b) = 2 elif isinstance(target, (ast.Tuple, ast.List)): # Require all tuple/list elements to be variable names, # although anything else is probably a syntax error if not all(isinstance(e, ast.Name) for e in target.elts): raise ValueError("Tuple unpacking in assignment %d " "in expression '%s' failed as not all " "tuple contents are variable names." % (i, assign_str)) # Promote for zip and length checking if not isinstance(values, (tuple, list)): elements = (values,) else: elements = values if not len(target.elts) == len(elements): raise ValueError("Unpacking '%s' into a tuple/list in " "assignment %d of expression '%s' failed. " "The number of tuple elements did not match " "the number of values." % (values, i, assign_str)) # Unpack for variable, value in zip(target.elts, elements): variables[variable.id] = value else: raise TypeError("'%s' types are not supported " "as assignment targets." % type(target)) return variables
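A usage sketch, assuming slice is in _BUILTIN_WHITELIST as the docstring example suggests; tuple assignments unpack as described above:

kwargs = parse_python_assigns("spw=3; scans=[1, 2]; "
                              "a, b = 1, 2; "
                              "channels=slice(0, 2048)")
assert kwargs == {'spw': 3, 'scans': [1, 2],
                  'a': 1, 'b': 2, 'channels': slice(0, 2048)}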
Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. def find_sinks(obj): """ Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. """ SINK_ARGSPEC = ['self', 'context'] return { n: m for n, m in inspect.getmembers(obj, inspect.ismethod) if inspect.getargspec(m)[0] == SINK_ARGSPEC }
Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. def sinks(self): """ Returns a dictionary of sink methods found on this object, keyed on method name. Sink methods are identified by (self, context) arguments on this object. For example: def f(self, context): ... is a sink method, but def f(self, ctx): ... is not. """ try: return self._sinks except AttributeError: self._sinks = find_sinks(self) return self._sinks
numba implementation of antenna_uvw def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna): """ numba implementation of antenna_uvw """ if antenna1.ndim != 1: raise ValueError("antenna1 shape should be (row,)") if antenna2.ndim != 1: raise ValueError("antenna2 shape should be (row,)") if uvw.ndim != 2 or uvw.shape[1] != 3: raise ValueError("uvw shape should be (row, 3)") if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]): raise ValueError("First dimension of uvw, antenna1 " "and antenna2 do not match") if chunks.ndim != 1: raise ValueError("chunks shape should be (utime,)") if nr_of_antenna < 1: raise ValueError("nr_of_antenna < 1") ant_uvw_shape = (chunks.shape[0], nr_of_antenna, 3) antenna_uvw = np.full(ant_uvw_shape, np.nan, dtype=uvw.dtype) start = 0 for ci, chunk in enumerate(chunks): end = start + chunk # one pass should be enough! _antenna_uvw_loop(uvw, antenna1, antenna2, antenna_uvw, ci, start, end) start = end return antenna_uvw
Raises informative exception for an invalid decomposition def _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err): """ Raises informative exception for an invalid decomposition """ start = 0 problem_str = [] for ci, chunk in enumerate(chunks): end = start + chunk ant1 = antenna1[start:end] ant2 = antenna2[start:end] cuvw = uvw[start:end] ant1_uvw = ant_uvw[ci, ant1, :] ant2_uvw = ant_uvw[ci, ant2, :] ruvw = ant2_uvw - ant1_uvw # Identify rows where any of the UVW components differed close = np.isclose(ruvw, cuvw) problems = np.nonzero(np.logical_or.reduce(np.invert(close), axis=1)) for row in problems[0]: problem_str.append("[row %d [%d, %d] (chunk %d)]: " "original %s recovered %s " "ant1 %s ant2 %s" % ( start+row, ant1[row], ant2[row], ci, cuvw[row], ruvw[row], ant1_uvw[row], ant2_uvw[row])) # Exit inner loop early if len(problem_str) >= max_err: break # Exit outer loop early if len(problem_str) >= max_err: break start = end # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna UVW Decomposition Failed", "The following differences were found " "(first %d):" % max_err] + problem_str raise AntennaUVWDecompositionError('\n'.join(problem_str))
Raises an informative error for missing antenna def _raise_missing_antenna_errors(ant_uvw, max_err): """ Raises an informative error for missing antenna """ # Find antenna uvw coordinates where any UVW component was nan # nan + real == nan problems = np.nonzero(np.add.reduce(np.isnan(ant_uvw), axis=2)) problem_str = [] for c, a in zip(*problems): problem_str.append("[chunk %d antenna %d]" % (c, a)) # Exit early if len(problem_str) >= max_err: break # Return early if nothing was wrong if len(problem_str) == 0: return # Add a preamble and raise exception problem_str = ["Antenna were missing"] + problem_str raise AntennaMissingError('\n'.join(problem_str))
Computes per-antenna UVW coordinates from baseline ``uvw``, ``antenna1`` and ``antenna2`` coordinates logically grouped into baseline chunks. The example below illustrates two baseline chunks of size 6 and 5, respectively. .. code-block:: python uvw = ... ant1 = np.array([0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1], dtype=np.int32) ant2 = np.array([1, 2, 3, 2, 3, 3, 1, 2, 3, 1, 2], dtype=np.int32) chunks = np.array([6, 5], dtype=np.int32) ant_uv = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4) The first antenna of the first baseline of a chunk is chosen as the origin of the antenna coordinate system, while the second antenna is set to the negative of the baseline UVW coordinate. Subsequent antenna UVW coordinates are iteratively derived from the first two coordinates. Thus, the baseline indices need not be properly ordered (within the chunk). If it is not possible to derive coordinates for an antenna, its coordinate will be set to nan. Parameters ---------- uvw : np.ndarray Baseline UVW coordinates of shape (row, 3) antenna1 : np.ndarray Baseline first antenna of shape (row,) antenna2 : np.ndarray Baseline second antenna of shape (row,) chunks : np.ndarray Number of baselines per unique timestep with shape (chunks,) :code:`np.sum(chunks) == row` should hold. nr_of_antenna : int Total number of antenna in the solution. check_missing (optional) : bool If ``True`` raises an exception if it was not possible to compute UVW coordinates for all antenna (i.e. some were nan). Defaults to ``False``. check_decomposition (optional) : bool If ``True``, checks that the antenna decomposition accurately reproduces the coordinates in ``uvw``, or that :code:`ant_uvw[c,ant1,:] - ant_uvw[c,ant2,:] == uvw[s:e,:]` where ``s`` and ``e`` are the start and end rows of chunk ``c`` respectively. Defaults to ``False``. max_err (optional) : integer Maximum number of errors when checking for missing antenna or inaccurate decompositions. Defaults to ``100``. Returns ------- np.ndarray Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3) def antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna, check_missing=False, check_decomposition=False, max_err=100): """ Computes per-antenna UVW coordinates from baseline ``uvw``, ``antenna1`` and ``antenna2`` coordinates logically grouped into baseline chunks. The example below illustrates two baseline chunks of size 6 and 5, respectively. .. code-block:: python uvw = ... ant1 = np.array([0, 0, 0, 1, 1, 2, 0, 0, 0, 1, 1], dtype=np.int32) ant2 = np.array([1, 2, 3, 2, 3, 3, 1, 2, 3, 1, 2], dtype=np.int32) chunks = np.array([6, 5], dtype=np.int32) ant_uv = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=4) The first antenna of the first baseline of a chunk is chosen as the origin of the antenna coordinate system, while the second antenna is set to the negative of the baseline UVW coordinate. Subsequent antenna UVW coordinates are iteratively derived from the first two coordinates. Thus, the baseline indices need not be properly ordered (within the chunk). If it is not possible to derive coordinates for an antenna, its coordinate will be set to nan. Parameters ---------- uvw : np.ndarray Baseline UVW coordinates of shape (row, 3) antenna1 : np.ndarray Baseline first antenna of shape (row,) antenna2 : np.ndarray Baseline second antenna of shape (row,) chunks : np.ndarray Number of baselines per unique timestep with shape (chunks,) :code:`np.sum(chunks) == row` should hold. nr_of_antenna : int Total number of antenna in the solution.
check_missing (optional) : bool If ``True`` raises an exception if it was not possible to compute UVW coordinates for all antenna (i.e. some were nan). Defaults to ``False``. check_decomposition (optional) : bool If ``True``, checks that the antenna decomposition accurately reproduces the coordinates in ``uvw``, or that :code:`ant_uvw[c,ant1,:] - ant_uvw[c,ant2,:] == uvw[s:e,:]` where ``s`` and ``e`` are the start and end rows of chunk ``c`` respectively. Defaults to ``False``. max_err (optional) : integer Maximum number of errors when checking for missing antenna or inaccurate decompositions. Defaults to ``100``. Returns ------- np.ndarray Antenna UVW coordinates of shape (chunks, nr_of_antenna, 3) """ ant_uvw = _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna) if check_missing: _raise_missing_antenna_errors(ant_uvw, max_err=max_err) if check_decomposition: _raise_decomposition_errors(uvw, antenna1, antenna2, chunks, ant_uvw, max_err=max_err) return ant_uvw
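A small self-consistent example, a sketch only: the baseline uvw values below are built from per-antenna positions using the ant2 - ant1 convention that _raise_decomposition_errors checks (ruvw = ant2_uvw - ant1_uvw), since _antenna_uvw_loop itself is not shown in this section.

import numpy as np

ant_xyz = np.array([[0., 0., 0.], [10., 0., 0.], [0., 20., 0.]])
ant1 = np.array([0, 0, 1, 0, 0, 1], dtype=np.int32)
ant2 = np.array([1, 2, 2, 1, 2, 2], dtype=np.int32)
uvw = ant_xyz[ant2] - ant_xyz[ant1]     # baseline coordinates
chunks = np.array([3, 3], dtype=np.int32)

ant_uvw = antenna_uvw(uvw, ant1, ant2, chunks, nr_of_antenna=3,
                      check_missing=True, check_decomposition=True)
# ant_uvw.shape == (2, 3, 3)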
Returns a dictionary mapping source types to number of sources. If the number of sources for the source type is supplied in the kwargs these will be placed in the dictionary. e.g. if we have 'point', 'gaussian' and 'sersic' source types, then default_sources(point=10, gaussian=20) will return an OrderedDict {'point': 10, 'gaussian': 20, 'sersic': 0} def default_sources(**kwargs): """ Returns a dictionary mapping source types to number of sources. If the number of sources for the source type is supplied in the kwargs these will be placed in the dictionary. e.g. if we have 'point', 'gaussian' and 'sersic' source types, then default_sources(point=10, gaussian=20) will return an OrderedDict {'point': 10, 'gaussian': 20, 'sersic': 0} """ S = OrderedDict() total = 0 invalid_types = [t for t in kwargs.keys() if t not in SOURCE_VAR_TYPES] for t in invalid_types: montblanc.log.warning('Source type %s is not yet ' 'implemented in montblanc. ' 'Valid source types are %s' % (t, SOURCE_VAR_TYPES.keys())) # Zero all source types for k, v in SOURCE_VAR_TYPES.iteritems(): # Try get the number of sources for this source # from the kwargs value = kwargs.get(k, 0) try: value = int(value) except ValueError: raise TypeError(('Supplied value %s ' 'for source %s cannot be ' 'converted to an integer') % \ (value, k)) total += value S[k] = value # Add a point source if no others exist if total == 0: S[POINT_TYPE] = 1 return S
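For instance, with the 'point', 'gaussian' and 'sersic' types registered:

S = default_sources(point=10, gaussian=20)
# OrderedDict([('point', 10), ('gaussian', 20), ('sersic', 0)])

S = default_sources()
# No sources were specified, so a single point source is configured:
# OrderedDict([('point', 1), ('gaussian', 0), ('sersic', 0)])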
Converts a source type to number of sources mapping into a source numbering variable to number of sources mapping. If, for example, we have 'point', 'gaussian' and 'sersic' source types, then passing the following dict as an argument sources_to_nr_vars({'point':10, 'gaussian': 20}) will return an OrderedDict {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 } def sources_to_nr_vars(sources): """ Converts a source type to number of sources mapping into a source numbering variable to number of sources mapping. If, for example, we have 'point', 'gaussian' and 'sersic' source types, then passing the following dict as an argument sources_to_nr_vars({'point':10, 'gaussian': 20}) will return an OrderedDict {'npsrc': 10, 'ngsrc': 20, 'nssrc': 0 } """ sources = default_sources(**sources) try: return OrderedDict((SOURCE_VAR_TYPES[name], nr) for name, nr in sources.iteritems()) except KeyError as e: raise KeyError(("No source type '%s' is " "registered. Valid source types " "are %s") % (e, SOURCE_VAR_TYPES.keys()))
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type. def source_range_tuple(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type. """ starts = np.array([0 for nr_var in SOURCE_VAR_TYPES.itervalues()]) ends = np.array([nr_var_dict[nr_var] if nr_var in nr_var_dict else 0 for nr_var in SOURCE_VAR_TYPES.itervalues()]) sum_counts = np.cumsum(ends) idx = np.arange(len(starts)) # Find the intervals containing the # start and ending indices start_idx, end_idx = np.searchsorted( sum_counts, [start, end], side='right') # Handle edge cases if end >= sum_counts[-1]: end = sum_counts[-1] end_idx = len(sum_counts) - 1 # Find out which variable counts fall within the range # of the supplied indices and zero those outside this range invalid = np.logical_not(np.logical_and(start_idx <= idx, idx <= end_idx)) starts[invalid] = ends[invalid] = 0 # Modify the associated starting and ending positions starts[start_idx] = start ends[end_idx] = end if start >= sum_counts[0]: starts[start_idx] -= sum_counts[start_idx-1] if end >= sum_counts[0]: ends[end_idx] -= sum_counts[end_idx-1] return OrderedDict((n, (starts[i], ends[i])) for i, n in enumerate(SOURCE_VAR_TYPES.values()))
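A worked example, assuming SOURCE_VAR_TYPES maps {'point': 'npsrc', 'gaussian': 'ngsrc', 'sersic': 'nssrc'}: with counts (10, 20, 30), sources 5..25 span the last five point sources and the first fifteen gaussian sources.

nr_var_dict = {'npsrc': 10, 'ngsrc': 20, 'nssrc': 30}
source_range_tuple(5, 25, nr_var_dict)
# OrderedDict([('npsrc', (5, 10)), ('ngsrc', (0, 15)), ('nssrc', (0, 0))])
source_range(5, 25, nr_var_dict)
# OrderedDict([('npsrc', 5), ('ngsrc', 15), ('nssrc', 0)])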
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type. def source_range(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing tuples of the start and end index for each source variable type. """ return OrderedDict((k, e-s) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type. def source_range_slices(start, end, nr_var_dict): """ Given a range of source numbers, as well as a dictionary containing the numbers of each source, returns a dictionary containing slices for each source variable type. """ return OrderedDict((k, slice(s,e,1)) for k, (s, e) in source_range_tuple(start, end, nr_var_dict).iteritems())
Return a lm coordinate array to montblanc def point_lm(self, context): """ Return a lm coordinate array to montblanc """ lm = np.empty(context.shape, context.dtype) # Print the array schema montblanc.log.info(context.array_schema.shape) # Print the space of iteration montblanc.log.info(context.iter_args) (ls, us) = context.dim_extents('npsrc') lm[:,0] = 0.0008 lm[:,1] = 0.0036 return lm
Return a stokes parameter array to montblanc def point_stokes(self, context): """ Return a stokes parameter array to montblanc """ stokes = np.empty(context.shape, context.dtype) stokes[:,:,0] = 1 stokes[:,:,1:4] = 0 return stokes
Return a reference frequency array to montblanc def ref_frequency(self, context): """ Return a reference frequency array to montblanc """ ref_freq = np.empty(context.shape, context.dtype) ref_freq[:] = 1.415e9 return ref_freq
Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool def update(self, scopes=[], add_scopes=[], rm_scopes=[], note='', note_url=''): """Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool """ success = False json = None if scopes: d = {'scopes': scopes} json = self._json(self._post(self._api, data=d), 200) if add_scopes: d = {'add_scopes': add_scopes} json = self._json(self._post(self._api, data=d), 200) if rm_scopes: d = {'remove_scopes': rm_scopes} json = self._json(self._post(self._api, data=d), 200) if note or note_url: d = {'note': note, 'note_url': note_url} json = self._json(self._post(self._api, data=d), 200) if json: self._update_(json) success = True return success
Iterate over the labels for every issue associated with this milestone. .. versionchanged:: 0.9 Add etag parameter. :param int number: (optional), number of labels to return. Default: -1 returns all available labels. :param str etag: (optional), ETag header from a previous response :returns: generator of :class:`Label <github3.issues.label.Label>`\ s def iter_labels(self, number=-1, etag=None): """Iterate over the labels for every issue associated with this milestone. .. versionchanged:: 0.9 Add etag parameter. :param int number: (optional), number of labels to return. Default: -1 returns all available labels. :param str etag: (optional), ETag header from a previous response :returns: generator of :class:`Label <github3.issues.label.Label>`\ s """ url = self._build_url('labels', base_url=self._api) return self._iter(int(number), url, Label, etag=etag)
Get reference to currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- fnc : function reference Currently running function def current_function(frame): """ Get reference to currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- fnc : function reference Currently running function """ if frame is None: return None code = frame.f_code # Attempting to extract the function reference for these calls appears # to be problematic if code.co_name == '__del__' or code.co_name == '_remove' or \ code.co_name == '_removeHandlerRef': return None try: # Solution follows suggestion at http://stackoverflow.com/a/37099372 lst = [referer for referer in gc.get_referrers(code) if getattr(referer, "__code__", None) is code and inspect.getclosurevars(referer).nonlocals.items() <= frame.f_locals.items()] if lst: return lst[0] else: return None except ValueError: # inspect.getclosurevars can fail with ValueError: Cell is empty return None
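A usage sketch:

import sys

def outer():
    # The frame currently executing outer()'s body
    frame = sys._getframe()
    assert current_function(frame) is outer

outer()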
Get name of module of currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- modname : string Currently running function module name def current_module_name(frame): """ Get name of module of currently running function from inspect/trace stack frame. Parameters ---------- frame : stack frame Stack frame obtained via trace or inspect Returns ------- modname : string Currently running function module name """ if frame is None: return None if '__name__' in frame.f_globals: return frame.f_globals['__name__'] else: mod = inspect.getmodule(frame) if mod is None: return '' else: return mod.__name__
Build a record of called functions using the trace mechanism def _trace(self, frame, event, arg): """ Build a record of called functions using the trace mechanism """ # Return if this is not a function call if event != 'call': return # Filter calling and called functions by module names src_mod = current_module_name(frame.f_back) dst_mod = current_module_name(frame) # Avoid tracing the tracer (specifically, call from # ContextCallTracer.__exit__ to CallTracer.stop) if src_mod == __modulename__ or dst_mod == __modulename__: return # Apply source and destination module filters if not self.srcmodflt.match(src_mod): return if not self.dstmodflt.match(dst_mod): return # Get calling and called functions src_func = current_function(frame.f_back) dst_func = current_function(frame) # Filter calling and called functions by qnames if not self.srcqnmflt.match(function_qname(src_func)): return if not self.dstqnmflt.match(function_qname(dst_func)): return # Get calling and called function full names src_name = function_fqname(src_func) dst_name = function_fqname(dst_func) # Modify full function names if necessary if self.fnmsub is not None: src_name = re.sub(self.fnmsub[0], self.fnmsub[1], src_name) dst_name = re.sub(self.fnmsub[0], self.fnmsub[1], dst_name) # Update calling function count if src_func is not None: if src_name in self.fncts: self.fncts[src_name][0] += 1 else: self.fncts[src_name] = [1, 0] # Update called function count if dst_func is not None and src_func is not None: if dst_name in self.fncts: self.fncts[dst_name][1] += 1 else: self.fncts[dst_name] = [0, 1] # Update caller/calling pair count if dst_func is not None and src_func is not None: key = (src_name, dst_name) if key in self.calls: self.calls[key] += 1 else: self.calls[key] = 1
Stop tracing def stop(self): """Stop tracing""" # Stop tracing sys.settrace(None) # Build group structure if group filter is defined if self.grpflt is not None: # Iterate over graph nodes (functions) for k in self.fncts: # Construct group identity string m = self.grpflt.search(k) # If group identity string found, append current node # to that group if m is not None: ms = m.group(0) if ms in self.group: self.group[ms].append(k) else: self.group[ms] = [k, ]
Default colour generating function Parameters ---------- n : int Number of colours to generate h0 : float Initial H value in HSV colour specification hr : float Size of H value range to use for colour generation (final H value is h0 + hr) Returns ------- clst : list of strings List of HSV format colour specification strings def _clrgen(n, h0, hr): """Default colour generating function Parameters ---------- n : int Number of colours to generate h0 : float Initial H value in HSV colour specification hr : float Size of H value range to use for colour generation (final H value is h0 + hr) Returns ------- clst : list of strings List of HSV format colour specification strings """ n0 = n if n == 1 else n-1 clst = ['%f,%f,%f' % (h0 + hr*hi/n0, 0.35, 0.85) for hi in range(n)] return clst
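For example, three colours evenly spaced over H in [0.330, 1.155], as the default generator in graph() uses:

clrlst = _clrgen(3, 0.330, 0.825)
# ['0.330000,0.350000,0.850000',
#  '0.742500,0.350000,0.850000',
#  '1.155000,0.350000,0.850000']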
Construct call graph Parameters ---------- fnm : None or string, optional (default None) Filename of graph file to be written. File type is determined by the file extensions (e.g. dot for 'graph.dot' and SVG for 'graph.svg'). If None, a file is not written. size : string or None, optional (default None) Graph image size specification string. fntsz : int or None, optional (default None) Font size for text. fntfm : string or None, optional (default None) Font family specification string. clrgen : function or None, optional (default None) Function to call to generate the group colours. This function should take an integer specifying the number of groups as an argument and return a list of graphviz-compatible colour specification strings. rmsz : bool, optional (default False) If True, remove the width and height specifications from an SVG format output file so that the size scales properly when viewed in a web browser. prog : string, optional (default 'dot') Name of graphviz layout program to use. Returns ------- pgr : pygraphviz.AGraph Call graph of traced function calls def graph(self, fnm=None, size=None, fntsz=None, fntfm=None, clrgen=None, rmsz=False, prog='dot'): """ Construct call graph Parameters ---------- fnm : None or string, optional (default None) Filename of graph file to be written. File type is determined by the file extensions (e.g. dot for 'graph.dot' and SVG for 'graph.svg'). If None, a file is not written. size : string or None, optional (default None) Graph image size specification string. fntsz : int or None, optional (default None) Font size for text. fntfm : string or None, optional (default None) Font family specification string. clrgen : function or None, optional (default None) Function to call to generate the group colours. This function should take an integer specifying the number of groups as an argument and return a list of graphviz-compatible colour specification strings. rmsz : bool, optional (default False) If True, remove the width and height specifications from an SVG format output file so that the size scales properly when viewed in a web browser. prog : string, optional (default 'dot') Name of graphviz layout program to use.
Returns ------- pgr : pygraphviz.AGraph Call graph of traced function calls """ # Default colour generation function if clrgen is None: clrgen = lambda n: self._clrgen(n, 0.330, 0.825) # Generate color list clrlst = clrgen(len(self.group)) # Initialise a pygraphviz graph g = pgv.AGraph(strict=False, directed=True, landscape=False, rankdir='LR', newrank=True, fontsize=fntsz, fontname=fntfm, size=size, ratio='compress', color='black', bgcolor='#ffffff00') # Set graph attributes g.node_attr.update(penwidth=0.25, shape='box', style='rounded,filled') # Iterate over functions adding them as graph nodes for k in self.fncts: g.add_node(k, fontsize=fntsz, fontname=fntfm) # If lnksub regex pair is provided, compute an href link # target from the node name and add it as an attribute to # the node if self.lnksub is not None: lnktgt = re.sub(self.lnksub[0], self.lnksub[1], k) g.get_node(k).attr.update(href=lnktgt, target="_top") # If function has no calls to it, set its rank to "source" if self.fncts[k][1] == 0: g.get_node(k).attr.update(rank='source') # If groups defined, construct a subgraph for each and add the # nodes in each group to the corresponding subgraph if self.group: fngrpnm = {} # Iterate over group number/group name pairs for k in zip(range(len(self.group)), sorted(self.group)): g.add_subgraph(self.group[k[1]], name='cluster_' + k[1], label=k[1], penwidth=2, style='dotted', pencolor=clrlst[k[0]]) # Iterate over nodes in current group for l in self.group[k[1]]: # Create record of function group number fngrpnm[l] = k[0] # Set common group colour for current node g.get_node(l).attr.update(fillcolor=clrlst[k[0]]) # Iterate over function calls, adding each as an edge for k in self.calls: # If groups defined, set edge colour according to group of # calling function, otherwise set a standard colour if self.group: g.add_edge(k[0], k[1], penwidth=2, color=clrlst[fngrpnm[k[0]]]) else: g.add_edge(k[0], k[1], color='grey') # Call layout program g.layout(prog=prog) # Write graph file if filename provided if fnm is not None: ext = os.path.splitext(fnm)[1] if ext == '.dot': g.write(fnm) else: if ext == '.svg' and rmsz: img = g.draw(format='svg').decode('utf-8') cp = re.compile(r'\n<svg width=\"[^\"]*\" ' 'height=\"[^\"]*\"') img = cp.sub(r'\n<svg', img, count=1) with open(fnm, 'w') as fd: fd.write(img) else: g.draw(fnm) # Return graph object return g
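A hedged usage sketch of the tracer as a whole: the tracer's constructor arguments are not shown in this section, so permissive default filters are assumed, and some_function_to_profile is a hypothetical placeholder.

import sys

tracer = CallTracer()           # assumed: defaults trace everything
sys.settrace(tracer._trace)
try:
    some_function_to_profile()  # hypothetical workload
finally:
    tracer.stop()               # stop() calls sys.settrace(None)

# Write an SVG call graph whose size scales in a web browser
tracer.graph(fnm='calls.svg', rmsz=True)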
Create a review comment on this pull request. All parameters are required by the GitHub API. :param str body: The comment text itself :param str commit_id: The SHA of the commit to comment on :param str path: The relative path of the file to comment on :param int position: The line index in the diff to comment on. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` def create_review_comment(self, body, commit_id, path, position): """Create a review comment on this pull request. All parameters are required by the GitHub API. :param str body: The comment text itself :param str commit_id: The SHA of the commit to comment on :param str path: The relative path of the file to comment on :param int position: The line index in the diff to comment on. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self._api) data = {'body': body, 'commit_id': commit_id, 'path': path, 'position': int(position)} json = self._json(self._post(url, data=data), 201) return ReviewComment(json, self) if json else None
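A hedged usage sketch (github3.py 0.x; the token placeholder, repository, pull request number, path, and position are all illustrative):

import github3

gh = github3.login(token='<token>')
repo = gh.repository('octocat', 'hello-world')  # illustrative repository
pr = repo.pull_request(1) if repo else None     # illustrative PR number
if pr:
    comment = pr.create_review_comment(
        body='Nit: prefer a named constant here.',
        commit_id=pr.head.sha,   # comment against the PR head commit
        path='README.md',        # illustrative path within the diff
        position=3)              # illustrative index into the diff hunk
    if comment:
        print(comment.body)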
Return the diff def diff(self): """Return the diff""" resp = self._get(self._api, headers={'Accept': 'application/vnd.github.diff'}) return resp.content if self._boolean(resp, 200, 404) else None
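A hedged sketch showing that ``diff()`` returns raw bytes (or None on failure), suitable for writing straight to disk; the repository and PR number are illustrative:

import github3

gh = github3.login(token='<token>')
repo = gh.repository('octocat', 'hello-world')  # illustrative repository
pr = repo.pull_request(1) if repo else None     # illustrative PR number
if pr:
    diff_bytes = pr.diff()   # raw diff bytes, or None on a 404
    if diff_bytes:
        with open('pr.diff', 'wb') as fd:
            fd.write(diff_bytes)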
Checks to see if the pull request was merged. :returns: bool def is_merged(self): """Checks to see if the pull request was merged. :returns: bool """ url = self._build_url('merge', base_url=self._api) return self._boolean(self._get(url), 204, 404)
Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """ url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag)
Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s def iter_files(self, number=-1, etag=None): """Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s """ url = self._build_url('files', base_url=self._api) return self._iter(int(number), url, PullFile, etag=etag)
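A hedged sketch summarising the files touched by a pull request (illustrative repository and PR number):

import github3

gh = github3.login(token='<token>')
repo = gh.repository('octocat', 'hello-world')  # illustrative repository
pr = repo.pull_request(1) if repo else None     # illustrative PR number
if pr:
    for pull_file in pr.iter_files():
        # each PullFile carries per-file diff statistics
        print(pull_file.filename, pull_file.additions, pull_file.deletions)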
Iterate over the issue comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueComment <IssueComment>`\ s def iter_issue_comments(self, number=-1, etag=None): """Iterate over the issue comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueComment <IssueComment>`\ s """ url = self._build_url(base_url=self.links['comments']) return self._iter(int(number), url, IssueComment, etag=etag)
Merge this pull request. :param str commit_message: (optional), message to be used for the merge commit :param str sha: (optional), SHA that the pull request's head must match to allow the merge :returns: bool def merge(self, commit_message='', sha=None): """Merge this pull request. :param str commit_message: (optional), message to be used for the merge commit :param str sha: (optional), SHA that the pull request's head must match to allow the merge :returns: bool """ parameters = {'commit_message': commit_message} if sha: parameters['sha'] = sha url = self._build_url('merge', base_url=self._api) json = self._json(self._put(url, data=dumps(parameters)), 200) self.merge_commit_sha = json['sha'] return json['merged']
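A hedged sketch that merges only when GitHub has not already merged the pull request (illustrative names throughout):

import github3

gh = github3.login(token='<token>')
repo = gh.repository('octocat', 'hello-world')  # illustrative repository
pr = repo.pull_request(1) if repo else None     # illustrative PR number
if pr and not pr.is_merged():
    if pr.merge(commit_message='Merged via the API'):
        print('merge commit:', pr.merge_commit_sha)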
Return the patch def patch(self): """Return the patch""" resp = self._get(self._api, headers={'Accept': 'application/vnd.github.patch'}) return resp.content if self._boolean(resp, 200, 404) else None
Update this pull request. :param str title: (optional), title of the pull :param str body: (optional), body of the pull request :param str state: (optional), ('open', 'closed') :returns: bool def update(self, title=None, body=None, state=None): """Update this pull request. :param str title: (optional), title of the pull :param str body: (optional), body of the pull request :param str state: (optional), ('open', 'closed') :returns: bool """ data = {'title': title, 'body': body, 'state': state} json = None self._remove_none(data) if data: json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` def reply(self, body): """Reply to this review comment with a new review comment. :param str body: The text of the comment. :returns: The created review comment. :rtype: :class:`~github3.pulls.ReviewComment` """ url = self._build_url('comments', base_url=self.pull_request_url) index = self._api.rfind('/') + 1 in_reply_to = self._api[index:] json = self._json(self._post(url, data={ 'body': body, 'in_reply_to': in_reply_to }), 201) return ReviewComment(json, self) if json else None
Add ``login`` to this team. :returns: bool def add_member(self, login): """Add ``login`` to this team. :returns: bool """ warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) url = self._build_url('members', login, base_url=self._api) return self._boolean(self._put(url), 204, 404)
Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool def add_repo(self, repo): """Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool """ url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._put(url), 204, 404)
Edit this team. :param str name: (required) :param str permission: (optional), ('pull', 'push', 'admin') :returns: bool def edit(self, name, permission=''): """Edit this team. :param str name: (required) :param str permission: (optional), ('pull', 'push', 'admin') :returns: bool """ if name: data = {'name': name, 'permission': permission} json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
Checks if this team has access to ``repo`` :param str repo: (required), form: 'user/repo' :returns: bool def has_repo(self, repo): """Checks if this team has access to ``repo`` :param str repo: (required), form: 'user/repo' :returns: bool """ url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._get(url), 204, 404)
Invite the user to join this team. This returns a dictionary like so:: {'state': 'pending', 'url': 'https://api.github.com/teams/...'} :param str username: (required), user to invite to join this team. :returns: dictionary def invite(self, username): """Invite the user to join this team. This returns a dictionary like so:: {'state': 'pending', 'url': 'https://api.github.com/teams/...'} :param str username: (required), user to invite to join this team. :returns: dictionary """ url = self._build_url('memberships', username, base_url=self._api) return self._json(self._put(url), 200)
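A hedged sketch that invites a user to a team; the organization login and team id are illustrative:

import github3

gh = github3.login(token='<token>')
org = gh.organization('myorg')        # illustrative organization login
team = org.team(42) if org else None  # illustrative team id
if team:
    membership = team.invite('octocat')
    print(membership.get('state'))    # 'pending' until the invite is accepted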
Retrieve the membership information for the user. :param str username: (required), name of the user :returns: dictionary def membership_for(self, username): """Retrieve the membership information for the user. :param str username: (required), name of the user :returns: dictionary """ url = self._build_url('memberships', username, base_url=self._api) json = self._json(self._get(url), 200) return json or {}
Remove ``login`` from this team. :param str login: (required), login of the member to remove :returns: bool def remove_member(self, login): """Remove ``login`` from this team. :param str login: (required), login of the member to remove :returns: bool """ warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) url = self._build_url('members', login, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
Revoke this user's team membership. :param str username: (required), name of the team member :returns: bool def revoke_membership(self, username): """Revoke this user's team membership. :param str username: (required), name of the team member :returns: bool """ url = self._build_url('memberships', username, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
Remove ``repo`` from this team. :param str repo: (required), form: 'user/repo' :returns: bool def remove_repo(self, repo): """Remove ``repo`` from this team. :param str repo: (required), form: 'user/repo' :returns: bool """ url = self._build_url('repos', repo, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
Add ``login`` to ``team`` and thereby to this organization. .. warning:: This method is no longer valid. To add a member to a team, you must now retrieve the team directly, and use the ``invite`` method. Any user that is to be added to an organization must be added to a team as per the GitHub API. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the user when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_member`` on that team directly. :param str login: (required), login name of the user to be added :param str team: (required), team name :returns: bool def add_member(self, login, team): """Add ``login`` to ``team`` and thereby to this organization. .. warning:: This method is no longer valid. To add a member to a team, you must now retrieve the team directly, and use the ``invite`` method. Any user that is to be added to an organization must be added to a team as per the GitHub API. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the user when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_member`` on that team directly. :param str login: (required), login name of the user to be added :param str team: (required), team name :returns: bool """ warnings.warn( 'This is no longer supported by the GitHub API, see ' 'https://developer.github.com/changes/2014-09-23-one-more-week' '-before-the-add-team-member-api-breaking-change/', DeprecationWarning) for t in self.iter_teams(): if team == t.name: return t.add_member(login) return False
Add ``repo`` to ``team``. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the repo when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_repo`` on that team directly. :param str repo: (required), form: 'user/repo' :param str team: (required), team name :returns: bool def add_repo(self, repo, team): """Add ``repo`` to ``team``. .. note:: This method is of complexity O(n). This iterates over all teams in your organization and only adds the repo when the team name matches the team parameter above. If you want constant time, you should retrieve the team and call ``add_repo`` on that team directly. :param str repo: (required), form: 'user/repo' :param str team: (required), team name :returns: bool """ for t in self.iter_teams(): if team == t.name: return t.add_repo(repo) return False
Create a repository for this organization if the authenticated user is a member. :param str name: (required), name of the repository :param str description: (optional) :param str homepage: (optional) :param bool private: (optional), If ``True``, create a private repository. API default: ``False`` :param bool has_issues: (optional), If ``True``, enable issues for this repository. API default: ``True`` :param bool has_wiki: (optional), If ``True``, enable the wiki for this repository. API default: ``True`` :param bool has_downloads: (optional), If ``True``, enable downloads for this repository. API default: ``True`` :param int team_id: (optional), id of the team that will be granted access to this repository :param bool auto_init: (optional), auto initialize the repository. :param str gitignore_template: (optional), name of the template; this is ignored if auto_init = False. :returns: :class:`Repository <github3.repos.Repository>` .. warning:: ``name`` should be no longer than 100 characters def create_repo(self, name, description='', homepage='', private=False, has_issues=True, has_wiki=True, has_downloads=True, team_id=0, auto_init=False, gitignore_template=''): """Create a repository for this organization if the authenticated user is a member. :param str name: (required), name of the repository :param str description: (optional) :param str homepage: (optional) :param bool private: (optional), If ``True``, create a private repository. API default: ``False`` :param bool has_issues: (optional), If ``True``, enable issues for this repository. API default: ``True`` :param bool has_wiki: (optional), If ``True``, enable the wiki for this repository. API default: ``True`` :param bool has_downloads: (optional), If ``True``, enable downloads for this repository. API default: ``True`` :param int team_id: (optional), id of the team that will be granted access to this repository :param bool auto_init: (optional), auto initialize the repository. :param str gitignore_template: (optional), name of the template; this is ignored if auto_init = False. :returns: :class:`Repository <github3.repos.Repository>` .. warning:: ``name`` should be no longer than 100 characters """ url = self._build_url('repos', base_url=self._api) data = {'name': name, 'description': description, 'homepage': homepage, 'private': private, 'has_issues': has_issues, 'has_wiki': has_wiki, 'has_downloads': has_downloads, 'auto_init': auto_init, 'gitignore_template': gitignore_template} if team_id > 0: data.update({'team_id': team_id}) json = self._json(self._post(url, data), 201) return Repository(json, self) if json else None
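A hedged sketch creating a private, auto-initialised organization repository; the organization login and team id are illustrative:

import github3

gh = github3.login(token='<token>')
org = gh.organization('myorg')   # illustrative organization login
if org:
    repo = org.create_repo('tools',
                           description='Shared internal tooling',
                           private=True,
                           team_id=42,   # illustrative team to grant access
                           auto_init=True,
                           gitignore_template='Python')
    if repo:
        print(repo.html_url)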
Conceal ``login``'s membership in this organization. :returns: bool def conceal_member(self, login): """Conceal ``login``'s membership in this organization. :returns: bool """ url = self._build_url('public_members', login, base_url=self._api) return self._boolean(self._delete(url), 204, 404)
Assuming the authenticated user owns this organization, create and return a new team. :param str name: (required), name to be given to the team :param list repo_names: (optional) repositories, e.g. ['github/dotfiles'] :param str permission: (optional), options: - ``pull`` -- (default) members can not push or administer repositories accessible by this team - ``push`` -- members can push and pull but not administer repositories accessible by this team - ``admin`` -- members can push, pull and administer repositories accessible by this team :returns: :class:`Team <Team>` def create_team(self, name, repo_names=None, permission=''): """Assuming the authenticated user owns this organization, create and return a new team. :param str name: (required), name to be given to the team :param list repo_names: (optional) repositories, e.g. ['github/dotfiles'] :param str permission: (optional), options: - ``pull`` -- (default) members can not push or administer repositories accessible by this team - ``push`` -- members can push and pull but not administer repositories accessible by this team - ``admin`` -- members can push, pull and administer repositories accessible by this team :returns: :class:`Team <Team>` """ # Avoid a mutable default argument for repo_names data = {'name': name, 'repo_names': repo_names or [], 'permission': permission} url = self._build_url('teams', base_url=self._api) json = self._json(self._post(url, data), 201) return Team(json, self._session) if json else None
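A hedged sketch creating a push-access team seeded with one repository (illustrative names):

import github3

gh = github3.login(token='<token>')
org = gh.organization('myorg')   # illustrative organization login
if org:
    team = org.create_team('docs',
                           repo_names=['myorg/docs'],  # illustrative repo
                           permission='push')
    if team:
        print(team.name, team.id)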
Edit this organization. :param str billing_email: (optional) Billing email address (private) :param str company: (optional) :param str email: (optional) Public email address :param str location: (optional) :param str name: (optional) :returns: bool def edit(self, billing_email=None, company=None, email=None, location=None, name=None): """Edit this organization. :param str billing_email: (optional) Billing email address (private) :param str company: (optional) :param str email: (optional) Public email address :param str location: (optional) :param str name: (optional) :returns: bool """ json = None data = {'billing_email': billing_email, 'company': company, 'email': email, 'location': location, 'name': name} self._remove_none(data) if data: json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
Check if the user with login ``login`` is a public member. :returns: bool def is_public_member(self, login): """Check if the user with login ``login`` is a public member. :returns: bool """ url = self._build_url('public_members', login, base_url=self._api) return self._boolean(self._get(url), 204, 404)
Iterate over repos for this organization. :param str type: (optional), accepted values: ('all', 'public', 'member', 'private', 'forks', 'sources'), API default: 'all' :param int number: (optional), number of repos to return. Default: -1 will return all available. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Repository <github3.repos.Repository>` def iter_repos(self, type='', number=-1, etag=None): """Iterate over repos for this organization. :param str type: (optional), accepted values: ('all', 'public', 'member', 'private', 'forks', 'sources'), API default: 'all' :param int number: (optional), number of repos to return. Default: -1 will return all available. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Repository <github3.repos.Repository>` """ url = self._build_url('repos', base_url=self._api) params = {} if type in ('all', 'public', 'member', 'private', 'forks', 'sources'): params['type'] = type return self._iter(int(number), url, Repository, params, etag)
Iterate over teams that are part of this organization. :param int number: (optional), number of teams to return. Default: -1 returns all available teams. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Team <Team>`\ s def iter_teams(self, number=-1, etag=None): """Iterate over teams that are part of this organization. :param int number: (optional), number of teams to return. Default: -1 returns all available teams. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`Team <Team>`\ s """ url = self._build_url('teams', base_url=self._api) return self._iter(int(number), url, Team, etag=etag)
Make ``login``'s membership in this organization public. :returns: bool def publicize_member(self, login): """Make ``login``'s membership in this organization public. :returns: bool """ url = self._build_url('public_members', login, base_url=self._api) return self._boolean(self._put(url), 204, 404)
Remove ``repo`` from ``team``. :param str repo: (required), form: 'user/repo' :param str team: (required) :returns: bool def remove_repo(self, repo, team): """Remove ``repo`` from ``team``. :param str repo: (required), form: 'user/repo' :param str team: (required) :returns: bool """ for t in self.iter_teams(): if team == t.name: return t.remove_repo(repo) return False
Returns Team object with information about team specified by ``team_id``. :param int team_id: (required), unique id for the team :returns: :class:`Team <Team>` def team(self, team_id): """Returns Team object with information about team specified by ``team_id``. :param int team_id: (required), unique id for the team :returns: :class:`Team <Team>` """ json = None if int(team_id) > 0: url = self._build_url('teams', str(team_id)) json = self._json(self._get(url), 200) return Team(json, self._session) if json else None
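A hedged sketch that looks a team up by id and checks its repository access (illustrative ids and names):

import github3

gh = github3.login(token='<token>')
org = gh.organization('myorg')        # illustrative organization login
team = org.team(42) if org else None  # illustrative team id
if team and team.has_repo('myorg/docs'):
    print(team.name, 'already has access')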
Edit the user's membership. :param str state: (required), the state the membership should be in. Only accepts ``"active"``. :returns: itself def edit(self, state): """Edit the user's membership. :param str state: (required), the state the membership should be in. Only accepts ``"active"``. :returns: itself """ if state and state.lower() == 'active': data = dumps({'state': state.lower()}) json = self._json(self._patch(self._api, data=data), 200) self._update_attributes(json) return self
Users with push access to the repository can delete a release. :returns: True if successful; False if not successful def delete(self): """Users with push access to the repository can delete a release. :returns: True if successful; False if not successful """ url = self._api return self._boolean( self._delete(url, headers=Release.CUSTOM_HEADERS), 204, 404 )
Users with push access to the repository can edit a release. If the edit is successful, this object will update itself. :param str tag_name: (optional), Name of the tag to use :param str target_commitish: (optional), The "commitish" value that determines where the Git tag is created from. Defaults to the repository's default branch. :param str name: (optional), Name of the release :param str body: (optional), Description of the release :param boolean draft: (optional), True => Release is a draft :param boolean prerelease: (optional), True => Release is a prerelease :returns: True if successful; False if not successful def edit(self, tag_name=None, target_commitish=None, name=None, body=None, draft=None, prerelease=None): """Users with push access to the repository can edit a release. If the edit is successful, this object will update itself. :param str tag_name: (optional), Name of the tag to use :param str target_commitish: (optional), The "commitish" value that determines where the Git tag is created from. Defaults to the repository's default branch. :param str name: (optional), Name of the release :param str body: (optional), Description of the release :param boolean draft: (optional), True => Release is a draft :param boolean prerelease: (optional), True => Release is a prerelease :returns: True if successful; False if not successful """ url = self._api data = { 'tag_name': tag_name, 'target_commitish': target_commitish, 'name': name, 'body': body, 'draft': draft, 'prerelease': prerelease, } self._remove_none(data) r = self._session.patch( url, data=json.dumps(data), headers=Release.CUSTOM_HEADERS ) successful = self._boolean(r, 200, 404) if successful: # If the edit was successful, let's update the object. self.__init__(r.json(), self) return successful
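A hedged sketch that promotes a draft release; it assumes the 0.x accessor Repository.release(id), and the repository and release id are illustrative:

import github3

gh = github3.login(token='<token>')
repo = gh.repository('octocat', 'hello-world')   # illustrative repository
release = repo.release(99) if repo else None     # illustrative release id
if release and release.edit(name='v1.0.1', draft=False):
    print('release is now', release.name)       # object updated itself on success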