_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q9900 | PENMANCodec.handle_triple | train | def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
| python | {
"resource": ""
} |
q9901 | PENMANCodec._encode_penman | train | def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
| python | {
"resource": ""
} |
q9902 | Graph.reentrancies | train | def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
| python | {
"resource": ""
} |
q9903 | check_1d | train | def check_1d(inp):
"""
Check input to be a vector. Converts lists to np.ndarray.
Parameters
----------
inp : obj
Input vector
Returns
-------
numpy.ndarray or None
Input vector or None
Examples
--------
>>> check_1d([0, 1, 2, 3])
[0, 1, 2, 3]
| python | {
"resource": ""
} |
q9904 | check_2d | train | def check_2d(inp):
"""
Check input to be a matrix. Converts lists of lists to np.ndarray.
Also allows the input to be a scipy sparse matrix.
Parameters
----------
inp : obj
Input matrix
Returns
-------
numpy.ndarray, scipy.sparse or None
Input matrix or None
| python | {
"resource": ""
} |
q9905 | graph_to_laplacian | train | def graph_to_laplacian(G, normalized=True):
"""
Converts a graph from popular Python packages to Laplacian representation.
Currently support NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), normalized=False).todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None
"""
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
| python | {
"resource": ""
} |
q9906 | netlsd | train | def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes NetLSD signature from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
kernel : str
Either 'heat' or 'wave'. Type of a kernel to use for computation.
eigenvalues : str
Either string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
NetLSD signature
"""
if kernel not in {'heat', 'wave'}:
raise AttributeError('Unirecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel))
if not isinstance(normalized_laplacian, bool):
raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian))
if not isinstance(eigenvalues, (int, tuple, str)):
raise AttributeError('Unirecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues)))
if not | python | {
"resource": ""
} |
q9907 | heat | train | def heat(inp, timescales=np.logspace(-2, 2, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes heat kernel trace from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
eigenvalues : str
Either string or int or tuple
| python | {
"resource": ""
} |
q9908 | wave | train | def wave(inp, timescales=np.linspace(0, 2*np.pi, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes wave kernel trace from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
eigenvalues : str
Either string or int or tuple
| python | {
"resource": ""
} |
q9909 | _hkt | train | def _hkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes heat kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
| python | {
"resource": ""
} |
q9910 | _wkt | train | def _wkt(eivals, timescales, normalization, normalized_laplacian):
"""
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
| python | {
"resource": ""
} |
q9911 | SortedListWithKey.clear | train | def clear(self):
"""Remove all the elements from the list."""
self._len = 0
del self._maxes[:]
| python | {
"resource": ""
} |
q9912 | SortedListWithKey.islice | train | def islice(self, start=None, stop=None, reverse=False):
"""
Returns an iterator that slices `self` from `start` to `stop` index,
inclusive and exclusive respectively.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end.
"""
_len = self._len
if not _len:
return iter(())
start, stop, step = self._slice(slice(start, stop))
| python | {
"resource": ""
} |
q9913 | SortedListWithKey.copy | train | def copy(self):
"""Return a shallow copy of the sorted list."""
| python | {
"resource": ""
} |
q9914 | not26 | train | def not26(func):
"""Function decorator for methods not implemented in Python 2.6."""
@wraps(func)
def errfunc(*args, **kwargs):
raise NotImplementedError
| python | {
"resource": ""
} |
q9915 | SortedDict.copy | train | def copy(self):
"""Return a shallow copy of the sorted dictionary."""
| python | {
"resource": ""
} |
q9916 | SummaryTracker.create_summary | train | def create_summary(self):
"""Return a summary.
See also the notes on ignore_self in the class as well as the
initializer documentation.
"""
if not self.ignore_self:
res = summary.summarize(muppy.get_objects())
else:
# If the user requested the data required to store summaries to be
# ignored in the summaries, we need to identify all objects which
# are related to each summary stored.
# Thus we build a list of all objects used for summary storage as
# well as a dictionary which tells us how often an object is
# referenced by the summaries.
# During this identification process, more objects are referenced,
# namely int objects identifying referenced objects as well as the
# corresponding count.
# For all these objects it will be checked whether they are
# referenced from outside the monitor's scope. | python | {
"resource": ""
} |
q9917 | SummaryTracker.diff | train | def diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
res = None
if summary2 is None:
self.s1 = self.create_summary()
if summary1 is None:
| python | {
"resource": ""
} |
q9918 | SummaryTracker.print_diff | train | def print_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 | python | {
"resource": ""
} |
q9919 | ObjectTracker._get_objects | train | def _get_objects(self, ignore=[]):
"""Get all currently existing objects.
XXX - ToDo: This method is a copy&paste from muppy.get_objects, but
some modifications are applied. Specifically, it allows to ignore
objects (which includes the current frame).
keyword arguments
ignore -- list of objects to ignore
"""
def remove_ignore(objects, ignore=[]):
# remove all objects listed in the ignore list
res = []
for o in objects:
if not compat.object_in_list(o, ignore):
res.append(o)
return res
tmp = gc.get_objects()
ignore.append(inspect.currentframe()) #PYCHOK change ignore
ignore.append(self) #PYCHOK change ignore
if hasattr(self, 'o0'): ignore.append(self.o0) #PYCHOK change ignore
if hasattr(self, 'o1'): ignore.append(self.o1) #PYCHOK change ignore
ignore.append(ignore) #PYCHOK change ignore
ignore.append(remove_ignore) #PYCHOK change ignore
# this implies that referenced objects are also ignored
tmp = remove_ignore(tmp, ignore)
res = []
for o in tmp:
# gc.get_objects returns only container objects, but we also want
# the objects | python | {
"resource": ""
} |
q9920 | ObjectTracker.get_diff | train | def get_diff(self, ignore=[]):
"""Get the diff to the last time the state of objects was measured.
keyword arguments
ignore -- list of objects to ignore
"""
# ignore this and the caller frame
ignore.append(inspect.currentframe()) #PYCHOK change ignore
self.o1 = self._get_objects(ignore)
diff | python | {
"resource": ""
} |
q9921 | ObjectTracker.print_diff | train | def print_diff(self, ignore=[]):
"""Print the diff to the last time the state of objects was measured.
keyword arguments
ignore -- list of objects to ignore
"""
# ignore this and the caller frame
ignore.append(inspect.currentframe()) #PYCHOK change ignore
diff = self.get_diff(ignore)
print("Added objects:")
| python | {
"resource": ""
} |
q9922 | jaccard | train | def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, | python | {
"resource": ""
} |
q9923 | sorensen | train | def sorensen(seq1, seq2):
"""Compute the Sorensen distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and | python | {
"resource": ""
} |
q9924 | _long2bytes | train | def _long2bytes(n, blocksize=0):
"""Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front
of the byte string with binary zeros so that the length is a multiple
of blocksize.
"""
# After much testing, this algorithm was deemed to be the fastest.
s = ''
pack = struct.pack
while n > 0:
### CHANGED FROM '>I' TO '<I'. (DCG)
s = pack('<I', n & 0xffffffffL) + s
### --------------------------
n = n >> 32
# Strip off leading zeros.
for i in range(len(s)):
if s[i] | python | {
"resource": ""
} |
q9925 | MD5.init | train | def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0L
self.input = []
# Load magic initialization constants.
| python | {
"resource": ""
} |
q9926 | MeliaeAdapter.value | train | def value( self, node, parent=None ):
"""Return value used to compare size of this node"""
# this is the *weighted* size/contribution of the node
try:
return node['contribution']
except KeyError, err:
| python | {
"resource": ""
} |
q9927 | MeliaeAdapter.label | train | def label( self, node ):
"""Return textual description of this node"""
result = []
if node.get('type'):
result.append( node['type'] )
if node.get('name' ):
result.append( node['name'] )
elif node.get('value') is not None:
result.append( unicode(node['value'])[:32])
if 'module' in node and not node['module'] in result:
result.append( ' in %s'%( node['module'] ))
if node.get( 'size' ):
result.append( '%s'%( mb( node['size'] )))
if | python | {
"resource": ""
} |
q9928 | MeliaeAdapter.best_parent | train | def best_parent( self, node, tree_type=None ):
"""Choose the best parent for a given node"""
parents = self.parents(node)
selected_parent = None
if node['type'] == 'type':
module = ".".join( node['name'].split( '.' )[:-1] )
if module:
for mod in parents:
if mod['type'] == 'module' and mod['name'] == module:
| python | {
"resource": ""
} |
q9929 | Stats.load_stats | train | def load_stats(self, fdump):
"""
Load the data from a dump file.
The argument `fdump` can be either a filename or an open file object
that requires read access.
"""
if isinstance(fdump, type('')):
| python | {
"resource": ""
} |
q9930 | Stats.annotate_snapshot | train | def annotate_snapshot(self, snapshot):
"""
Store additional statistical data in snapshot.
"""
if hasattr(snapshot, 'classes'):
return
snapshot.classes = {}
for classname in list(self.index.keys()):
total = 0
active = 0
merged = Asized(0, 0)
for tobj in self.index[classname]:
_merge_objects(snapshot.timestamp, merged, tobj)
total += tobj.get_size_at_time(snapshot.timestamp)
if tobj.birth < snapshot.timestamp and \
(tobj.death is None or tobj.death > snapshot.timestamp):
active += 1
try:
pct = total * 100.0 / snapshot.total
except ZeroDivisionError: # pragma: no cover
pct = 0 | python | {
"resource": ""
} |
q9931 | ConsoleStats.print_object | train | def print_object(self, tobj):
"""
Print the gathered information of object `tobj` in human-readable format.
"""
if tobj.death:
self.stream.write('%-32s ( free ) %-35s\n' % (
trunc(tobj.name, 32, left=1), trunc(tobj.repr, 35)))
else:
self.stream.write('%-32s 0x%08x %-35s\n' % (
trunc(tobj.name, 32, left=1),
tobj.id,
trunc(tobj.repr, 35)
))
if tobj.trace:
| python | {
"resource": ""
} |
q9932 | ConsoleStats.print_stats | train | def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contain the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contain the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
| python | {
"resource": ""
} |
q9933 | ConsoleStats.print_summary | train | def print_summary(self):
"""
Print per-class summary for each snapshot.
"""
# Emit class summaries for each snapshot
classlist = self.tracked_classes
fobj = self.stream
fobj.write('---- SUMMARY '+'-'*66+'\n')
for snapshot in self.snapshots:
self.annotate_snapshot(snapshot)
fobj.write('%-35s %11s %12s %12s %5s\n' % (
| python | {
"resource": ""
} |
q9934 | HtmlStats.print_class_details | train | def print_class_details(self, fname, classname):
"""
Print detailed statistics and instances for the class `classname`. All
data will be written to the file `fname`.
"""
fobj = open(fname, "w")
fobj.write(self.header % (classname, self.style))
fobj.write("<h1>%s</h1>\n" % (classname))
sizes = [tobj.get_max_size() for tobj in self.index[classname]]
total = 0
for s in sizes:
total += s
data = {'cnt': len(self.index[classname]), 'cls': classname}
data['avg'] = pp(total / len(sizes))
data['max'] = pp(max(sizes))
data['min'] = pp(min(sizes))
fobj.write(self.class_summary % data)
fobj.write(self.charts[classname])
fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
for snapshot in self.snapshots:
if classname in snapshot.classes:
merged = snapshot.classes[classname]['merged']
fobj.write(self.class_snapshot % {
'name': snapshot.desc, 'cls':classname, 'total': pp(merged.size)
})
if merged.refs:
self._print_refs(fobj, merged.refs, merged.size)
else:
fobj.write('<p>No per-referent sizes recorded.</p>\n')
fobj.write("<h2>Instances</h2>\n")
for tobj | python | {
"resource": ""
} |
q9935 | HtmlStats.relative_path | train | def relative_path(self, filepath, basepath=None):
"""
Convert the filepath path to a relative path against basepath. By
default basepath is self.basedir.
"""
if basepath is None:
basepath = self.basedir
if | python | {
"resource": ""
} |
q9936 | HtmlStats.create_title_page | train | def create_title_page(self, filename, title=''):
"""
Output the title page.
"""
fobj = open(filename, "w")
fobj.write(self.header % (title, self.style))
fobj.write("<h1>%s</h1>\n" % title)
fobj.write("<h2>Memory distribution over time</h2>\n")
fobj.write(self.charts['snapshots'])
fobj.write("<h2>Snapshots statistics</h2>\n")
fobj.write('<table id="nb">\n')
classlist = list(self.index.keys())
classlist.sort()
for snapshot in self.snapshots:
fobj.write('<tr><td>\n')
fobj.write('<table id="tl" rules="rows">\n')
fobj.write("<h3>%s snapshot at %s</h3>\n" % (
snapshot.desc or 'Untitled',
pp_timestamp(snapshot.timestamp)
))
data = {}
data['sys'] = pp(snapshot.system_total.vsz)
data['tracked'] = pp(snapshot.tracked_total)
data['asizeof'] = pp(snapshot.asizeof_total)
data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
fobj.write(self.snapshot_summary % data)
if snapshot.tracked_total:
| python | {
"resource": ""
} |
q9937 | HtmlStats.create_lifetime_chart | train | def create_lifetime_chart(self, classname, filename=''):
"""
Create chart that depicts the lifetime of the instance registered with
`classname`. The output is written to `filename`.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, savefig
except ImportError:
return HtmlStats.nopylab_msg % (classname+" lifetime")
cnt = []
| python | {
"resource": ""
} |
q9938 | HtmlStats.create_snapshot_chart | train | def create_snapshot_chart(self, filename=''):
"""
Create chart that depicts the memory allocation over time apportioned to
the tracked classes.
"""
try:
from pylab import figure, title, xlabel, ylabel, plot, fill, legend, savefig
import matplotlib.mlab as mlab
except ImportError:
return self.nopylab_msg % ("memory allocation")
classlist = self.tracked_classes
times = [snapshot.timestamp for snapshot in self.snapshots]
base = [0] * len(self.snapshots)
poly_labels = []
polys = []
for cn in classlist:
pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots]
if max(pct) > 3.0:
sz = [float(fp.classes[cn]['sum'])/(1024*1024) for fp in self.snapshots]
sz = [sx+sy for sx, sy in zip(base, sz)]
xp, yp = mlab.poly_between(times, base, sz)
polys.append( ((xp, yp), {'label': cn}) )
poly_labels.append(cn)
base | python | {
"resource": ""
} |
q9939 | HtmlStats.create_pie_chart | train | def create_pie_chart(self, snapshot, filename=''):
"""
Create a pie chart that depicts the distribution of the allocated memory
for a given `snapshot`. The chart is saved to `filename`.
"""
try:
from pylab import figure, title, pie, axes, savefig
from pylab import sum as pylab_sum
except ImportError:
return self.nopylab_msg % ("pie_chart")
# Don't bother illustrating a pie without pieces.
if not snapshot.tracked_total:
return ''
classlist = []
sizelist = []
for k, v in list(snapshot.classes.items()):
| python | {
"resource": ""
} |
q9940 | HtmlStats.create_html | train | def create_html(self, fname, title="ClassTracker Statistics"):
"""
Create HTML page `fname` and additional files in a directory derived
from `fname`.
"""
# Create a folder to store the charts and additional HTML files.
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] + '_files'
if not os.path.isdir(self.filesdir):
os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}
# Annotate all snapshots in advance
self.annotate()
# Create charts. The tags to show the images are returned and stored in
# the self.charts dictionary. This allows to return alternative text if
# the chart creation framework is not available.
self.charts = {}
fn = os.path.join(self.filesdir, 'timespace.png')
self.charts['snapshots'] = self.create_snapshot_chart(fn)
| python | {
"resource": ""
} |
q9941 | Path.write_bytes | train | def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
if not isinstance(data, six.binary_type):
raise TypeError(
'data must be %s, not %s' | python | {
"resource": ""
} |
q9942 | Path.write_text | train | def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, six.text_type):
raise TypeError(
'data must be %s, not | python | {
"resource": ""
} |
q9943 | sort_group | train | def sort_group(d, return_only_first=False):
''' Sort a dictionary of relative paths and cluster equal paths together at the same time '''
# First, sort the paths in order (this must be a couple: (parent_dir, filename), so that there's no ambiguity because else a file at root will be considered as being after a folder/file since the ordering is done alphabetically without any notion of tree structure).
d_sort = sort_dict_of_paths(d)
# Pop the first item in the ordered list
base_elt = (-1, None)
while (base_elt[1] is None and d_sort):
base_elt = d_sort.pop(0)
# No element, then we just return
if base_elt[1] is None:
return None
# Else, we will now group equivalent files together (remember we are working on multiple directories, so we can have multiple equivalent relative filepaths, but of course the absolute filepaths are different).
else:
# Init by creating the first group and pushing the first ordered filepath into the first group
lst = []
lst.append([base_elt])
if d_sort:
# For each subsequent filepath
for elt in d_sort:
# If the filepath is not empty (generator died)
| python | {
"resource": ""
} |
q9944 | RefBrowser.get_tree | train | def get_tree(self):
"""Get a tree of referrers of the root object."""
self.ignore.append(inspect.currentframe()) | python | {
"resource": ""
} |
q9945 | RefBrowser._get_tree | train | def _get_tree(self, root, maxdepth):
"""Workhorse of the get_tree implementation.
This is an recursive method which is why we have a wrapper method.
root is the current root object of the tree which should be returned.
Note that root is not of the type _Node.
maxdepth defines how much further down the from the root the tree
should be build.
"""
self.ignore.append(inspect.currentframe())
res = _Node(root, self.str_func) #PYCHOK use root parameter
self.already_included.add(id(root)) #PYCHOK use root parameter
if maxdepth == 0:
return res
objects = gc.get_referrers(root) #PYCHOK use root parameter
self.ignore.append(objects)
for o in objects:
# XXX: find a better way to ignore dict of _Node objects
if isinstance(o, dict):
sampleNode = _Node(1)
if list(sampleNode.__dict__.keys()) == list(o.keys()):
| python | {
"resource": ""
} |
q9946 | StreamBrowser.print_tree | train | def print_tree(self, tree=None):
""" Print referrers tree to console.
keyword arguments
tree -- if not None, the passed tree will be printed. Otherwise it is
based on the rootobject.
"""
| python | {
"resource": ""
} |
q9947 | StreamBrowser._print | train | def _print(self, tree, prefix, carryon):
"""Compute and print a new line of the tree.
This is a recursive function.
arguments
tree -- tree to print
prefix -- prefix to the current line to print
carryon -- prefix which is used to carry on the vertical lines
"""
level = prefix.count(self.cross) + prefix.count(self.vline)
len_children = 0
if isinstance(tree , _Node):
len_children = len(tree.children)
# add vertex
prefix += str(tree)
# and as many spaces as the vertex is long
carryon += self.space * len(str(tree))
if (level == self.maxdepth) or (not isinstance(tree, _Node)) or\
(len_children == 0):
self.stream.write(prefix+'\n')
return
else:
# add | python | {
"resource": ""
} |
q9948 | InteractiveBrowser.main | train | def main(self, standalone=False):
"""Create interactive browser window.
keyword arguments
standalone -- Set to true, if the browser is not attached to other
windows
"""
window = _Tkinter.Tk()
| python | {
"resource": ""
} |
q9949 | format_meter | train | def format_meter(n, total, elapsed, ncols=None, prefix='',
unit=None, unit_scale=False, ascii=False):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
The expected total number of iterations. If None, only basic progress
statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified, dynamically
resizes the progress meter [default: None]. The fallback meter
width is 10.
prefix : str, optional
Prefix message (included in total width).
unit : str, optional
String that will be used to define the unit of each iteration.
[default: "it"]
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.). [default: False]
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters (1-9 #).
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# in case the total is wrong (n is above the total), then
# we switch to the mode without showing the total prediction
# (since ETA would be wrong anyway)
if total and n > total:
total = None
elapsed_str = format_interval(elapsed)
if elapsed:
if unit_scale:
rate = format_sizeof(n / elapsed, suffix='')
else:
rate = '{0:5.2f}'.format(n / elapsed)
else:
rate = '?'
rate_unit = unit if unit else 'it'
if not unit:
unit = ''
n_fmt = str(n)
total_fmt = str(total)
if unit_scale:
n_fmt = format_sizeof(n, suffix='')
if total:
total_fmt = format_sizeof(total, suffix='')
if total:
frac = n / total
percentage = frac * 100
remaining_str = format_interval(elapsed * (total-n) / n) if n else '?'
l_bar = '{1}{0:.0f}%|'.format(percentage, prefix) if prefix else \
| python | {
"resource": ""
} |
q9950 | getIcon | train | def getIcon( data ):
"""Return the data from the resource as a wxIcon"""
import cStringIO
stream = | python | {
"resource": ""
} |
q9951 | main | train | def main():
"""Mainloop for the application"""
logging.basicConfig(level=logging.INFO)
| python | {
"resource": ""
} |
q9952 | MainFrame.CreateMenuBar | train | def CreateMenuBar(self):
"""Create our menu-bar for triggering operations"""
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
# self.packageMenuItem = menu.AppendCheckItem(
# ID_PACKAGE_VIEW, _('&File View'),
# _('View time spent by package/module')
# )
self.percentageMenuItem = menu.AppendCheckItem(
ID_PERCENTAGE_VIEW, _('&Percentage View'),
_('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
ID_ROOT_VIEW, _('&Root View (Home)'),
_('View the root of the tree')
)
self.backViewItem = menu.Append(
ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
ID_UP_VIEW, _('&Up'),
_('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
ID_MORE_SQUARE, _('&Hierarchic Squares'),
_('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
| python | {
"resource": ""
} |
q9953 | MainFrame.CreateSourceWindow | train | def CreateSourceWindow(self, tabs):
"""Create our source-view window for tabs"""
if editor and self.sourceCodeControl is None:
self.sourceCodeControl = wx.py.editwindow.EditWindow(
self.tabs, -1
)
| python | {
"resource": ""
} |
q9954 | MainFrame.SetupToolBar | train | def SetupToolBar(self):
"""Create the toolbar for common actions"""
tb = self.CreateToolBar(self.TBFLAGS)
tsize = (24, 24)
tb.ToolBitmapSize = tsize
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,
tsize)
tb.AddLabelTool(ID_OPEN, "Open", open_bmp, shortHelp="Open",
longHelp="Open a (c)Profile trace file")
if not osx:
tb.AddSeparator()
# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)
self.rootViewTool = tb.AddLabelTool(
ID_ROOT_VIEW, _("Root View"),
wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),
shortHelp=_("Display the root of the current view tree (home view)")
)
self.rootViewTool = tb.AddLabelTool(
ID_BACK_VIEW, _("Back"),
wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),
shortHelp=_("Back to the previously activated node in the call tree")
)
self.upViewTool = tb.AddLabelTool(
ID_UP_VIEW, _("Up"),
wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),
shortHelp=_("Go one level up the call tree (highest-percentage parent)")
)
if not osx:
tb.AddSeparator()
| python | {
"resource": ""
} |
q9955 | MainFrame.OnViewTypeTool | train | def OnViewTypeTool( self, event ):
"""When the user changes the selection, make that our selection"""
new = self.viewTypeTool.GetStringSelection()
| python | {
"resource": ""
} |
q9956 | MainFrame.SetPercentageView | train | def SetPercentageView(self, percentageView):
"""Set whether to display percentage or absolute values"""
self.percentageView = percentageView
self.percentageMenuItem.Check(self.percentageView)
| python | {
"resource": ""
} |
q9957 | MainFrame.OnUpView | train | def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
| python | {
"resource": ""
} |
q9958 | MainFrame.OnBackView | train | def OnBackView(self, event):
"""Request to move backward in the history"""
self.historyIndex -= 1
try:
| python | {
"resource": ""
} |
q9959 | MainFrame.OnRootView | train | def OnRootView(self, event):
"""Reset view to the root of the tree"""
self.adapter, tree, rows = self.RootNode()
| python | {
"resource": ""
} |
q9960 | MainFrame.OnNodeActivated | train | def OnNodeActivated(self, event):
"""Double-click or enter on a node in some control..."""
self.activated_node = self.selected_node = event.node
self.squareMap.SetModel(event.node, self.adapter)
self.squareMap.SetSelected( event.node )
if editor:
if self.SourceShowFile(event.node): | python | {
"resource": ""
} |
q9961 | MainFrame.RecordHistory | train | def RecordHistory(self):
"""Add the given node to the history-set"""
if not self.restoringHistory:
record = self.activated_node
if self.historyIndex < -1:
try:
del self.history[self.historyIndex+1:]
except AttributeError, err:
pass
| python | {
"resource": ""
} |
q9962 | MainFrame.RootNode | train | def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType ) | python | {
"resource": ""
} |
q9963 | MainFrame.SaveState | train | def SaveState( self, config_parser ):
"""Retrieve window state to be restored on the next run..."""
if not config_parser.has_section( 'window' ):
config_parser.add_section( 'window' )
if self.IsMaximized():
config_parser.set( 'window', 'maximized', str(True))
else:
config_parser.set( 'window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set( 'window', 'width', | python | {
"resource": ""
} |
q9964 | MainFrame.LoadState | train | def LoadState( self, config_parser ):
"""Set our window state from the given config_parser instance"""
if not config_parser:
return
if (
not config_parser.has_section( 'window' ) or (
config_parser.has_option( 'window','maximized' ) and
config_parser.getboolean( 'window', 'maximized' )
)
):
self.Maximize(True)
try:
width,height,x,y = [
config_parser.getint( 'window',key )
for key in ['width','height','x','y']
]
self.SetPosition( (x,y))
self.SetSize( (width,height))
except ConfigParser.NoSectionError, err:
# the file isn't written yet, so don't even warn...
pass
except Exception, err:
# this is just convenience, if it breaks in *any* way, ignore it...
log.error(
"Unable to load window preferences, ignoring: %s", traceback.format_exc()
)
try:
| python | {
"resource": ""
} |
q9965 | is_file | train | def is_file(dirname):
'''Checks if a path is an actual file that exists'''
if not os.path.isfile(dirname):
msg = "{0} is not an existing file".format(dirname)
| python | {
"resource": ""
} |
q9966 | is_dir | train | def is_dir(dirname):
'''Checks if a path is an actual directory that exists'''
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
| python | {
"resource": ""
} |
q9967 | is_dir_or_file | train | def is_dir_or_file(dirname):
'''Checks if a path is an actual directory that exists or a file'''
if not os.path.isdir(dirname) and not os.path.isfile(dirname):
msg = "{0} is not a directory | python | {
"resource": ""
} |
q9968 | fullpath | train | def fullpath(relpath):
'''Relative path to absolute'''
if (type(relpath) is object | python | {
"resource": ""
} |
q9969 | remove_if_exist | train | def remove_if_exist(path): # pragma: no cover
"""Delete a file or a directory recursively if it exists, else no exception is raised"""
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
| python | {
"resource": ""
} |
q9970 | copy_any | train | def copy_any(src, dst, only_missing=False): # pragma: no cover
"""Copy a file or a directory tree, deleting the destination before processing"""
if not only_missing:
remove_if_exist(dst)
if os.path.exists(src):
if os.path.isdir(src):
if not only_missing:
shutil.copytree(src, dst, symlinks=False, ignore=None)
else:
for dirpath, filepath in recwalk(src):
| python | {
"resource": ""
} |
q9971 | group_files_by_size | train | def group_files_by_size(fileslist, multi): # pragma: no cover
''' Cluster files into the specified number of groups, where each groups total size is as close as possible to each other.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C)
'''
flord = OrderedDict(sorted(fileslist.items(), key=lambda x: x[1], reverse=True))
if multi <= 1:
fgrouped = {}
i = 0
for x in flord.keys():
i += 1
fgrouped[i] = [[x]]
return fgrouped
fgrouped = {}
i = 0
while flord:
i += 1
| python | {
"resource": ""
} |
q9972 | group_files_by_size_fast | train | def group_files_by_size_fast(fileslist, nbgroups, mode=1): # pragma: no cover
'''Given a files list with sizes, output a list where the files are grouped in nbgroups per cluster.
Pseudo-code for algorithm in O(n log(g)) (thank's to insertion sort or binary search trees)
See for more infos: http://cs.stackexchange.com/questions/44406/fast-algorithm-for-clustering-groups-of-elements-given-their-size-time/44614#44614
For each file:
- If to-fill list is empty or file.size > first-key(to-fill):
* Create cluster c with file in first group g1
* Add to-fill[file.size].append([c, g2], [c, g3], ..., [c, gn])
- Else:
* ksize = first-key(to-fill)
* c, g = to-fill[ksize].popitem(0)
* Add file to cluster c in group g
* nsize = ksize - file.size
* if nsize > 0:
. to-fill[nsize].append([c, g])
. sort to-fill if not an automatic ordering structure
'''
ftofill = SortedList()
ftofill_pointer = {}
fgrouped = [] # [] or {}
ford = sorted(fileslist.iteritems(), key=lambda x: x[1])
last_cid = -1
while ford:
fname, fsize = ford.pop()
#print "----\n"+fname, fsize
#if ftofill: print "beforebranch", fsize, ftofill[-1]
#print ftofill
if not ftofill or fsize > ftofill[-1]:
last_cid += 1
#print "Branch A: create cluster %i" % last_cid
fgrouped.append([])
#fgrouped[last_cid] = []
fgrouped[last_cid].append([fname])
if mode==0:
for g in xrange(nbgroups-1, 0, -1):
fgrouped[last_cid].append([])
if not fsize in ftofill_pointer:
ftofill_pointer[fsize] = []
ftofill_pointer[fsize].append((last_cid, g))
ftofill.add(fsize)
else:
| python | {
"resource": ""
} |
q9973 | grouped_count_sizes | train | def grouped_count_sizes(fileslist, fgrouped): # pragma: no cover
'''Compute the total size per group and total number of files. Useful to check that everything is OK.'''
fsizes = {}
total_files = 0
allitems = None
if isinstance(fgrouped, dict):
allitems = fgrouped.iteritems()
elif isinstance(fgrouped, list):
allitems = enumerate(fgrouped)
for fkey, cluster in allitems:
fsizes[fkey] = []
for subcluster in cluster:
tot = 0
| python | {
"resource": ""
} |
q9974 | ConfigPanel.GetOptions | train | def GetOptions(self):
"""
returns the collective values from all of the
widgets contained in the panel"""
values = [c.GetValue()
| python | {
"resource": ""
} |
q9975 | Positional.GetValue | train | def GetValue(self):
'''
Positionals have no associated options_string,
so only the supplied arguments are returned.
The order is assumed to be the same as the order
of declaration in the client code
Returns
"argument_value"
'''
| python | {
"resource": ""
} |
q9976 | Flag.Update | train | def Update(self, size):
'''
Custom wrapper calculator to account for the
increased size of the _msg widget after being
inlined with the wx.CheckBox
'''
if self._msg is None:
return
help_msg = self._msg | python | {
"resource": ""
} |
q9977 | get_path | train | def get_path(language):
''' Returns the full path to the language file '''
filename = language.lower() + '.json'
lang_file_path = os.path.join(_DEFAULT_DIR, filename)
if not os.path.exists(lang_file_path):
| python | {
"resource": ""
} |
q9978 | trunc | train | def trunc(obj, max, left=0):
"""
Convert `obj` to string, eliminate newlines and truncate the string to `max`
characters. If there are more characters in the string add ``...`` to the
string. With `left=True`, the string can be truncated at the beginning.
@note: Does not catch exceptions when converting `obj` to string with `str`.
>>> trunc('This is a long text.', 8)
This ...
| python | {
"resource": ""
} |
q9979 | pp | train | def pp(i, base=1024):
"""
Pretty-print the integer `i` as a human-readable size representation.
"""
degree = 0
pattern = "%4d %s"
while i > base:
pattern = "%7.2f %s"
i | python | {
"resource": ""
} |
q9980 | pp_timestamp | train | def pp_timestamp(t):
"""
Get a friendly timestamp represented as a string.
"""
if t is None:
return '' | python | {
"resource": ""
} |
q9981 | GarbageGraph.print_stats | train | def print_stats(self, stream=None):
"""
Log annotated garbage objects to console or file.
:param stream: open file, uses sys.stdout if not given
"""
if not stream: # pragma: no cover
stream = sys.stdout
self.metadata.sort(key=lambda x: -x.size)
stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
for g in self.metadata:
| python | {
"resource": ""
} |
q9982 | Profile.disable | train | def disable(self, threads=True):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
| python | {
"resource": ""
} |
q9983 | Tee.flush | train | def flush(self):
""" Force commit changes to the file and stdout """
if not self.nostdout:
| python | {
"resource": ""
} |
q9984 | PStatsAdapter.parents | train | def parents(self, node):
"""Determine all parents of node in our tree"""
return [
| python | {
"resource": ""
} |
q9985 | PStatsAdapter.filename | train | def filename( self, node ):
"""Extension to squaremap api to provide "what file is this" information"""
if not node.directory:
# TODO: any cases other than built-ins?
return None
if node.filename == '~':
| python | {
"resource": ""
} |
q9986 | get_obj | train | def get_obj(ref):
"""Get object from string reference."""
oid = int(ref)
| python | {
"resource": ""
} |
q9987 | process | train | def process():
"""Get process overview."""
pmi = ProcessMemoryInfo()
| python | {
"resource": ""
} |
q9988 | tracker_index | train | def tracker_index():
"""Get tracker overview."""
stats = server.stats
if stats and stats.snapshots:
stats.annotate()
timeseries = []
for cls in stats.tracked_classes:
series = []
for snapshot in stats.snapshots:
series.append(snapshot.classes.get(cls, {}).get('sum', 0))
timeseries.append((cls, series))
series = [s.overhead for s in stats.snapshots]
timeseries.append(("Profiling overhead", series))
if stats.snapshots[0].system_total.data_segment:
# Assume tracked data resides in the data segment
| python | {
"resource": ""
} |
q9989 | tracker_class | train | def tracker_class(clsname):
"""Get class instance details."""
stats = server.stats
if not stats:
bottle.redirect('/tracker')
| python | {
"resource": ""
} |
q9990 | garbage_cycle | train | def garbage_cycle(index):
"""Get reference cycle details."""
graph = _compute_garbage_graphs()[int(index)]
| python | {
"resource": ""
} |
q9991 | _get_graph | train | def _get_graph(graph, filename):
"""Retrieve or render a graph."""
try:
rendered = graph.rendered_file
except AttributeError:
| python | {
"resource": ""
} |
q9992 | garbage_graph | train | def garbage_graph(index):
"""Get graph representation of reference cycle."""
graph = _compute_garbage_graphs()[int(index)]
reduce_graph = bottle.request.GET.get('reduce', '')
if reduce_graph:
graph = graph.reduce_to_cycles()
if not graph:
return None
filename = 'garbage%so%s.png' % | python | {
"resource": ""
} |
q9993 | _winreg_getShellFolder | train | def _winreg_getShellFolder( name ):
"""Get a shell folder by string name from the registry"""
k = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
| python | {
"resource": ""
} |
q9994 | appdatadirectory | train | def appdatadirectory( ):
"""Attempt to retrieve the current user's app-data directory
This is the location where application-specific
files should be stored. On *nix systems, this will
be the ${HOME}/.config directory. On Win32 systems, it will be
the "Application Data" directory. Note that for
Win32 systems it is normal to create a sub-directory
for storing data in the Application Data directory.
"""
if shell:
# on Win32 and have Win32all extensions, best-case
return shell_getShellFolder(shellcon.CSIDL_APPDATA)
if _winreg:
# on Win32, but no Win32 shell com available, this uses
# a direct registry access, likely to fail on Win98/Me
return _winreg_getShellFolder( 'AppData' )
# okay, what if for some reason _winreg is missing? would we want to | python | {
"resource": ""
} |
q9995 | get_objects | train | def get_objects(remove_dups=True, include_frames=False):
"""Return a list of all known objects excluding frame objects.
If (outer) frame objects shall be included, pass `include_frames=True`. In
order to prevent building reference cycles, the current frame object (of
the caller of get_objects) is ignored. This will not prevent creating
reference cycles if the object list is passed up the call-stack. Therefore,
frame objects are not included by default.
Keyword arguments:
remove_dups -- if True, all duplicate objects will be removed.
include_frames -- if True, includes frame objects.
"""
gc.collect()
# Do not initialize local variables before calling gc.get_objects or those
# will be included in the list. Furthermore, ignore frame objects to
# prevent reference cycles.
tmp = gc.get_objects()
tmp = [o for o in | python | {
"resource": ""
} |
q9996 | get_size | train | def get_size(objects):
"""Compute the total size of all elements in objects."""
res = 0
for o in objects:
| python | {
"resource": ""
} |
q9997 | get_diff | train | def get_diff(left, right):
"""Get the difference of both lists.
The result will be a dict with this form {'+': [], '-': []}.
Items listed in '+' exist only in the right list,
items listed in '-' exist only in the left list.
"""
res = {'+': [], '-': []}
def partition(objects):
"""Partition the passed object list."""
res = {}
for o in objects:
t = type(o)
if type(o) not in res:
res[t] = []
res[t].append(o)
return res
def get_not_included(foo, | python | {
"resource": ""
} |
q9998 | filter | train | def filter(objects, Type=None, min=-1, max=-1): #PYCHOK muppy filter
"""Filter objects.
The filter can be by type, minimum size, and/or maximum size.
Keyword arguments:
Type -- object type to filter by
min -- minimum object size
| python | {
"resource": ""
} |
q9999 | get_referents | train | def get_referents(object, level=1):
"""Get all referents of an object up to a certain level.
The referents will not be returned in a specific order and
will not contain duplicate objects. Duplicate objects will be removed.
Keyword arguments:
level -- level of indirection to which referents considered.
This function is recursive.
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.