_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def _render_closure(self):
    '''Use a closure so that draw attributes can be saved'''
    # Capture the current draw attributes now; the returned _render
    # callback may run later, after self has been mutated.
    fillcolor = self.fill
    strokecolor = self.stroke
    strokewidth = self.strokewidth

    def _render(cairo_ctx):
        '''
        At the moment this is based on cairo.

        TODO: Need to work out how to move the cairo specific
        bits somewhere else.
        '''
        # Go to initial point (CORNER or CENTER):
        transform = self._call_transform_mode(self._transform)
        if fillcolor is None and strokecolor is None:
            # Fixes _bug_FillStrokeNofillNostroke.bot
            return
        cairo_ctx.set_matrix(transform)
        # Run the path commands on the cairo context:
        self._traverse(cairo_ctx)
        # Matrix affects stroke, so we need to reset it:
        cairo_ctx.set_matrix(cairo.Matrix())
        if fillcolor is not None and strokecolor is not None:
            if strokecolor[3] < 1:
                # Draw onto intermediate surface so that stroke
                # does not overlay fill
                cairo_ctx.push_group()
                cairo_ctx.set_source_rgba(*fillcolor)
                cairo_ctx.fill_preserve()
                e = cairo_ctx.stroke_extents()
                cairo_ctx.set_source_rgba(*strokecolor)
                cairo_ctx.set_operator(cairo.OPERATOR_SOURCE)
                cairo_ctx.set_line_width(strokewidth)
                cairo_ctx.stroke()
                cairo_ctx.pop_group_to_source()
                cairo_ctx.paint()
            else:
                # Fast path if no alpha in stroke
                cairo_ctx.set_source_rgba(*fillcolor)
                cairo_ctx.fill_preserve()
                cairo_ctx.set_source_rgba(*strokecolor)
                cairo_ctx.set_line_width(strokewidth)
                cairo_ctx.stroke()
        elif fillcolor is not None:
            cairo_ctx.set_source_rgba(*fillcolor)
            cairo_ctx.fill()
        elif strokecolor is not None:
            cairo_ctx.set_source_rgba(*strokecolor)
            cairo_ctx.set_line_width(strokewidth)
            cairo_ctx.stroke()
    return _render
"resource": ""
} |
q260101 | BezierPath._linepoint | validation | def _linepoint(self, t, x0, y0, x1, y1):
""" Returns coordinates for point at t on the line.
Calculates the coordinates of x and y for a point at t on a straight line.
The t parameter is a number between 0.0 and 1.0,
x0 and y0 define the starting point of the line,
x1 and y1 the ending point of the line.
"""
# Originally from nodebox-gl
out_x = x0 + t * (x1 - x0)
out_y = y0 + t * (y1 - y0)
return (out_x, out_y) | python | {
"resource": ""
} |
q260102 | BezierPath._linelength | validation | def _linelength(self, x0, y0, x1, y1):
""" Returns the length of the line.
"""
# Originally from nodebox-gl
a = pow(abs(x0 - x1), 2)
b = pow(abs(y0 - y1), 2)
return sqrt(a + b) | python | {
"resource": ""
} |
q260103 | BezierPath._curvepoint | validation | def _curvepoint(self, t, x0, y0, x1, y1, x2, y2, x3, y3, handles=False):
""" Returns coordinates for point at t on the spline.
Calculates the coordinates of x and y for a point at t on the cubic bezier spline,
and its control points, based on the de Casteljau interpolation algorithm.
The t parameter is a number between 0.0 and 1.0,
x0 and y0 define the starting point of the spline,
x1 and y1 its control point,
x3 and y3 the ending point of the spline,
x2 and y2 its control point.
If the handles parameter is set, returns not only the point at t,
but the modified control points of p0 and p3 should this point split the path as well.
"""
# Originally from nodebox-gl
mint = 1 - t
x01 = x0 * mint + x1 * t
y01 = y0 * mint + y1 * t
x12 = x1 * mint + x2 * t
y12 = y1 * mint + y2 * t
x23 = x2 * mint + x3 * t
y23 = y2 * mint + y3 * t
out_c1x = x01 * mint + x12 * t
out_c1y = y01 * mint + y12 * t
out_c2x = x12 * mint + x23 * t
out_c2y = y12 * mint + y23 * t
out_x = out_c1x * mint + out_c2x * t
out_y = out_c1y * mint + out_c2y * t
if not handles:
return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y)
else:
return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y, x01, y01, x23, y23) | python | {
"resource": ""
} |
def _segment_lengths(self, relative=False, n=20):
    """ Returns a list with the lengths of each segment in the path.
    With relative=True the lengths are normalised so they sum to 1.0.
    n is the number of sample points used to approximate curve lengths.
    """
    # From nodebox_gl
    lengths = []
    first = True
    for el in self._get_elements():
        if first is True:
            # The first element establishes the current and close points.
            close_x, close_y = el.x, el.y
            first = False
        elif el.cmd == MOVETO:
            close_x, close_y = el.x, el.y
            lengths.append(0.0)
        elif el.cmd == CLOSE:
            lengths.append(self._linelength(x0, y0, close_x, close_y))
        elif el.cmd == LINETO:
            lengths.append(self._linelength(x0, y0, el.x, el.y))
        elif el.cmd == CURVETO:
            x3, y3, x1, y1, x2, y2 = el.x, el.y, el.c1x, el.c1y, el.c2x, el.c2y
            lengths.append(self._curvelength(x0, y0, x1, y1, x2, y2, x3, y3, n))
        if el.cmd != CLOSE:
            # Remember the current point for the next segment.
            x0 = el.x
            y0 = el.y
    if relative:
        length = sum(lengths)
        try:
            # Relative segment lengths' sum is 1.0.
            # List comprehension instead of map() so a list is returned
            # on both Python 2 and 3 (map() is lazy on Python 3).
            return [l / length for l in lengths]
        except ZeroDivisionError:
            # If the length is zero, just return zero for all segments
            return [0.0] * len(lengths)
    else:
        return lengths
"resource": ""
} |
q260105 | BezierPath._get_length | validation | def _get_length(self, segmented=False, precision=10):
""" Returns the length of the path.
Calculates the length of each spline in the path, using n as a number of points to measure.
When segmented is True, returns a list containing the individual length of each spline
as values between 0.0 and 1.0, defining the relative length of each spline
in relation to the total path length.
"""
# Originally from nodebox-gl
if not segmented:
return sum(self._segment_lengths(n=precision), 0.0)
else:
return self._segment_lengths(relative=True, n=precision) | python | {
"resource": ""
} |
def _get_elements(self):
    '''
    Yields all elements as PathElements
    '''
    # Tuples are lazily promoted to PathElement objects and written back
    # into self._elements, so each conversion happens at most once.
    for index, el in enumerate(self._elements):
        if isinstance(el, tuple):
            el = PathElement(*el)
            self._elements[index] = el
        yield el
"resource": ""
} |
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """ An edge weight map indexed by node id's.
    A dictionary indexed by node id1's in which each value is a
    dictionary of connected node id2's linking to the edge weight.
    If directed, edges go from id1 to id2, but not the other way.
    If stochastic, all the weights for the neighbors of a given node sum to 1.
    A heuristic can be a function that takes two node id's and returns
    an additional cost for movement between the two nodes.
    """
    weights = {}
    for node in graph.nodes:
        weights[node.id] = {}
    for edge in graph.edges:
        a, b = edge.node1.id, edge.node2.id
        if reversed:
            a, b = b, a
        # Edge weight in [0,1] is mapped to a cost in [0.5, 1.0].
        cost = 1.0 - edge.weight * 0.5
        if heuristic:
            cost += heuristic(a, b)
        weights[a][b] = cost
        if not directed:
            weights[b][a] = weights[a][b]
    if stochastic:
        # Normalise each node's outgoing weights so they sum to 1.
        for a in weights:
            total = sum(weights[a].values())
            for b in weights[a]:
                weights[a][b] /= total
    return weights
"resource": ""
} |
def get_child_by_name(parent, name):
    """
    Recursively search the gtk container `parent` for the widget
    named `name`. Returns the widget, or None when not found.
    """
    # http://stackoverflow.com/questions/2072976/access-to-widget-in-gtk
    def search(widget):
        if widget.get_name() == name:
            return widget
        try:
            children = widget.get_children()
        except AttributeError:
            # Leaf widget: no get_children().
            return None
        for child in children:
            found = search(child)
            if found is not None:
                return found
        return None
    return search(parent)
"resource": ""
} |
def sbot_executable():
    """
    Find shoebot executable

    Resolution is driven by the gsettings 'current-virtualenv' key:
    'Default' uses PATH, 'System' looks for an sbot outside the active
    virtualenv, anything else is treated as a virtualenv directory.
    """
    gsettings=load_gsettings()
    venv = gsettings.get_string('current-virtualenv')
    if venv == 'Default':
        sbot = which('sbot')
    elif venv == 'System':
        # find system python
        env_venv = os.environ.get('VIRTUAL_ENV')
        if not env_venv:
            return which('sbot')
        # First sbot in path that is not in current venv
        for p in os.environ['PATH'].split(os.path.pathsep):
            sbot='%s/sbot' % p
            if not p.startswith(env_venv) and os.path.isfile(sbot):
                return sbot
        # NOTE(review): if no match is found above, execution falls
        # through with sbot bound to the last PATH entry -- confirm.
    else:
        sbot = os.path.join(venv, 'bin/sbot')
        if not os.path.isfile(sbot):
            print('Shoebot not found, reverting to System shoebot')
            sbot = which('sbot')
    return os.path.realpath(sbot)
"resource": ""
} |
q260110 | Page._description | validation | def _description(self):
""" Returns the meta description in the page.
"""
meta = self.find("meta", {"name":"description"})
if isinstance(meta, dict) and \
meta.has_key("content"):
return meta["content"]
else:
return u"" | python | {
"resource": ""
} |
q260111 | Page._keywords | validation | def _keywords(self):
""" Returns the meta keywords in the page.
"""
meta = self.find("meta", {"name":"keywords"})
if isinstance(meta, dict) and \
meta.has_key("content"):
keywords = [k.strip() for k in meta["content"].split(",")]
else:
keywords = []
return keywords | python | {
"resource": ""
} |
def sorted(list, cmp=None, reversed=False):
    """ Returns a sorted copy of the list.
    cmp is an optional old-style comparison function;
    reversed=True returns the result in descending order.
    """
    # Local import keeps the module namespace unchanged.
    import functools
    copy = [x for x in list]
    if cmp is not None:
        # list.sort(cmp) was removed in Python 3; cmp_to_key
        # gives identical ordering and works on Python 2.7+ too.
        copy.sort(key=functools.cmp_to_key(cmp))
    else:
        copy.sort()
    if reversed:
        copy.reverse()
    return copy
"resource": ""
} |
def unique(list):
    """ Returns a copy of the list without duplicates,
    preserving the order of first occurrence.
    """
    # Plain loop instead of a list comprehension used for its
    # side effect, which is misleading and builds a junk list.
    seen = []
    for x in list:
        if x not in seen:
            seen.append(x)
    return seen
return unique | python | {
"resource": ""
} |
def clique(graph, id):
    """ Returns the largest possible clique for the node with given id. """
    # Greedy construction: a node joins only if it is connected
    # to every member gathered so far.
    members = [id]
    for node in graph.nodes:
        connected_to_all = True
        for member in members:
            if node.id == member or graph.edge(node.id, member) == None:
                connected_to_all = False
                break
        if connected_to_all:
            members.append(node.id)
    return members
"resource": ""
} |
def cliques(graph, threshold=3):
    """ Returns all the cliques in the graph of at least the given size. """
    found = []
    for node in graph.nodes:
        candidate = clique(graph, node.id)
        if len(candidate) >= threshold:
            # Sort so the same clique reached from different seed
            # nodes is only recorded once.
            candidate.sort()
            if candidate not in found:
                found.append(candidate)
    return found
"resource": ""
} |
def render(self, size, frame, drawqueue):
    '''
    Ask the implementation for a render context, let the drawqueue
    draw into it, then notify that rendering has finished.
    Returns the render context.
    '''
    ctx = self.create_rcontext(size, frame)
    drawqueue.render(ctx)
    self.rendering_finished(size, frame, ctx)
    return ctx
"resource": ""
} |
def hexDump(bytes):
    """Useful utility; prints the string in hexadecimal,
    eight bytes per line with a repr of each full line,
    followed by any trailing partial line."""
    if not bytes:
        # Guard: the original raised NameError on empty input
        # (the trailing-line branch referenced the loop variable).
        return
    for i in range(len(bytes)):
        sys.stdout.write("%2x " % (ord(bytes[i])))
        if (i + 1) % 8 == 0:
            print(repr(bytes[i - 7:i + 1]))
    if len(bytes) % 8 != 0:
        # The original sliced from i - len % 8, one byte too early;
        # the trailing line is exactly the last len % 8 bytes.
        print("".rjust(11) + " " + repr(bytes[len(bytes) - len(bytes) % 8:]))
"resource": ""
} |
def readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.
    Returns (value, remaining_data).
    """
    # ">ll" = big-endian pair of signed 32-bit ints (high word, low word).
    high, low = struct.unpack(">ll", data[0:8])
    # NOTE: Python 2 only -- `long` does not exist on Python 3.
    big = (long(high) << 32) + low
    rest = data[8:]
    return (big, rest)
"resource": ""
} |
def decodeOSC(data):
    """Converts a typetagged OSC message to a Python list.

    A "#bundle" address is followed by an 8-byte timetag and
    length-prefixed sub-messages, each decoded recursively.
    A plain message decodes to [address, typetags, arg, ...].
    """
    table = {"i": readInt, "f": readFloat, "s": readString, "b": readBlob}
    decoded = []
    address, rest = readString(data)
    typetags = ""
    if address == "#bundle":
        time, rest = readLong(rest)
        # decoded.append(address)
        # decoded.append(time)
        while len(rest) > 0:
            length, rest = readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest) > 0:
        typetags, rest = readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        # startswith() also guards against an empty typetag string,
        # which previously raised IndexError on typetags[0].
        if typetags.startswith(","):
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            print("Oops, typetag lacks the magic ,")
    return decoded
"resource": ""
} |
def handle(self, data, source = None):
    """Given OSC data, tries to call the callback with the
    right address."""
    # decodeOSC turns the raw packet into [address, typetags, args...]
    # (or a nested list for bundles); dispatch routes it by address.
    decoded = decodeOSC(data)
    self.dispatch(decoded, source)
"resource": ""
} |
def dispatch(self, message, source = None):
    """Sends decoded OSC data to an appropriate callback.

    message is either [address, typetags, args...] or a list of such
    messages (a decoded bundle), each of which is dispatched in turn.
    Lookup and callback errors are reported on stdout, never raised.
    """
    # Pre-bind so the handlers below never hit an unbound local.
    address = None
    try:
        if type(message[0]) == str:
            # got a single message
            address = message[0]
            self.callbacks[address](message)
        elif type(message[0]) == list:
            # got a bundle: dispatch each contained message
            for msg in message:
                self.dispatch(msg)
    except KeyError as key:
        print('address %s not found, %s: %s' % (address, key, message))
        pprint.pprint(message)
    except IndexError as e:
        print('%s: %s' % (e, message))
    except Exception as e:
        # The original read `except None, e`, which can never catch
        # anything; callback errors are reported here instead.
        print('Exception in %s callback : %s' % (address, e))
    return
"resource": ""
} |
def add(self, callback, name):
    """Adds a callback to our set of callbacks,
    or removes the callback with name if callback
    is None."""
    # Identity check: None is a singleton, `is` is the idiom.
    if callback is None:
        del self.callbacks[name]
    else:
        self.callbacks[name] = callback
"resource": ""
} |
def find_example_dir():
    """
    Find examples dir .. a little bit ugly..

    Probes the shoebot installation (via pkg_resources, run in a
    subprocess so it uses shoebot's python env) for the examples
    directory; returns its path or None.
    """
    # Replace %s with directory to check for shoebot menus.
    code_stub = textwrap.dedent("""
        from pkg_resources import resource_filename, Requirement, DistributionNotFound
        try:
            print(resource_filename(Requirement.parse('shoebot'), '%s'))
        except DistributionNotFound:
            pass
        """)
    # Needs to run in same python env as shoebot (may be different to gedits)
    code = code_stub % 'share/shoebot/examples'
    cmd = ["python", "-c", code]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, errors = p.communicate()
    if errors:
        print('Shoebot experienced errors searching for install and examples.')
        print('Errors:\n{0}'.format(errors.decode('utf-8')))
        return None
    else:
        examples_dir = output.decode('utf-8').strip()
        if os.path.isdir(examples_dir):
            return examples_dir
    # If user is running 'setup.py develop' then examples could be right here
    #code = "from pkg_resources import resource_filename, Requirement; print resource_filename(Requirement.parse('shoebot'), 'examples/')"
    code = code_stub % 'examples/'
    cmd = ["python", "-c", code]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, errors = p.communicate()
    examples_dir = output.decode('utf-8').strip()
    if os.path.isdir(examples_dir):
        return examples_dir
    if examples_dir:
        print('Shoebot could not find examples at: {0}'.format(examples_dir))
    else:
        print('Shoebot could not find install dir and examples.')
"resource": ""
} |
def eof(self):
    """
    Check whether there is no more content to expect.
    """
    # Evaluates as ((not alive) and queue-empty) or fd-closed, because
    # `and` binds tighter than `or`.
    # NOTE(review): a closed fd reports EOF even while the queue still
    # holds unread lines -- confirm this is the intended behaviour.
    return (not self.is_alive()) and self._queue.empty() or self._fd.closed
"resource": ""
} |
def live_source_load(self, source):
    """
    Send new source code to the bot.

    :param source: Python source to load.

    The source is only transmitted when it differs from the last
    version sent; it is base64 encoded so the command stream stays
    a single line.
    """
    source = source.rstrip('\n')
    if source != self.source:
        self.source = source
        # ascii-encode first, then base64 for transport.
        b64_source = base64.b64encode(bytes(bytearray(source, "ascii")))
        self.send_command(CMD_LOAD_BASE64, b64_source)
"resource": ""
} |
def close(self):
    """
    Close outputs of process and mark it as no longer running.
    """
    for stream in (self.process.stdout, self.process.stderr):
        stream.close()
    self.running = False
"resource": ""
} |
def get_command_responses(self):
    """
    Get responses to commands sent
    """
    # Yields a single None first when any responses are pending, then
    # drains the queue, skipping None entries.
    # NOTE(review): the leading None looks like a sentinel for callers
    # -- confirm consumers rely on it before changing.
    if not self.response_queue.empty():
        yield None
    while not self.response_queue.empty():
        line = self.response_queue.get()
        if line is not None:
            yield line
"resource": ""
} |
def ensure_pycairo_context(self, ctx):
    """
    If ctx is a cairocffi Context convert it to a PyCairo Context
    otherwise return the original context

    :param ctx: either a cairocffi or PyCairo context
    :return: a PyCairo context
    """
    # self.cairocffi is falsy when cairocffi is unavailable, so the
    # isinstance check is skipped entirely in that case.
    if self.cairocffi and isinstance(ctx, self.cairocffi.Context):
        # Deferred import: only needed on the cairocffi code path.
        from shoebot.util.cairocffi.cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
        return _UNSAFE_cairocffi_context_to_pycairo(ctx)
    else:
        return ctx
"resource": ""
} |
def pangocairo_create_context(cr):
    """
    If python-gi-cairo is not installed, using PangoCairo.create_context
    dies with an unhelpful KeyError; check for that and output something
    useful.

    :param cr: a cairo context
    :return: a PangoCairo context wrapping cr
    :raises ShoebotInstallError: when python-gi-cairo is missing
    """
    # TODO move this to core.backend
    try:
        return PangoCairo.create_context(cr)
    except KeyError as e:
        # This exact args tuple is what PyGI raises when the cairo
        # foreign-struct support (python-gi-cairo) is absent.
        if e.args == ('could not find foreign type Context',):
            raise ShoebotInstallError("Error creating PangoCairo missing dependency: python-gi-cairo")
        else:
            raise
"resource": ""
} |
def is_list(str):
    """ Determines if an item in a paragraph is a list.
    True when every line of the markup starts with "*" or a
    number like "1.", as parsed by parse_paragraphs().
    It can be drawn with draw_list().
    """
    numbered = re.compile(r"^([0-9]{1,3}\. )")
    for line in str.split("\n"):
        stripped = line.replace("\t", "").lstrip()
        if not (stripped.startswith("*") or numbered.search(stripped)):
            return False
    return True
"resource": ""
} |
def draw_math(str, x, y, alpha=1.0):
    """ Uses mimetex to generate a GIF-image from the LaTeX equation.
    Draws the image at (x, y) and returns its (width, height).
    """
    # Grab the NodeBox drawing context when running inside web.
    try: from web import _ctx
    except: pass
    # Strip the surrounding <math>...</math> tags.
    str = re.sub("</{0,1}math>", "", str.strip())
    img = mimetex.gif(str)
    w, h = _ctx.imagesize(img)
    _ctx.image(img, x, y, alpha=alpha)
    return w, h
"resource": ""
} |
def draw_list(markup, x, y, w, padding=5, callback=None):
    """ Draws list markup with indentation in NodeBox.
    Draw list markup at x, y coordinates
    using indented bullets or numbers.
    The callback is a command that takes a str and an int.
    """
    try: from web import _ctx
    except: pass
    i = 1
    for chunk in markup.split("\n"):
        if callback != None:
            callback(chunk, i)
        m = re.search("^([0-9]{1,3}\. )", chunk.lstrip())
        if m:
            # Numbered item: indent by digit position, reserve width
            # for a three-digit number plus dot.
            indent = re.search("[0-9]", chunk).start()*padding*2
            bullet = m.group(1)
            dx = textwidth("000.")
            chunk = chunk.lstrip(m.group(1)+"\t")
        if chunk.lstrip().startswith("*"):
            # Bulleted item.
            indent = chunk.find("*")*padding*2
            bullet = u"•"
            dx = textwidth("*")
            chunk = chunk.lstrip("* \t")
        # NOTE(review): a line that is neither numbered nor bulleted
        # reuses bullet/indent/dx from the previous iteration (NameError
        # on the very first line) -- callers should check is_list() first.
        _ctx.text(bullet, x+indent, y)
        dx += padding + indent
        _ctx.text(chunk, x+dx, y, width=w-dx)
        y += _ctx.textheight(chunk, width=w-dx)
        y += _ctx.textheight(" ") * 0.25
        i += 1
"resource": ""
} |
def draw_table(table, x, y, w, padding=5):
    """ This is a very poor algorithm to draw Wikipedia tables in NodeBox.
    Draws `table` at (x, y) with total width w; padding is applied
    inside every cell. Uses the current fill color for chrome.
    """
    try: from web import _ctx
    except: pass
    f = _ctx.fill()
    _ctx.stroke(f)
    h = _ctx.textheight(" ") + padding*2
    row_y = y
    # Optional title bar above the table.
    if table.title != "":
        _ctx.fill(f)
        _ctx.rect(x, row_y, w, h)
        _ctx.fill(1)
        _ctx.text(table.title, x+padding, row_y+_ctx.fontsize()+ padding)
        row_y += h
    # A table of flags marking how long a cell
    # from a previous row is still spanning in a column.
    rowspans = [1 for i in range(10)]
    previous_cell_w = 0
    for row in table:
        cell_x = x
        # The width of a cell is the total table width
        # evenly divided by the number of cells.
        # Previous rows' cells still spanning will push cells
        # to the right and decrease their width.
        cell_w = 1.0 * w
        cell_w -= previous_cell_w * len([n for n in rowspans if n > 1])
        cell_w /= len(row)
        # The height of each cell is the highest cell in the row.
        # The height depends on the amount of text in the cell.
        cell_h = 0
        for cell in row:
            this_h = _ctx.textheight(cell, width=cell_w-padding*2) + padding*2
            cell_h = max(cell_h, this_h)
        # Traverse each cell in this row.
        i = 0
        for cell in row:
            # If a previous row's cell is still spanning,
            # push this cell to the right.
            if rowspans[i] > 1:
                rowspans[i] -= 1
                cell_x += previous_cell_w
                i += 1
            # Get the rowspan attribute for this cell.
            m = re.search("rowspan=\"(.*?)\"", cell.properties)
            if m:
                rowspan = int(m.group(1))
                rowspans[i] = rowspan
            else:
                rowspan = 1
            # Padded cell text.
            # Horizontal line above each cell.
            # Vertical line before each cell.
            _ctx.fill(f)
            _ctx.text(cell, cell_x+padding, row_y+_ctx.fontsize()+padding, cell_w-padding*2)
            _ctx.line(cell_x, row_y, cell_x+cell_w, row_y)
            if cell_x > x:
                _ctx.nofill()
                _ctx.line(cell_x, row_y, cell_x, row_y+cell_h)
            cell_x += cell_w
            i += 1
        # Move to next row.
        row_y += cell_h
        previous_cell_w = cell_w
    # Table's bounding rectangle.
    _ctx.nofill()
    _ctx.rect(x, y, w, row_y-y)
"resource": ""
} |
def parse(self, light=False):
    """ Parses data from Wikipedia page markup.
    The markup comes from Wikipedia's edit page.
    We parse it here into objects containing plain text.
    The light version parses only links to other articles, it's faster than a full parse.
    """
    markup = self.markup
    self.disambiguation = self.parse_disambiguation(markup)
    self.categories = self.parse_categories(markup)
    self.links = self.parse_links(markup)
    if not light:
        # Conversion of HTML markup to Wikipedia markup.
        markup = self.convert_pre(markup)
        markup = self.convert_li(markup)
        markup = self.convert_table(markup)
        markup = replace_entities(markup)
        # Harvest references from the markup
        # and replace them by footnotes.
        # Normalise {{Cite / "{{ cite" spellings first.
        markup = markup.replace("{{Cite", "{{cite")
        markup = re.sub("\{\{ {1,2}cite", "{{cite", markup)
        self.references, markup = self.parse_references(markup)
        # Make sure there are no legend linebreaks in image links.
        # Then harvest images and strip them from the markup.
        markup = re.sub("\n+(\{\{legend)", "\\1", markup)
        self.images, markup = self.parse_images(markup)
        self.images.extend(self.parse_gallery_images(markup))
        # NOTE: each pass below consumes the markup left by the
        # previous one, so the order of these calls matters.
        self.paragraphs = self.parse_paragraphs(markup)
        self.tables = self.parse_tables(markup)
        self.translations = self.parse_translations(markup)
        self.important = self.parse_important(markup)
"resource": ""
} |
def parse_links(self, markup):
    """ Returns a sorted list of internal Wikipedia links in the markup.
    # A Wikipedia link looks like:
    # [[List of operating systems#Embedded | List of embedded operating systems]]
    # It does not contain a colon, this indicates images, users, languages, etc.
    The return value contains only the first part of each link,
    without the anchor or display text.
    """
    found = []
    for link in re.findall(self.re["link"], markup):
        # We don't like [[{{{1|Universe (disambiguation)}}}]]
        if "{" in link:
            link = re.sub("\{{1,3}[0-9]{0,2}\|", "", link)
            link = link.replace("{", "")
            link = link.replace("}", "")
        # Drop the display text ("|...") and the "#anchor"; keep the page.
        target = link.split("|")[0].split("#")[0].strip()
        if target not in found:
            found.append(target)
    found.sort()
    return found
"resource": ""
} |
def parse_images(self, markup, treshold=6):
    """ Returns a list of images found in the markup.
    An image has a pathname, a description in plain text
    and a list of properties Wikipedia uses to size and place images.
    # A Wikipedia image looks like:
    # [[Image:Columbia Supercomputer - NASA Advanced Supercomputing Facility.jpg|right|thumb|
    # The [[NASA]] [[Columbia (supercomputer)|Columbia Supercomputer]].]]
    # Parts are separated by "|".
    # The first part is the image file, the last part can be a description.
    # In between are display properties, like "right" or "thumb".
    Returns (images, markup) with the image markup stripped out.
    """
    images = []
    m = re.findall(self.re["image"], markup)
    for p in m:
        # Re-balance the outer [[ ]] in case the description nests links.
        p = self.parse_balanced_image(p)
        img = p.split("|")
        path = img[0].replace("[[Image:", "").strip()
        description = u""
        links = {}
        properties = []
        if len(img) > 1:
            img = "|".join(img[1:])
            links = self.parse_links(img)
            properties = self.plain(img).split("|")
            description = u""
            # Best guess: an image description is normally
            # longer than six characters, properties like
            # "thumb" and "right" are less than six characters.
            if len(properties[-1]) > treshold:
                description = properties[-1]
                properties = properties[:-1]
        img = WikipediaImage(path, description, links, properties)
        images.append(img)
        markup = markup.replace(p, "")
    return images, markup.strip()
"resource": ""
} |
def parse_balanced_image(self, markup):
    """ Corrects Wikipedia image markup.
    Image descriptions can contain link markup themselves, so the
    outer "[" and "]" brackets delimiting the image may be unbalanced
    (e.g. no [[ ]] ]]). Returns the prefix of markup up to the point
    where brackets balance. Called from parse_images().
    """
    open_count = 0
    close_count = 0
    for i, ch in enumerate(markup):
        if ch == "[":
            open_count += 1
        elif ch == "]":
            close_count += 1
        if open_count == close_count:
            return markup[:i + 1]
    return markup
"resource": ""
} |
def connect_table(self, table, chunk, markup):
    """ Creates a link from the table to paragraph and vice versa.
    Finds the first heading above the table in the markup.
    This is the title of the paragraph the table belongs to.
    """
    k = markup.find(chunk)
    # Search backwards from the table for the nearest heading line ("\n=...").
    i = markup.rfind("\n=", 0, k)
    j = markup.find("\n", i+1)
    paragraph_title = markup[i:j].strip().strip("= ")
    for paragraph in self.paragraphs:
        if paragraph.title == paragraph_title:
            paragraph.tables.append(table)
            table.paragraph = paragraph
"resource": ""
} |
def parse_tables(self, markup):
    """ Returns a list of tables in the markup.
    A Wikipedia table looks like:
    {| border="1"
    |-
    |Cell 1 (no modifier - not aligned)
    |-
    |align="right" |Cell 2 (right aligned)
    |-
    |}
    """
    tables = []
    m = re.findall(self.re["table"], markup)
    for chunk in m:
        table = WikipediaTable()
        table.properties = chunk.split("\n")[0].strip("{|").strip()
        self.connect_table(table, chunk, markup)
        # Tables start with "{|".
        # On the same line can be properties, e.g. {| border="1"
        # The table heading starts with "|+".
        # A new row in the table starts with "|-".
        # The end of the table is marked with "|}".
        row = None
        for chunk in chunk.split("\n"):
            chunk = chunk.strip()
            if chunk.startswith("|+"):
                title = self.plain(chunk.strip("|+"))
                table.title = title
            elif chunk.startswith("|-"):
                # Row separator: flush the row built so far.
                if row:
                    row.properties = chunk.strip("|-").strip()
                    table.append(row)
                row = None
            elif chunk.startswith("|}"):
                pass
            elif chunk.startswith("|") \
              or chunk.startswith("!"):
                row = self.parse_table_row(chunk, row)
        # Append the last row.
        if row: table.append(row)
        if len(table) > 0:
            tables.append(table)
    return tables
"resource": ""
} |
def parse_categories(self, markup):
    """ Returns a list of categories the page belongs to.
    # A Wikipedia category link looks like:
    # [[Category:Computing]]
    # This indicates the page is included in the given category.
    # If "Category" is preceded by ":" this indicates a link to a category.
    """
    categories = []
    for match in re.findall(self.re["category"], markup):
        # Drop any "|display text" part; keep only the category page.
        page = match.split("|")[0].strip()
        if page not in categories:
            categories.append(page)
    return categories
"resource": ""
} |
def parse_important(self, markup):
    """ Returns a list of words that appear in bold in the article.
    Things like table titles are not added to the list,
    these are probably bold because it makes the layout nice,
    not necessarily because they are important.
    """
    titles = [table.title for table in self.tables]
    important = []
    for match in re.findall(self.re["bold"], markup):
        text = self.plain(match)
        if text not in titles:
            important.append(text.lower())
    return important
"resource": ""
} |
def sanitize(self, val):
    """Given a Variable and a value, cleans it out"""
    # NUMBER/TEXT/BOOLEAN are module-level type constants.
    # NOTE(review): any other self.type falls through and returns None
    # implicitly -- confirm that is intended.
    if self.type == NUMBER:
        try:
            # Clamp into the variable's [min, max] range.
            return clamp(self.min, self.max, float(val))
        except ValueError:
            return 0.0
    elif self.type == TEXT:
        try:
            # Python 2 only: `unicode` does not exist on Python 3.
            return unicode(str(val), "utf_8", "replace")
        except:
            # Bare except deliberately swallows any conversion failure.
            return ""
    elif self.type == BOOLEAN:
        if unicode(val).lower() in ("true", "1", "yes"):
            return True
        else:
            return False
"resource": ""
} |
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    # types.ListType / types.TupleType exist only on Python 2.
    return hasattr(l, '__iter__') \
        or (type(l) in (types.ListType, types.TupleType))
"resource": ""
} |
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # Python 2: unicode/basestring exist.
        string_types = (unicode, basestring)
    except NameError:
        # Python 3: all text is str.
        return isinstance(s, str)
    return isinstance(s, string_types)
"resource": ""
} |
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            #It's a map. Merge it.
            for k,v in portion.items():
                built[k] = v
        elif isList(portion):
            #It's a list. Map each item to the default.
            for k in portion:
                built[k] = default
        else:
            #It's a scalar. Map it to the default.
            built[portion] = default
    return built
"resource": ""
} |
q260146 | PageElement.setup | validation | def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self | python | {
"resource": ""
} |
def extract(self):
    """Destructively rips this element out of the tree.
    Detaches it from its parent and repairs both the parse-order
    (previous/next) and sibling chains around the gap. Returns self."""
    if self.parent:
        try:
            self.parent.contents.remove(self)
        except ValueError:
            pass
    #Find the two elements that would be next to each other if
    #this element (and any children) hadn't been parsed. Connect
    #the two.
    lastChild = self._lastRecursiveChild()
    nextElement = lastChild.next
    if self.previous:
        self.previous.next = nextElement
    if nextElement:
        nextElement.previous = self.previous
    # Sever this subtree's outward pointers.
    self.previous = None
    lastChild.next = None
    self.parent = None
    # Repair the sibling chain around the removed element.
    if self.previousSibling:
        self.previousSibling.nextSibling = self.nextSibling
    if self.nextSibling:
        self.nextSibling.previousSibling = self.previousSibling
    self.previousSibling = self.nextSibling = None
    return self
"resource": ""
} |
q260148 | PageElement._lastRecursiveChild | validation | def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild | python | {
"resource": ""
} |
q260149 | PageElement.findNext | validation | def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs) | python | {
"resource": ""
} |
q260150 | PageElement.findAllNext | validation | def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs) | python | {
"resource": ""
} |
q260151 | PageElement.findNextSibling | validation | def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs) | python | {
"resource": ""
} |
q260152 | PageElement.findNextSiblings | validation | def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs) | python | {
"resource": ""
} |
q260153 | PageElement.findPrevious | validation | def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) | python | {
"resource": ""
} |
q260154 | PageElement.findAllPrevious | validation | def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs) | python | {
"resource": ""
} |
q260155 | PageElement.findPreviousSibling | validation | def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs) | python | {
"resource": ""
} |
q260156 | PageElement.findPreviousSiblings | validation | def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs) | python | {
"resource": ""
} |
q260157 | PageElement.findParent | validation | def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r | python | {
"resource": ""
} |
q260158 | PageElement.findParents | validation | def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs) | python | {
"resource": ""
} |
q260159 | PageElement._findAll | validation | def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results | python | {
"resource": ""
} |
q260160 | PageElement.toEncoding | validation | def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s | python | {
"resource": ""
} |
q260161 | Tag._invert | validation | def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i | python | {
"resource": ""
} |
q260162 | Tag._convertEntities | validation | def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x | python | {
"resource": ""
} |
q260163 | Tag.decompose | validation | def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
self.extract() | python | {
"resource": ""
} |
q260164 | Tag.renderContents | validation | def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string.."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s) | python | {
"resource": ""
} |
q260165 | Tag.find | validation | def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r | python | {
"resource": ""
} |
q260166 | Tag.findAll | validation | def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs) | python | {
"resource": ""
} |
q260167 | Tag._getAttrMap | validation | def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap | python | {
"resource": ""
} |
q260168 | BeautifulStoneSoup.convert_charref | validation | def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127 : # ASCII ends at 127, not 255
return
return self.convert_codepoint(n) | python | {
"resource": ""
} |
q260169 | BeautifulStoneSoup.isSelfClosingTag | validation | def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name) | python | {
"resource": ""
} |
q260170 | BeautifulStoneSoup._toStringSubclass | validation | def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass) | python | {
"resource": ""
} |
q260171 | BeautifulStoneSoup.handle_pi | validation | def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction) | python | {
"resource": ""
} |
q260172 | BeautifulStoneSoup.handle_charref | validation | def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data) | python | {
"resource": ""
} |
q260173 | BeautifulStoneSoup.parse_declaration | validation | def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j | python | {
"resource": ""
} |
q260174 | BeautifulSoup.start_meta | validation | def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True | python | {
"resource": ""
} |
q260175 | UnicodeDammit._subMSChar | validation | def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if type(sub) == types.TupleType:
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub | python | {
"resource": ""
} |
q260176 | UnicodeDammit._toUnicode | validation | def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata | python | {
"resource": ""
} |
q260177 | UnicodeDammit._detectEncoding | validation | def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding | python | {
"resource": ""
} |
q260178 | shoebot_example | validation | def shoebot_example(**shoebot_kwargs):
"""
Decorator to run some code in a bot instance.
"""
def decorator(f):
def run():
from shoebot import ShoebotInstallError # https://github.com/shoebot/shoebot/issues/206
print(" Shoebot - %s:" % f.__name__.replace("_", " "))
try:
import shoebot
outputfile = "/tmp/shoebot-%s.png" % f.__name__
bot = shoebot.create_bot(outputfile=outputfile)
f(bot)
bot.finish()
print(' [passed] : %s' % outputfile)
print('')
except ShoebotInstallError as e:
print(' [failed]', e.args[0])
print('')
except Exception:
print(' [failed] - traceback:')
for line in traceback.format_exc().splitlines():
print(' %s' % line)
print('')
return run
return decorator | python | {
"resource": ""
} |
q260179 | ShoebotWidget.scale_context_and_center | validation | def scale_context_and_center(self, cr):
"""
Scale context based on difference between bot size and widget
"""
bot_width, bot_height = self.bot_size
if self.width != bot_width or self.height != bot_height:
# Scale up by largest dimension
if self.width < self.height:
scale_x = float(self.width) / float(bot_width)
scale_y = scale_x
cr.translate(0, (self.height - (bot_height * scale_y)) / 2.0)
elif self.width > self.height:
scale_y = float(self.height) / float(bot_height)
scale_x = scale_y
cr.translate((self.width - (bot_width * scale_x)) / 2.0, 0)
else:
scale_x = 1.0
scale_y = 1.0
cr.scale(scale_x, scale_y)
self.input_device.scale_x = scale_y
self.input_device.scale_y = scale_y | python | {
"resource": ""
} |
q260180 | ShoebotWidget.draw | validation | def draw(self, widget, cr):
'''
Draw just the exposed part of the backing store, scaled to fit
'''
if self.bot_size is None:
# No bot to draw yet.
self.draw_default_image(cr)
return
cr = driver.ensure_pycairo_context(cr)
surface = self.backing_store.surface
cr.set_source_surface(surface)
cr.paint() | python | {
"resource": ""
} |
q260181 | ShoebotWidget.create_rcontext | validation | def create_rcontext(self, size, frame):
'''
Creates a recording surface for the bot to draw on
:param size: The width and height of bot
'''
self.frame = frame
width, height = size
meta_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (0, 0, width, height))
ctx = cairo.Context(meta_surface)
return ctx | python | {
"resource": ""
} |
q260182 | JSONEncoder.encode | validation | def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = list(self.iterencode(o))
return ''.join(chunks) | python | {
"resource": ""
} |
q260183 | GtkInputDeviceMixin.get_key_map | validation | def get_key_map(self):
'''
Return a dict in the form of
SHOEBOT_KEY_NAME, GTK_VALUE
Shoebot key names look like KEY_LEFT, whereas Gdk uses KEY_Left
- Shoebot key names are derived from Nodebox 1, which was a mac
app.
'''
kdict = {}
for gdk_name in dir(Gdk):
nb_name = gdk_name.upper()
kdict[nb_name] = getattr(Gdk, gdk_name)
return kdict | python | {
"resource": ""
} |
q260184 | CairoImageSink._output_file | validation | def _output_file(self, frame):
"""
If filename was used output a filename, along with multifile
numbered filenames will be used.
If buff was specified it is returned.
:return: Output buff or filename.
"""
if self.buff:
return self.buff
elif self.multifile:
return self.file_root + "_%03d" % frame + self.file_ext
else:
return self.filename | python | {
"resource": ""
} |
q260185 | CairoImageSink.create_rcontext | validation | def create_rcontext(self, size, frame):
"""
Called when CairoCanvas needs a cairo context to draw on
"""
if self.format == 'pdf':
surface = cairo.PDFSurface(self._output_file(frame), *size)
elif self.format in ('ps', 'eps'):
surface = cairo.PSSurface(self._output_file(frame), *size)
elif self.format == 'svg':
surface = cairo.SVGSurface(self._output_file(frame), *size)
elif self.format == 'surface':
surface = self.target
else:
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, *size)
return cairo.Context(surface) | python | {
"resource": ""
} |
q260186 | CairoImageSink.rendering_finished | validation | def rendering_finished(self, size, frame, cairo_ctx):
"""
Called when CairoCanvas has rendered a bot
"""
surface = cairo_ctx.get_target()
if self.format == 'png':
surface.write_to_png(self._output_file(frame))
surface.finish()
surface.flush() | python | {
"resource": ""
} |
q260187 | CairoCanvas.output_closure | validation | def output_closure(self, target, file_number=None):
'''
Function to output to a cairo surface
target is a cairo Context or filename
if file_number is set, then files will be numbered
(this is usually set to the current frame number)
'''
def output_context(ctx):
target_ctx = target
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return target_ctx
def output_surface(ctx):
target_ctx = cairo.Context(target)
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return target_ctx
def output_file(ctx):
root, extension = os.path.splitext(target)
if file_number:
filename = '%s_%04d%s' % (root, file_number, extension)
else:
filename = target
extension = extension.lower()
if extension == '.png':
surface = ctx.get_target()
surface.write_to_png(target)
elif extension == '.pdf':
target_ctx = cairo.Context(cairo.PDFSurface(filename, *self.size_or_default()))
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
elif extension in ('.ps', '.eps'):
target_ctx = cairo.Context(cairo.PSSurface(filename, *self.size_or_default()))
if extension == '.eps':
target_ctx.set_eps(extension='.eps')
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
elif extension == '.svg':
target_ctx = cairo.Context(cairo.SVGSurface(filename, *self.size_or_default()))
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return filename
if isinstance(target, cairo.Context):
return output_context
elif isinstance(target, cairo.Surface):
return output_surface
else:
return output_file | python | {
"resource": ""
} |
q260188 | UrbanDictionaryDefinition._parse | validation | def _parse(self):
""" Strips links from the definition and gathers them in a links property.
"""
p1 = "\[.*?\](.*?)\[\/.*?\]"
p2 = "\[(.*?)\]"
self.links = []
for p in (p1,p2):
for link in re.findall(p, self.description):
self.links.append(link)
self.description = re.sub(p, "\\1", self.description)
self.description = self.description.strip() | python | {
"resource": ""
} |
q260189 | create_canvas | validation | def create_canvas(src, format=None, outputfile=None, multifile=False, buff=None, window=False, title=None,
fullscreen=None, show_vars=False):
"""
Create canvas and sink for attachment to a bot
canvas is what draws images, 'sink' is the final consumer of the images
:param src: Defaults for title or outputfile if not specified.
:param format: CairoImageSink image format, if using buff instead of outputfile
:param buff: CairoImageSink buffer object to send output to
:param outputfile: CairoImageSink output filename e.g. "hello.svg"
:param multifile: CairoImageSink if True,
:param title: ShoebotWindow - set window title
:param fullscreen: ShoebotWindow - set window title
:param show_vars: ShoebotWindow - display variable window
Two kinds of sink are provided: CairoImageSink and ShoebotWindow
ShoebotWindow
Displays a window to draw shoebot inside.
CairoImageSink
Output to a filename (or files if multifile is set), or a buffer object.
"""
from core import CairoCanvas, CairoImageSink # https://github.com/shoebot/shoebot/issues/206
if outputfile:
sink = CairoImageSink(outputfile, format, multifile, buff)
elif window or show_vars:
from gui import ShoebotWindow
if not title:
if src and os.path.isfile(src):
title = os.path.splitext(os.path.basename(src))[0] + ' - Shoebot'
else:
title = 'Untitled - Shoebot'
sink = ShoebotWindow(title, show_vars, fullscreen=fullscreen)
else:
if src and isinstance(src, cairo.Surface):
outputfile = src
format = 'surface'
elif src and os.path.isfile(src):
outputfile = os.path.splitext(os.path.basename(src))[0] + '.' + (format or 'svg')
else:
outputfile = 'output.svg'
sink = CairoImageSink(outputfile, format, multifile, buff)
canvas = CairoCanvas(sink)
return canvas | python | {
"resource": ""
} |
q260190 | create_bot | validation | def create_bot(src=None, grammar=NODEBOX, format=None, outputfile=None, iterations=1, buff=None, window=False,
title=None, fullscreen=None, server=False, port=7777, show_vars=False, vars=None, namespace=None):
"""
Create a canvas and a bot with the same canvas attached to it
bot parameters
:param grammar: DRAWBOT or NODEBOX - decides what kind of bot is created
:param vars: preset dictionary of vars from the called
canvas parameters:
... everything else ...
See create_canvas for details on those parameters.
"""
canvas = create_canvas(src, format, outputfile, iterations > 1, buff, window, title, fullscreen=fullscreen,
show_vars=show_vars)
if grammar == DRAWBOT:
from shoebot.grammar import DrawBot
bot = DrawBot(canvas, namespace=namespace, vars=vars)
else:
from shoebot.grammar import NodeBot
bot = NodeBot(canvas, namespace=namespace, vars=vars)
if server:
from shoebot.sbio import SocketServer
socket_server = SocketServer(bot, "", port=port)
return bot | python | {
"resource": ""
} |
q260191 | run | validation | def run(src,
grammar=NODEBOX,
format=None,
outputfile=None,
iterations=1,
buff=None,
window=True,
title=None,
fullscreen=None,
close_window=False,
server=False,
port=7777,
show_vars=False,
vars=None,
namespace=None,
run_shell=False,
args=[],
verbose=False,
background_thread=True):
"""
Create and run a bot, the arguments all correspond to sanitized
commandline options.
:param background_thread: If True then use a background thread.
Other args are split into create_args and run_args
See create_bot for details on create_args
run_args are passed to bot.run - see Nodebot.run or Drawbot.run
Background thread:
readline in python is blocking, running the app in a background
thread opens up the main thread for IO on stdin/stdout, which
can be used for communication with shoebot when livecoding is
enabled.
See shoebot.io for implementation of the shell, and the gedit
plugin for an example of using livecoding.
"""
# Munge shoebogt sys.argv
sys.argv = [sys.argv[
0]] + args # Remove shoebot parameters so sbot can be used in place of the python interpreter (e.g. for sphinx).
# arguments for create_bot
create_args = [src,
grammar,
format,
outputfile,
iterations,
buff,
window,
title,
fullscreen,
server,
port,
show_vars]
create_kwargs = dict(vars=vars, namespace=namespace)
run_args = [src]
run_kwargs = dict(
iterations=iterations,
frame_limiter=window,
verbose=verbose,
# run forever except 1. windowed mode is off 2. if --close-window was specified and
# 3. if an output file was indicated
run_forever=window and not (close_window or bool(outputfile)),
)
# Run shoebot in a background thread so we can run a cmdline shell in the current thread
if background_thread:
sbot_thread = ShoebotThread(
create_args=create_args,
create_kwargs=create_kwargs,
run_args=run_args,
run_kwargs=run_kwargs,
send_sigint=run_shell
)
sbot_thread.start()
sbot = sbot_thread.sbot
else:
print('background thread disabled')
# This is a debug option, things should always work using the
# background thread (crosses fingers)
if run_shell:
# python readline is blocking, so ui must run in a seperate
# thread
raise ValueError('UI Must run in a separate thread to shell and shell needs main thread')
sbot_thread = None
sbot = create_bot(*create_args, **create_kwargs)
sbot.run(*run_args, **run_kwargs)
if run_shell:
import shoebot.sbio.shell
shell = shoebot.sbio.shell.ShoebotCmd(sbot, trusted=True)
try:
shell.cmdloop()
except KeyboardInterrupt as e:
publish_event(QUIT_EVENT) # Handle Ctrl-C
# KeyboardInterrupt is generated by os.kill from the other thread
if verbose:
raise
else:
return
elif background_thread:
try:
while sbot_thread.is_alive():
sleep(1)
except KeyboardInterrupt:
publish_event(QUIT_EVENT)
if all((background_thread, sbot_thread)):
sbot_thread.join()
return sbot | python | {
"resource": ""
} |
q260192 | ShoebotEditorWindow.save_as | validation | def save_as(self):
"""
Return True if the buffer was saved
"""
chooser = ShoebotFileChooserDialog(_('Save File'), None, Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT,
Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))
chooser.set_do_overwrite_confirmation(True)
chooser.set_transient_for(self)
saved = chooser.run() == Gtk.ResponseType.ACCEPT
if saved:
old_filename = self.filename
self.source_buffer.filename = chooser.get_filename()
if not self.save():
self.filename = old_filename
chooser.destroy()
return saved | python | {
"resource": ""
} |
q260193 | VarWindow.widget_changed | validation | def widget_changed(self, widget, v):
''' Called when a slider is adjusted. '''
# set the appropriate bot var
if v.type is NUMBER:
self.bot._namespace[v.name] = widget.get_value()
self.bot._vars[v.name].value = widget.get_value() ## Not sure if this is how to do this - stu
publish_event(VARIABLE_UPDATED_EVENT, v) # pretty dumb for now
elif v.type is BOOLEAN:
self.bot._namespace[v.name] = widget.get_active()
self.bot._vars[v.name].value = widget.get_active() ## Not sure if this is how to do this - stu
publish_event(VARIABLE_UPDATED_EVENT, v) # pretty dumb for now
elif v.type is TEXT:
self.bot._namespace[v.name] = widget.get_text()
self.bot._vars[v.name].value = widget.get_text() ## Not sure if this is how to do this - stu
publish_event(VARIABLE_UPDATED_EVENT, v) | python | {
"resource": ""
} |
q260194 | VarWindow.var_added | validation | def var_added(self, v):
"""
var was added in the bot while it ran, possibly
by livecoding
:param v:
:return:
"""
self.add_variable(v)
self.window.set_size_request(400, 35 * len(self.widgets.keys()))
self.window.show_all() | python | {
"resource": ""
} |
q260195 | VarWindow.var_deleted | validation | def var_deleted(self, v):
"""
var was added in the bot
:param v:
:return:
"""
widget = self.widgets[v.name]
# widgets are all in a single container ..
parent = widget.get_parent()
self.container.remove(parent)
del self.widgets[v.name]
self.window.set_size_request(400, 35 * len(self.widgets.keys()))
self.window.show_all() | python | {
"resource": ""
} |
q260196 | parse | validation | def parse(svg, cached=False, _copy=True):
""" Returns cached copies unless otherwise specified.
"""
if not cached:
dom = parser.parseString(svg)
paths = parse_node(dom, [])
else:
id = _cache.id(svg)
if not _cache.has_key(id):
dom = parser.parseString(svg)
_cache.save(id, parse_node(dom, []))
paths = _cache.load(id, _copy)
return paths | python | {
"resource": ""
} |
q260197 | get_attribute | validation | def get_attribute(element, attribute, default=0):
""" Returns XML element's attribute, or default if none.
"""
a = element.getAttribute(attribute)
if a == "":
return default
return a | python | {
"resource": ""
} |
q260198 | add_color_info | validation | def add_color_info(e, path):
""" Expand the path with color information.
Attempts to extract fill and stroke colors
from the element and adds it to path attributes.
"""
_ctx.colormode(RGB, 1.0)
def _color(hex, alpha=1.0):
if hex == "none": return None
n = int(hex[1:],16)
r = (n>>16)&0xff
g = (n>>8)&0xff
b = n&0xff
return _ctx.color(r/255.0, g/255.0, b/255.0, alpha)
path.fill = (0,0,0,0)
path.stroke = (0,0,0,0)
path.strokewidth = 0
# See if we can find an opacity attribute,
# which is the color's alpha.
alpha = get_attribute(e, "opacity", default="")
if alpha == "":
alpha = 1.0
else:
alpha = float(alpha)
# Colors stored as fill="" or stroke="" attributes.
try: path.fill = _color(get_attribute(e, "fill", default="#00000"), alpha)
except:
pass
try: path.stroke = _color(get_attribute(e, "stroke", default="none"), alpha)
except:
pass
try: path.strokewidth = float(get_attribute(e, "stroke-width", default="1"))
except:
pass
# Colors stored as a CSS style attribute, for example:
# style="fill:#ff6600;stroke:#ffe600;stroke-width:0.06742057"
style = get_attribute(e, "style", default="").split(";")
for s in style:
try:
if s.startswith("fill:"):
path.fill = _color(s.replace("fill:", ""))
elif s.startswith("stroke:"):
path.stroke = _color(s.replace("stroke:", ""))
elif s.startswith("stroke-width:"):
path.strokewidth = float(s.replace("stroke-width:", ""))
except:
pass
# A path with beginning and ending coordinate
# at the same location is considered closed.
# Unless it contains a MOVETO somewhere in the middle.
path.closed = False
if path[0].x == path[len(path)-1].x and \
path[0].y == path[len(path)-1].y:
path.closed = True
for i in range(1,-1):
if path[i].cmd == MOVETO:
path.closed = False
return path | python | {
"resource": ""
} |
q260199 | events.copy | validation | def copy(self, graph):
""" Returns a copy of the event handler, remembering the last node clicked.
"""
e = events(graph, self._ctx)
e.clicked = self.clicked
return e | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.