_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q9300 | Network.getWord | train | def getWord(self, pattern, returnDiff = 0):
"""
Returns the word associated with pattern.
Example: net.getWord([0, 0, 0, 1]) => "tom"
This method now returns the closest pattern based on distance.
"""
minDist = 10000
closest = None
for w in self.patterns:
# There may be some patterns that are scalars; we don't search
# those in this function:
if type(self.patterns[w]) in [int, float]: continue
if len(self.patterns[w]) == len(pattern):
dist = reduce(operator.add, [(a - b) ** 2 for (a,b) in zip(self.patterns[w], pattern )])
| python | {
"resource": ""
} |
q9301 | Network.addPattern | train | def addPattern(self, word, vector):
"""
Adds a pattern with key word.
Example: net.addPattern("tom", [0, 0, 0, 1])
"""
if word in self.patterns:
| python | {
"resource": ""
} |
q9302 | Network.compare | train | def compare(self, v1, v2):
"""
Compares two values. Returns 1 if all values are within
self.tolerance of each other.
"""
try:
if len(v1) != len(v2): return 0
for x, y in zip(v1, v2):
if abs( x - y ) > self.tolerance:
return 0
return 1
except:
| python | {
"resource": ""
} |
q9303 | Network.shareWeights | train | def shareWeights(self, network, listOfLayerNamePairs = None):
"""
Share weights with another network. Connection
is broken after a randomize or change of size. Layers must have the same
names and sizes for shared connections in both networks.
Example: net.shareWeights(otherNet, [["hidden", "output"]])
This example will take the weights between the hidden and output layers
of otherNet and share them with net. Also, the bias values of
otherNet["output"] will be shared with net["output"].
If no list is given, will share all weights.
"""
if listOfLayerNamePairs == None:
listOfLayerNamePairs = []
for c in self.connections:
listOfLayerNamePairs.append( [c.fromLayer.name, c.toLayer.name] )
if self.verbosity > 1:
print("sharing weights:", self.name, listOfLayerNamePairs)
# first, check to see if this will work:
count = 0
for (fromLayerName, toLayerName) in listOfLayerNamePairs:
for c1 in range(len(self.connections)):
if self.connections[c1].fromLayer.name == fromLayerName and \
self.connections[c1].toLayer.name == toLayerName:
for c2 in range(len(network.connections)):
if network.connections[c2].fromLayer.name == fromLayerName and \
network.connections[c2].toLayer.name == toLayerName:
if (self.connections[c1].fromLayer.size != network.connections[c2].fromLayer.size) or \
(self.connections[c1].toLayer.size != network.connections[c2].toLayer.size):
raise AttributeError("shareSomeWeights: layer sizes did not match")
count += 1
if count != len(listOfLayerNamePairs):
raise AttributeError("shareSomeWeights: layer names did not match")
# ok, now let's share!
self.sharedWeights = 1
network.sharedWeights | python | {
"resource": ""
} |
q9304 | BackpropNetwork.propagate | train | def propagate(self, *arg, **kw):
""" Propagates activation through the network."""
output = Network.propagate(self, *arg, **kw)
if self.interactive:
self.updateGraphics()
# IMPORTANT: convert results from numpy.floats to conventional floats
if type(output) == dict:
| python | {
"resource": ""
} |
q9305 | BackpropNetwork.loadWeightsFromFile | train | def loadWeightsFromFile(self, filename, mode='pickle'):
"""
Deprecated. Use loadWeights instead.
"""
| python | {
"resource": ""
} |
q9306 | SRN.setSequenceType | train | def setSequenceType(self, value):
"""
You must set this!
"""
if value == "ordered-continuous":
self.orderedInputs = 1
self.initContext = 0
elif value == "random-segmented":
self.orderedInputs = 0
self.initContext = 1
elif value == "random-continuous":
self.orderedInputs = 0
self.initContext = 0
elif | python | {
"resource": ""
} |
q9307 | SRN.addThreeLayers | train | def addThreeLayers(self, inc, hidc, outc):
"""
Creates a three level network with a context layer.
"""
self.addLayer('input', inc)
self.addContextLayer('context', hidc, 'hidden')
self.addLayer('hidden', hidc)
| python | {
"resource": ""
} |
q9308 | SRN.addContext | train | def addContext(self, layer, hiddenLayerName = 'hidden', verbosity = 0):
"""
Adds a context layer. Necessary to keep self.contextLayers dictionary up to date.
"""
# better not add context layer first if using sweep() without mapInput
SRN.add(self, layer, verbosity)
if hiddenLayerName in self.contextLayers:
| python | {
"resource": ""
} |
q9309 | SRN.copyHiddenToContext | train | def copyHiddenToContext(self):
"""
Uses key to identify the hidden layer associated with each
layer in the self.contextLayers dictionary.
"""
for item in list(self.contextLayers.items()):
if self.verbosity > 2: print('Hidden layer: ', self.getLayer(item[0]).activation)
if self.verbosity > 2: print('Context layer | python | {
"resource": ""
} |
q9310 | Tokenizer.parse | train | def parse(self, line):
"""Tokenize a line of Fortran source."""
tokens = []
self.idx = -1 # Bogus value to ensure idx = 0 after first iteration
self.characters = iter(line)
self.update_chars()
while self.char != '\n':
# Update namelist group status
if self.char in ('&', '$'):
self.group_token = self.char
if self.group_token and (
(self.group_token, self.char) in (('&', '/'), ('$', '$'))):
self.group_token = False
word = ''
if self.char in self.whitespace:
while self.char in self.whitespace:
word += self.char
self.update_chars()
elif self.char in ('!', '#') or self.group_token is None:
# Abort the iteration and build the comment token
word = line[self.idx:-1]
self.char = '\n'
elif self.char in '"\'' or self.prior_delim:
word = self.parse_string()
elif self.char.isalpha():
word = self.parse_name(line)
elif self.char in ('+', '-'):
# Lookahead to check for IEEE value
self.characters, lookahead = itertools.tee(self.characters)
ieee_val = ''.join(itertools.takewhile(str.isalpha, lookahead))
if ieee_val.lower() in ('inf', 'infinity', 'nan'):
word = self.char + ieee_val
self.characters = lookahead
self.prior_char = ieee_val[-1]
self.char = next(lookahead, '\n')
else:
word = self.parse_numeric()
elif self.char.isdigit():
| python | {
"resource": ""
} |
q9311 | Tokenizer.parse_name | train | def parse_name(self, line):
"""Tokenize a Fortran name, such as a variable or subroutine."""
end = self.idx
for char in line[self.idx:]:
if not char.isalnum() and char not in '\'"_':
| python | {
"resource": ""
} |
q9312 | Tokenizer.parse_string | train | def parse_string(self):
"""Tokenize a Fortran string."""
word = ''
if self.prior_delim:
delim = self.prior_delim
self.prior_delim = None
else:
delim = self.char
word += self.char
self.update_chars()
while True:
if self.char == delim:
| python | {
"resource": ""
} |
q9313 | Tokenizer.parse_numeric | train | def parse_numeric(self):
"""Tokenize a Fortran numerical value."""
word = ''
frac = False
if self.char == '-':
word += self.char
self.update_chars()
while self.char.isdigit() or (self.char == '.' and not frac):
# Only allow one decimal point
if self.char == '.':
frac = True
word += self.char
self.update_chars()
| python | {
"resource": ""
} |
q9314 | Tokenizer.update_chars | train | def update_chars(self):
"""Update the current charters in the tokenizer."""
# NOTE: We spoof | python | {
"resource": ""
} |
q9315 | pycomplex | train | def pycomplex(v_str):
"""Convert string repr of Fortran complex to Python complex."""
assert isinstance(v_str, str)
if v_str[0] == '(' and v_str[-1] == ')' and len(v_str.split(',')) == 2:
v_re, v_im = v_str[1:-1].split(',', 1)
# NOTE: Failed float(str) will raise ValueError
| python | {
"resource": ""
} |
q9316 | pybool | train | def pybool(v_str, strict_logical=True):
"""Convert string repr of Fortran logical to Python logical."""
assert isinstance(v_str, str)
assert isinstance(strict_logical, bool)
if strict_logical:
v_bool = v_str.lower()
else:
try:
if v_str.startswith('.'):
v_bool = v_str[1].lower()
else:
v_bool = v_str[0].lower()
except IndexError:
raise ValueError('{0} is not a valid logical constant.'
| python | {
"resource": ""
} |
q9317 | pystr | train | def pystr(v_str):
"""Convert string repr of Fortran string to Python string."""
assert isinstance(v_str, str)
if v_str[0] in ("'", '"') and v_str[0] == v_str[-1]:
quote = v_str[0]
out = v_str[1:-1]
else:
# NOTE: This is non-standard Fortran.
| python | {
"resource": ""
} |
q9318 | pad_array | train | def pad_array(v, idx):
"""Expand lists in multidimensional arrays to pad unset values."""
i_v, i_s = idx[0]
if len(idx) > 1:
# Append missing subarrays
v.extend([[] for _ in range(len(v), i_v - i_s + 1)])
# Pad elements
| python | {
"resource": ""
} |
q9319 | merge_values | train | def merge_values(src, new):
"""Merge two lists or dicts into a single element."""
if isinstance(src, dict) and isinstance(new, dict):
return merge_dicts(src, new)
else:
if not isinstance(src, list):
| python | {
"resource": ""
} |
q9320 | merge_lists | train | def merge_lists(src, new):
"""Update a value list with a list of new or updated values."""
l_min, l_max = (src, new) if len(src) < len(new) else (new, src)
l_min.extend(None for i in range(len(l_min), len(l_max)))
for i, val in enumerate(new):
if isinstance(val, dict) and isinstance(src[i], dict):
new[i] = merge_dicts(src[i], val)
| python | {
"resource": ""
} |
q9321 | merge_dicts | train | def merge_dicts(src, patch):
"""Merge contents of dict `patch` into `src`."""
for key in patch:
if key in src:
if isinstance(src[key], dict) and isinstance(patch[key], dict):
merge_dicts(src[key], patch[key])
else:
| python | {
"resource": ""
} |
q9322 | delist | train | def delist(values):
"""Reduce lists of zero or one elements to individual values."""
assert isinstance(values, list)
if not values:
| python | {
"resource": ""
} |
q9323 | count_values | train | def count_values(tokens):
"""Identify the number of values ahead of the current token."""
ntoks = 0
for tok in tokens:
if tok in ('=', '/', '$', '&'):
if ntoks > 0 and tok == '=':
| python | {
"resource": ""
} |
q9324 | Parser.comment_tokens | train | def comment_tokens(self, value):
"""Validate and set the comment token string."""
if not isinstance(value, str):
| python | {
"resource": ""
} |
q9325 | Parser.default_start_index | train | def default_start_index(self, value):
"""Validate and set the default start index."""
if not isinstance(value, int):
raise TypeError('default_start_index attribute must be | python | {
"resource": ""
} |
q9326 | Parser.sparse_arrays | train | def sparse_arrays(self, value):
"""Validate and enable spare arrays."""
if not isinstance(value, | python | {
"resource": ""
} |
q9327 | Parser.global_start_index | train | def global_start_index(self, value):
"""Set the global start index."""
if not isinstance(value, int) and value is not None:
raise | python | {
"resource": ""
} |
q9328 | Parser.row_major | train | def row_major(self, value):
"""Validate and set row-major format for multidimensional arrays."""
if value is not None:
if not isinstance(value, bool):
raise TypeError(
| python | {
"resource": ""
} |
q9329 | Parser.strict_logical | train | def strict_logical(self, value):
"""Validate and set the strict logical flag."""
if value is not None:
if not isinstance(value, bool):
raise TypeError(
| python | {
"resource": ""
} |
q9330 | Parser.read | train | def read(self, nml_fname, nml_patch_in=None, patch_fname=None):
"""Parse a Fortran namelist file and store the contents.
>>> parser = f90nml.Parser()
>>> data_nml = parser.read('data.nml')
"""
# For switching based on files versus paths
nml_is_path = not hasattr(nml_fname, 'read')
patch_is_path = not hasattr(patch_fname, 'read')
# Convert patch data to a Namelist object
if nml_patch_in is not None:
if not isinstance(nml_patch_in, dict):
raise TypeError('Input patch must be a dict or a Namelist.')
nml_patch = copy.deepcopy(Namelist(nml_patch_in))
if not patch_fname and nml_is_path:
patch_fname = nml_fname + '~'
elif not patch_fname:
raise ValueError('f90nml: error: No output file for patch.')
elif nml_fname == patch_fname:
raise ValueError('f90nml: error: Patch filepath cannot be the '
| python | {
"resource": ""
} |
q9331 | Parser._readstream | train | def _readstream(self, nml_file, nml_patch_in=None):
"""Parse an input stream containing a Fortran namelist."""
nml_patch = nml_patch_in if nml_patch_in is not None else Namelist()
tokenizer = Tokenizer()
f90lex = []
for line in nml_file:
toks = tokenizer.parse(line)
while tokenizer.prior_delim:
new_toks = tokenizer.parse(next(nml_file))
# Skip empty lines
if not new_toks:
continue
# The tokenizer always pre-tokenizes the whitespace (leftover
# behaviour from Fortran source parsing) so this must be added
# manually.
if new_toks[0].isspace():
toks[-1] += new_toks.pop(0)
# Append the rest of the string (if present)
if new_toks:
toks[-1] += new_toks[0]
# Attach the rest of the tokens
toks.extend(new_toks[1:])
toks.append('\n')
f90lex.extend(toks)
self.tokens = iter(f90lex)
nmls = Namelist()
# Attempt to get first token; abort on empty file
try:
self._update_tokens(write_token=False)
except StopIteration:
return nmls
# TODO: Replace "while True" with an update_token() iterator
while True:
try:
# Check for classic group terminator
if self.token == 'end':
self._update_tokens()
# Ignore tokens outside of namelist groups
while self.token not in ('&', '$'):
| python | {
"resource": ""
} |
q9332 | Parser._parse_indices | train | def _parse_indices(self):
"""Parse a sequence of Fortran vector indices as a list of tuples."""
v_name = self.prior_token
v_indices = []
| python | {
"resource": ""
} |
q9333 | Parser._parse_index | train | def _parse_index(self, v_name):
"""Parse Fortran vector indices into a tuple of Python indices."""
i_start = i_end = i_stride = None
# Start index
self._update_tokens()
try:
i_start = int(self.token)
self._update_tokens()
except ValueError:
if self.token in (',', ')'):
raise ValueError('{0} index cannot be empty.'.format(v_name))
elif not self.token == ':':
raise
# End index
if self.token == ':':
self._update_tokens()
try:
i_end = 1 + int(self.token)
self._update_tokens()
except ValueError:
if self.token == ':':
raise ValueError('{0} end index cannot be implicit '
| python | {
"resource": ""
} |
q9334 | Parser._parse_value | train | def _parse_value(self, write_token=True, override=None):
"""Convert string repr of Fortran type to equivalent Python type."""
v_str = self.prior_token
# Construct the complex string
if v_str == '(':
v_re = self.token
self._update_tokens(write_token)
assert self.token == ','
self._update_tokens(write_token)
v_im = self.token
self._update_tokens(write_token)
assert self.token == ')'
self._update_tokens(write_token, override)
v_str = '({0}, {1})'.format(v_re, v_im)
recast_funcs = | python | {
"resource": ""
} |
q9335 | Parser._update_tokens | train | def _update_tokens(self, write_token=True, override=None,
patch_skip=False):
"""Update tokens to the next available values."""
next_token = next(self.tokens)
patch_value = ''
patch_tokens = ''
if self.pfile and write_token:
token = override if override else self.token
patch_value += token
while next_token[0] in self.comment_tokens + whitespace:
if self.pfile:
if next_token[0] in self.comment_tokens:
while not next_token == '\n':
patch_tokens += next_token
next_token = next(self.tokens)
patch_tokens += next_token
# Several sections rely on StopIteration to terminate token search
# If that occurs, dump the patched tokens immediately
try:
next_token = next(self.tokens)
except StopIteration:
| python | {
"resource": ""
} |
q9336 | Parser._append_value | train | def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):
"""Update a list of parsed values with a new value."""
for _ in range(n_vals):
if v_idx:
try:
v_i = next(v_idx)
except StopIteration:
# Repeating commas are null-statements and can be ignored
# Otherwise, we warn the user that this is a bad namelist
if next_value is not None:
warnings.warn('f90nml: warning: Value {0} is not assigned to '
'any variable and has been removed.'
''.format(next_value))
# There are more values than indices, so we stop here
break
v_s = [self.default_start_index if idx is None else idx
for idx in v_idx.first]
if not self.row_major:
v_i = v_i[::-1]
v_s = v_s[::-1]
# Multidimensional arrays
if not self.sparse_arrays:
pad_array(v_values, list(zip(v_i, v_s)))
# We iterate inside the v_values and inspect successively
# deeper lists within the list tree. If the requested index is
# missing, we re-size that particular entry.
# (NOTE: This is unnecessary when sparse_arrays is disabled.)
v_subval = v_values
for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):
try:
| python | {
"resource": ""
} |
q9337 | display_pil_image | train | def display_pil_image(im):
"""Displayhook function for PIL Images, rendered as PNG."""
from IPython.core import display
b = BytesIO()
im.save(b, format='png')
data = b.getvalue() | python | {
"resource": ""
} |
q9338 | AppNexusClient._prepare_uri | train | def _prepare_uri(self, service_name, **parameters):
"""Prepare the URI for a request
:param service_name: The target service
:type service_name: str
:param kwargs: query parameters
:return: The uri of the request
"""
query_parameters = []
for key, value in parameters.items():
if isinstance(value, (list, tuple)):
value = ",".join([str(member) for member in value])
if isinstance(value, bool):
value = "true" if value else "false"
| python | {
"resource": ""
} |
q9339 | AppNexusClient._handle_rate_exceeded | train | def _handle_rate_exceeded(self, response): # pragma: no cover
"""Handles rate exceeded errors"""
| python | {
"resource": ""
} |
q9340 | AppNexusClient.update_token | train | def update_token(self):
"""Request a new token and store it for future use"""
logger.info('updating token')
if None in self.credentials.values():
raise RuntimeError("You must provide an username and a password")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "auth",
json=credentials)
data = response.json()["response"]
if "error_id" in data and data["error_id"] == "NOAUTH":
| python | {
"resource": ""
} |
q9341 | AppNexusClient.check_errors | train | def check_errors(self, response, data):
"""Check for errors and raise an appropriate error if needed"""
if "error_id" in data:
error_id = data["error_id"]
if error_id in self.error_ids:
raise self.error_ids[error_id](response)
if "error_code" in data:
| python | {
"resource": ""
} |
q9342 | AppNexusClient.get | train | def get(self, service_name, **kwargs):
"""Retrieve data from AppNexus API"""
| python | {
"resource": ""
} |
q9343 | AppNexusClient.modify | train | def modify(self, service_name, json, **kwargs):
"""Modify an AppNexus object"""
| python | {
"resource": ""
} |
q9344 | AppNexusClient.create | train | def create(self, service_name, json, **kwargs):
"""Create a new AppNexus object"""
| python | {
"resource": ""
} |
q9345 | AppNexusClient.delete | train | def delete(self, service_name, *ids, **kwargs):
"""Delete an AppNexus object"""
| python | {
"resource": ""
} |
q9346 | GraphWin.plot | train | def plot(self, x, y, color="black"):
"""
Uses coordinate system.
"""
| python | {
"resource": ""
} |
q9347 | GraphWin.plotPixel | train | def plotPixel(self, x, y, color="black"):
"""
Doesn't use coordinate system.
"""
| python | {
"resource": ""
} |
q9348 | GraphWin.getMouse | train | def getMouse(self):
"""
Waits for a mouse click.
"""
# FIXME: this isn't working during an executing cell
| python | {
"resource": ""
} |
q9349 | event_html_page_context | train | def event_html_page_context(app, pagename, templatename, context, doctree):
"""Called when the HTML builder has created a context dictionary to render a template with.
Conditionally adding disqus.js to <head /> if the directive is used in a page.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str pagename: Name of the page being rendered (without .html or any file extension).
:param str templatename: Page name with .html.
:param dict context: Jinja2 HTML context.
:param docutils.nodes.document doctree: Tree of | python | {
"resource": ""
} |
q9350 | DisqusDirective.get_shortname | train | def get_shortname(self):
"""Validate and returns disqus_shortname config value.
:returns: disqus_shortname config value.
:rtype: str
"""
disqus_shortname = self.state.document.settings.env.config.disqus_shortname
if not disqus_shortname:
raise ExtensionError('disqus_shortname config value must be set for the disqus extension to work.') | python | {
"resource": ""
} |
q9351 | DisqusDirective.get_identifier | train | def get_identifier(self):
"""Validate and returns disqus_identifier option value.
:returns: disqus_identifier config value.
:rtype: str
"""
if 'disqus_identifier' in self.options:
return self.options['disqus_identifier']
title_nodes = self.state.document.traverse(nodes.title)
if not | python | {
"resource": ""
} |
q9352 | DisqusDirective.run | train | def run(self):
"""Executed by Sphinx.
:returns: Single DisqusNode instance with config values passed as arguments.
:rtype: list
"""
disqus_shortname = self.get_shortname()
| python | {
"resource": ""
} |
q9353 | attach | train | def attach(func, params):
"""
Given a function and a namespace of possible parameters,
bind any params matching the signature | python | {
"resource": ""
} |
q9354 | init_config | train | def init_config(overrides):
"""
Install the config dict as pmxbot.config, setting overrides,
and return the result.
"""
pmxbot.config = config | python | {
"resource": ""
} |
q9355 | initialize | train | def initialize(config):
"Initialize the bot with a dictionary of config items"
config = init_config(config)
_setup_logging()
_load_library_extensions()
if not Handler._registry:
raise RuntimeError("No handlers registered")
class_ = _load_bot_class()
config.setdefault('log_channels', [])
config.setdefault('other_channels', [])
channels = config.log_channels + config.other_channels
log.info('Running with config')
log.info(pprint.pformat(config)) | python | {
"resource": ""
} |
q9356 | Sentinel.augment_items | train | def augment_items(cls, items, **defaults):
"""
Iterate over the items, keeping a adding properties as supplied by
Sentinel objects encountered.
>>> from more_itertools.recipes import consume
>>> res = Sentinel.augment_items(['a', 'b', NoLog, 'c'], secret=False)
>>> res = tuple(res)
>>> consume(map(print, res))
a
b
c
>>> [msg.secret for msg in res]
[False, False, True]
>>> msgs = ['a', NoLog, 'b', SwitchChannel('#foo'), 'c']
>>> res = Sentinel.augment_items(msgs, secret=False, channel=None)
>>> res = tuple(res)
>>> consume(map(print, res))
a
b
c
>>> [msg.channel for msg in res] == [None, None, '#foo']
True
>>> [msg.secret for msg in res]
[False, True, True]
>>> res = Sentinel.augment_items(msgs, channel='#default', secret=False)
>>> consume(map(print, [msg.channel for msg in res]))
#default
#default | python | {
"resource": ""
} |
q9357 | Handler.find_matching | train | def find_matching(cls, message, channel):
"""
Yield ``cls`` subclasses that match message and channel
"""
return (
handler
for handler in | python | {
"resource": ""
} |
q9358 | Handler._set_implied_name | train | def _set_implied_name(self):
"Allow the name of this handler to default to the function name."
if getattr(self, 'name', None) is None: | python | {
"resource": ""
} |
q9359 | CommandHandler._set_doc | train | def _set_doc(self, func):
"""
If no doc was explicitly set, use the function's docstring, trimming
| python | {
"resource": ""
} |
q9360 | Bot.allow | train | def allow(self, channel, message):
"""
Allow plugins to filter content.
"""
return all(
| python | {
"resource": ""
} |
q9361 | Bot._handle_output | train | def _handle_output(self, channel, output):
"""
Given an initial channel and a sequence of messages or sentinels,
output the messages.
"""
augmented = | python | {
"resource": ""
} |
q9362 | Bot.handle_action | train | def handle_action(self, channel, nick, msg):
"Core message parser and dispatcher"
messages = ()
for handler in Handler.find_matching(msg, channel):
exception_handler = functools.partial(
self._handle_exception,
handler=handler,
)
rest = handler.process(msg)
client = connection = event = None
# for regexp handlers
match = rest
f = handler.attach(locals())
results = pmxbot.itertools.generate_results(f)
| python | {
"resource": ""
} |
q9363 | Bot.handle_scheduled | train | def handle_scheduled(self, target):
"""
target is a Handler or simple callable
"""
if not isinstance(target, Handler): | python | {
"resource": ""
} |
q9364 | log_leave | train | def log_leave(event, nick, channel):
"""
Log a quit or part event.
"""
| python | {
"resource": ""
} |
q9365 | Karma.link | train | def link(self, thing1, thing2):
"""
Link thing1 and thing2, adding the karma of each into
a single entry.
If any thing does not exist, it is created.
"""
thing1 = thing1.strip().lower()
thing2 = thing2.strip().lower()
if thing1 == thing2:
| python | {
"resource": ""
} |
q9366 | SQLiteKarma._get | train | def _get(self, id):
"Return keys and value for karma id"
VALUE_SQL = "SELECT karmavalue from karma_values where karmaid = ?"
KEYS_SQL = "SELECT karmakey | python | {
"resource": ""
} |
q9367 | MongoDBKarma.repair_duplicate_names | train | def repair_duplicate_names(self):
"""
Prior to 1101.1.1, pmxbot would incorrectly create new karma records
for individuals with multiple names.
This routine corrects those records.
"""
for name in self._all_names():
cur = self.db.find({'names': name})
main_doc = next(cur)
for duplicate in cur:
query = {'_id': main_doc['_id']}
update = {
| python | {
"resource": ""
} |
q9368 | google | train | def google(rest):
"Look up a phrase on google"
API_URL = 'https://www.googleapis.com/customsearch/v1?'
try:
key = pmxbot.config['Google API key']
except KeyError:
return "Configure 'Google API key' in config"
# Use a custom search that searches everything normally
# http://stackoverflow.com/a/11206266/70170
custom_search = '004862762669074674786:hddvfu0gyg0'
params = dict(
key=key,
cx=custom_search,
| python | {
"resource": ""
} |
q9369 | annoy | train | def annoy():
"Annoy everyone with meaningless banter"
def a1():
yield 'OOOOOOOHHH, WHAT DO YOU DO WITH A DRUNKEN SAILOR'
yield 'WHAT DO YOU DO WITH A DRUNKEN SAILOR'
yield "WHAT DO YOU DO WITH A DRUNKEN SAILOR, EARLY IN THE MORNIN'?"
def a2():
yield "I'M HENRY THE EIGHTH I AM"
yield "HENRY THE EIGHTH I AM I AM"
yield (
"I GOT MARRIED TO THE GIRL NEXT DOOR; SHE'S BEEN MARRIED "
"SEVEN TIMES BEFORE")
def a3():
yield "BOTHER!"
yield "BOTHER BOTHER BOTHER!"
yield "BOTHER BOTHER BOTHER BOTHER!"
def a4():
yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
yield "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
yield "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
def | python | {
"resource": ""
} |
q9370 | cheer | train | def cheer(rest):
"Cheer for something"
if rest:
karma.Karma.store.change(rest, 1)
return "/me cheers for %s!" % | python | {
"resource": ""
} |
q9371 | golfclap | train | def golfclap(rest):
"Clap for something"
clapv = random.choice(phrases.clapvl)
adv = random.choice(phrases.advl)
adj = random.choice(phrases.adjl)
| python | {
"resource": ""
} |
q9372 | oregontrail | train | def oregontrail(channel, nick, rest):
"It's edutainment!"
rest = rest.strip()
if rest:
who = rest.strip()
else:
who = random.choice([nick, channel, 'pmxbot'])
| python | {
"resource": ""
} |
q9373 | eball | train | def eball(rest):
"Ask the magic 8ball a question"
try:
url = 'https://8ball.delegator.com/magic/JSON/'
url += rest
result = | python | {
"resource": ""
} |
q9374 | roll | train | def roll(rest, nick):
"Roll a die, default = 100."
if rest:
rest = rest.strip()
| python | {
"resource": ""
} |
q9375 | ticker | train | def ticker(rest):
"Look up a ticker symbol's current trading value"
ticker = rest.upper()
# let's use Yahoo's nifty csv facility, and pull last time/price both
symbol = 's'
last_trade_price = 'l1'
last_trade_time = 't1'
change_percent = 'p2'
format = ''.join((symbol, last_trade_time, last_trade_price, change_percent))
url = (
'http://finance.yahoo.com/d/quotes.csv?s=%(ticker)s&f=%(format)s'
% locals())
stock_info = csv.reader(util.open_url(url).text.splitlines())
| python | {
"resource": ""
} |
q9376 | pick | train | def pick(rest):
"Pick between a few options"
question = rest.strip()
choices = util.splitem(question)
if len(choices) == 1:
return "I can't pick if you give me only one choice!"
else:
pick = random.choice(choices)
| python | {
"resource": ""
} |
q9377 | lunch | train | def lunch(rest):
"Pick where to go to lunch"
rs = rest.strip()
if not rs:
return "Give me an area and I'll pick a place: (%s)" % (
', '.join(list(pmxbot.config.lunch_choices)))
if rs not in pmxbot.config.lunch_choices:
return "I didn't recognize that area; here's what i have: | python | {
"resource": ""
} |
q9378 | insult | train | def insult(rest):
"Generate a random insult from datahamster"
# not supplying any style will automatically redirect to a random
url = 'http://autoinsult.datahamster.com/'
ins_type = random.randrange(4)
ins_url = url + "?style={ins_type}".format(**locals())
insre = re.compile('<div class="insult" id="insult">(.*?)</div>')
resp = requests.get(ins_url)
resp.raise_for_status()
insult = insre.search(resp.text).group(1)
if not insult:
return
if rest:
insultee = rest.strip()
karma.Karma.store.change(insultee, -1)
if ins_type in (0, 2):
| python | {
"resource": ""
} |
q9379 | bitchingisuseless | train | def bitchingisuseless(channel, rest):
"It really is, ya know..."
rest = rest.strip()
if rest:
karma.Karma.store.change(rest, -1)
else:
karma.Karma.store.change(channel, -1)
| python | {
"resource": ""
} |
q9380 | curse | train | def curse(rest):
"Curse the day!"
if rest:
cursee = rest
else: | python | {
"resource": ""
} |
q9381 | bless | train | def bless(rest):
"Bless the day!"
if rest:
blesse = rest
else: | python | {
"resource": ""
} |
q9382 | blame | train | def blame(channel, rest, nick):
"Pass the buck!"
if rest:
blamee = rest
else:
blamee = channel
karma.Karma.store.change(nick, -1)
if random.randint(1, 10) == 1:
yield "/me jumps atop the chair and | python | {
"resource": ""
} |
q9383 | calc | train | def calc(rest):
"Perform a basic calculation"
mo = calc_exp.match(rest)
if mo:
try:
return str(eval(rest))
except Exception: | python | {
"resource": ""
} |
q9384 | define | train | def define(rest):
"Define a word"
word = rest.strip()
res = util.lookup(word)
fmt = (
'{lookup.provider} says: {res}' if res else
"{lookup.provider} does | python | {
"resource": ""
} |
q9385 | urbandict | train | def urbandict(rest):
"Define a word with Urban Dictionary"
word = rest.strip()
definition = util.urban_lookup(word)
if not definition:
return "Arg! | python | {
"resource": ""
} |
q9386 | acit | train | def acit(rest):
"Look up an acronym"
word = rest.strip()
res = util.lookup_acronym(word)
if res is None:
return "Arg! | python | {
"resource": ""
} |
q9387 | version | train | def version(rest):
"Get the version of pmxbot or one of its plugins"
pkg = rest.strip() or 'pmxbot'
if pkg.lower() == 'python':
| python | {
"resource": ""
} |
q9388 | timezone | train | def timezone(rest):
"""Convert date between timezones.
Example:
> !tz 11:00am UTC in PDT
11:00 UTC -> 4:00 PDT
UTC is implicit
> !tz 11:00am in PDT
11:00 UTC -> 4:00 PDT
> !tz 11:00am PDT
11:00 PDT -> 18:00 UTC
"""
if ' in ' in rest:
| python | {
"resource": ""
} |
q9389 | SelectableStorage.finalize | train | def finalize(cls):
"Delete the various persistence objects"
for finalizer in cls._finalizers:
try:
| python | {
"resource": ""
} |
q9390 | pmon | train | def pmon(month):
"""
P the month
>>> print(pmon('2012-08'))
August, 2012
"""
year, month = month.split('-')
| python | {
"resource": ""
} |
q9391 | pday | train | def pday(dayfmt):
"""
P the day
>>> print(pday('2012-08-24'))
Friday the 24th
"""
year, month, day = map(int, | python | {
"resource": ""
} |
q9392 | patch_compat | train | def patch_compat(config):
"""
Support older config values.
"""
if 'web_host' in config:
config['host'] = config.pop('web_host')
if | python | {
"resource": ""
} |
q9393 | ChannelPage.date_key | train | def date_key(cls, month_string):
"""
Return a key suitable for sorting by month.
>>> k1 = ChannelPage.date_key('September, 2012')
>>> k2 = ChannelPage.date_key('August, 2013')
>>> k2 > k1
True
| python | {
"resource": ""
} |
q9394 | LegacyPage.forward | train | def forward(self, channel, date_s, fragment):
"""
Given an HREF in the legacy timezone, redirect to an href for UTC.
"""
time_s, sep, nick = fragment.rpartition('.')
time = datetime.datetime.strptime(time_s, '%H.%M.%S')
date = datetime.datetime.strptime(date_s, '%Y-%m-%d')
dt = datetime.datetime.combine(date, time.time())
loc_dt = self.timezone.localize(dt)
utc_dt = loc_dt.astimezone(pytz.utc)
| python | {
"resource": ""
} |
q9395 | logs | train | def logs(channel):
"Where can one find the logs?"
default_url = 'http://' + socket.getfqdn()
base = pmxbot.config.get('logs URL', default_url)
logged_channel = channel in pmxbot.config.log_channels
| python | {
"resource": ""
} |
q9396 | log | train | def log(channel, rest):
"""
Enable or disable logging for a channel;
use 'please' to start logging and 'stop please' to stop.
"""
words = [s.lower() for s in rest.split()]
if 'please' not in words:
return
include = 'stop' not in rest
existing = set(pmxbot.config.log_channels)
| python | {
"resource": ""
} |
q9397 | MongoDBLogger._add_recent | train | def _add_recent(self, doc, logged_id):
"Keep a tab on the most recent message for each channel"
spec = dict(channel=doc['channel'])
doc['ref'] = | python | {
"resource": ""
} |
q9398 | FullTextMongoDBLogger._has_fulltext | train | def _has_fulltext(cls, uri):
"""
Enable full text search on the messages if possible and return True.
If the full text search cannot be enabled, then return False.
"""
coll | python | {
"resource": ""
} |
q9399 | Bot._find_user_channel | train | def _find_user_channel(self, username):
"""
Use slacker to resolve the username to an opened IM channel
"""
user_id = self.slacker.users.get_user_id(username)
im = user_id and | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.