docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
初始化异常.
Parameters:
message (str): - 异常信息
ID (str): - 任务ID
exception (str): - 异常栈信息
def __init__(self, message, ID, exception=None, status_code=None):
    """Initialize the task exception.

    Args:
        message (str): Human-readable error message.
        ID (str): ID of the task that raised the error.
        exception (str, optional): Stack-trace text of the underlying error.
        status_code (int, optional): Status code forwarded to the base class.
    """
    # Base class receives both the message and the status code.
    super().__init__(message, status_code)
    self.ID = ID
    # NOTE(review): attribute is upper-case EXCEPTION while the parameter is
    # lower-case -- presumably a project convention; confirm before renaming.
    self.EXCEPTION = exception
Expire the key, delete the value, and call the callback function
if one is specified.
Args:
def expire_key(self, key):
    """Expire *key*: delete it from the dict and fire the optional callback.

    The callback (when configured) receives the key, the value it held, plus
    any extra positional/keyword arguments stored on the instance.

    Args:
        key: The ``TimedDict`` key to expire.
    """
    expired_value = self.base_dict[key]
    del self[key]
    if self.callback is None:
        return
    self.callback(key, expired_value,
                  *self.callback_args, **self.callback_kwargs)
Return suffix from `path`.
``/home/xex/somefile.txt`` --> ``txt``.
Args:
path (str): Full file path.
Returns:
str: Suffix.
Raises:
UserWarning: When ``/`` is detected in suffix. | def _get_suffix(path):
suffix = os.path.basename(path).split(".")[-1]
if "/" in suffix:
raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path)
return suffix | 1,090,116 |
Creates a map of letter use in a word.
Args:
word: a string to create a letter map from
Returns:
a dictionary of {letter: integer count of letter in word} | def _letter_map(word):
lmap = {}
for letter in word:
try:
lmap[letter] += 1
except KeyError:
lmap[letter] = 1
return lmap | 1,090,144 |
Finds anagrams in word.
Args:
word: the string to base our search off of
sowpods: boolean to declare TWL or SOWPODS words file
start: a string of starting characters to find anagrams based on
end: a string of ending characters to find anagrams based on
Yields:
a tuple o... | def anagrams_in_word(word, sowpods=False, start="", end=""):
input_letters, blanks, questions = blank_tiles(word)
for tile in start + end:
input_letters.append(tile)
for word in word_list(sowpods, start, end):
lmap = _letter_map(input_letters)
used_blanks = 0
for lett... | 1,090,145 |
Parse configuration values from the database.
The extension must have been previously initialized.
If a key is not found in the database, it will be created with the
default value specified.
Arguments:
keys (list[str]): list of keys to parse. If the list is empty, then
... | def parse_conf(self, keys=[]):
confs = self.app.config.get('WAFFLE_CONFS', {})
if not keys:
keys = confs.keys()
result = {}
for key in keys:
# Some things cannot be changed...
if key.startswith('WAFFLE_'):
continue
... | 1,090,358 |
Update database values and application configuration.
The provided keys must be defined in the ``WAFFLE_CONFS`` setting.
Arguments:
new_values (dict): dict of configuration variables and their values
The dict has the following structure:
{
... | def update_db(self, new_values):
confs = self.app.config.get('WAFFLE_CONFS', {})
to_update = {}
for key in new_values.keys():
# Some things cannot be changed...
if key.startswith('WAFFLE_'):
continue
# No arbitrary keys
i... | 1,090,359 |
Initialize the extension for the given application and store.
Parse the configuration values stored in the database obtained from
the ``WAFFLE_CONFS`` value of the configuration.
Arguments:
app: Flask application instance
def init_app(self, app, configstore):
    """Initialize the extension for *app* using *configstore*.

    Creates the shared ``_WaffleState`` and registers it under the
    ``'waffleconf'`` key of ``app.extensions``.

    Args:
        app: Flask application instance.
        configstore (WaffleStore): database store.
    """
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    state = _WaffleState(app, configstore)
    self.state = state
    app.extensions['waffleconf'] = state
Support item access via dot notation.
Args:
def __getattr__(self, __key: Hashable) -> Any:
    """Support item access via dot notation.

    Args:
        __key: Key to fetch.

    Raises:
        AttributeError: If the key is missing (translated from KeyError so
            attribute-protocol consumers such as ``getattr`` behave normally).
    """
    try:
        return self[__key]
    except KeyError:
        # Attribute consumers expect AttributeError, not KeyError.
        raise AttributeError(__key)
Support item assignment via dot notation.
Args:
__key: Key to set value for
def __setattr__(self, __key: Hashable, __value: Any) -> None:
    """Support item assignment via dot notation.

    Args:
        __key: Key to set value for.
        __value: Value to set key to.

    Raises:
        AttributeError: If item assignment fails for any reason -- the broad
            ``except Exception`` converts e.g. unhashable-key TypeErrors into
            the attribute-protocol error type.
    """
    try:
        self[__key] = __value
    except Exception as err:
        raise AttributeError(str(err))
Support item deletion via dot notation.
Args:
__key: Key to delete | def __delattr__(self, __key: Hashable) -> None:
try:
del self[__key]
except TypeError as err:
raise AttributeError(str(err)) | 1,090,554 |
Init a new FileSystem Cache
Args:
cache_dir
maxsize. Maximum size of the cache, in GB | def __init__(
self, dir=None, options=None, upstream=None, prefix='', **kwargs):
from ambry.dbexceptions import ConfigurationError
super(FsCache, self).__init__(upstream, **kwargs)
self._cache_dir = dir
if not os.path.isabs(self._cache_dir):
raise Con... | 1,090,581 |
Copy a file to the repository
Args:
source: Absolute path to the source file, or a file-like object
rel_path: path relative to the root of the repository | def put(self, source, rel_path, metadata=None):
# This case should probably be deprecated.
if not isinstance(rel_path, basestring):
rel_path = rel_path.cache_key
sink = self.put_stream(rel_path, metadata=metadata)
try:
copy_file_or_flo(source, sink)
... | 1,090,583 |
Init a new FileSystem Cache
Args:
cache_dir
maxsize. Maximum size of the cache, in GB | def __init__(self, dir=dir, size=10000, upstream=None, **kwargs):
from ambry.dbexceptions import ConfigurationError
super(FsLimitedCache, self).__init__(dir, upstream=upstream, **kwargs)
self._size = size
self.maxsize = int(size) * 1048578 # size in MB
self.readonly... | 1,090,595 |
If there are not size bytes of space left, delete files
until there is
Args:
size: size of the current file
this_rel_path: rel_pat to the current file, so we don't delete it. | def _free_up_space(self, size, this_rel_path=None):
# Amount of space we are over ( bytes ) for next put
space = self.size + size - self.maxsize
if space <= 0:
return
removes = []
for row in self.database.execute("SELECT path, size, time FROM files ORDER ... | 1,090,599 |
Retrieve the prices of a list of equities as a DataFrame (columns = symbols)
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
e.g. ["AAPL", " slv ", "GLD", "GOOG", "$SPX", "XOM", "msft"]
start (datetime): The date at the start of the period being analyzed.
end (dat... | def price_dataframe(symbols=('sne',),
start=datetime.datetime(2008, 1, 1),
end=datetime.datetime(2009, 12, 31),
price_type='actual_close',
cleaner=util.clean_dataframe,
):
if isinstance(price_type, basestring):
price_type = [price_type]
start = nlp.util.normalize_date(start or d... | 1,090,676 |
Create a tar-file or a tar.gz at location: filename.
params:
gzip: if True - gzip the file, default = False
dirs: dirs to be tared
returns a 3-tuple with returncode (integer), terminal output (string)
def tar(filename, dirs=None, gzip=False):
    """Create a tar file (or tar.gz) at location *filename*.

    Args:
        filename: Path of the archive to create.
        dirs: Directory (or list of directories) to be tarred.
        gzip: If True, gzip the archive. Default: False.

    Returns:
        tuple: (returncode (int), terminal output (str), filename).
    """
    if gzip:
        cmd = 'tar czvf %s ' % filename
    else:
        cmd = 'tar cvf %s ' % filename
    # BUG FIX: the original tested `type(dirs) != 'list'`, comparing a type
    # object against the *string* 'list' -- always true -- so list inputs were
    # double-wrapped and rendered as "['a', 'b']" in the command line.
    if dirs is None:
        dirs = []
    elif not isinstance(dirs, (list, tuple)):
        dirs = [dirs]
    cmd += ' '.join(str(x) for x in dirs)
    retcode, output = sh(cmd)
    return (retcode, output, filename)
Create a Thing.
Args:
name (str): name of the Thing. This corresponds to the
AWS IoT Thing name.
client (str): MQTT client connection to use. This can be set
def __init__(self, name, client=None):
    """Create a Thing.

    Args:
        name (str): name of the Thing; corresponds to the AWS IoT Thing name.
        client (str): MQTT client connection to use. This can be set anytime
            before publishing Thing messages to the server.
    """
    self._name = name
    self.client = client
    # Last reported state; populated on first publish.
    self._state = None
Publish thing state to AWS IoT.
Args:
state (dict): object state. Must be JSON serializable (i.e., not
def publish_state(self, state):
    """Publish thing state to AWS IoT as a 'reported' shadow update.

    Args:
        state (dict): object state. Must be JSON serializable (i.e., not
            have circular references).
    """
    payload = {'state': {'reported': state}}
    self.client.publish(self.topic, json.dumps(payload))
    self._state = state
Gets the enum for the op code
Args:
op: value of the op code (will be casted to int)
Returns:
def parse(cls, op):
    """Return the enum member whose value equals the given op code.

    Args:
        op: Value of the op code (cast to int before comparison).

    Returns:
        The matching member, or None when no member matches.
    """
    for member in cls:
        if member.value == int(op):
            return member
    return None
Runs the thread
This method handles sending the heartbeat to the Discord websocket server, so the connection
can remain open and the bot remain online for those commands that require it to be.
Args:
None | def run(self):
while self.should_run:
try:
self.logger.debug('Sending heartbeat, seq ' + last_sequence)
self.ws.send(json.dumps({
'op': 1,
'd': last_sequence
}))
except Exception as e:
... | 1,091,126 |
Sets up the internal logger
Args:
logging_level: what logging level to use
log_to_console: whether or not to log to the console | def _setup_logger(self, logging_level: int, log_to_console: bool):
self.logger = logging.getLogger('discord')
self.logger.handlers = []
self.logger.setLevel(logging_level)
formatter = logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%... | 1,091,129 |
Make an HTTP request
Args:
path: the URI path (not including the base url, start with
the first uri segment, like 'users/...')
method: the HTTP method to use (GET, POST, PATCH, ...)
data: the data to send as JSON data
expected_status: expected HTT... | def _query(self, path: str, method: str, data: Dict[str, Any]=None, expected_status: int = 200) \
-> Union[List[Dict[str, Any]], Dict[str, Any], None]:
url = Pycord.url_base + path
self.logger.debug(f'Making {method} request to "{url}"')
if method == 'GET':
r = r... | 1,091,130 |
Callback for receiving errors from the websocket connection
Args:
ws: websocket connection
def _ws_on_error(self, ws: websocket.WebSocketApp, error: Exception):
    """Callback for receiving errors from the websocket connection.

    Args:
        ws: websocket connection (unused; required by the callback signature).
        error: exception raised by the connection.
    """
    self.logger.error(f'Got error from websocket connection: {str(error)}')
Callback for closing the websocket connection
Args:
def _ws_on_close(self, ws: websocket.WebSocketApp):
    """Callback for closing the websocket connection.

    Marks the client as disconnected and immediately attempts a reconnect.

    Args:
        ws: websocket connection (now closed).
    """
    self.connected = False
    self.logger.error('Websocket closed')
    # Auto-reconnect so the bot stays online after connection drops.
    self._reconnect_websocket()
Callback for sending the initial authentication data
This "payload" contains the required data to authenticate this websocket
client as a suitable bot connection to the Discord websocket.
Args:
ws: websocket connection | def _ws_on_open(self, ws: websocket.WebSocketApp):
payload = {
'op': WebSocketEvent.IDENTIFY.value,
'd': {
'token': self.token,
'properties': {
'$os': sys.platform,
'$browser': 'Pycord',
... | 1,091,134 |
Call this method to make the connection to the Discord websocket
This method is not blocking, so you'll probably want to call it after
initializating your Pycord object, and then move on with your code. When
you want to block on just maintaining the websocket connection, then call
``kee... | def connect_to_websocket(self):
self.logger.info('Making websocket connection')
try:
if hasattr(self, '_ws'):
self._ws.close()
except:
self.logger.debug('Couldn\'t terminate previous websocket connection')
self._ws = websocket.WebSocketApp... | 1,091,135 |
Disconnects from the websocket
Args:
None | def disconnect_from_websocket(self):
self.logger.warning('Disconnecting from websocket')
self.logger.info('Stopping keep alive thread')
self._ws_keep_alive.stop()
self._ws_keep_alive.join()
self.logger.info('Stopped keep alive thread')
try:
self.logge... | 1,091,136 |
Updates the bot's status
This is used to get the game that the bot is "playing" or to clear it.
If you want to set a game, pass a name; if you want to clear it, either
call this method without the optional ``name`` parameter or explicitly
pass ``None``.
Args:
name: ... | def set_status(self, name: str = None):
game = None
if name:
game = {
'name': name
}
payload = {
'op': WebSocketEvent.STATUS_UPDATE.value,
'd': {
'game': game,
'status': 'online',
... | 1,091,137 |
Send a message to a channel
For formatting options, see the documentation:
https://discordapp.com/developers/docs/resources/channel#create-message
Args:
id: channel snowflake id
message: your message (string)
Returns:
def send_message(self, id: str, message: str) -> Dict[str, Any]:
    """Send a message to a channel.

    For formatting options, see the documentation:
    https://discordapp.com/developers/docs/resources/channel#create-message

    Args:
        id: channel snowflake id.
        message: your message (string).

    Returns:
        Dictionary object of the newly created message.

    Raises:
        ValueError: if the websocket is not connected.
    """
    if not self.connected:
        raise ValueError('Websocket not connected')
    payload = {'content': message}
    return self._query(f'channels/{id}/messages', 'POST', payload)
Set a cookie.
Args:
key (:obj:`str`): Cookie name
value (:obj:`str`): Cookie value
domain (:obj:`str`): Cookie domain
path (:obj:`str`): Cookie value
secure (:obj:`bool`): True if secure, False otherwise
httponly (:obj:`bool`): True if it'... | def set_cookie(self, key, value, domain=None, path='/', secure=False,
httponly=True):
self._cookies[key] = value
if domain:
self._cookies[key]['domain'] = domain
if path:
self._cookies[key]['path'] = path
if secure:
self._co... | 1,091,181 |
Bakes the response and returns the content.
Args:
start_response (:obj:`callable`): Callback method that accepts
status code and a list of tuples (pairs) containing headers'
key and value respectively. | def bake(self, start_response):
if isinstance(self._content, six.text_type):
self._content = self._content.encode('utf8')
if self._content_length is None:
self._content_length = len(self._content)
self._headers[HttpResponseHeaders.CONTENT_LENGTH] = \
... | 1,091,183 |
Helper method to set a redirect response.
Args:
url (:obj:`str`): URL to redirect to
def set_redirect(self, url, status=HttpStatusCodes.HTTP_303):
    """Helper method to set a redirect response.

    Args:
        url (:obj:`str`): URL to redirect to.
        status (:obj:`str`, optional): Status code of the response
            (defaults to 303 See Other).
    """
    self.set_status(status)
    # Redirects carry no body; the Location header points at the target.
    self.set_content('')
    self.set_header(HttpResponseHeaders.LOCATION, url)
Helper method to set a JSON response.
Args:
obj (:obj:`object`): JSON serializable object
def set_json(self, obj, status=HttpStatusCodes.HTTP_200):
    """Helper method to set a JSON response.

    Args:
        obj (:obj:`object`): JSON serializable object. Values the encoder
            does not know how to encode are stringified via ``default=str``.
        status (:obj:`str`, optional): Status code of the response.
    """
    # Idiom fix: `default=str` replaces the redundant `lambda x: str(x)`.
    body = json.dumps(obj, sort_keys=True, default=str)
    self.set_status(status)
    self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json')
    self.set_content(body)
Decorator routes a Rogo IntentRequest.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@ask.intent('WeatherIntent')
def weather(city):
def intent(self, intent_name):
    """Decorator that routes a Rogo IntentRequest to the decorated view.

    Functions decorated as an intent are registered as the view function
    for the Intent's URL and provide the backend responses that give the
    Skill its functionality, e.g.::

        @ask.intent('WeatherIntent')
        def weather(city):
            return statement('I predict rain')

    Args:
        intent_name: Name of the intent to register the view under.
    """
    def decorator(f):
        self._intent_view_funcs[intent_name] = f
        @wraps(f)
        def wrapper(*args, **kw):
            self._flask_view_func(*args, **kw)
        # NOTE(review): `wrapper` is defined but never used -- the original
        # function is returned unchanged. Presumably dispatch happens via
        # _intent_view_funcs; confirm before removing the dead closure.
        return f
    return decorator
Remove "special" characters from beginning and the end of the `inp`. For
example ``,a-sd,-/`` -> ``a-sd``.
Args:
inp (str): Input string.
hairs (str): List of characters which should be removed. See
:attr:`HAIRS` for details.
Returns:
def remove_hairs(inp, hairs=HAIRS):
    """Trim "special" characters from both ends of *inp*.

    For example ``,a-sd,-/`` -> ``a-sd``.

    Args:
        inp (str): Input string.
        hairs (str): Characters to remove from the ends. See :attr:`HAIRS`
            for details.

    Returns:
        str: Cleaned string.
    """
    end = len(inp)
    while end and inp[end - 1] in hairs:
        end -= 1
    start = 0
    while start < end and inp[start] in hairs:
        start += 1
    return inp[start:end]
Parametrized decorator wrapping the :func:`remove_hairs` function.
Args:
hairs (str, default HAIRS): List of characters which should be removed.
See :attr:`HAIRS` for details. | def remove_hairs_decorator(fn=None, hairs=HAIRS):
def decorator_wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
out = fn(*args, **kwargs)
return remove_hairs(out, hairs)
return decorator
if fn:
return decorator_wrapper(fn)
return decor... | 1,091,371 |
Returns all data entries for a particular key. Default is the main key.
Args:
key (str): key whose values to return (default: main key)
Returns:
def get_all(self, key=None):
    """Return every data entry stored under *key* (default: the main key).

    Args:
        key (str): key whose values to return (default: main key).

    Returns:
        All entries for the key -- a set when the key is registered as
        scalar non-unique, otherwise the raw list.
    """
    if key is None:
        key = self.definition.main_key
    key = self.definition.key_synonyms.get(key, key)
    entries = self._get_all(key)
    if key in self.definition.scalar_nonunique_keys:
        entries = set(entries)
    return entries
Updates a Clip.
Parameters:
- args Dictionary of other fields
Accepted fields can be found here:
def update(self, **args):
    """Update a Clip via the Kippt API.

    Args:
        **args: Dictionary of fields to update. Accepted fields are listed at
            https://github.com/kippt/api-documentation/blob/master/objects/clip.md

    Returns:
        dict: Decoded JSON response.
    """
    payload = json.dumps(args)
    url = "https://kippt.com/api/clips/%s" % (self.id)
    r = requests.put(url, headers=self.kippt.header, data=payload)
    return r.json()
Comment on a clip.
Parameters:
def comment(self, body):
    """Comment on a clip.

    Args:
        body (str): Required comment text.

    Returns:
        dict: Decoded JSON response from the Kippt API.
    """
    data = json.dumps({'body': body})
    # BUG FIX: the original read  "...%s/comments" (self.id)  -- the missing
    # '%' operator made this a *call* on a str object, raising
    # "TypeError: 'str' object is not callable" at runtime.
    r = requests.post(
        "https://kippt.com/api/clips/%s/comments" % (self.id),
        headers=self.kippt.header,
        data=data
    )
    return (r.json())
Find the href destinations of all links at URL
Arguments:
- `url`:
Return: list[str]
Exceptions: None | def find_links(url):
url = protocolise(url)
content = requests.get(url).content
flike = StringIO(content)
root = html.parse(flike).getroot()
atags = root.cssselect('a')
hrefs = [a.attrib['href'] for a in atags]
# !!! This does the wrong thing for bbc.co.uk/index.html
hrefs = [h if h... | 1,091,636 |
Simple nearest interpolator that interpolates based on
the minima and maxima of points based on the passed
resolution in res.
Parameters:
-----------
xs -- A collection of `ndim` arrays of points.
def simple_nearest_indices(xs, res):
    """Nearest interpolator over a regular grid spanning the data extrema.

    Builds a regular grid between the per-axis minima and maxima of the
    points at the resolutions given in *res*, then delegates to
    ``nearest_indices``.

    Parameters:
    -----------
    xs  -- A collection of `ndim` arrays of points.
    res -- List of resolutions, one per dimension.
    """
    axes = [np.linspace(min(a), max(a), r) for a, r in zip(xs, res)]
    grid = tuple(np.meshgrid(*axes, indexing='ij'))
    if type(xs) != tuple:
        xs = tuple(xs)
    return nearest_indices(xs, grid)
Get a vector flds data.
Parameters:
-----------
d -- flds data.
def getvector(d, s):
    """Stack the x/y/z components of field *s* from flds data *d*.

    Parameters:
    -----------
    d -- flds data (mapping of component name to array).
    s -- base key of the vector quantity (components are s+"x", s+"y", s+"z").
    """
    components = [d[s + axis] for axis in ("x", "y", "z")]
    return np.array(components)
Restrict data by indices.
Parameters:
----------
d -- the flds/sclr data
restrict -- a tuple of [xmin,xmax,...] etx | def restrict(d,restrict):
notqs = ['t','xs','ys','zs','fd','sd']
keys = [k for k in d if k not in notqs];
if len(restrict) == 2:
for k in keys:
d[k] = d[k][restrict[0]:restrict[1]]
elif len(restrict) == 4:
for k in keys:
d[k] = d[k][
restrict... | 1,091,777 |
run command and show if success or failed
Args:
cmd: string
Returns:
def runCmd(cls, cmd):
    """Run a shell command and report whether it succeeded.

    Args:
        cmd (str): Command line passed verbatim to ``os.system``.

    Returns:
        None. The exit status is handed to ``cls.checkResult``, which
        presumably reports success/failure -- confirm against its definition.
    """
    # Echo the command first so the user sees exactly what executes.
    cit.echo(cmd, "command")
    result = os.system(cmd)
    cls.checkResult(result)
run command and return the str format stdout
Args:
cmd: string
Returns:
def readCmd(cls, cmd):
    """Run a command and return its captured stdout as text.

    Args:
        cmd (str): Command line; tokenized with ``shlex.split``.

    Returns:
        str: The command's decoded standard output.
    """
    argv = shlex.split(cmd)
    completed = subprocess.run(argv, stdout=subprocess.PIPE)
    return completed.stdout.decode()
Check and update file compares with remote_url
Args:
file_: str. Local filename. Normally it's __file__
url: str. Remote url of raw file content. Normally it's https://raw.github.com/...
Returns:
bool: file updated or not | def updateFile(cls, file_, url):
def compare(s1, s2):
return s1 == s2, len(s2) - len(s1)
if not url or not file_:
return False
try:
req = urllib.request.urlopen(url)
raw_codes = req.read()
with open(file_, 'rb') as f:
... | 1,091,916 |
Get info by ajax
Args:
url: string
Returns:
dict: json decoded into a dict | def ajax(cls, url, param={}, method='get'):
param = urllib.parse.urlencode(param)
if method.lower() == 'get':
req = urllib.request.Request(url + '?' + param)
elif method.lower() == 'post':
param = param.encode('utf-8')
req = urllib.request.Request(url... | 1,091,917 |
Get the Windows OS version running on the machine.
Params:
None
Returns:
The Windows OS version running on the machine (comparables with the values list in the class). | def get_version():
# Other OS check
if not 'win' in sys.platform:
return NO_WIN
# Get infos
win_ver = sys.getwindowsversion()
try:
# Python 3.6.x or upper -> Use 'platform_version' attribute
major, minor, build = win_ver.platform_version
except AttributeErr... | 1,092,166 |
Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary. | def _parse_doc(doc):
lines = doc.split("\n")
descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
if len(descriptions) < 3:
description = lines[0]
else:
description = "{0}\n\n{1}".format(
lines[0], textwrap.dedent("\n".join(descriptions[2:])))
args ... | 1,092,219 |
Utility function to look up XDG basedir locations
Args:
__pkg: Package name
__type: Location type | def __user_location(__pkg: str, type_) -> str:
if ALLOW_DARWIN and sys.platform == 'darwin':
user_dir = '~/Library/{}'.format(__LOCATIONS[type_][0])
else:
user_dir = getenv('XDG_{}_HOME'.format(type_.upper()),
path.sep.join([getenv('HOME', ''),
... | 1,092,234 |
Return all configs for given package.
Args:
__pkg: Package name
__name: Configuration file name | def get_configs(__pkg: str, __name: str = 'config') -> List[str]:
dirs = [user_config(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_CONFIG_DIRS', '/etc/xdg').split(':'))
configs = []
for dname in reversed(dirs):
test_path = path.join(d... | 1,092,236 |
Return top-most data file for given package.
Args:
__pkg: Package name
def get_data(__pkg: str, __name: str) -> str:
    """Return the top-most existing data file *__name* for *__pkg*.

    Args:
        __pkg: Package name.
        __name: Data file name.

    Raises:
        FileNotFoundError: when no data directory contains the file.
    """
    for directory in get_data_dirs(__pkg):
        candidate = path.join(directory, __name)
        if path.exists(candidate):
            return candidate
    raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
Return all data directories for given package.
Args:
def get_data_dirs(__pkg: str) -> List[str]:
    """Return all existing data directories for the given package.

    Combines the user data dir with each entry of ``$XDG_DATA_DIRS``
    (default ``/usr/local/share/:/usr/share/``), keeping only directories
    that exist on disk.

    Args:
        __pkg: Package name.
    """
    candidates = [user_data(__pkg)]
    xdg = getenv('XDG_DATA_DIRS', '/usr/local/share/:/usr/share/')
    candidates.extend(path.expanduser(path.sep.join([base, __pkg]))
                      for base in xdg.split(':'))
    return [d for d in candidates if path.isdir(d)]
Get a single model from the server.
Args:
model (string): The class as a string.
model_id (string): The integer ID as a string.
Returns:
def get_model(self, model, model_id):
    """Get a single model from the server.

    Args:
        model (string): The class as a string.
        model_id (string): The integer ID as a string.

    Returns:
        :class:`cinder_data.model.CinderModel`: An instance of the model.
    """
    # Resolve the class name, coerce the id to int, and query the store.
    return self._store.find_record(self._get_model_class(model), int(model_id))
Get all the models from the server.
Args:
model (string): The class as a string.
page (string, optional): The page number as a string
Returns:
def get_models(self, model, page=None):
    """Get all the models from the server.

    Args:
        model (string): The class as a string.
        page (string, optional): The page number as a string.

    Returns:
        list: A list of instances of the requested model.
    """
    model_class = self._get_model_class(model)
    if page is None:
        return self._store.find_all(model_class)
    return self._store.find_all(model_class, params={'page': int(page)})
Checks the cell type to see if it represents the cell_type passed in.
Args:
def check_cell_type(cell, cell_type):
    """Check whether *cell* matches the given *cell_type*.

    Args:
        cell: The cell value to test.
        cell_type: The type for a cell match, or None/NoneType for an
            "empty" match (None itself or an empty string).

    Returns:
        bool: True when the cell matches.
    """
    # Idiom fix: identity comparisons (`is`) instead of `== None`.
    if cell_type is None or cell_type is type(None):
        # NOTE: basestring is Python 2; under Python 3 this branch fails for
        # non-None cells -- consistent with the rest of this module.
        return cell is None or (isinstance(cell, basestring) and not cell)
    return isinstance(cell, cell_type)
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
This version of conversion doesn't flag changes nor store
cell units.
Args:
units: The dictionary holder for cell units.
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
    """First-pass cell conversion without flagging changes or storing units.

    Checks the cell's type, or converts it when a valid conversion exists,
    by delegating to ``auto_convert_cell`` with a throwaway ``Flagable``.

    Args:
        cell: The cell value to convert.
        units: The dictionary holder for cell units (fresh dict when None).
        parens_as_neg: Converts numerics surrounded by parens to negatives.
    """
    # Idiom fix: compare against None with `is not`, not `!=`.
    units = units if units is not None else {}
    return auto_convert_cell(flagable=Flagable(), cell=cell, position=None,
                             worksheet=0, flags={}, units=units,
                             parens_as_neg=parens_as_neg)
Performs a first step conversion of the cell to check
it's type or try to convert if a valid conversion exists.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values | def auto_convert_cell(flagable, cell, position, worksheet, flags, units, parens_as_neg=True):
conversion = cell
# Is an numeric?
if isinstance(cell, (int, float)):
pass
# Is a string?
elif isinstance(cell, basestring):
# Blank cell?
if not cell:
conversion =... | 1,092,735 |
Handles the string case of cell and attempts auto-conversion
for auto_convert_cell.
Args:
parens_as_neg: Converts numerics surrounded by parens to negative values | def auto_convert_string_cell(flagable, cell_str, position, worksheet, flags,
units, parens_as_neg=True):
conversion = cell_str.strip()
# Wrapped?
if re.search(allregex.control_wrapping_regex, cell_str):
# Drop the wrapping characters
stripped_cell = cell_st... | 1,092,736 |
Compute checksum for each file in `directory`, with exception of files
specified in `blacklist`.
Args:
directory (str): Absolute or relative path to the directory.
blacklist (list/set/tuple): List of blacklisted filenames. Only
filenames are checked, not paths!
Returns:
def generate_hashfile(directory, blacklist=_BLACKLIST):
    """Compute a checksum listing for every file in *directory*.

    Files named in *blacklist* are skipped (only filenames are checked,
    not paths).

    Args:
        directory (str): Absolute or relative path to the directory.
        blacklist (list/set/tuple): Blacklisted filenames.

    Returns:
        str: One ``"<checksum> <filename>\\n"`` line per file, sorted by name.
    """
    checksums = generate_checksums(directory, blacklist)
    # Idiom fix: build with join instead of quadratic string concatenation.
    return "".join(
        "%s %s\n" % (checksum, fn)
        for fn, checksum in sorted(checksums.items())
    )
Sort based on position. Sort with s as a tuple of the sort
indices and shape from first sort.
Parameters:
-----------
d -- the flds/sclr data
def flds_sort(d, s):
    """Reorder and reshape every field array in *d* by position.

    Parameters:
    -----------
    d -- the flds/sclr data.
    s -- (si, shape): sorting indices and target shape from the first sort.
    """
    si, shape = s
    skip = ('t', 'xs', 'ys', 'zs', 'fd', 'sd')
    for label in d:
        if label in skip:
            continue
        d[label] = np.squeeze(d[label][si].reshape(shape))
    return d
print start/title/end info before and after the function call
Args:
title: title will show after the start, if has any | def as_session(name_or_func): # decorator
if callable(name_or_func): # no name provided
func = name_or_func
name = func.__name__
name = "".join([(' ' + x) if x.isupper() else x for x in name])
name = name.replace('_', ' ')
return as_session(name)(func) # deco(func) ->... | 1,093,192 |
获取函数的签名.
system.methodSignature('add') => [double, int, int]
Parameters:
method_name (str): - 要查看的函数名
Returns:
(str): - 签名文本 | def system_methodSignature(self, method_name: str)->str:
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
try:
method = resolve_dotted_attribute(
self.instance,
... | 1,093,260 |
注册一个实例用于执行,注意只能注册一个.
Parameters:
instance (Any): - 将一个类的实例注册到rpc
def register_instance(self, instance: Any, allow_dotted_names: bool = False):
    """Register the single instance whose methods are exposed for execution.

    Parameters:
        instance: the object to register with the RPC server.
        allow_dotted_names: whether names containing ``.`` may be resolved.

    Raises:
        RuntimeError: when an instance has already been registered.
    """
    already_registered = bool(self.instance)
    if already_registered:
        raise RuntimeError("can only register one instance")
    self.instance = instance
    self.allow_dotted_names = allow_dotted_names
    return True
注册函数.
Parameters:
def register_function(self, name: Optional[str] = None):
    """Create a decorator that registers a function under *name*.

    Parameters:
        name: name to register the function under; when None, the
            function's own ``__name__`` is used.

    Returns:
        The decorator; it returns the wrapped function unchanged.
    """
    def wrap(function: Callable) -> Any:
        # BUG FIX: the original rebound the enclosing `name` via `nonlocal`,
        # so a decorator reused on several functions registered every later
        # function under the first function's name. Use a per-call local.
        key = function.__name__ if name is None else name
        self.funcs[key] = function
        return function
    return wrap
设置计算密集型任务的执行器.
Parameters:
def set_executor(self, executor: futures.Executor):
    """Install *executor* as the event loop's default executor.

    Used for running CPU-bound registered functions off the loop thread.

    Parameters:
        executor (futures.Executor): executor used for function calls.
    """
    self.loop.set_default_executor(executor)
    self._func_executor = executor
    return True
执行注册的函数或者实例的方法.
如果函数或者方法是协程则执行协程,如果是函数则使用执行器执行,默认使用的是多进程.
Parameters:
ID (str): 任务的ID
method (str): 任务调用的函数名
args (Any): 位置参数
kwargs (Any): 关键字参数
Raise:
(RPCRuntimeError): - 当执行调用后抛出了异常,那就算做RPC运行时异常
Return:
(Any)... | async def apply(self, ID: str, method: str, *args: Any, **kwargs: Any):
func = None
try:
# check to see if a matching function has been registered
func = self.funcs[method]
except KeyError:
if self.instance is not None:
# check for a _... | 1,093,265 |
This method should be called every time through the main loop.
It handles showing the up, over, and down states of the button.
Parameters:
| eventObj - the event object obtained by calling pygame.event.get()
Returns:
| False most of the time
| True... | def handleEvent(self, eventObj):
if self.enterToActivate:
if eventObj.type == pygame.KEYDOWN:
# Return or Enter key
if eventObj.key == pygame.K_RETURN:
return True
if (eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, ... | 1,093,323 |
This method should be called every time through the main loop.
It handles showing the up, over, and down states of the button.
Parameters:
| eventObj - the event object obtained by calling pygame.event.get()
Returns:
| False most of the time
| True... | def handleEvent(self, eventObj):
if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) or not self.visible:
# The checkBox only cares bout mouse-related events (or no events, if it is invisible)
return False
if not self.isEnabled:
re... | 1,093,329 |
This method should be called every time through the main loop.
It handles showing the up, over, and down states of the button.
Parameters:
| eventObj - the event object obtained by calling pygame.event.get()
Returns:
| False most of the time
| True... | def handleEvent(self, eventObj):
if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) or not self.visible:
# The radioButton only cares bout mouse-related events (or no events, if it is invisible)
return False
if not self.isEnabled:
... | 1,093,334 |
This method should be called every time through the main loop.
It handles all of the keyboard key actions
Parameters:
| eventObj - the event object obtained by calling pygame.event.get()
Returns:
| False most of the time
| True when the user clicks... | def handleEvent(self, event):
if not self.isEnabled:
return False
if (event.type == pygame.MOUSEBUTTONDOWN) and (event.button == 1): # user clicked
theX, theY = event.pos
# if self.imageRect.collidepoint(pos):
if self.imageRect.collidep... | 1,093,344 |
This method should be called every time through the main loop.
It handles all of the dragging
Parameters:
| eventObj - the event object obtained by calling pygame.event.get()
Returns:
| False most of the time
| True when the user finishes dragging ... | def handleEvent(self, eventObj):
if not self.isEnabled:
return False
if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN) :
# The dragger only cares about mouse-related events
return False
clicked = False
if eventO... | 1,093,349 |
rotates the image a given number of degrees
Parameters:
| nDegrees - the number of degrees you want the image rotated (images start at zero degrees).
def rotate(self, nDegrees):
    """Rotate the image by a given number of degrees (relative).

    Parameters:
        | nDegrees - degrees to rotate by (images start at zero degrees).
        |     Positive numbers are clockwise, negative counter-clockwise.
    """
    self.angle += nDegrees
    self._transmogrophy(self.angle, self.percent, self.scaleFromCenter,
                        self.flipH, self.flipV)
rotates the image to a given angle
Parameters:
| angle - the angle that you want the image rotated to.
def rotateTo(self, angle):
    """Rotate the image to an absolute angle.

    Parameters:
        | angle - the angle that you want the image rotated to.
        |     Positive numbers are clockwise, negative counter-clockwise.
    """
    # NOTE(review): unlike rotate(), self.angle is not updated here --
    # possibly _transmogrophy records it; confirm before relying on
    # self.angle after a rotateTo() call.
    self._transmogrophy(angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV)
scales an Image object
Parameters:
| percent - a percent of the original size
| numbers bigger than 100 scale up
| numbers less than 100 scale down
| 100 scales to the original size
Optional keyword parameters:
def scale(self, percent, scaleFromCenter=True):
    """Scale the image to a percentage of its original size.

    Parameters:
        | percent - a percent of the original size; >100 scales up,
        |     <100 scales down, 100 restores the original size.
        | scaleFromCenter - whether scaling keeps the center fixed.
    """
    # Angle and flips are preserved; only the size parameters change.
    self._transmogrophy(self.angle, percent, scaleFromCenter, self.flipH, self.flipV)
Selects a different image to be shown.
Parameters:
| key - a key in the original dictionary to specify which image to show | def replace(self, key):
if not (key in self.imagesDict):
print('The key', key, 'was not found in the collection of images dictionary')
raise KeyError
self.originalImage = self.imagesDict[key]
self.image = self.originalImage.copy()
# Set the rect ... | 1,093,361 |
Return a human-readable dictionary from the inibin.
Arguments:
key_mapping -- Dictionary used for conversion. Supports nesting. Every other
value should be a numeric inibin key, or a tuple of the key and a
function to apply to the result.
inibin -- The dictionary returned from reading an in... | def _fix_keys(key_mapping, inibin, string_table=None):
if string_table is None:
string_table = {}
def walk(node, out_node):
# Walk the nodes of the key mapping
for key, value in node.items():
if isinstance(value, dict):
if key not in out_node:
... | 1,093,523 |
Shifts indicies as needed to account for one based indexing
Positive indicies need to be reduced by one to match with zero based
indexing.
Zero is not a valid input, and as such will throw a value error.
Arguments:
index - index to shift | def _setup_index(index):
index = int(index)
if index > 0:
index -= 1
elif index == 0:
# Zero indicies should not be allowed by default.
raise ValueError
return index | 1,093,973 |
Processes positions to account for ranges
Arguments:
positions - list of positions and/or ranges to process | def _setup_positions(self, positions):
updated_positions = []
for i, position in enumerate(positions):
ranger = re.search(r'(?P<start>-?\d*):(?P<end>\d*)', position)
if ranger:
if i > 0:
updated_positions.append(self.separator)
... | 1,093,976 |
Performs cut for range from start position to end
Arguments:
line - input to cut
start - start of range
current_position - current position in main cut function | def _cut_range(self, line, start, current_position):
result = []
try:
for j in range(start, len(line)):
index = _setup_index(j)
try:
result.append(line[index])
except IndexError:
result.append(se... | 1,093,977 |
Creates list of values in a range with output delimiters.
Arguments:
start - range start
end - range end | def _extendrange(self, start, end):
range_positions = []
for i in range(start, end):
if i != 0:
range_positions.append(str(i))
if i < end:
range_positions.append(self.separator)
return range_positions | 1,093,978 |
Format a relative time.
Args:
__timestamp: Event to generate relative timestamp against
Returns:
Human readable date and time offset | def human_timestamp(__timestamp: datetime.datetime) -> str:
numstr = '. a two three four five six seven eight nine ten'.split()
matches = [
60 * 60 * 24 * 365,
60 * 60 * 24 * 28,
60 * 60 * 24 * 7,
60 * 60 * 24,
60 * 60,
60,
1,
]
match_names =... | 1,094,154 |
Parse human readable frequency.
Args:
__delta: Frequency to parse | def parse_timedelta(__delta: str) -> datetime.timedelta:
match = re.fullmatch(r, __delta, re.IGNORECASE | re.VERBOSE)
if not match:
raise ValueError('Invalid ‘frequency’ value')
value, units = match.groups()
units_i = 'hdwmy'.index(units.lower())
# hours per hour/day/week/month/year
... | 1,094,155 |
def get_property(self, prop):
    """Return the value at a dot-separated key path.

    Args:
        prop (:obj:`str`): Property in the form of dot separated keys

    Returns:
        The value at the path if every component exists, else `None`.
    """
    node = self
    for key in prop.split('.'):
        if key not in node:
            return None
        node = node[key]
    return node
Creates dict2 object from dict object
Args:
val (:obj:`dict`): Value to create from
Returns:
Equivalent dict2 object. | def from_dict(cls, val):
if isinstance(val, dict2):
return val
elif isinstance(val, dict):
res = cls()
for k, v in val.items():
res[k] = cls.from_dict(v)
return res
elif isinstance(val, list):
res = []
... | 1,094,726 |
Creates dict object from dict2 object
Args:
val (:obj:`dict2`): Value to create from
Returns:
Equivalent dict object. | def to_dict(self, val=UNSET):
if val is UNSET:
val = self
if isinstance(val, dict2) or isinstance(val, dict):
res = dict()
for k, v in val.items():
res[k] = self.to_dict(v)
return res
elif isinstance(val, list):
... | 1,094,727 |
def search(self, q, **kwargs):
    """Run a Summon Search API query.

    Any Summon Search API parameter may be passed as a keyword argument
    (without the "s." prefix), e.g. to remove highlighting:

        result = api.search("Web", hl=False)

    See the Summon API documentation for the full list of parameters:
    http://api.summon.serialssolutions.com/help/api/search/parameters
    """
    params = {"s.q": q}
    for name, value in kwargs.items():
        params["s.{}".format(name)] = value
    return self._get("/2.0.0/search", params)
def get_fp_meta(fp):
    """Extract metadata from a CMIP3-style file path.

    Standard CMIP3 directory structure:
        <experiment>/<variable_name>/<model>/<ensemble_member>/<CMOR filename>.nc

    Directory-derived metadata is computed first, then overridden by
    metadata parsed from the filename (filename values are preferred).

    Arguments:
        fp (str): A file path conforming to CMIP3 structure.

    Returns:
        dict: Metadata extracted from the path.
    """
    # Pass a fresh copy of DIR_ATTS so the module-level list is not mutated.
    meta = get_dir_meta(fp, list(DIR_ATTS))
    meta.update(get_fname_meta(fp))
    return meta
Processes a CMIP3 style file name.
Filename is of pattern:
<model>-<experiment>-<variable_name>-<ensemble_member>.nc
Arguments:
fp (str): A file path/name conforming to DRS spec.
Returns:
dict: Metadata as extracted from the filename.
.. _Data Reference Syntax:
http:/... | def get_fname_meta(fp):
# Strip directory, extension, then split
if '/' in fp:
fp = os.path.split(fp)[1]
fname = os.path.splitext(fp)[0]
meta = fname.split('-')
res = {}
try:
for key in FNAME_ATTS:
res[key] = meta.pop(0)
except IndexError:
raise Pa... | 1,094,850 |
def _substitute_globals(config_dict):
    """Overwrite module-level constants with values from `config_dict`.

    Args:
        config_dict (dict): mapping used to update `globals`.

    Note:
        Non-dict inputs are silently ignored. Only keys already present
        among the module constants, whose values have a type listed in
        `_ALLOWED`, are applied.
    """
    constants = _get_all_constants()
    # Strict type check on purpose: dict subclasses are rejected too.
    if type(config_dict) != dict:
        return
    for name, value in config_dict.iteritems():  # Python 2 API
        if name in constants and type(value) in _ALLOWED:
            globals()[name] = value
Convert an integer value to a character. a-z then double aa-zz etc
Args:
value (int): integer index we're looking up
capital (bool): whether we convert to capitals or not
Returns (str): alphanumeric representation of the index | def _get_variation_id(value, capital=False):
# Reinforcing type just in case a valid string was entered
value = int(value)
base_power = base_start = base_end = 0
while value >= base_end:
base_power += 1
base_start = base_end
base_end += pow(26... | 1,095,602 |
def import_file(package: str, fname: str) -> ModuleType:
    """Import a file directly, bypassing its package's ``__init__.py``.

    This is a hack to import files from packages without importing
    <package>/__init__.py; its purpose is to allow import without requiring
    all the package's dependencies at this point.

    Args:
        package: Package (directory) to import from
        fname: File to import

    Returns:
        Imported module
    """
    # Bug fix: fname.rstrip('.py') strips any trailing '.', 'p', 'y'
    # characters ("apply.py" -> "appl", "happy.py" -> "h"); remove only
    # the extension instead.
    mod_name = fname[:-3] if fname.endswith('.py') else fname
    spec = spec_from_file_location(mod_name, '{}/{}'.format(package, fname))
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
Prints the anagram results sorted by score to stdout.
Args:
input_word: the base word we searched on
anagrams: generator of (word, score) from anagrams_in_word
by_length: a boolean to declare printing by length instead of score | def pretty_print(input_word, anagrams, by_length=False):
scores = {}
if by_length:
noun = "tiles"
for word, score in anagrams:
try:
scores[len(word)].append("{0} ({1:d})".format(word, score))
except KeyError:
scores[len(word)] = ["{0}... | 1,095,946 |
Find closest tag for a git repository.
Note:
This defaults to `Semantic Version`_ tag matching.
Args:
__matcher: Glob-style tag pattern to match
strict: Allow commit-ish, if no tag found
git_dir: Repository to search
Returns:
Matching tag name
.. _Semantic Vers... | def find_tag(__matcher: str = 'v[0-9]*', *, strict: bool = True,
git_dir: str = '.') -> str:
command = 'git describe --abbrev=12 --dirty'.split()
with chdir(git_dir):
try:
stdout = check_output(command + ['--match={}'.format(__matcher), ])
except CalledProcessError:... | 1,095,949 |
def add_bias(X):
    """Prepend a bias column of ones to the data matrix X.

    Parameters:
        X (numpy.ndarray): 2-D input data matrix; every row is one example,
        every column one feature.

    Returns:
        numpy.ndarray: X with an extra leading column of ones, same dtype.
    """
    ones_column = numpy.ones((X.shape[0], 1), dtype=X.dtype)
    return numpy.concatenate((ones_column, X), axis=1)
Unpack .zip archive in `file_obj` to given `path`. Make sure, that it
fits into limits (see :attr:`._max_zipfiles` for details).
Args:
file_obj (file): Opened file-like object.
path (str): Path into which the .zip will be unpacked.
Raises:
ValueError: If the... | def _unpack_zip(self, file_obj, path):
old_cwd = os.getcwd()
os.chdir(path)
zip_obj = zipfile.ZipFile(file_obj)
for cnt, zip_info in enumerate(zip_obj.infolist()):
zip_obj.extract(zip_info)
if cnt + 1 > self.max_zipfiles:
os.chdir(old_cw... | 1,096,381 |
Add archive to the storage and unpack it.
Args:
zip_file_obj (file): Opened file-like object.
Returns:
obj: Path where the `zip_file_obj` was unpacked wrapped in \
:class:`.PathAndHash` structure.
Raises:
ValueError: If there is too many fi... | def add_archive_as_dir(self, zip_file_obj):
BalancedDiscStorage._check_interface(zip_file_obj)
file_hash = self._get_hash(zip_file_obj)
dir_path = self._create_dir_path(file_hash)
full_path = os.path.join(dir_path, file_hash)
if os.path.exists(full_path):
s... | 1,096,382 |
Wraps a WSGI app and handles uncaught exceptions and defined exception and outputs a the exception in a
structured format.
Parameters:
- wsgi_app is the app.wsgi_app of flask,
- app_name should in correct format e.g. APP_NAME_1,
- app_logger is the logger object | def register_app_for_error_handling(wsgi_app, app_name, app_logger, custom_logging_service=None):
logging_service = LoggingService(app_logger) if custom_logging_service is None else custom_logging_service
exception_manager = ExceptionHandler(app_name, logging_service)
def wrapper(environ, start_respo... | 1,096,534 |
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):
    """Return the package's root directory path.

    Args:
        book_id (str, default None): UUID of the book; a fresh random UUID
            is generated when not given.
        prefix (str, default settings.TEMP_DIR): Where the package will be
            stored.

    Returns:
        str: Path to the root directory of the package.
    """
    folder = book_id if book_id is not None else str(uuid.uuid4())
    return os.path.join(prefix, folder)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.