code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def scenario_list(self, default=True):
"""
Metadata regarding the list of scenarios (e.g., models, scenarios,
run identifier, etc.) in the connected data source.
Parameter
---------
default : bool, optional, default True
Return *only* the default version of each Scenario.
Any (`model`, `scenario`) without a default version is omitted.
If :obj:`False`, return all versions.
"""
default = 'true' if default else 'false'
add_url = 'runs?getOnlyDefaultRuns={}'
url = self.base_url + add_url.format(default)
headers = {'Authorization': 'Bearer {}'.format(self.auth())}
r = requests.get(url, headers=headers)
return pd.read_json(r.content, orient='records') | Metadata regarding the list of scenarios (e.g., models, scenarios,
run identifier, etc.) in the connected data source.
Parameter
---------
default : bool, optional, default True
Return *only* the default version of each Scenario.
Any (`model`, `scenario`) without a default version is omitted.
If :obj:`False`, return all versions. | Below is the the instruction that describes the task:
### Input:
Metadata regarding the list of scenarios (e.g., models, scenarios,
run identifier, etc.) in the connected data source.
Parameter
---------
default : bool, optional, default True
Return *only* the default version of each Scenario.
Any (`model`, `scenario`) without a default version is omitted.
If :obj:`False`, return all versions.
### Response:
def scenario_list(self, default=True):
"""
Metadata regarding the list of scenarios (e.g., models, scenarios,
run identifier, etc.) in the connected data source.
Parameter
---------
default : bool, optional, default True
Return *only* the default version of each Scenario.
Any (`model`, `scenario`) without a default version is omitted.
If :obj:`False`, return all versions.
"""
default = 'true' if default else 'false'
add_url = 'runs?getOnlyDefaultRuns={}'
url = self.base_url + add_url.format(default)
headers = {'Authorization': 'Bearer {}'.format(self.auth())}
r = requests.get(url, headers=headers)
return pd.read_json(r.content, orient='records') |
def im_files(self, room_id=None, user_name=None, **kwargs):
"""Retrieves the files from a direct message."""
if room_id:
return self.__call_api_get('im.files', roomId=room_id, kwargs=kwargs)
elif user_name:
return self.__call_api_get('im.files', username=user_name, kwargs=kwargs)
else:
raise RocketMissingParamException('roomId or username required') | Retrieves the files from a direct message. | Below is the the instruction that describes the task:
### Input:
Retrieves the files from a direct message.
### Response:
def im_files(self, room_id=None, user_name=None, **kwargs):
"""Retrieves the files from a direct message."""
if room_id:
return self.__call_api_get('im.files', roomId=room_id, kwargs=kwargs)
elif user_name:
return self.__call_api_get('im.files', username=user_name, kwargs=kwargs)
else:
raise RocketMissingParamException('roomId or username required') |
def warn (logname, msg, *args, **kwargs):
"""Log a warning.
return: None
"""
log = logging.getLogger(logname)
if log.isEnabledFor(logging.WARN):
_log(log.warn, msg, args, **kwargs) | Log a warning.
return: None | Below is the the instruction that describes the task:
### Input:
Log a warning.
return: None
### Response:
def warn (logname, msg, *args, **kwargs):
"""Log a warning.
return: None
"""
log = logging.getLogger(logname)
if log.isEnabledFor(logging.WARN):
_log(log.warn, msg, args, **kwargs) |
def _delete_branch_node(self, node, trie_key):
"""
Delete a key from inside or underneath a branch node
"""
if not trie_key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
node_to_delete = self.get_node(node[trie_key[0]])
sub_node = self._delete(node_to_delete, trie_key[1:])
encoded_sub_node = self._persist_node(sub_node)
if encoded_sub_node == node[trie_key[0]]:
return node
node[trie_key[0]] = encoded_sub_node
if encoded_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node | Delete a key from inside or underneath a branch node | Below is the the instruction that describes the task:
### Input:
Delete a key from inside or underneath a branch node
### Response:
def _delete_branch_node(self, node, trie_key):
"""
Delete a key from inside or underneath a branch node
"""
if not trie_key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
node_to_delete = self.get_node(node[trie_key[0]])
sub_node = self._delete(node_to_delete, trie_key[1:])
encoded_sub_node = self._persist_node(sub_node)
if encoded_sub_node == node[trie_key[0]]:
return node
node[trie_key[0]] = encoded_sub_node
if encoded_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node |
def stopFactory(self):
"""Stop the factory.
See the documentation of
`twisted.internet.protocol.ReconnectingClientFactory` for details.
"""
if self.client:
yield self.client.stopProducing()
protocol.ReconnectingClientFactory.stopFactory(self) | Stop the factory.
See the documentation of
`twisted.internet.protocol.ReconnectingClientFactory` for details. | Below is the the instruction that describes the task:
### Input:
Stop the factory.
See the documentation of
`twisted.internet.protocol.ReconnectingClientFactory` for details.
### Response:
def stopFactory(self):
"""Stop the factory.
See the documentation of
`twisted.internet.protocol.ReconnectingClientFactory` for details.
"""
if self.client:
yield self.client.stopProducing()
protocol.ReconnectingClientFactory.stopFactory(self) |
def force_list(value, min=None, max=None):
"""
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
A minumum of greater than one will fail if the user only supplies a
string.
>>> vtor = Validator()
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
"""
if not isinstance(value, (list, tuple)):
value = [value]
return is_list(value, min, max) | Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
A minumum of greater than one will fail if the user only supplies a
string.
>>> vtor = Validator()
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello'] | Below is the the instruction that describes the task:
### Input:
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
A minumum of greater than one will fail if the user only supplies a
string.
>>> vtor = Validator()
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
### Response:
def force_list(value, min=None, max=None):
"""
Check that a value is a list, coercing strings into
a list with one member. Useful where users forget the
trailing comma that turns a single value into a list.
You can optionally specify the minimum and maximum number of members.
A minumum of greater than one will fail if the user only supplies a
string.
>>> vtor = Validator()
>>> vtor.check('force_list', ())
[]
>>> vtor.check('force_list', [])
[]
>>> vtor.check('force_list', 'hello')
['hello']
"""
if not isinstance(value, (list, tuple)):
value = [value]
return is_list(value, min, max) |
def create_packet(header, data):
"""Creates an IncomingPacket object from header and data
This method is for testing purposes
"""
packet = IncomingPacket()
packet.header = header
packet.data = data
if len(header) == HeronProtocol.HEADER_SIZE:
packet.is_header_read = True
if len(data) == packet.get_datasize():
packet.is_complete = True
return packet | Creates an IncomingPacket object from header and data
This method is for testing purposes | Below is the the instruction that describes the task:
### Input:
Creates an IncomingPacket object from header and data
This method is for testing purposes
### Response:
def create_packet(header, data):
"""Creates an IncomingPacket object from header and data
This method is for testing purposes
"""
packet = IncomingPacket()
packet.header = header
packet.data = data
if len(header) == HeronProtocol.HEADER_SIZE:
packet.is_header_read = True
if len(data) == packet.get_datasize():
packet.is_complete = True
return packet |
def get_X(self):
'''
:return: term freq matrix or metadata freq matrix
'''
if self._use_non_text_features:
return self._term_doc_matrix._mX
else:
return self._term_doc_matrix._X | :return: term freq matrix or metadata freq matrix | Below is the the instruction that describes the task:
### Input:
:return: term freq matrix or metadata freq matrix
### Response:
def get_X(self):
'''
:return: term freq matrix or metadata freq matrix
'''
if self._use_non_text_features:
return self._term_doc_matrix._mX
else:
return self._term_doc_matrix._X |
def update_dependency_kinds(apps, schema_editor):
"""Update historical dependency kinds as they may be wrong."""
DataDependency = apps.get_model('flow', 'DataDependency')
for dependency in DataDependency.objects.all():
# Assume dependency is of subprocess kind.
dependency.kind = 'subprocess'
# Check child inputs to determine if this is an IO dependency.
child = dependency.child
parent = dependency.parent
for field_schema, fields in iterate_fields(child.input, child.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('data:'):
if value == parent.pk:
dependency.kind = 'io'
break
elif field_schema.get('type', '').startswith('list:data:'):
for data in value:
if value == parent.pk:
dependency.kind = 'io'
break
dependency.save() | Update historical dependency kinds as they may be wrong. | Below is the the instruction that describes the task:
### Input:
Update historical dependency kinds as they may be wrong.
### Response:
def update_dependency_kinds(apps, schema_editor):
"""Update historical dependency kinds as they may be wrong."""
DataDependency = apps.get_model('flow', 'DataDependency')
for dependency in DataDependency.objects.all():
# Assume dependency is of subprocess kind.
dependency.kind = 'subprocess'
# Check child inputs to determine if this is an IO dependency.
child = dependency.child
parent = dependency.parent
for field_schema, fields in iterate_fields(child.input, child.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('data:'):
if value == parent.pk:
dependency.kind = 'io'
break
elif field_schema.get('type', '').startswith('list:data:'):
for data in value:
if value == parent.pk:
dependency.kind = 'io'
break
dependency.save() |
def add_song(fpath, g_songs, g_artists, g_albums):
"""
parse music file metadata with Easymp3 and return a song
model.
"""
try:
if fpath.endswith('mp3') or fpath.endswith('ogg') or fpath.endswith('wma'):
metadata = EasyMP3(fpath)
elif fpath.endswith('m4a'):
metadata = EasyMP4(fpath)
except MutagenError as e:
logger.exception('Mutagen parse metadata failed, ignore.')
return None
metadata_dict = dict(metadata)
for key in metadata.keys():
metadata_dict[key] = metadata_dict[key][0]
if 'title' not in metadata_dict:
title = fpath.rsplit('/')[-1].split('.')[0]
metadata_dict['title'] = title
metadata_dict.update(dict(
url=fpath,
duration=metadata.info.length * 1000 # milesecond
))
schema = EasyMP3MetadataSongSchema(strict=True)
try:
data, _ = schema.load(metadata_dict)
except ValidationError:
logger.exeception('解析音乐文件({}) 元数据失败'.format(fpath))
return
# NOTE: use {title}-{artists_name}-{album_name} as song identifier
title = data['title']
album_name = data['album_name']
artist_name_list = [
name.strip()
for name in re.split(r'[,&]', data['artists_name'])]
artists_name = ','.join(artist_name_list)
duration = data['duration']
album_artist_name = data['album_artist_name']
# 生成 song model
# 用来生成 id 的字符串应该尽量减少无用信息,这样或许能减少 id 冲突概率
song_id_str = ''.join([title, artists_name, album_name, str(int(duration))])
song_id = gen_id(song_id_str)
if song_id not in g_songs:
# 剩下 album, lyric 三个字段没有初始化
song = LSongModel(identifier=song_id,
artists=[],
title=title,
url=fpath,
duration=duration,
comments=[],
# 下面这些字段不向外暴露
genre=data['genre'],
cover=data['cover'],
date=data['date'],
desc=data['desc'],
disc=data['disc'],
track=data['track'])
g_songs[song_id] = song
else:
song = g_songs[song_id]
logger.debug('Duplicate song: %s %s', song.url, fpath)
return
# 生成 album artist model
album_artist_id = gen_id(album_artist_name)
if album_artist_id not in g_artists:
album_artist = create_artist(album_artist_id, album_artist_name)
g_artists[album_artist_id] = album_artist
else:
album_artist = g_artists[album_artist_id]
# 生成 album model
album_id_str = album_name + album_artist_name
album_id = gen_id(album_id_str)
if album_id not in g_albums:
album = create_album(album_id, album_name)
g_albums[album_id] = album
else:
album = g_albums[album_id]
# 处理专辑的歌手信息和歌曲信息,专辑歌手的专辑列表信息
if album not in album_artist.albums:
album_artist.albums.append(album)
if album_artist not in album.artists:
album.artists.append(album_artist)
if song not in album.songs:
album.songs.append(song)
# 处理歌曲的歌手和专辑信息,以及歌手的歌曲列表
song.album = album
for artist_name in artist_name_list:
artist_id = gen_id(artist_name)
if artist_id in g_artists:
artist = g_artists[artist_id]
else:
artist = create_artist(identifier=artist_id, name=artist_name)
g_artists[artist_id] = artist
if artist not in song.artists:
song.artists.append(artist)
if song not in artist.songs:
artist.songs.append(song) | parse music file metadata with Easymp3 and return a song
model. | Below is the the instruction that describes the task:
### Input:
parse music file metadata with Easymp3 and return a song
model.
### Response:
def add_song(fpath, g_songs, g_artists, g_albums):
"""
parse music file metadata with Easymp3 and return a song
model.
"""
try:
if fpath.endswith('mp3') or fpath.endswith('ogg') or fpath.endswith('wma'):
metadata = EasyMP3(fpath)
elif fpath.endswith('m4a'):
metadata = EasyMP4(fpath)
except MutagenError as e:
logger.exception('Mutagen parse metadata failed, ignore.')
return None
metadata_dict = dict(metadata)
for key in metadata.keys():
metadata_dict[key] = metadata_dict[key][0]
if 'title' not in metadata_dict:
title = fpath.rsplit('/')[-1].split('.')[0]
metadata_dict['title'] = title
metadata_dict.update(dict(
url=fpath,
duration=metadata.info.length * 1000 # milesecond
))
schema = EasyMP3MetadataSongSchema(strict=True)
try:
data, _ = schema.load(metadata_dict)
except ValidationError:
logger.exeception('解析音乐文件({}) 元数据失败'.format(fpath))
return
# NOTE: use {title}-{artists_name}-{album_name} as song identifier
title = data['title']
album_name = data['album_name']
artist_name_list = [
name.strip()
for name in re.split(r'[,&]', data['artists_name'])]
artists_name = ','.join(artist_name_list)
duration = data['duration']
album_artist_name = data['album_artist_name']
# 生成 song model
# 用来生成 id 的字符串应该尽量减少无用信息,这样或许能减少 id 冲突概率
song_id_str = ''.join([title, artists_name, album_name, str(int(duration))])
song_id = gen_id(song_id_str)
if song_id not in g_songs:
# 剩下 album, lyric 三个字段没有初始化
song = LSongModel(identifier=song_id,
artists=[],
title=title,
url=fpath,
duration=duration,
comments=[],
# 下面这些字段不向外暴露
genre=data['genre'],
cover=data['cover'],
date=data['date'],
desc=data['desc'],
disc=data['disc'],
track=data['track'])
g_songs[song_id] = song
else:
song = g_songs[song_id]
logger.debug('Duplicate song: %s %s', song.url, fpath)
return
# 生成 album artist model
album_artist_id = gen_id(album_artist_name)
if album_artist_id not in g_artists:
album_artist = create_artist(album_artist_id, album_artist_name)
g_artists[album_artist_id] = album_artist
else:
album_artist = g_artists[album_artist_id]
# 生成 album model
album_id_str = album_name + album_artist_name
album_id = gen_id(album_id_str)
if album_id not in g_albums:
album = create_album(album_id, album_name)
g_albums[album_id] = album
else:
album = g_albums[album_id]
# 处理专辑的歌手信息和歌曲信息,专辑歌手的专辑列表信息
if album not in album_artist.albums:
album_artist.albums.append(album)
if album_artist not in album.artists:
album.artists.append(album_artist)
if song not in album.songs:
album.songs.append(song)
# 处理歌曲的歌手和专辑信息,以及歌手的歌曲列表
song.album = album
for artist_name in artist_name_list:
artist_id = gen_id(artist_name)
if artist_id in g_artists:
artist = g_artists[artist_id]
else:
artist = create_artist(identifier=artist_id, name=artist_name)
g_artists[artist_id] = artist
if artist not in song.artists:
song.artists.append(artist)
if song not in artist.songs:
artist.songs.append(song) |
def add(self, *names):
'''Returns back a class decorator that enables registering Blox to this factory'''
def decorator(blok):
for name in names or (blok.__name__, ):
self[name] = blok
return blok
return decorator | Returns back a class decorator that enables registering Blox to this factory | Below is the the instruction that describes the task:
### Input:
Returns back a class decorator that enables registering Blox to this factory
### Response:
def add(self, *names):
'''Returns back a class decorator that enables registering Blox to this factory'''
def decorator(blok):
for name in names or (blok.__name__, ):
self[name] = blok
return blok
return decorator |
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel) | Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. | Below is the the instruction that describes the task:
### Input:
Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders.
### Response:
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel) |
def generate_node_command(command, command_opts, path):
"""Return node bin command list for subprocess execution."""
if which(NPX_BIN):
# Use npx if available (npm v5.2+)
LOGGER.debug("Using npx to invoke %s.", command)
if platform.system().lower() == 'windows':
cmd_list = [NPX_BIN,
'-c',
"%s %s" % (command, ' '.join(command_opts))]
else:
# The nested app-through-npx-via-subprocess command invocation
# requires this redundant quoting
cmd_list = [NPX_BIN,
'-c',
"''%s %s''" % (command, ' '.join(command_opts))]
else:
LOGGER.debug('npx not found; falling back invoking %s shell script '
'directly.', command)
cmd_list = [
os.path.join(path,
'node_modules',
'.bin',
command)
] + command_opts
return cmd_list | Return node bin command list for subprocess execution. | Below is the the instruction that describes the task:
### Input:
Return node bin command list for subprocess execution.
### Response:
def generate_node_command(command, command_opts, path):
"""Return node bin command list for subprocess execution."""
if which(NPX_BIN):
# Use npx if available (npm v5.2+)
LOGGER.debug("Using npx to invoke %s.", command)
if platform.system().lower() == 'windows':
cmd_list = [NPX_BIN,
'-c',
"%s %s" % (command, ' '.join(command_opts))]
else:
# The nested app-through-npx-via-subprocess command invocation
# requires this redundant quoting
cmd_list = [NPX_BIN,
'-c',
"''%s %s''" % (command, ' '.join(command_opts))]
else:
LOGGER.debug('npx not found; falling back invoking %s shell script '
'directly.', command)
cmd_list = [
os.path.join(path,
'node_modules',
'.bin',
command)
] + command_opts
return cmd_list |
def print_obj(arg, frame, format=None, short=False):
"""Return a string representation of an object """
try:
if not frame:
# ?? Should we have set up a dummy globals
# to have persistence?
obj = eval(arg, None, None)
else:
obj = eval(arg, frame.f_globals, frame.f_locals)
pass
except:
return 'No symbol "' + arg + '" in current context.'
# format and print
what = arg
if format:
what = format + ' ' + arg
obj = printf(obj, format)
s = '%s = %s' % (what, obj)
if not short:
s += '\ntype = %s' % type(obj)
if callable(obj):
argspec = print_argspec(obj, arg)
if argspec:
s += ':\n\t'
if inspect.isclass(obj):
s += 'Class constructor information:\n\t'
obj = obj.__init__
elif isinstance(obj, types.InstanceType):
obj = obj.__call__
pass
s+= argspec
pass
# Try to list the members of a class.
# Not sure if this is correct or the
# best way to do.
s = print_dict(s, obj, "object variables")
if hasattr(obj, "__class__"):
s = print_dict(s, obj.__class__, "class variables")
pass
return s | Return a string representation of an object | Below is the the instruction that describes the task:
### Input:
Return a string representation of an object
### Response:
def print_obj(arg, frame, format=None, short=False):
"""Return a string representation of an object """
try:
if not frame:
# ?? Should we have set up a dummy globals
# to have persistence?
obj = eval(arg, None, None)
else:
obj = eval(arg, frame.f_globals, frame.f_locals)
pass
except:
return 'No symbol "' + arg + '" in current context.'
# format and print
what = arg
if format:
what = format + ' ' + arg
obj = printf(obj, format)
s = '%s = %s' % (what, obj)
if not short:
s += '\ntype = %s' % type(obj)
if callable(obj):
argspec = print_argspec(obj, arg)
if argspec:
s += ':\n\t'
if inspect.isclass(obj):
s += 'Class constructor information:\n\t'
obj = obj.__init__
elif isinstance(obj, types.InstanceType):
obj = obj.__call__
pass
s+= argspec
pass
# Try to list the members of a class.
# Not sure if this is correct or the
# best way to do.
s = print_dict(s, obj, "object variables")
if hasattr(obj, "__class__"):
s = print_dict(s, obj.__class__, "class variables")
pass
return s |
def get_jclass(classname):
"""
Returns the Java class object associated with the dot-notation classname.
:param classname: the classname
:type classname: str
:return: the class object
:rtype: JB_Object
"""
try:
return javabridge.class_for_name(classname)
except:
return javabridge.static_call(
"Lweka/core/ClassHelper;", "forName",
"(Ljava/lang/Class;Ljava/lang/String;)Ljava/lang/Class;",
javabridge.class_for_name("java.lang.Object"), classname) | Returns the Java class object associated with the dot-notation classname.
:param classname: the classname
:type classname: str
:return: the class object
:rtype: JB_Object | Below is the the instruction that describes the task:
### Input:
Returns the Java class object associated with the dot-notation classname.
:param classname: the classname
:type classname: str
:return: the class object
:rtype: JB_Object
### Response:
def get_jclass(classname):
"""
Returns the Java class object associated with the dot-notation classname.
:param classname: the classname
:type classname: str
:return: the class object
:rtype: JB_Object
"""
try:
return javabridge.class_for_name(classname)
except:
return javabridge.static_call(
"Lweka/core/ClassHelper;", "forName",
"(Ljava/lang/Class;Ljava/lang/String;)Ljava/lang/Class;",
javabridge.class_for_name("java.lang.Object"), classname) |
def randomize_nb_elements(
self,
number=10,
le=False,
ge=False,
min=None,
max=None):
"""
Returns a random value near number.
:param number: value to which the result must be near
:param le: result must be lower or equal to number
:param ge: result must be greater or equal to number
:returns: a random int near number
"""
if le and ge:
return number
_min = 100 if ge else 60
_max = 100 if le else 140
nb = int(number * self.generator.random.randint(_min, _max) / 100)
if min is not None and nb < min:
nb = min
if max is not None and nb > min:
nb = max
return nb | Returns a random value near number.
:param number: value to which the result must be near
:param le: result must be lower or equal to number
:param ge: result must be greater or equal to number
:returns: a random int near number | Below is the the instruction that describes the task:
### Input:
Returns a random value near number.
:param number: value to which the result must be near
:param le: result must be lower or equal to number
:param ge: result must be greater or equal to number
:returns: a random int near number
### Response:
def randomize_nb_elements(
self,
number=10,
le=False,
ge=False,
min=None,
max=None):
"""
Returns a random value near number.
:param number: value to which the result must be near
:param le: result must be lower or equal to number
:param ge: result must be greater or equal to number
:returns: a random int near number
"""
if le and ge:
return number
_min = 100 if ge else 60
_max = 100 if le else 140
nb = int(number * self.generator.random.randint(_min, _max) / 100)
if min is not None and nb < min:
nb = min
if max is not None and nb > min:
nb = max
return nb |
def summarize(self, n_timescales_to_report=5):
"""Some summary information."""
nonzeros = np.sum(np.abs(self.eigenvectors_) > 0, axis=0)
active = '[%s]' % ', '.join(['%d/%d' % (n, self.n_features) for n in nonzeros[:n_timescales_to_report]])
return """K-sparse time-structure Independent Components Analysis (tICA)
------------------------------------------------------------------
n_components : {n_components}
shrinkage : {shrinkage}
lag_time : {lag_time}
kinetic_mapping : {kinetic_mapping}
n_features : {n_features}
Top {n_timescales_to_report} timescales :
{timescales}
Top {n_timescales_to_report} eigenvalues :
{eigenvalues}
Number of active degrees of freedom:
{active}
""".format(n_components=self.n_components, shrinkage=self.shrinkage_, lag_time=self.lag_time,
kinetic_mapping=self.kinetic_mapping,
timescales=self.timescales_[:n_timescales_to_report], eigenvalues=self.eigenvalues_[:n_timescales_to_report],
n_features=self.n_features, active=active, n_timescales_to_report=n_timescales_to_report) | Some summary information. | Below is the the instruction that describes the task:
### Input:
Some summary information.
### Response:
def summarize(self, n_timescales_to_report=5):
"""Some summary information."""
nonzeros = np.sum(np.abs(self.eigenvectors_) > 0, axis=0)
active = '[%s]' % ', '.join(['%d/%d' % (n, self.n_features) for n in nonzeros[:n_timescales_to_report]])
return """K-sparse time-structure Independent Components Analysis (tICA)
------------------------------------------------------------------
n_components : {n_components}
shrinkage : {shrinkage}
lag_time : {lag_time}
kinetic_mapping : {kinetic_mapping}
n_features : {n_features}
Top {n_timescales_to_report} timescales :
{timescales}
Top {n_timescales_to_report} eigenvalues :
{eigenvalues}
Number of active degrees of freedom:
{active}
""".format(n_components=self.n_components, shrinkage=self.shrinkage_, lag_time=self.lag_time,
kinetic_mapping=self.kinetic_mapping,
timescales=self.timescales_[:n_timescales_to_report], eigenvalues=self.eigenvalues_[:n_timescales_to_report],
n_features=self.n_features, active=active, n_timescales_to_report=n_timescales_to_report) |
def enable_service(conn, service='ceph'):
"""
Enable a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
"""
if is_systemd(conn):
remoto.process.run(
conn,
[
'systemctl',
'enable',
'{service}'.format(service=service),
]
)
else:
remoto.process.run(
conn,
[
'chkconfig',
'{service}'.format(service=service),
'on',
]
) | Enable a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection. | Below is the the instruction that describes the task:
### Input:
Enable a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
### Response:
def enable_service(conn, service='ceph'):
"""
Enable a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
"""
if is_systemd(conn):
remoto.process.run(
conn,
[
'systemctl',
'enable',
'{service}'.format(service=service),
]
)
else:
remoto.process.run(
conn,
[
'chkconfig',
'{service}'.format(service=service),
'on',
]
) |
def _decode_content(self, content):
"""
Split node string to uri and content and convert back to unicode.
"""
content = smart_unicode(content)
uri, _, content = content.partition(u'|')
if content == self.NONE:
content = None
return uri or None, content | Split node string to uri and content and convert back to unicode. | Below is the the instruction that describes the task:
### Input:
Split node string to uri and content and convert back to unicode.
### Response:
def _decode_content(self, content):
"""
Split node string to uri and content and convert back to unicode.
"""
content = smart_unicode(content)
uri, _, content = content.partition(u'|')
if content == self.NONE:
content = None
return uri or None, content |
def trace_emit(self):
"""
A decorator that allows to change the trace emitter.
By default Python logging is used to emit the trace data.
"""
def decorator(f):
self.tracer.emitter = f
return f
return decorator | A decorator that allows to change the trace emitter.
By default Python logging is used to emit the trace data. | Below is the the instruction that describes the task:
### Input:
A decorator that allows to change the trace emitter.
By default Python logging is used to emit the trace data.
### Response:
def trace_emit(self):
"""
A decorator that allows to change the trace emitter.
By default Python logging is used to emit the trace data.
"""
def decorator(f):
self.tracer.emitter = f
return f
return decorator |
def fmt_num(num, zero_num=None):
""" humanize number(9000 to 9,000) """
if zero_num is not None:
num = floatformat(num, zero_num)
return intcomma(num, False) | humanize number(9000 to 9,000) | Below is the the instruction that describes the task:
### Input:
humanize number(9000 to 9,000)
### Response:
def fmt_num(num, zero_num=None):
""" humanize number(9000 to 9,000) """
if zero_num is not None:
num = floatformat(num, zero_num)
return intcomma(num, False) |
def monte_carlo_vol(self, ndraws=10000, rstate=None,
return_overlap=True):
"""Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of ellipsoids. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube."""
if rstate is None:
rstate = np.random
# Estimate volume using Monte Carlo integration.
samples = [self.sample(rstate=rstate, return_q=True)
for i in range(ndraws)]
qsum = sum([q for (x, idx, q) in samples])
vol = 1. * ndraws / qsum * self.vol_tot
if return_overlap:
# Estimate the fractional amount of overlap with the
# unit cube using the same set of samples.
qin = sum([q * unitcheck(x) for (x, idx, q) in samples])
overlap = 1. * qin / qsum
return vol, overlap
else:
return vol | Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of ellipsoids. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube. | Below is the instruction that describes the task:
### Input:
Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of ellipsoids. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube.
### Response:
def monte_carlo_vol(self, ndraws=10000, rstate=None,
return_overlap=True):
"""Using `ndraws` Monte Carlo draws, estimate the volume of the
*union* of ellipsoids. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube."""
if rstate is None:
rstate = np.random
# Estimate volume using Monte Carlo integration.
samples = [self.sample(rstate=rstate, return_q=True)
for i in range(ndraws)]
qsum = sum([q for (x, idx, q) in samples])
vol = 1. * ndraws / qsum * self.vol_tot
if return_overlap:
# Estimate the fractional amount of overlap with the
# unit cube using the same set of samples.
qin = sum([q * unitcheck(x) for (x, idx, q) in samples])
overlap = 1. * qin / qsum
return vol, overlap
else:
return vol |
def tfpdef(self, ident_tok, annotation_opt):
"""(3.0-) tfpdef: NAME [':' test]"""
if annotation_opt:
colon_loc, annotation = annotation_opt
return self._arg(ident_tok, colon_loc, annotation)
return self._arg(ident_tok) | (3.0-) tfpdef: NAME [':' test] | Below is the instruction that describes the task:
### Input:
(3.0-) tfpdef: NAME [':' test]
### Response:
def tfpdef(self, ident_tok, annotation_opt):
"""(3.0-) tfpdef: NAME [':' test]"""
if annotation_opt:
colon_loc, annotation = annotation_opt
return self._arg(ident_tok, colon_loc, annotation)
return self._arg(ident_tok) |
def has_group_perms(self, perm, obj, approved):
"""
Check if group has the permission for the given object
"""
if not self.group:
return False
if self.use_smart_cache:
content_type_pk = Permission.objects.get_content_type(obj).pk
def _group_has_perms(cached_perms):
# Check to see if the permission is in the cache.
return cached_perms.get((
obj.pk,
content_type_pk,
perm,
approved,
))
# Check to see if the permission is in the cache.
return _group_has_perms(self._group_perm_cache)
# Actually hit the DB, no smart cache used.
return Permission.objects.group_permissions(
self.group,
perm, obj,
approved,
).filter(
object_id=obj.pk,
).exists() | Check if group has the permission for the given object | Below is the instruction that describes the task:
### Input:
Check if group has the permission for the given object
### Response:
def has_group_perms(self, perm, obj, approved):
"""
Check if group has the permission for the given object
"""
if not self.group:
return False
if self.use_smart_cache:
content_type_pk = Permission.objects.get_content_type(obj).pk
def _group_has_perms(cached_perms):
# Check to see if the permission is in the cache.
return cached_perms.get((
obj.pk,
content_type_pk,
perm,
approved,
))
# Check to see if the permission is in the cache.
return _group_has_perms(self._group_perm_cache)
# Actually hit the DB, no smart cache used.
return Permission.objects.group_permissions(
self.group,
perm, obj,
approved,
).filter(
object_id=obj.pk,
).exists() |
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts | For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower. | Below is the instruction that describes the task:
### Input:
For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
### Response:
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts |
def timeline(self):
"""
>>> Cluster(0, 0, 0, 0, 0, 0, 0, 0).timeline
0
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[]')).timeline
1
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]')).timeline
0
"""
if self.history:
if self.history.lines:
try:
return int(self.history.lines[-1][0]) + 1
except Exception:
logger.error('Failed to parse cluster history from DCS: %s', self.history.lines)
elif self.history.value == '[]':
return 1
return 0 | >>> Cluster(0, 0, 0, 0, 0, 0, 0, 0).timeline
0
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[]')).timeline
1
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]')).timeline
0 | Below is the instruction that describes the task:
### Input:
>>> Cluster(0, 0, 0, 0, 0, 0, 0, 0).timeline
0
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[]')).timeline
1
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]')).timeline
0
### Response:
def timeline(self):
"""
>>> Cluster(0, 0, 0, 0, 0, 0, 0, 0).timeline
0
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[]')).timeline
1
>>> Cluster(0, 0, 0, 0, 0, 0, 0, TimelineHistory.from_node(1, '[["a"]]')).timeline
0
"""
if self.history:
if self.history.lines:
try:
return int(self.history.lines[-1][0]) + 1
except Exception:
logger.error('Failed to parse cluster history from DCS: %s', self.history.lines)
elif self.history.value == '[]':
return 1
return 0 |
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the ConstructResult via the visitor pattern."""
new_fields = {}
for key, value in six.iteritems(self.fields):
new_value = value.visit_and_update(visitor_fn)
if new_value is not value:
new_fields[key] = new_value
if new_fields:
return ConstructResult(dict(self.fields, **new_fields))
else:
return self | Create an updated version (if needed) of the ConstructResult via the visitor pattern. | Below is the instruction that describes the task:
### Input:
Create an updated version (if needed) of the ConstructResult via the visitor pattern.
### Response:
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the ConstructResult via the visitor pattern."""
new_fields = {}
for key, value in six.iteritems(self.fields):
new_value = value.visit_and_update(visitor_fn)
if new_value is not value:
new_fields[key] = new_value
if new_fields:
return ConstructResult(dict(self.fields, **new_fields))
else:
return self |
def parse(self):
"""
Parse a replay
"""
self.important("Parsing demo file '%s'" % (self.filename, ))
with open(self.filename, 'rb') as f:
reader = Reader(StringIO(f.read()))
filestamp = reader.read(8)
offset = reader.read_int32()
if filestamp != "PBUFDEM\x00":
raise ValueError("Invalid replay - incorrect filestamp")
buff = StringIO(f.read())
frame = 0
more = True
while more and reader.remaining > 0:
cmd = reader.read_vint32()
tick = reader.read_vint32()
compressed = False
if cmd & demo_pb2.DEM_IsCompressed:
compressed = True
cmd = cmd & ~demo_pb2.DEM_IsCompressed
if cmd not in messages.MESSAGE_TYPES:
raise KeyError("Unknown message type found")
message_type = messages.MESSAGE_TYPES[cmd]
message = reader.read_message(message_type, compressed)
self.info('%s: %s' % (frame, message_type))
self.worthless(message)
self.run_hooks(message)
self.info('|%s' % ('-' * 79, ))
frame += 1
if self.frames and frame > self.frames:
break | Parse a replay | Below is the instruction that describes the task:
### Input:
Parse a replay
### Response:
def parse(self):
"""
Parse a replay
"""
self.important("Parsing demo file '%s'" % (self.filename, ))
with open(self.filename, 'rb') as f:
reader = Reader(StringIO(f.read()))
filestamp = reader.read(8)
offset = reader.read_int32()
if filestamp != "PBUFDEM\x00":
raise ValueError("Invalid replay - incorrect filestamp")
buff = StringIO(f.read())
frame = 0
more = True
while more and reader.remaining > 0:
cmd = reader.read_vint32()
tick = reader.read_vint32()
compressed = False
if cmd & demo_pb2.DEM_IsCompressed:
compressed = True
cmd = cmd & ~demo_pb2.DEM_IsCompressed
if cmd not in messages.MESSAGE_TYPES:
raise KeyError("Unknown message type found")
message_type = messages.MESSAGE_TYPES[cmd]
message = reader.read_message(message_type, compressed)
self.info('%s: %s' % (frame, message_type))
self.worthless(message)
self.run_hooks(message)
self.info('|%s' % ('-' * 79, ))
frame += 1
if self.frames and frame > self.frames:
break |
def _diff(state_data, resource_object):
'''helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update.
'''
state_data['id'] = resource_object['schedule']['id']
objects_differ = None
# first check all the easy top-level properties: everything except the schedule_layers.
for k, v in state_data['schedule'].items():
if k == 'schedule_layers':
continue
if v != resource_object['schedule'][k]:
objects_differ = '{0} {1} {2}'.format(k, v, resource_object['schedule'][k])
break
# check schedule_layers
if not objects_differ:
for layer in state_data['schedule']['schedule_layers']:
# find matching layer name
resource_layer = None
for resource_layer in resource_object['schedule']['schedule_layers']:
found = False
if layer['name'] == resource_layer['name']:
found = True
break
if not found:
objects_differ = 'layer {0} missing'.format(layer['name'])
break
# set the id, so that we will update this layer instead of creating a new one
layer['id'] = resource_layer['id']
# compare contents of layer and resource_layer
for k, v in layer.items():
if k == 'users':
continue
if k == 'start':
continue
if v != resource_layer[k]:
objects_differ = 'layer {0} key {1} {2} != {3}'.format(layer['name'], k, v, resource_layer[k])
break
if objects_differ:
break
# compare layer['users']
if len(layer['users']) != len(resource_layer['users']):
objects_differ = 'num users in layer {0} {1} != {2}'.format(layer['name'], len(layer['users']), len(resource_layer['users']))
break
for user1 in layer['users']:
found = False
user2 = None
for user2 in resource_layer['users']:
# deal with PD API bug: when you submit member_order=N, you get back member_order=N+1
if user1['member_order'] == user2['member_order'] - 1:
found = True
break
if not found:
objects_differ = 'layer {0} no one with member_order {1}'.format(layer['name'], user1['member_order'])
break
if user1['user']['id'] != user2['user']['id']:
objects_differ = 'layer {0} user at member_order {1} {2} != {3}'.format(layer['name'],
user1['member_order'],
user1['user']['id'],
user2['user']['id'])
break
if objects_differ:
return state_data
else:
return {} | helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update. | Below is the instruction that describes the task:
### Input:
helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update.
### Response:
def _diff(state_data, resource_object):
'''helper method to compare salt state info with the PagerDuty API json structure,
and determine if we need to update.
returns the dict to pass to the PD API to perform the update, or empty dict if no update.
'''
state_data['id'] = resource_object['schedule']['id']
objects_differ = None
# first check all the easy top-level properties: everything except the schedule_layers.
for k, v in state_data['schedule'].items():
if k == 'schedule_layers':
continue
if v != resource_object['schedule'][k]:
objects_differ = '{0} {1} {2}'.format(k, v, resource_object['schedule'][k])
break
# check schedule_layers
if not objects_differ:
for layer in state_data['schedule']['schedule_layers']:
# find matching layer name
resource_layer = None
for resource_layer in resource_object['schedule']['schedule_layers']:
found = False
if layer['name'] == resource_layer['name']:
found = True
break
if not found:
objects_differ = 'layer {0} missing'.format(layer['name'])
break
# set the id, so that we will update this layer instead of creating a new one
layer['id'] = resource_layer['id']
# compare contents of layer and resource_layer
for k, v in layer.items():
if k == 'users':
continue
if k == 'start':
continue
if v != resource_layer[k]:
objects_differ = 'layer {0} key {1} {2} != {3}'.format(layer['name'], k, v, resource_layer[k])
break
if objects_differ:
break
# compare layer['users']
if len(layer['users']) != len(resource_layer['users']):
objects_differ = 'num users in layer {0} {1} != {2}'.format(layer['name'], len(layer['users']), len(resource_layer['users']))
break
for user1 in layer['users']:
found = False
user2 = None
for user2 in resource_layer['users']:
# deal with PD API bug: when you submit member_order=N, you get back member_order=N+1
if user1['member_order'] == user2['member_order'] - 1:
found = True
break
if not found:
objects_differ = 'layer {0} no one with member_order {1}'.format(layer['name'], user1['member_order'])
break
if user1['user']['id'] != user2['user']['id']:
objects_differ = 'layer {0} user at member_order {1} {2} != {3}'.format(layer['name'],
user1['member_order'],
user1['user']['id'],
user2['user']['id'])
break
if objects_differ:
return state_data
else:
return {} |
def getChanges(self, request):
"""
Reponds only to POST events and starts the build process
:arguments:
request
the http request object
"""
expected_secret = isinstance(self.options, dict) and self.options.get('secret')
if expected_secret:
received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
received_secret = bytes2unicode(received_secret)
p = Properties()
p.master = self.master
expected_secret_value = yield p.render(expected_secret)
if received_secret != expected_secret_value:
raise ValueError("Invalid secret")
try:
content = request.content.read()
payload = json.loads(bytes2unicode(content))
except Exception as e:
raise ValueError("Error loading JSON: " + str(e))
event_type = request.getHeader(_HEADER_EVENT)
event_type = bytes2unicode(event_type)
# newer version of gitlab have a object_kind parameter,
# which allows not to use the http header
event_type = payload.get('object_kind', event_type)
codebase = request.args.get(b'codebase', [None])[0]
codebase = bytes2unicode(codebase)
if event_type in ("push", "tag_push", "Push Hook"):
user = payload['user_name']
repo = payload['repository']['name']
repo_url = payload['repository']['url']
changes = self._process_change(
payload, user, repo, repo_url, event_type, codebase=codebase)
elif event_type == 'merge_request':
changes = self._process_merge_request_change(
payload, event_type, codebase=codebase)
else:
changes = []
if changes:
log.msg("Received {} changes from {} gitlab event".format(
len(changes), event_type))
return (changes, 'git') | Reponds only to POST events and starts the build process
:arguments:
request
the http request object | Below is the instruction that describes the task:
### Input:
Reponds only to POST events and starts the build process
:arguments:
request
the http request object
### Response:
def getChanges(self, request):
"""
Reponds only to POST events and starts the build process
:arguments:
request
the http request object
"""
expected_secret = isinstance(self.options, dict) and self.options.get('secret')
if expected_secret:
received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
received_secret = bytes2unicode(received_secret)
p = Properties()
p.master = self.master
expected_secret_value = yield p.render(expected_secret)
if received_secret != expected_secret_value:
raise ValueError("Invalid secret")
try:
content = request.content.read()
payload = json.loads(bytes2unicode(content))
except Exception as e:
raise ValueError("Error loading JSON: " + str(e))
event_type = request.getHeader(_HEADER_EVENT)
event_type = bytes2unicode(event_type)
# newer version of gitlab have a object_kind parameter,
# which allows not to use the http header
event_type = payload.get('object_kind', event_type)
codebase = request.args.get(b'codebase', [None])[0]
codebase = bytes2unicode(codebase)
if event_type in ("push", "tag_push", "Push Hook"):
user = payload['user_name']
repo = payload['repository']['name']
repo_url = payload['repository']['url']
changes = self._process_change(
payload, user, repo, repo_url, event_type, codebase=codebase)
elif event_type == 'merge_request':
changes = self._process_merge_request_change(
payload, event_type, codebase=codebase)
else:
changes = []
if changes:
log.msg("Received {} changes from {} gitlab event".format(
len(changes), event_type))
return (changes, 'git') |
def parse_args(parser):
"""Process args."""
args = parser.parse_args()
if args.q:
logging.getLogger().disabled = True
sys.stdout = sys.stderr = open(os.devnull, "w")
if args.a:
util.Color.alpha_num = args.a
if args.i:
image_file = image.get(args.i, iterative=args.iterative)
colors_plain = colors.get(image_file, args.l, args.backend,
sat=args.saturate)
if args.theme:
colors_plain = theme.file(args.theme, args.l)
if args.R:
colors_plain = theme.file(os.path.join(CACHE_DIR, "colors.json"))
if args.b:
args.b = "#%s" % (args.b.strip("#"))
colors_plain["special"]["background"] = args.b
colors_plain["colors"]["color0"] = args.b
if not args.n:
wallpaper.change(colors_plain["wallpaper"])
sequences.send(colors_plain, to_send=not args.s, vte_fix=args.vte)
if sys.stdout.isatty():
colors.palette()
export.every(colors_plain)
if not args.e:
reload.env(tty_reload=not args.t)
if args.o:
for cmd in args.o:
util.disown([cmd])
if not args.e:
reload.gtk() | Process args. | Below is the instruction that describes the task:
### Input:
Process args.
### Response:
def parse_args(parser):
"""Process args."""
args = parser.parse_args()
if args.q:
logging.getLogger().disabled = True
sys.stdout = sys.stderr = open(os.devnull, "w")
if args.a:
util.Color.alpha_num = args.a
if args.i:
image_file = image.get(args.i, iterative=args.iterative)
colors_plain = colors.get(image_file, args.l, args.backend,
sat=args.saturate)
if args.theme:
colors_plain = theme.file(args.theme, args.l)
if args.R:
colors_plain = theme.file(os.path.join(CACHE_DIR, "colors.json"))
if args.b:
args.b = "#%s" % (args.b.strip("#"))
colors_plain["special"]["background"] = args.b
colors_plain["colors"]["color0"] = args.b
if not args.n:
wallpaper.change(colors_plain["wallpaper"])
sequences.send(colors_plain, to_send=not args.s, vte_fix=args.vte)
if sys.stdout.isatty():
colors.palette()
export.every(colors_plain)
if not args.e:
reload.env(tty_reload=not args.t)
if args.o:
for cmd in args.o:
util.disown([cmd])
if not args.e:
reload.gtk() |
def assets(self) -> List[Asset]:
"""
Returns the assets in the transaction.
"""
return list(filter(is_element(Asset), self.content)) | Returns the assets in the transaction. | Below is the instruction that describes the task:
### Input:
Returns the assets in the transaction.
### Response:
def assets(self) -> List[Asset]:
"""
Returns the assets in the transaction.
"""
return list(filter(is_element(Asset), self.content)) |
def register_defaults(self):
"""Register default compilers, preprocessors and MIME types."""
self.mimetypes.register_defaults()
self.preprocessors.register_defaults()
self.postprocessors.register_defaults() | Register default compilers, preprocessors and MIME types. | Below is the instruction that describes the task:
### Input:
Register default compilers, preprocessors and MIME types.
### Response:
def register_defaults(self):
"""Register default compilers, preprocessors and MIME types."""
self.mimetypes.register_defaults()
self.preprocessors.register_defaults()
self.postprocessors.register_defaults() |
def content_check(self, result):
'''
Checks for specific types in the state output.
Raises an Exception in case particular rule is broken.
:param result:
:return:
'''
if not isinstance(result, dict):
err_msg = 'Malformed state return. Data must be a dictionary type.'
elif not isinstance(result.get('changes'), dict):
err_msg = "'Changes' should be a dictionary."
else:
missing = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in result:
missing.append(val)
if missing:
err_msg = 'The following keys were not present in the state return: {0}.'.format(', '.join(missing))
else:
err_msg = None
if err_msg:
raise SaltException(err_msg)
return result | Checks for specific types in the state output.
Raises an Exception in case particular rule is broken.
:param result:
:return: | Below is the instruction that describes the task:
### Input:
Checks for specific types in the state output.
Raises an Exception in case particular rule is broken.
:param result:
:return:
### Response:
def content_check(self, result):
'''
Checks for specific types in the state output.
Raises an Exception in case particular rule is broken.
:param result:
:return:
'''
if not isinstance(result, dict):
err_msg = 'Malformed state return. Data must be a dictionary type.'
elif not isinstance(result.get('changes'), dict):
err_msg = "'Changes' should be a dictionary."
else:
missing = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in result:
missing.append(val)
if missing:
err_msg = 'The following keys were not present in the state return: {0}.'.format(', '.join(missing))
else:
err_msg = None
if err_msg:
raise SaltException(err_msg)
return result |
def discretize(self, contact_id=0, accuracy=0.004, dt=0.001):
"""
Sample this motion track into discretized motion events.
Args:
contact_id: contact point id
accuracy: motion minimum difference in space
dt: sample time difference
"""
if not self.event_points:
return []
events = []
action_dt = accuracy / self.speed
dt = dt or action_dt
ep0 = self.event_points[0]
for _ in range(int(ep0[0] / dt)):
events.append(['s', dt])
events.append(['d', ep0[1], contact_id])
for i, ep in enumerate(self.event_points[1:]):
prev_ts = self.event_points[i][0]
curr_ts = ep[0]
p0 = self.event_points[i][1]
p1 = ep[1]
if p0 == p1:
# hold
for _ in range(int((curr_ts - prev_ts) / dt)):
events.append(['s', dt])
else:
# move
dpoints = track_sampling([p0, p1], accuracy)
for p in dpoints:
events.append(['m', p, contact_id])
for _ in range(int(action_dt / dt)):
events.append(['s', dt])
events.append(['u', contact_id])
return events | Sample this motion track into discretized motion events.
Args:
contact_id: contact point id
accuracy: motion minimum difference in space
dt: sample time difference | Below is the instruction that describes the task:
### Input:
Sample this motion track into discretized motion events.
Args:
contact_id: contact point id
accuracy: motion minimum difference in space
dt: sample time difference
### Response:
def discretize(self, contact_id=0, accuracy=0.004, dt=0.001):
"""
Sample this motion track into discretized motion events.
Args:
contact_id: contact point id
accuracy: motion minimum difference in space
dt: sample time difference
"""
if not self.event_points:
return []
events = []
action_dt = accuracy / self.speed
dt = dt or action_dt
ep0 = self.event_points[0]
for _ in range(int(ep0[0] / dt)):
events.append(['s', dt])
events.append(['d', ep0[1], contact_id])
for i, ep in enumerate(self.event_points[1:]):
prev_ts = self.event_points[i][0]
curr_ts = ep[0]
p0 = self.event_points[i][1]
p1 = ep[1]
if p0 == p1:
# hold
for _ in range(int((curr_ts - prev_ts) / dt)):
events.append(['s', dt])
else:
# move
dpoints = track_sampling([p0, p1], accuracy)
for p in dpoints:
events.append(['m', p, contact_id])
for _ in range(int(action_dt / dt)):
events.append(['s', dt])
events.append(['u', contact_id])
return events |
def build_permuted_index(self):
"""
Build PermutedIndex for all your binary hashings.
PermutedIndex would be used to find the neighbour bucket key
in terms of Hamming distance. Permute_configs is nested dict
in the following format:
permuted_config = {"<hash_name>":
{ "num_permutation":50,
"beam_size":10,
"num_neighbour":100 }
}
"""
for child_hash in self.child_hashes:
# Get config values for child hash
config = child_hash['config']
num_permutation = config['num_permutation']
beam_size = config['beam_size']
num_neighbour = config['num_neighbour']
# Get used buckets keys for child hash
bucket_keys = child_hash['bucket_keys'].keys()
# Get actual child hash
lshash = child_hash['hash']
# Compute permuted index for this hash
self.permutation.build_permuted_index(lshash,bucket_keys,num_permutation,beam_size,num_neighbour) | Build PermutedIndex for all your binary hashings.
PermutedIndex would be used to find the neighbour bucket key
in terms of Hamming distance. Permute_configs is nested dict
in the following format:
permuted_config = {"<hash_name>":
{ "num_permutation":50,
"beam_size":10,
"num_neighbour":100 }
} | Below is the instruction that describes the task:
### Input:
Build PermutedIndex for all your binary hashings.
PermutedIndex would be used to find the neighbour bucket key
in terms of Hamming distance. Permute_configs is nested dict
in the following format:
permuted_config = {"<hash_name>":
{ "num_permutation":50,
"beam_size":10,
"num_neighbour":100 }
}
### Response:
def build_permuted_index(self):
"""
Build PermutedIndex for all your binary hashings.
PermutedIndex would be used to find the neighbour bucket key
in terms of Hamming distance. Permute_configs is nested dict
in the following format:
permuted_config = {"<hash_name>":
{ "num_permutation":50,
"beam_size":10,
"num_neighbour":100 }
}
"""
for child_hash in self.child_hashes:
# Get config values for child hash
config = child_hash['config']
num_permutation = config['num_permutation']
beam_size = config['beam_size']
num_neighbour = config['num_neighbour']
# Get used buckets keys for child hash
bucket_keys = child_hash['bucket_keys'].keys()
# Get actual child hash
lshash = child_hash['hash']
# Compute permuted index for this hash
self.permutation.build_permuted_index(lshash,bucket_keys,num_permutation,beam_size,num_neighbour) |
def render_impl(self, template, context, at_paths=None,
at_encoding=anytemplate.compat.ENCODING, **kwargs):
"""
Render given template file and return the result.
:param template: Template file path
:param context: A dict or dict-like object to instantiate given
template file
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string
"""
renderer = self._make_renderer(at_paths, at_encoding, **kwargs)
ctxs = [] if context is None else [context]
if os.path.sep in template: # `template` is in abs/rel-path.
return renderer.render_path(template, *ctxs)
else:
if template.endswith(renderer.file_extension):
template = os.path.splitext(template)[0]
return renderer.render_name(template, *ctxs) | Render given template file and return the result.
:param template: Template file path
:param context: A dict or dict-like object to instantiate given
template file
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string | Below is the instruction that describes the task:
### Input:
Render given template file and return the result.
:param template: Template file path
:param context: A dict or dict-like object to instantiate given
template file
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string
### Response:
def render_impl(self, template, context, at_paths=None,
at_encoding=anytemplate.compat.ENCODING, **kwargs):
"""
Render given template file and return the result.
:param template: Template file path
:param context: A dict or dict-like object to instantiate given
template file
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string
"""
renderer = self._make_renderer(at_paths, at_encoding, **kwargs)
ctxs = [] if context is None else [context]
if os.path.sep in template: # `template` is in abs/rel-path.
return renderer.render_path(template, *ctxs)
else:
if template.endswith(renderer.file_extension):
template = os.path.splitext(template)[0]
return renderer.render_name(template, *ctxs) |
def available():
"""
List available types for 'smudge'
"""
db = smudge_db.get()
click.echo('{:<6} {:<6} {:<50}'.format('Type', 'Offset', 'Magic'))
for k, v in db.items():
click.echo('{type:<6} {offset:<6} {magic}'.format(
type=k, magic=v['magic'].encode('hex'), offset=v['offset'])) | List available types for 'smudge' | Below is the instruction that describes the task:
### Input:
List available types for 'smudge'
### Response:
def available():
"""
List available types for 'smudge'
"""
db = smudge_db.get()
click.echo('{:<6} {:<6} {:<50}'.format('Type', 'Offset', 'Magic'))
for k, v in db.items():
click.echo('{type:<6} {offset:<6} {magic}'.format(
type=k, magic=v['magic'].encode('hex'), offset=v['offset'])) |
def buscar_por_id(self, id_ambiente):
"""Obtém um ambiente a partir da chave primária (identificador).
:param id_ambiente: Identificador do ambiente.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'acl_path': < acl_path >,
'ipv4_template': < ipv4_template >,
'ipv6_template': < ipv6_template >,
'ambiente_rede': < ambiente_rede >}}
:raise AmbienteNaoExisteError: Ambiente não cadastrado.
:raise InvalidParameterError: Identificador do ambiente é nulo ou inválido.
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta.
"""
if not is_valid_int_param(id_ambiente):
raise InvalidParameterError(
u'O identificador do ambiente é inválido ou não foi informado.')
url = 'environment/id/' + str(id_ambiente) + '/'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) | Obtém um ambiente a partir da chave primária (identificador).
:param id_ambiente: Identificador do ambiente.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'acl_path': < acl_path >,
'ipv4_template': < ipv4_template >,
'ipv6_template': < ipv6_template >,
'ambiente_rede': < ambiente_rede >}}
:raise AmbienteNaoExisteError: Ambiente não cadastrado.
:raise InvalidParameterError: Identificador do ambiente é nulo ou inválido.
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta. | Below is the the instruction that describes the task:
### Input:
Obtém um ambiente a partir da chave primária (identificador).
:param id_ambiente: Identificador do ambiente.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'acl_path': < acl_path >,
'ipv4_template': < ipv4_template >,
'ipv6_template': < ipv6_template >,
'ambiente_rede': < ambiente_rede >}}
:raise AmbienteNaoExisteError: Ambiente não cadastrado.
:raise InvalidParameterError: Identificador do ambiente é nulo ou inválido.
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta.
### Response:
def buscar_por_id(self, id_ambiente):
"""Obtém um ambiente a partir da chave primária (identificador).
:param id_ambiente: Identificador do ambiente.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'acl_path': < acl_path >,
'ipv4_template': < ipv4_template >,
'ipv6_template': < ipv6_template >,
'ambiente_rede': < ambiente_rede >}}
:raise AmbienteNaoExisteError: Ambiente não cadastrado.
:raise InvalidParameterError: Identificador do ambiente é nulo ou inválido.
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta.
"""
if not is_valid_int_param(id_ambiente):
raise InvalidParameterError(
u'O identificador do ambiente é inválido ou não foi informado.')
url = 'environment/id/' + str(id_ambiente) + '/'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) |
def run_cell(self, raw_cell, store_history=False, silent=False):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effets, such as implicit displayhooks, history,
and logging. silent=True forces store_history=False.
"""
if (not raw_cell) or raw_cell.isspace():
return
if silent:
store_history = False
self.input_splitter.push(raw_cell)
# Check for cell magics, which leave state behind. This interface is
# ugly, we need to do something cleaner later... Now the logic is
# simply that the input_splitter remembers if there was a cell magic,
# and in that case we grab the cell body.
if self.input_splitter.cell_magic_parts:
self._current_cell_magic_body = \
''.join(self.input_splitter.cell_magic_parts)
cell = self.input_splitter.source_reset()
with self.builtin_trap:
prefilter_failed = False
if len(cell.splitlines()) == 1:
try:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
except AliasError as e:
error(e)
prefilter_failed = True
except Exception:
# don't allow prefilter errors to crash IPython
self.showtraceback()
prefilter_failed = True
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
if not prefilter_failed:
# don't run if prefilter failed
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
try:
code_ast = self.compile.ast_parse(cell,
filename=cell_name)
except IndentationError:
self.showindentationerror()
if store_history:
self.execution_count += 1
return None
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError):
self.showsyntaxerror()
if store_history:
self.execution_count += 1
return None
interactivity = "none" if silent else self.ast_node_interactivity
self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity)
# Execute any registered post-execution functions.
# unless we are silent
post_exec = [] if silent else self._post_execute.iteritems()
for func, status in post_exec:
if self.disable_failing_post_execute and not status:
continue
try:
func()
except KeyboardInterrupt:
print >> io.stderr, "\nKeyboardInterrupt"
except Exception:
# register as failing:
self._post_execute[func] = False
self.showtraceback()
print >> io.stderr, '\n'.join([
"post-execution function %r produced an error." % func,
"If this problem persists, you can disable failing post-exec functions with:",
"",
" get_ipython().disable_failing_post_execute = True"
])
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1 | Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effets, such as implicit displayhooks, history,
and logging. silent=True forces store_history=False. | Below is the the instruction that describes the task:
### Input:
Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effets, such as implicit displayhooks, history,
and logging. silent=True forces store_history=False.
### Response:
def run_cell(self, raw_cell, store_history=False, silent=False):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effets, such as implicit displayhooks, history,
and logging. silent=True forces store_history=False.
"""
if (not raw_cell) or raw_cell.isspace():
return
if silent:
store_history = False
self.input_splitter.push(raw_cell)
# Check for cell magics, which leave state behind. This interface is
# ugly, we need to do something cleaner later... Now the logic is
# simply that the input_splitter remembers if there was a cell magic,
# and in that case we grab the cell body.
if self.input_splitter.cell_magic_parts:
self._current_cell_magic_body = \
''.join(self.input_splitter.cell_magic_parts)
cell = self.input_splitter.source_reset()
with self.builtin_trap:
prefilter_failed = False
if len(cell.splitlines()) == 1:
try:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
except AliasError as e:
error(e)
prefilter_failed = True
except Exception:
# don't allow prefilter errors to crash IPython
self.showtraceback()
prefilter_failed = True
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
if not prefilter_failed:
# don't run if prefilter failed
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
try:
code_ast = self.compile.ast_parse(cell,
filename=cell_name)
except IndentationError:
self.showindentationerror()
if store_history:
self.execution_count += 1
return None
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError):
self.showsyntaxerror()
if store_history:
self.execution_count += 1
return None
interactivity = "none" if silent else self.ast_node_interactivity
self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity)
# Execute any registered post-execution functions.
# unless we are silent
post_exec = [] if silent else self._post_execute.iteritems()
for func, status in post_exec:
if self.disable_failing_post_execute and not status:
continue
try:
func()
except KeyboardInterrupt:
print >> io.stderr, "\nKeyboardInterrupt"
except Exception:
# register as failing:
self._post_execute[func] = False
self.showtraceback()
print >> io.stderr, '\n'.join([
"post-execution function %r produced an error." % func,
"If this problem persists, you can disable failing post-exec functions with:",
"",
" get_ipython().disable_failing_post_execute = True"
])
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1 |
def search_by_age(cls, *, limit=100, page=1, accounts=None, locations=None, age=720,
properties=None, include_disabled=False):
"""Search for resources based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
accounts (`list` of `int`): A list of account id's to limit the returned resources to
locations (`list` of `str`): A list of locations as strings to limit the search for
age (`int`): Age of instances older than `age` days to return
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
include_disabled (`bool`): Include resources from disabled accounts. Default: False
Returns:
`list` of `Resource`
"""
qry = cls.search(
limit=limit,
page=page,
accounts=accounts,
locations=locations,
properties=properties,
include_disabled=include_disabled,
return_query=True
)
age_alias = aliased(ResourceProperty)
qry = (
qry.join(age_alias, Resource.resource_id == age_alias.resource_id)
.filter(
age_alias.name == 'launch_date',
cast(func.JSON_UNQUOTE(age_alias.value), DATETIME) < datetime.now() - timedelta(days=age)
)
)
total = qry.count()
qry = qry.limit(limit)
qry = qry.offset((page - 1) * limit if page > 1 else 0)
return total, [cls(x) for x in qry.all()] | Search for resources based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
accounts (`list` of `int`): A list of account id's to limit the returned resources to
locations (`list` of `str`): A list of locations as strings to limit the search for
age (`int`): Age of instances older than `age` days to return
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
include_disabled (`bool`): Include resources from disabled accounts. Default: False
Returns:
`list` of `Resource` | Below is the the instruction that describes the task:
### Input:
Search for resources based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
accounts (`list` of `int`): A list of account id's to limit the returned resources to
locations (`list` of `str`): A list of locations as strings to limit the search for
age (`int`): Age of instances older than `age` days to return
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
include_disabled (`bool`): Include resources from disabled accounts. Default: False
Returns:
`list` of `Resource`
### Response:
def search_by_age(cls, *, limit=100, page=1, accounts=None, locations=None, age=720,
properties=None, include_disabled=False):
"""Search for resources based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
accounts (`list` of `int`): A list of account id's to limit the returned resources to
locations (`list` of `str`): A list of locations as strings to limit the search for
age (`int`): Age of instances older than `age` days to return
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
include_disabled (`bool`): Include resources from disabled accounts. Default: False
Returns:
`list` of `Resource`
"""
qry = cls.search(
limit=limit,
page=page,
accounts=accounts,
locations=locations,
properties=properties,
include_disabled=include_disabled,
return_query=True
)
age_alias = aliased(ResourceProperty)
qry = (
qry.join(age_alias, Resource.resource_id == age_alias.resource_id)
.filter(
age_alias.name == 'launch_date',
cast(func.JSON_UNQUOTE(age_alias.value), DATETIME) < datetime.now() - timedelta(days=age)
)
)
total = qry.count()
qry = qry.limit(limit)
qry = qry.offset((page - 1) * limit if page > 1 else 0)
return total, [cls(x) for x in qry.all()] |
def get_chat_ids(self):
"""Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list
"""
updates = self.get_updates()
chat_ids = []
if updates:
for update in updates:
message = update['message']
if message['text'] == '/start':
chat_ids.append(message['chat']['id'])
return list(set(chat_ids)) | Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list
### Response:
def get_chat_ids(self):
"""Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list
"""
updates = self.get_updates()
chat_ids = []
if updates:
for update in updates:
message = update['message']
if message['text'] == '/start':
chat_ids.append(message['chat']['id'])
return list(set(chat_ids)) |
def value_opt_step(i,
opt_state,
opt_update,
value_net_apply,
padded_observations,
padded_rewards,
reward_mask,
gamma=0.99):
"""Value optimizer step."""
value_params = trax_opt.get_params(opt_state)
# Note this partial application here and argnums above in ppo_opt_step.
g = grad(functools.partial(value_loss, value_net_apply))(
value_params,
padded_observations,
padded_rewards,
reward_mask,
gamma=gamma)
return opt_update(i, g, opt_state) | Value optimizer step. | Below is the the instruction that describes the task:
### Input:
Value optimizer step.
### Response:
def value_opt_step(i,
opt_state,
opt_update,
value_net_apply,
padded_observations,
padded_rewards,
reward_mask,
gamma=0.99):
"""Value optimizer step."""
value_params = trax_opt.get_params(opt_state)
# Note this partial application here and argnums above in ppo_opt_step.
g = grad(functools.partial(value_loss, value_net_apply))(
value_params,
padded_observations,
padded_rewards,
reward_mask,
gamma=gamma)
return opt_update(i, g, opt_state) |
def build_phenotype(phenotype_id, adapter):
"""Build a small phenotype object
Build a dictionary with phenotype_id and description
Args:
phenotype_id (str): The phenotype id
adapter (scout.adapter.MongoAdapter)
Returns:
phenotype_obj (dict):
dict(
phenotype_id = str,
feature = str, # description of phenotype
)
"""
phenotype_obj = {}
phenotype = adapter.hpo_term(phenotype_id)
if phenotype:
phenotype_obj['phenotype_id'] = phenotype['hpo_id']
phenotype_obj['feature'] = phenotype['description']
return phenotype | Build a small phenotype object
Build a dictionary with phenotype_id and description
Args:
phenotype_id (str): The phenotype id
adapter (scout.adapter.MongoAdapter)
Returns:
phenotype_obj (dict):
dict(
phenotype_id = str,
feature = str, # description of phenotype
) | Below is the the instruction that describes the task:
### Input:
Build a small phenotype object
Build a dictionary with phenotype_id and description
Args:
phenotype_id (str): The phenotype id
adapter (scout.adapter.MongoAdapter)
Returns:
phenotype_obj (dict):
dict(
phenotype_id = str,
feature = str, # description of phenotype
)
### Response:
def build_phenotype(phenotype_id, adapter):
"""Build a small phenotype object
Build a dictionary with phenotype_id and description
Args:
phenotype_id (str): The phenotype id
adapter (scout.adapter.MongoAdapter)
Returns:
phenotype_obj (dict):
dict(
phenotype_id = str,
feature = str, # description of phenotype
)
"""
phenotype_obj = {}
phenotype = adapter.hpo_term(phenotype_id)
if phenotype:
phenotype_obj['phenotype_id'] = phenotype['hpo_id']
phenotype_obj['feature'] = phenotype['description']
return phenotype |
def add(self, src_cls, module, package=None):
"""
Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError`
"""
# import module containing the layer class
mod = importlib.import_module(module, package)
# get layer class definition from the module
self.sources[src_cls] = getattr(mod, src_cls) | Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError` | Below is the the instruction that describes the task:
### Input:
Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError`
### Response:
def add(self, src_cls, module, package=None):
"""
Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError`
"""
# import module containing the layer class
mod = importlib.import_module(module, package)
# get layer class definition from the module
self.sources[src_cls] = getattr(mod, src_cls) |
def reverse_pad(self, data, block_size):
""" :meth:`.WBlockPadding.reverse_pad` method implementation
"""
pad_byte = data[-1]
if pad_byte > block_size:
raise ValueError('Invalid padding')
padding = bytes([pad_byte] * pad_byte)
if data[-pad_byte:] != padding:
raise ValueError('Invalid padding')
return data[:-pad_byte] | :meth:`.WBlockPadding.reverse_pad` method implementation | Below is the the instruction that describes the task:
### Input:
:meth:`.WBlockPadding.reverse_pad` method implementation
### Response:
def reverse_pad(self, data, block_size):
""" :meth:`.WBlockPadding.reverse_pad` method implementation
"""
pad_byte = data[-1]
if pad_byte > block_size:
raise ValueError('Invalid padding')
padding = bytes([pad_byte] * pad_byte)
if data[-pad_byte:] != padding:
raise ValueError('Invalid padding')
return data[:-pad_byte] |
def markUnwatched(self):
""" Mark video unwatched. """
key = '/:/unscrobble?key=%s&identifier=com.plexapp.plugins.library' % self.ratingKey
self._server.query(key)
self.reload() | Mark video unwatched. | Below is the the instruction that describes the task:
### Input:
Mark video unwatched.
### Response:
def markUnwatched(self):
""" Mark video unwatched. """
key = '/:/unscrobble?key=%s&identifier=com.plexapp.plugins.library' % self.ratingKey
self._server.query(key)
self.reload() |
def bootstrap(self, path_or_uri):
""" Initialize a database.
:param database_path: The absolute path to the database to initialize.
"""
_logger.debug("Bootstrapping new database: %s", path_or_uri)
self.database_uri = _urify_db(path_or_uri)
db = sa.create_engine(self.database_uri)
Base.metadata.create_all(db) | Initialize a database.
:param database_path: The absolute path to the database to initialize. | Below is the the instruction that describes the task:
### Input:
Initialize a database.
:param database_path: The absolute path to the database to initialize.
### Response:
def bootstrap(self, path_or_uri):
""" Initialize a database.
:param database_path: The absolute path to the database to initialize.
"""
_logger.debug("Bootstrapping new database: %s", path_or_uri)
self.database_uri = _urify_db(path_or_uri)
db = sa.create_engine(self.database_uri)
Base.metadata.create_all(db) |
def record_by_name(self, hostname):
"""
Returns dictionary with city data containing `country_code`, `country_name`,
`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,
`metro_code`, `area_code`, `region_code` and `time_zone`.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.record_by_addr(addr) | Returns dictionary with city data containing `country_code`, `country_name`,
`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,
`metro_code`, `area_code`, `region_code` and `time_zone`.
:arg hostname: Hostname (e.g. example.com) | Below is the the instruction that describes the task:
### Input:
Returns dictionary with city data containing `country_code`, `country_name`,
`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,
`metro_code`, `area_code`, `region_code` and `time_zone`.
:arg hostname: Hostname (e.g. example.com)
### Response:
def record_by_name(self, hostname):
"""
Returns dictionary with city data containing `country_code`, `country_name`,
`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,
`metro_code`, `area_code`, `region_code` and `time_zone`.
:arg hostname: Hostname (e.g. example.com)
"""
addr = self._gethostbyname(hostname)
return self.record_by_addr(addr) |
def ServiceWorker_setForceUpdateOnPageLoad(self, forceUpdateOnPageLoad):
"""
Function path: ServiceWorker.setForceUpdateOnPageLoad
Domain: ServiceWorker
Method name: setForceUpdateOnPageLoad
Parameters:
Required arguments:
'forceUpdateOnPageLoad' (type: boolean) -> No description
No return value.
"""
assert isinstance(forceUpdateOnPageLoad, (bool,)
), "Argument 'forceUpdateOnPageLoad' must be of type '['bool']'. Received type: '%s'" % type(
forceUpdateOnPageLoad)
subdom_funcs = self.synchronous_command(
'ServiceWorker.setForceUpdateOnPageLoad', forceUpdateOnPageLoad=
forceUpdateOnPageLoad)
return subdom_funcs | Function path: ServiceWorker.setForceUpdateOnPageLoad
Domain: ServiceWorker
Method name: setForceUpdateOnPageLoad
Parameters:
Required arguments:
'forceUpdateOnPageLoad' (type: boolean) -> No description
No return value. | Below is the the instruction that describes the task:
### Input:
Function path: ServiceWorker.setForceUpdateOnPageLoad
Domain: ServiceWorker
Method name: setForceUpdateOnPageLoad
Parameters:
Required arguments:
'forceUpdateOnPageLoad' (type: boolean) -> No description
No return value.
### Response:
def ServiceWorker_setForceUpdateOnPageLoad(self, forceUpdateOnPageLoad):
"""
Function path: ServiceWorker.setForceUpdateOnPageLoad
Domain: ServiceWorker
Method name: setForceUpdateOnPageLoad
Parameters:
Required arguments:
'forceUpdateOnPageLoad' (type: boolean) -> No description
No return value.
"""
assert isinstance(forceUpdateOnPageLoad, (bool,)
), "Argument 'forceUpdateOnPageLoad' must be of type '['bool']'. Received type: '%s'" % type(
forceUpdateOnPageLoad)
subdom_funcs = self.synchronous_command(
'ServiceWorker.setForceUpdateOnPageLoad', forceUpdateOnPageLoad=
forceUpdateOnPageLoad)
return subdom_funcs |
def wkt_rewind(x, digits = None):
'''
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
'''
z = wkt.loads(x)
if digits is None:
coords = z['coordinates']
nums = __flatten(coords)
dec_n = [ decimal.Decimal(str(w)).as_tuple().exponent for w in nums ]
digits = abs(statistics.mean(dec_n))
else:
if not isinstance(digits, int):
raise TypeError("'digits' must be an int")
wound = rewind(z)
back_to_wkt = wkt.dumps(wound, decimals = digits)
return back_to_wkt | reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7) | Below is the the instruction that describes the task:
### Input:
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
### Response:
def wkt_rewind(x, digits = None):
'''
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
'''
z = wkt.loads(x)
if digits is None:
coords = z['coordinates']
nums = __flatten(coords)
dec_n = [ decimal.Decimal(str(w)).as_tuple().exponent for w in nums ]
digits = abs(statistics.mean(dec_n))
else:
if not isinstance(digits, int):
raise TypeError("'digits' must be an int")
wound = rewind(z)
back_to_wkt = wkt.dumps(wound, decimals = digits)
return back_to_wkt |
def get_nic(self, datacenter_id, server_id, nic_id, depth=1):
"""
Retrieves a NIC by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic_id: The unique ID of the NIC.
:type nic_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s/servers/%s/nics/%s?depth=%s' % (
datacenter_id,
server_id,
nic_id,
str(depth)))
return response | Retrieves a NIC by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic_id: The unique ID of the NIC.
:type nic_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int`` | Below is the the instruction that describes the task:
### Input:
Retrieves a NIC by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic_id: The unique ID of the NIC.
:type nic_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
### Response:
def get_nic(self, datacenter_id, server_id, nic_id, depth=1):
"""
Retrieves a NIC by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic_id: The unique ID of the NIC.
:type nic_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s/servers/%s/nics/%s?depth=%s' % (
datacenter_id,
server_id,
nic_id,
str(depth)))
return response |
def get_list(self, list_id):
"""
Get info of specified list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.get_list(list_id=list_id))) | Get info of specified list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object | Below is the the instruction that describes the task:
### Input:
Get info of specified list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
### Response:
def get_list(self, list_id):
"""
Get info of specified list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.get_list(list_id=list_id))) |
def _parse_response(response, clazz, is_list=False, resource_name=None):
"""Parse a Marathon response into an object or list of objects."""
target = response.json()[
resource_name] if resource_name else response.json()
if is_list:
return [clazz.from_json(resource) for resource in target]
else:
        return clazz.from_json(target) | Parse a Marathon response into an object or list of objects. | Below is the instruction that describes the task:
### Input:
Parse a Marathon response into an object or list of objects.
### Response:
def _parse_response(response, clazz, is_list=False, resource_name=None):
"""Parse a Marathon response into an object or list of objects."""
target = response.json()[
resource_name] if resource_name else response.json()
if is_list:
return [clazz.from_json(resource) for resource in target]
else:
return clazz.from_json(target) |
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules | Using collected module definitions extend linkages | Below is the instruction that describes the task:
### Input:
Using collected module definitions extend linkages
### Response:
def _extend_module_definitions(self, graph):
"""
Using collected module definitions extend linkages
"""
for mod_id in self._modules:
mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
modules = []
for mod in graph.triples((mod_identity, SBOL.module, None)):
md = self._get_rdf_identified(graph, mod[2])
definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
md['definition'] = self._modules[definition_id]
maps_to = []
for m in graph.triples((mod[2], SBOL.mapsTo, None)):
mt = self._get_rdf_identified(graph, m[2])
mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
local_id = self._get_triplet_value(graph, m[2], SBOL.local)
remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
mt['local'] = self._functional_component_store[local_id]
mt['remote'] = self._functional_component_store[remote_id]
maps_to.append(MapsTo(**mt))
modules.append(Module(maps_to=maps_to, **md))
self._modules[mod_id].modules = modules |
def exif_orientation(im):
"""
Rotate and/or flip an image to respect the image's EXIF orientation data.
"""
try:
exif = im._getexif()
except Exception:
# There are many ways that _getexif fails, we're just going to blanket
# cover them all.
exif = None
if exif:
orientation = exif.get(0x0112)
if orientation == 2:
im = im.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
im = im.transpose(Image.ROTATE_180)
elif orientation == 4:
im = im.transpose(Image.FLIP_TOP_BOTTOM)
elif orientation == 5:
im = im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
im = im.transpose(Image.ROTATE_270)
elif orientation == 7:
im = im.transpose(Image.ROTATE_90).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
im = im.transpose(Image.ROTATE_90)
    return im | Rotate and/or flip an image to respect the image's EXIF orientation data. | Below is the instruction that describes the task:
### Input:
Rotate and/or flip an image to respect the image's EXIF orientation data.
### Response:
def exif_orientation(im):
"""
Rotate and/or flip an image to respect the image's EXIF orientation data.
"""
try:
exif = im._getexif()
except Exception:
# There are many ways that _getexif fails, we're just going to blanket
# cover them all.
exif = None
if exif:
orientation = exif.get(0x0112)
if orientation == 2:
im = im.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
im = im.transpose(Image.ROTATE_180)
elif orientation == 4:
im = im.transpose(Image.FLIP_TOP_BOTTOM)
elif orientation == 5:
im = im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
im = im.transpose(Image.ROTATE_270)
elif orientation == 7:
im = im.transpose(Image.ROTATE_90).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
im = im.transpose(Image.ROTATE_90)
return im |
def get_filters(self, filters=None, filters_map=None):
"""Converts a string given by the user into a valid api filter value.
:filters: Default filter values.
{'filter1': filter_value, 'filter2': filter_value}
:filters_map: mapping between user input and valid api filter values.
{'filter_name':{_("true_value"):True, _("false_value"):False}
"""
filters = filters or {}
filters_map = filters_map or {}
filter_action = self.table._meta._filter_action
if filter_action:
filter_field = self.table.get_filter_field()
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string().strip()
if filter_field and filter_string:
filter_map = filters_map.get(filter_field, {})
filters[filter_field] = filter_string
for k, v in filter_map.items():
# k is django.utils.functional.__proxy__
# and could not be searched in dict
if filter_string.lower() == k:
filters[filter_field] = v
break
return filters | Converts a string given by the user into a valid api filter value.
:filters: Default filter values.
{'filter1': filter_value, 'filter2': filter_value}
:filters_map: mapping between user input and valid api filter values.
    {'filter_name':{_("true_value"):True, _("false_value"):False} | Below is the instruction that describes the task:
### Input:
Converts a string given by the user into a valid api filter value.
:filters: Default filter values.
{'filter1': filter_value, 'filter2': filter_value}
:filters_map: mapping between user input and valid api filter values.
{'filter_name':{_("true_value"):True, _("false_value"):False}
### Response:
def get_filters(self, filters=None, filters_map=None):
"""Converts a string given by the user into a valid api filter value.
:filters: Default filter values.
{'filter1': filter_value, 'filter2': filter_value}
:filters_map: mapping between user input and valid api filter values.
{'filter_name':{_("true_value"):True, _("false_value"):False}
"""
filters = filters or {}
filters_map = filters_map or {}
filter_action = self.table._meta._filter_action
if filter_action:
filter_field = self.table.get_filter_field()
if filter_action.is_api_filter(filter_field):
filter_string = self.table.get_filter_string().strip()
if filter_field and filter_string:
filter_map = filters_map.get(filter_field, {})
filters[filter_field] = filter_string
for k, v in filter_map.items():
# k is django.utils.functional.__proxy__
# and could not be searched in dict
if filter_string.lower() == k:
filters[filter_field] = v
break
return filters |
def task_assignments(self):
'''Retrieves all tasks currently assigned to this project.'''
url = str.format(
'projects/{}/task_assignments',
self.id
)
response = self.hv.get_request(url)
    return [TaskAssignment(self.hv, tj['task_assignment']) for tj in response] | Retrieves all tasks currently assigned to this project. | Below is the instruction that describes the task:
### Input:
Retrieves all tasks currently assigned to this project.
### Response:
def task_assignments(self):
'''Retrieves all tasks currently assigned to this project.'''
url = str.format(
'projects/{}/task_assignments',
self.id
)
response = self.hv.get_request(url)
return [TaskAssignment(self.hv, tj['task_assignment']) for tj in response] |
def __execute_cmd(command, host=None,
admin_username=None, admin_password=None,
module=None):
'''
Execute rac commands
'''
if module:
# -a takes 'server' or 'switch' to represent all servers
# or all switches in a chassis. Allow
# user to say 'module=ALL_SERVER' or 'module=ALL_SWITCH'
if module.startswith('ALL_'):
modswitch = '-a '\
+ module[module.index('_') + 1:len(module)].lower()
else:
modswitch = '-m {0}'.format(module)
else:
modswitch = ''
if not host:
# This is a local call
cmd = __salt__['cmd.run_all']('racadm {0} {1}'.format(command,
modswitch))
else:
cmd = __salt__['cmd.run_all'](
'racadm -r {0} -u {1} -p {2} {3} {4}'.format(host,
admin_username,
admin_password,
command,
modswitch),
output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
return False
    return True | Execute rac commands | Below is the instruction that describes the task:
### Input:
Execute rac commands
### Response:
def __execute_cmd(command, host=None,
admin_username=None, admin_password=None,
module=None):
'''
Execute rac commands
'''
if module:
# -a takes 'server' or 'switch' to represent all servers
# or all switches in a chassis. Allow
# user to say 'module=ALL_SERVER' or 'module=ALL_SWITCH'
if module.startswith('ALL_'):
modswitch = '-a '\
+ module[module.index('_') + 1:len(module)].lower()
else:
modswitch = '-m {0}'.format(module)
else:
modswitch = ''
if not host:
# This is a local call
cmd = __salt__['cmd.run_all']('racadm {0} {1}'.format(command,
modswitch))
else:
cmd = __salt__['cmd.run_all'](
'racadm -r {0} -u {1} -p {2} {3} {4}'.format(host,
admin_username,
admin_password,
command,
modswitch),
output_loglevel='quiet')
if cmd['retcode'] != 0:
log.warning('racadm returned an exit code of %s', cmd['retcode'])
return False
return True |
def publish_topology_description_changed(self, previous_description,
new_description, topology_id):
"""Publish a TopologyDescriptionChangedEvent to all topology listeners.
:Parameters:
- `previous_description`: The previous topology description.
- `new_description`: The new topology description.
- `topology_id`: A unique identifier for the topology this server
is a part of.
"""
event = TopologyDescriptionChangedEvent(previous_description,
new_description, topology_id)
for subscriber in self.__topology_listeners:
try:
subscriber.description_changed(event)
except Exception:
_handle_exception() | Publish a TopologyDescriptionChangedEvent to all topology listeners.
:Parameters:
- `previous_description`: The previous topology description.
- `new_description`: The new topology description.
- `topology_id`: A unique identifier for the topology this server
      is a part of. | Below is the instruction that describes the task:
### Input:
Publish a TopologyDescriptionChangedEvent to all topology listeners.
:Parameters:
- `previous_description`: The previous topology description.
- `new_description`: The new topology description.
- `topology_id`: A unique identifier for the topology this server
is a part of.
### Response:
def publish_topology_description_changed(self, previous_description,
new_description, topology_id):
"""Publish a TopologyDescriptionChangedEvent to all topology listeners.
:Parameters:
- `previous_description`: The previous topology description.
- `new_description`: The new topology description.
- `topology_id`: A unique identifier for the topology this server
is a part of.
"""
event = TopologyDescriptionChangedEvent(previous_description,
new_description, topology_id)
for subscriber in self.__topology_listeners:
try:
subscriber.description_changed(event)
except Exception:
_handle_exception() |
def get_global_rate_limit(self):
"""Get the global rate limit per client.
:rtype: int
:returns: The global rate limit for each client.
"""
r = urllib.request.urlopen('https://archive.org/metadata/iamine-rate-limiter')
j = json.loads(r.read().decode('utf-8'))
return int(j.get('metadata', {}).get('rate_per_second', 300)) | Get the global rate limit per client.
:rtype: int
    :returns: The global rate limit for each client. | Below is the instruction that describes the task:
### Input:
Get the global rate limit per client.
:rtype: int
:returns: The global rate limit for each client.
### Response:
def get_global_rate_limit(self):
"""Get the global rate limit per client.
:rtype: int
:returns: The global rate limit for each client.
"""
r = urllib.request.urlopen('https://archive.org/metadata/iamine-rate-limiter')
j = json.loads(r.read().decode('utf-8'))
return int(j.get('metadata', {}).get('rate_per_second', 300)) |
def is_cell_empty(self, cell):
"""Checks if the cell is empty."""
if cell is None:
return True
elif self._is_cell_empty:
return self._is_cell_empty(cell)
else:
        return cell is None | Checks if the cell is empty. | Below is the instruction that describes the task:
### Input:
Checks if the cell is empty.
### Response:
def is_cell_empty(self, cell):
"""Checks if the cell is empty."""
if cell is None:
return True
elif self._is_cell_empty:
return self._is_cell_empty(cell)
else:
return cell is None |
def _loadBatch(self, item, batch):
"""
Loads the batch of items for this tree based on the inputed batch item.
:param item | <XBatchItem>
batch | <orb.RecordSet>
"""
if self.isThreadEnabled() and batch.isThreadEnabled():
self.loadBatchRequested.emit(batch)
self._batchloaders.append(weakref.ref(item))
else:
QApplication.setOverrideCursor(Qt.WaitCursor)
self.worker().loadRecords(batch)
QApplication.restoreOverrideCursor() | Loads the batch of items for this tree based on the inputed batch item.
:param item | <XBatchItem>
           batch | <orb.RecordSet> | Below is the instruction that describes the task:
### Input:
Loads the batch of items for this tree based on the inputed batch item.
:param item | <XBatchItem>
batch | <orb.RecordSet>
### Response:
def _loadBatch(self, item, batch):
"""
Loads the batch of items for this tree based on the inputed batch item.
:param item | <XBatchItem>
batch | <orb.RecordSet>
"""
if self.isThreadEnabled() and batch.isThreadEnabled():
self.loadBatchRequested.emit(batch)
self._batchloaders.append(weakref.ref(item))
else:
QApplication.setOverrideCursor(Qt.WaitCursor)
self.worker().loadRecords(batch)
QApplication.restoreOverrideCursor() |
def setStartdatetime(self, recording_start_time):
"""
Sets the recording start Time
Parameters
----------
recording_start_time: datetime object
Sets the recording start Time
"""
if isinstance(recording_start_time,datetime):
self.recording_start_time = recording_start_time
else:
self.recording_start_time = datetime.strptime(recording_start_time,"%d %b %Y %H:%M:%S")
self.update_header() | Sets the recording start Time
Parameters
----------
recording_start_time: datetime object
        Sets the recording start Time | Below is the instruction that describes the task:
### Input:
Sets the recording start Time
Parameters
----------
recording_start_time: datetime object
Sets the recording start Time
### Response:
def setStartdatetime(self, recording_start_time):
"""
Sets the recording start Time
Parameters
----------
recording_start_time: datetime object
Sets the recording start Time
"""
if isinstance(recording_start_time,datetime):
self.recording_start_time = recording_start_time
else:
self.recording_start_time = datetime.strptime(recording_start_time,"%d %b %Y %H:%M:%S")
self.update_header() |
def update(self, fontsize=0.0, text_color=None, border_color=None, fill_color=None, rotate=-1):
"""Update the appearance of an annotation."""
if self.type[0] == ANNOT_WIDGET:
print("Use updateWidget method for form fields.")
return False
val = _fitz.Annot_update(self, fontsize, text_color, border_color, fill_color, rotate)
"""
The following code fixes shortcomings of MuPDF's "pdf_update_annot"
function. Currently these are:
1. Opacity (all annots). MuPDF ignores this proprty. This requires
to add an ExtGState (extended graphics state) object in the
C code as well.
2. Dashing (all annots). MuPDF ignores this proprty.
3. Colors and font size for FreeText annotations.
4. Line end icons also for POLYGON and POLY_LINE annotations.
MuPDF only honors them for LINE annotations.
5. Always perform a "clean" for the annot, because MuPDF does not
enclose their syntax in a string pair "q ... Q", which may cause
Adobe and other readers not to display the annot.
"""
if not val is True: # skip if something went wrong
return val
def color_string(cs, code):
"""Return valid PDF color operator for a given color sequence.
"""
if cs is None: return ""
if hasattr(cs, "__float__") or len(cs) == 1:
app = " g\n" if code == "f" else " G\n"
elif len(cs) == 3:
app = " rg\n" if code == "f" else " RG\n"
else:
app = " k\n" if code == "f" else " K\n"
if hasattr(cs, "__len__"):
col = " ".join(map(str, cs)) + app
else:
col = "%g" % cs + app
return bytes(col, "utf8") if not fitz_py2 else col
type = self.type[0] # get the annot type
dt = self.border["dashes"] # get the dashes spec
bwidth = self.border["width"] # get border line width
stroke = self.colors["stroke"] # get the stroke color
fill = self.colors["fill"] # get the fill color
rect = None # used if we change the rect here
bfill = color_string(fill, "f")
p_ctm = self.parent._getTransformation() # page transformation matrix
imat = ~p_ctm # inverse page transf. matrix
line_end_le, line_end_ri = 0, 0 # line end codes
if self.lineEnds:
line_end_le, line_end_ri = self.lineEnds
ap = self._getAP() # get the annot operator source
ap_updated = False # assume we did nothing
if type == ANNOT_FREETEXT:
CheckColor(fill_color)
CheckColor(border_color)
CheckColor(text_color)
ap_tab = ap.splitlines() # split AP stream into lines
idx_BT = ap_tab.index(b"BT") # line no. of text start
# to avoid effort, we rely on a fixed format generated by MuPDF for
# this annot type: line 0 = fill color, line 5 border color, etc.
if fill_color is not None:
ap_tab[0] = color_string(fill_color, "f")
ap_updated = True
else:
ap_tab[0] = ap_tab[1] = ap_tab[2] = b""
ap_updated = True
if idx_BT == 7:
if bwidth > 0:
if border_color is not None:
ap_tab[4] = color_string(border_color, "s")
ap_updated = True
else: # for zero border width suppress border
ap_tab[3] = b"0 w"
ap_tab[4] = ap_tab[5] = ap_tab[6] = b""
ap_updated = True
if text_color is not None:
ap_tab[idx_BT + 1] = color_string(text_color, "f")
ap_updated = True
if fontsize > 0.0:
x = ap_tab[idx_BT + 2].split()
x[1] = b"%g" % fontsize
ap_tab[idx_BT + 2] = b" ".join(x)
ap_updated = True
if ap_updated:
ap = b"\n".join(ap_tab) # updated AP stream
if bfill != "":
if type == ANNOT_POLYGON:
ap = ap[:-1] + bfill + b"b" # close, fill, and stroke
ap_updated = True
elif type == ANNOT_POLYLINE:
ap = ap[:-1] + bfill + b"B" # fill and stroke
ap_updated = True
# Dashes not handled by MuPDF, so we do it here.
if dt:
dash = "[" + " ".join(map(str, dt)) + "] d\n"
ap = dash.encode("utf-8") + ap
# reset dashing - only applies for LINE annots with line ends given
ap = ap.replace(b"\nS\n", b"\nS\n[] d\n", 1)
ap_updated = True
# Opacity not handled by MuPDF, so we do it here. The /ExtGState object
# "Alp0" referenced here has already been added by our C code.
if 0 <= self.opacity < 1:
ap = b"/Alp0 gs\n" + ap
ap_updated = True
#----------------------------------------------------------------------
# the following handles line end symbols for 'Polygon' and 'Polyline
#----------------------------------------------------------------------
if max(line_end_le, line_end_ri) > 0 and type in (ANNOT_POLYGON, ANNOT_POLYLINE):
le_funcs = (None, TOOLS._le_square, TOOLS._le_circle,
TOOLS._le_diamond, TOOLS._le_openarrow,
TOOLS._le_closedarrow, TOOLS._le_butt,
TOOLS._le_ropenarrow, TOOLS._le_rclosedarrow,
TOOLS._le_slash)
le_funcs_range = range(1, len(le_funcs))
d = 4 * max(1, self.border["width"])
rect = self.rect + (-d, -d, d, d)
ap_updated = True
points = self.vertices
ap = b"q\n" + ap + b"\nQ\n"
if line_end_le in le_funcs_range:
p1 = Point(points[0]) * imat
p2 = Point(points[1]) * imat
left = le_funcs[line_end_le](self, p1, p2, False)
ap += bytes(left, "utf8") if not fitz_py2 else left
if line_end_ri in le_funcs_range:
p1 = Point(points[-2]) * imat
p2 = Point(points[-1]) * imat
left = le_funcs[line_end_ri](self, p1, p2, True)
ap += bytes(left, "utf8") if not fitz_py2 else left
if ap_updated:
if rect: # rect modified here?
self.setRect(rect)
self._setAP(ap, rect = 1)
else:
self._setAP(ap, rect = 0)
# always perform a clean to wrap stream by "q" / "Q"
self._cleanContents()
    return val | Update the appearance of an annotation. | Below is the instruction that describes the task:
### Input:
Update the appearance of an annotation.
### Response:
def update(self, fontsize=0.0, text_color=None, border_color=None, fill_color=None, rotate=-1):
"""Update the appearance of an annotation."""
if self.type[0] == ANNOT_WIDGET:
print("Use updateWidget method for form fields.")
return False
val = _fitz.Annot_update(self, fontsize, text_color, border_color, fill_color, rotate)
"""
The following code fixes shortcomings of MuPDF's "pdf_update_annot"
function. Currently these are:
1. Opacity (all annots). MuPDF ignores this proprty. This requires
to add an ExtGState (extended graphics state) object in the
C code as well.
2. Dashing (all annots). MuPDF ignores this proprty.
3. Colors and font size for FreeText annotations.
4. Line end icons also for POLYGON and POLY_LINE annotations.
MuPDF only honors them for LINE annotations.
5. Always perform a "clean" for the annot, because MuPDF does not
enclose their syntax in a string pair "q ... Q", which may cause
Adobe and other readers not to display the annot.
"""
if not val is True: # skip if something went wrong
return val
def color_string(cs, code):
"""Return valid PDF color operator for a given color sequence.
"""
if cs is None: return ""
if hasattr(cs, "__float__") or len(cs) == 1:
app = " g\n" if code == "f" else " G\n"
elif len(cs) == 3:
app = " rg\n" if code == "f" else " RG\n"
else:
app = " k\n" if code == "f" else " K\n"
if hasattr(cs, "__len__"):
col = " ".join(map(str, cs)) + app
else:
col = "%g" % cs + app
return bytes(col, "utf8") if not fitz_py2 else col
type = self.type[0] # get the annot type
dt = self.border["dashes"] # get the dashes spec
bwidth = self.border["width"] # get border line width
stroke = self.colors["stroke"] # get the stroke color
fill = self.colors["fill"] # get the fill color
rect = None # used if we change the rect here
bfill = color_string(fill, "f")
p_ctm = self.parent._getTransformation() # page transformation matrix
imat = ~p_ctm # inverse page transf. matrix
line_end_le, line_end_ri = 0, 0 # line end codes
if self.lineEnds:
line_end_le, line_end_ri = self.lineEnds
ap = self._getAP() # get the annot operator source
ap_updated = False # assume we did nothing
if type == ANNOT_FREETEXT:
CheckColor(fill_color)
CheckColor(border_color)
CheckColor(text_color)
ap_tab = ap.splitlines() # split AP stream into lines
idx_BT = ap_tab.index(b"BT") # line no. of text start
# to avoid effort, we rely on a fixed format generated by MuPDF for
# this annot type: line 0 = fill color, line 5 border color, etc.
if fill_color is not None:
ap_tab[0] = color_string(fill_color, "f")
ap_updated = True
else:
ap_tab[0] = ap_tab[1] = ap_tab[2] = b""
ap_updated = True
if idx_BT == 7:
if bwidth > 0:
if border_color is not None:
ap_tab[4] = color_string(border_color, "s")
ap_updated = True
else: # for zero border width suppress border
ap_tab[3] = b"0 w"
ap_tab[4] = ap_tab[5] = ap_tab[6] = b""
ap_updated = True
if text_color is not None:
ap_tab[idx_BT + 1] = color_string(text_color, "f")
ap_updated = True
if fontsize > 0.0:
x = ap_tab[idx_BT + 2].split()
x[1] = b"%g" % fontsize
ap_tab[idx_BT + 2] = b" ".join(x)
ap_updated = True
if ap_updated:
ap = b"\n".join(ap_tab) # updated AP stream
if bfill != "":
if type == ANNOT_POLYGON:
ap = ap[:-1] + bfill + b"b" # close, fill, and stroke
ap_updated = True
elif type == ANNOT_POLYLINE:
ap = ap[:-1] + bfill + b"B" # fill and stroke
ap_updated = True
# Dashes not handled by MuPDF, so we do it here.
if dt:
dash = "[" + " ".join(map(str, dt)) + "] d\n"
ap = dash.encode("utf-8") + ap
# reset dashing - only applies for LINE annots with line ends given
ap = ap.replace(b"\nS\n", b"\nS\n[] d\n", 1)
ap_updated = True
# Opacity not handled by MuPDF, so we do it here. The /ExtGState object
# "Alp0" referenced here has already been added by our C code.
if 0 <= self.opacity < 1:
ap = b"/Alp0 gs\n" + ap
ap_updated = True
#----------------------------------------------------------------------
# the following handles line end symbols for 'Polygon' and 'Polyline
#----------------------------------------------------------------------
if max(line_end_le, line_end_ri) > 0 and type in (ANNOT_POLYGON, ANNOT_POLYLINE):
le_funcs = (None, TOOLS._le_square, TOOLS._le_circle,
TOOLS._le_diamond, TOOLS._le_openarrow,
TOOLS._le_closedarrow, TOOLS._le_butt,
TOOLS._le_ropenarrow, TOOLS._le_rclosedarrow,
TOOLS._le_slash)
le_funcs_range = range(1, len(le_funcs))
d = 4 * max(1, self.border["width"])
rect = self.rect + (-d, -d, d, d)
ap_updated = True
points = self.vertices
ap = b"q\n" + ap + b"\nQ\n"
if line_end_le in le_funcs_range:
p1 = Point(points[0]) * imat
p2 = Point(points[1]) * imat
left = le_funcs[line_end_le](self, p1, p2, False)
ap += bytes(left, "utf8") if not fitz_py2 else left
if line_end_ri in le_funcs_range:
p1 = Point(points[-2]) * imat
p2 = Point(points[-1]) * imat
left = le_funcs[line_end_ri](self, p1, p2, True)
ap += bytes(left, "utf8") if not fitz_py2 else left
if ap_updated:
if rect: # rect modified here?
self.setRect(rect)
self._setAP(ap, rect = 1)
else:
self._setAP(ap, rect = 0)
# always perform a clean to wrap stream by "q" / "Q"
self._cleanContents()
return val |
def citedReferencesRetrieve(self, queryId, count=100, offset=1,
retrieveParameters=None):
"""The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesretrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.citedReferencesRetrieve(
queryId=queryId,
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) | The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesretrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
        is used. | Below is the instruction that describes the task:
### Input:
The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesretrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
### Response:
def citedReferencesRetrieve(self, queryId, count=100, offset=1,
retrieveParameters=None):
"""The citedReferencesRetrieve operation submits a query returned by a
previous citedReferences operation.
This operation is useful for overcoming the retrieval limit of 100
records per query. For example, a citedReferences operation may find
106 cited references, as revealed by the content of the recordsFound
element, but it returns only records 1-100. You could perform a
subsequent citedReferencesretrieve operation to obtain records 101-106.
:queryId: The query ID from a previous citedReferences operation
:count: Number of records to display in the result. Cannot be less than
0 and cannot be greater than 100. If count is 0 then only the
summary information will be returned.
:offset: First record in results to return. Must be greater than zero
:retrieveParameters: Retrieve parameters. If omitted the result of
make_retrieveParameters(offset, count, 'RS', 'D')
is used.
"""
return self._search.service.citedReferencesRetrieve(
queryId=queryId,
retrieveParameters=(retrieveParameters or
self.make_retrieveParameters(offset, count))
) |
def sample(self, samples=[], **sample_values):
"""
Sample the Raster along one or both of its dimensions,
returning a reduced dimensionality type, which is either
a ItemTable, Curve or Scatter. If two dimension samples
and a new_xaxis is provided the sample will be the value
of the sampled unit indexed by the value in the new_xaxis
tuple.
"""
if isinstance(samples, tuple):
X, Y = samples
samples = zip(X, Y)
params = dict(self.get_param_values(onlychanged=True),
vdims=self.vdims)
params.pop('extents', None)
params.pop('bounds', None)
if len(sample_values) == self.ndims or len(samples):
if not len(samples):
samples = zip(*[c if isinstance(c, list) else [c] for _, c in
sorted([(self.get_dimension_index(k), v) for k, v in
sample_values.items()])])
table_data = [c+(self._zdata[self._coord2matrix(c)],)
for c in samples]
params['kdims'] = self.kdims
return Table(table_data, **params)
else:
dimension, sample_coord = list(sample_values.items())[0]
if isinstance(sample_coord, slice):
raise ValueError(
'Raster sampling requires coordinates not slices,'
'use regular slicing syntax.')
# Indices inverted for indexing
sample_ind = self.get_dimension_index(dimension)
if sample_ind is None:
raise Exception("Dimension %s not found during sampling" % dimension)
other_dimension = [d for i, d in enumerate(self.kdims) if
i != sample_ind]
# Generate sample slice
sample = [slice(None) for i in range(self.ndims)]
coord_fn = (lambda v: (v, 0)) if not sample_ind else (lambda v: (0, v))
sample[sample_ind] = self._coord2matrix(coord_fn(sample_coord))[abs(sample_ind-1)]
# Sample data
x_vals = self.dimension_values(other_dimension[0].name, False)
ydata = self._zdata[tuple(sample[::-1])]
if hasattr(self, 'bounds') and sample_ind == 0: ydata = ydata[::-1]
data = list(zip(x_vals, ydata))
params['kdims'] = other_dimension
return Curve(data, **params) | Sample the Raster along one or both of its dimensions,
returning a reduced dimensionality type, which is either
a ItemTable, Curve or Scatter. If two dimension samples
and a new_xaxis is provided the sample will be the value
of the sampled unit indexed by the value in the new_xaxis
    tuple. | Below is the instruction that describes the task:
### Input:
Sample the Raster along one or both of its dimensions,
returning a reduced dimensionality type, which is either
a ItemTable, Curve or Scatter. If two dimension samples
and a new_xaxis is provided the sample will be the value
of the sampled unit indexed by the value in the new_xaxis
tuple.
### Response:
def sample(self, samples=[], **sample_values):
"""
Sample the Raster along one or both of its dimensions,
returning a reduced dimensionality type, which is either
a ItemTable, Curve or Scatter. If two dimension samples
and a new_xaxis is provided the sample will be the value
of the sampled unit indexed by the value in the new_xaxis
tuple.
"""
if isinstance(samples, tuple):
X, Y = samples
samples = zip(X, Y)
params = dict(self.get_param_values(onlychanged=True),
vdims=self.vdims)
params.pop('extents', None)
params.pop('bounds', None)
if len(sample_values) == self.ndims or len(samples):
if not len(samples):
samples = zip(*[c if isinstance(c, list) else [c] for _, c in
sorted([(self.get_dimension_index(k), v) for k, v in
sample_values.items()])])
table_data = [c+(self._zdata[self._coord2matrix(c)],)
for c in samples]
params['kdims'] = self.kdims
return Table(table_data, **params)
else:
dimension, sample_coord = list(sample_values.items())[0]
if isinstance(sample_coord, slice):
raise ValueError(
'Raster sampling requires coordinates not slices,'
'use regular slicing syntax.')
# Indices inverted for indexing
sample_ind = self.get_dimension_index(dimension)
if sample_ind is None:
raise Exception("Dimension %s not found during sampling" % dimension)
other_dimension = [d for i, d in enumerate(self.kdims) if
i != sample_ind]
# Generate sample slice
sample = [slice(None) for i in range(self.ndims)]
coord_fn = (lambda v: (v, 0)) if not sample_ind else (lambda v: (0, v))
sample[sample_ind] = self._coord2matrix(coord_fn(sample_coord))[abs(sample_ind-1)]
# Sample data
x_vals = self.dimension_values(other_dimension[0].name, False)
ydata = self._zdata[tuple(sample[::-1])]
if hasattr(self, 'bounds') and sample_ind == 0: ydata = ydata[::-1]
data = list(zip(x_vals, ydata))
params['kdims'] = other_dimension
return Curve(data, **params) |
def _kl_half_normal_half_normal(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_half_normal_half_normal"):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119
return (tf.math.log(b.scale) - tf.math.log(a.scale) +
(a.scale**2 - b.scale**2) / (2 * b.scale**2)) | Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
    Batchwise KL(a || b) | Below is the instruction that describes the task:
### Input:
Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
### Response:
def _kl_half_normal_half_normal(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
"""
with tf.name_scope(name or "kl_half_normal_half_normal"):
# Consistent with
# http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 119
return (tf.math.log(b.scale) - tf.math.log(a.scale) +
(a.scale**2 - b.scale**2) / (2 * b.scale**2)) |
def _init_ca(self):
"""Generate the root ca's cert and key.
"""
if not exists(path_join(self.ca_dir, 'ca.cnf')):
with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
fh.write(
CA_CONF_TEMPLATE % (self.get_conf_variables()))
if not exists(path_join(self.ca_dir, 'signing.cnf')):
with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
fh.write(
SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
if exists(self.ca_cert) or exists(self.ca_key):
raise RuntimeError("Initialized called when CA already exists")
cmd = ['openssl', 'req', '-config', self.ca_conf,
'-x509', '-nodes', '-newkey', 'rsa',
'-days', self.default_ca_expiry,
'-keyout', self.ca_key, '-out', self.ca_cert,
'-outform', 'PEM']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        log("CA Init:\n %s" % output, level=DEBUG) | Generate the root ca's cert and key. | Below is the the instruction that describes the task:
### Input:
Generate the root ca's cert and key.
### Response:
def _init_ca(self):
"""Generate the root ca's cert and key.
"""
if not exists(path_join(self.ca_dir, 'ca.cnf')):
with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
fh.write(
CA_CONF_TEMPLATE % (self.get_conf_variables()))
if not exists(path_join(self.ca_dir, 'signing.cnf')):
with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
fh.write(
SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
if exists(self.ca_cert) or exists(self.ca_key):
raise RuntimeError("Initialized called when CA already exists")
cmd = ['openssl', 'req', '-config', self.ca_conf,
'-x509', '-nodes', '-newkey', 'rsa',
'-days', self.default_ca_expiry,
'-keyout', self.ca_key, '-out', self.ca_cert,
'-outform', 'PEM']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
log("CA Init:\n %s" % output, level=DEBUG) |
def __generate_new_sandbox_user():
"""
:rtype: endpoint.SandboxUser
"""
url = ApiEnvironmentType.SANDBOX.uri_base + __ENDPOINT_SANDBOX_USER
headers = {
ApiClient.HEADER_REQUEST_ID: __UNIQUE_REQUEST_ID,
ApiClient.HEADER_CACHE_CONTROL: ApiClient._CACHE_CONTROL_NONE,
ApiClient.HEADER_GEOLOCATION: ApiClient._GEOLOCATION_ZERO,
ApiClient.HEADER_LANGUAGE: ApiClient._LANGUAGE_EN_US,
ApiClient.HEADER_REGION: ApiClient._REGION_NL_NL,
}
response = requests.request(ApiClient._METHOD_POST, url, headers=headers)
if response.status_code is ApiClient._STATUS_CODE_OK:
response_json = json.loads(response.text)
return endpoint.SandboxUser.from_json(
json.dumps(response_json[__FIELD_RESPONSE][__INDEX_FIRST][
__FIELD_API_KEY]))
    raise BunqException(_ERROR_COULD_NOT_CREATE_NEW_SANDBOX_USER) | :rtype: endpoint.SandboxUser | Below is the instruction that describes the task:
### Input:
:rtype: endpoint.SandboxUser
### Response:
def __generate_new_sandbox_user():
"""
:rtype: endpoint.SandboxUser
"""
url = ApiEnvironmentType.SANDBOX.uri_base + __ENDPOINT_SANDBOX_USER
headers = {
ApiClient.HEADER_REQUEST_ID: __UNIQUE_REQUEST_ID,
ApiClient.HEADER_CACHE_CONTROL: ApiClient._CACHE_CONTROL_NONE,
ApiClient.HEADER_GEOLOCATION: ApiClient._GEOLOCATION_ZERO,
ApiClient.HEADER_LANGUAGE: ApiClient._LANGUAGE_EN_US,
ApiClient.HEADER_REGION: ApiClient._REGION_NL_NL,
}
response = requests.request(ApiClient._METHOD_POST, url, headers=headers)
if response.status_code is ApiClient._STATUS_CODE_OK:
response_json = json.loads(response.text)
return endpoint.SandboxUser.from_json(
json.dumps(response_json[__FIELD_RESPONSE][__INDEX_FIRST][
__FIELD_API_KEY]))
raise BunqException(_ERROR_COULD_NOT_CREATE_NEW_SANDBOX_USER) |
def genargs(prog: Optional[str] = None) -> ArgumentParser:
"""
Create a command line parser
:return: parser
"""
parser = ArgumentParser(prog)
parser.add_argument("rdf", help="Input RDF file or SPARQL endpoint if slurper or sparql options")
parser.add_argument("shex", help="ShEx specification")
parser.add_argument("-f", "--format", help="Input RDF Format", default="turtle")
parser.add_argument("-s", "--start", help="Start shape. If absent use ShEx start node.")
parser.add_argument("-ut", "--usetype", help="Start shape is rdf:type of focus", action="store_true")
parser.add_argument("-sp", "--startpredicate", help="Start shape is object of this predicate")
parser.add_argument("-fn", "--focus", help="RDF focus node")
parser.add_argument("-A", "--allsubjects", help="Evaluate all non-bnode subjects in the graph", action="store_true")
parser.add_argument("-d", "--debug", action="store_true", help="Add debug output")
parser.add_argument("-ss", "--slurper", action="store_true", help="Use SPARQL slurper graph")
parser.add_argument("-cf", "--flattener", action="store_true", help="Use RDF Collections flattener graph")
parser.add_argument("-sq", "--sparql", help="SPARQL query to generate focus nodes")
parser.add_argument("-se", "--stoponerror", help="Stop on an error", action="store_true")
parser.add_argument("--stopafter", help="Stop after N nodes", type=int)
parser.add_argument("-ps", "--printsparql", help="Print SPARQL queries as they are executed", action="store_true")
parser.add_argument("-pr", "--printsparqlresults", help="Print SPARQL query and results", action="store_true")
parser.add_argument("-gn", "--graphname", help="Specific SPARQL graph to query - use '' for any named graph")
parser.add_argument("-pb", "--persistbnodes", help="Treat BNodes as persistent in SPARQL endpoint",
action="store_true")
return parser | Create a command line parser
    :return: parser | Below is the instruction that describes the task:
### Input:
Create a command line parser
:return: parser
### Response:
def genargs(prog: Optional[str] = None) -> ArgumentParser:
"""
Create a command line parser
:return: parser
"""
parser = ArgumentParser(prog)
parser.add_argument("rdf", help="Input RDF file or SPARQL endpoint if slurper or sparql options")
parser.add_argument("shex", help="ShEx specification")
parser.add_argument("-f", "--format", help="Input RDF Format", default="turtle")
parser.add_argument("-s", "--start", help="Start shape. If absent use ShEx start node.")
parser.add_argument("-ut", "--usetype", help="Start shape is rdf:type of focus", action="store_true")
parser.add_argument("-sp", "--startpredicate", help="Start shape is object of this predicate")
parser.add_argument("-fn", "--focus", help="RDF focus node")
parser.add_argument("-A", "--allsubjects", help="Evaluate all non-bnode subjects in the graph", action="store_true")
parser.add_argument("-d", "--debug", action="store_true", help="Add debug output")
parser.add_argument("-ss", "--slurper", action="store_true", help="Use SPARQL slurper graph")
parser.add_argument("-cf", "--flattener", action="store_true", help="Use RDF Collections flattener graph")
parser.add_argument("-sq", "--sparql", help="SPARQL query to generate focus nodes")
parser.add_argument("-se", "--stoponerror", help="Stop on an error", action="store_true")
parser.add_argument("--stopafter", help="Stop after N nodes", type=int)
parser.add_argument("-ps", "--printsparql", help="Print SPARQL queries as they are executed", action="store_true")
parser.add_argument("-pr", "--printsparqlresults", help="Print SPARQL query and results", action="store_true")
parser.add_argument("-gn", "--graphname", help="Specific SPARQL graph to query - use '' for any named graph")
parser.add_argument("-pb", "--persistbnodes", help="Treat BNodes as persistent in SPARQL endpoint",
action="store_true")
return parser |
def currentValue(self):
"""
Returns the current value for the widget. If this widget is checkable
then the bitor value for all checked items is returned, otherwise, the
selected value is returned.
:return <int>
"""
enum = self.enum()
if ( self.isCheckable() ):
value = 0
for i in self.checkedIndexes():
value |= enum[nativestring(self.itemText(i))]
return value
else:
try:
return enum[nativestring(self.itemText(self.currentIndex()))]
except KeyError:
return 0 | Returns the current value for the widget. If this widget is checkable
then the bitor value for all checked items is returned, otherwise, the
selected value is returned.
        :return <int> | Below is the instruction that describes the task:
### Input:
Returns the current value for the widget. If this widget is checkable
then the bitor value for all checked items is returned, otherwise, the
selected value is returned.
:return <int>
### Response:
def currentValue(self):
"""
Returns the current value for the widget. If this widget is checkable
then the bitor value for all checked items is returned, otherwise, the
selected value is returned.
:return <int>
"""
enum = self.enum()
if ( self.isCheckable() ):
value = 0
for i in self.checkedIndexes():
value |= enum[nativestring(self.itemText(i))]
return value
else:
try:
return enum[nativestring(self.itemText(self.currentIndex()))]
except KeyError:
return 0 |
def info(gandi, resource):
""" Display information about a disk.
Resource can be a disk name or ID
"""
output_keys = ['name', 'state', 'size', 'type', 'id', 'dc', 'vm',
'profile', 'kernel', 'cmdline']
resource = sorted(tuple(set(resource)))
vms = dict([(vm['id'], vm) for vm in gandi.iaas.list()])
datacenters = gandi.datacenter.list()
result = []
for num, item in enumerate(resource):
if num:
gandi.separator_line()
disk = gandi.disk.info(item)
output_disk(gandi, disk, datacenters, vms, [], output_keys)
result.append(disk)
return result | Display information about a disk.
    Resource can be a disk name or ID | Below is the instruction that describes the task:
### Input:
Display information about a disk.
Resource can be a disk name or ID
### Response:
def info(gandi, resource):
""" Display information about a disk.
Resource can be a disk name or ID
"""
output_keys = ['name', 'state', 'size', 'type', 'id', 'dc', 'vm',
'profile', 'kernel', 'cmdline']
resource = sorted(tuple(set(resource)))
vms = dict([(vm['id'], vm) for vm in gandi.iaas.list()])
datacenters = gandi.datacenter.list()
result = []
for num, item in enumerate(resource):
if num:
gandi.separator_line()
disk = gandi.disk.info(item)
output_disk(gandi, disk, datacenters, vms, [], output_keys)
result.append(disk)
return result |
def items(self):
"""Yield the async reuslts for the context."""
for key, task in self._tasks:
if not (task and task.result):
yield key, None
else:
                yield key, json.loads(task.result)["payload"] | Yield the async reuslts for the context. | Below is the instruction that describes the task:
### Input:
Yield the async reuslts for the context.
### Response:
def items(self):
"""Yield the async reuslts for the context."""
for key, task in self._tasks:
if not (task and task.result):
yield key, None
else:
yield key, json.loads(task.result)["payload"] |
def schedule_in(self, job, timedelta):
""" Schedule job to run at datetime.timedelta from now."""
now = long(self._now() * 1e6)
when = now + timedelta.total_seconds() * 1e6
        self.schedule(job, when) | Schedule job to run at datetime.timedelta from now. | Below is the instruction that describes the task:
### Input:
Schedule job to run at datetime.timedelta from now.
### Response:
def schedule_in(self, job, timedelta):
""" Schedule job to run at datetime.timedelta from now."""
now = long(self._now() * 1e6)
when = now + timedelta.total_seconds() * 1e6
self.schedule(job, when) |
def get_content_type(content_type):
"""Extract the MIME type value from a content type string.
Removes any subtype and parameter values that may be present in the string.
Args:
content_type: str
String with content type and optional subtype and parameter fields.
Returns:
str: String with only content type
Example:
::
Input: multipart/form-data; boundary=aBoundaryString
Returns: multipart/form-data
"""
m = email.message.Message()
m['Content-Type'] = content_type
return m.get_content_type() | Extract the MIME type value from a content type string.
Removes any subtype and parameter values that may be present in the string.
Args:
content_type: str
String with content type and optional subtype and parameter fields.
Returns:
str: String with only content type
Example:
::
Input: multipart/form-data; boundary=aBoundaryString
        Returns: multipart/form-data | Below is the instruction that describes the task:
### Input:
Extract the MIME type value from a content type string.
Removes any subtype and parameter values that may be present in the string.
Args:
content_type: str
String with content type and optional subtype and parameter fields.
Returns:
str: String with only content type
Example:
::
Input: multipart/form-data; boundary=aBoundaryString
Returns: multipart/form-data
### Response:
def get_content_type(content_type):
"""Extract the MIME type value from a content type string.
Removes any subtype and parameter values that may be present in the string.
Args:
content_type: str
String with content type and optional subtype and parameter fields.
Returns:
str: String with only content type
Example:
::
Input: multipart/form-data; boundary=aBoundaryString
Returns: multipart/form-data
"""
m = email.message.Message()
m['Content-Type'] = content_type
return m.get_content_type() |
def plot(self, columns=None, **errorbar_kwargs):
"""
Produces a visual representation of the coefficients, including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Returns
-------
ax: matplotlib axis
the matplotlib axis that be edited.
"""
from matplotlib import pyplot as plt
ax = errorbar_kwargs.pop("ax", None) or plt.figure().add_subplot(111)
errorbar_kwargs.setdefault("c", "k")
errorbar_kwargs.setdefault("fmt", "s")
errorbar_kwargs.setdefault("markerfacecolor", "white")
errorbar_kwargs.setdefault("markeredgewidth", 1.25)
errorbar_kwargs.setdefault("elinewidth", 1.25)
errorbar_kwargs.setdefault("capsize", 3)
z = inv_normal_cdf(1 - self.alpha / 2)
if columns is None:
columns = self.hazards_.index
yaxis_locations = list(range(len(columns)))
symmetric_errors = z * self.standard_errors_[columns].to_frame().squeeze(axis=1).values.copy()
hazards = self.hazards_[columns].values.copy()
order = np.argsort(hazards)
ax.errorbar(hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
best_ylim = ax.get_ylim()
ax.vlines(0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
ax.set_ylim(best_ylim)
tick_labels = [columns[i] for i in order]
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(tick_labels)
ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
return ax | Produces a visual representation of the coefficients, including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Returns
-------
ax: matplotlib axis
        the matplotlib axis that be edited. | Below is the instruction that describes the task:
### Input:
Produces a visual representation of the coefficients, including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Returns
-------
ax: matplotlib axis
the matplotlib axis that be edited.
### Response:
def plot(self, columns=None, **errorbar_kwargs):
"""
Produces a visual representation of the coefficients, including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Returns
-------
ax: matplotlib axis
the matplotlib axis that be edited.
"""
from matplotlib import pyplot as plt
ax = errorbar_kwargs.pop("ax", None) or plt.figure().add_subplot(111)
errorbar_kwargs.setdefault("c", "k")
errorbar_kwargs.setdefault("fmt", "s")
errorbar_kwargs.setdefault("markerfacecolor", "white")
errorbar_kwargs.setdefault("markeredgewidth", 1.25)
errorbar_kwargs.setdefault("elinewidth", 1.25)
errorbar_kwargs.setdefault("capsize", 3)
z = inv_normal_cdf(1 - self.alpha / 2)
if columns is None:
columns = self.hazards_.index
yaxis_locations = list(range(len(columns)))
symmetric_errors = z * self.standard_errors_[columns].to_frame().squeeze(axis=1).values.copy()
hazards = self.hazards_[columns].values.copy()
order = np.argsort(hazards)
ax.errorbar(hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
best_ylim = ax.get_ylim()
ax.vlines(0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
ax.set_ylim(best_ylim)
tick_labels = [columns[i] for i in order]
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(tick_labels)
ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
return ax |
def change_existence(self, is_hidden):
# type: (bool) -> None
'''
Change the ISO9660 existence flag of this Directory Record.
Parameters:
is_hidden - True if this Directory Record should be hidden, False otherwise.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
if is_hidden:
self.file_flags |= (1 << self.FILE_FLAG_EXISTENCE_BIT)
else:
self.file_flags &= ~(1 << self.FILE_FLAG_EXISTENCE_BIT) | Change the ISO9660 existence flag of this Directory Record.
Parameters:
is_hidden - True if this Directory Record should be hidden, False otherwise.
Returns:
         Nothing. | Below is the instruction that describes the task:
### Input:
Change the ISO9660 existence flag of this Directory Record.
Parameters:
is_hidden - True if this Directory Record should be hidden, False otherwise.
Returns:
Nothing.
### Response:
def change_existence(self, is_hidden):
# type: (bool) -> None
'''
Change the ISO9660 existence flag of this Directory Record.
Parameters:
is_hidden - True if this Directory Record should be hidden, False otherwise.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
if is_hidden:
self.file_flags |= (1 << self.FILE_FLAG_EXISTENCE_BIT)
else:
self.file_flags &= ~(1 << self.FILE_FLAG_EXISTENCE_BIT) |
def hardware_flexport_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
flexport = ET.SubElement(hardware, "flexport")
id = ET.SubElement(flexport, "id")
id.text = kwargs.pop('id')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hardware_flexport_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
flexport = ET.SubElement(hardware, "flexport")
id = ET.SubElement(flexport, "id")
id.text = kwargs.pop('id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.focus_changed.connect(self.main.plugin_focus_changed)
self.main.add_dockwidget(self)
# Connecting the following signal once the dockwidget has been created:
        self.shell.exception_occurred.connect(self.exception_occurred) | Register plugin in Spyder's main window | Below is the instruction that describes the task:
### Input:
Register plugin in Spyder's main window
### Response:
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.focus_changed.connect(self.main.plugin_focus_changed)
self.main.add_dockwidget(self)
# Connecting the following signal once the dockwidget has been created:
self.shell.exception_occurred.connect(self.exception_occurred) |
def value(self):
"""gets the color value"""
if self._outline is None:
return {
"type" : self._type,
"style" : self._style,
"color" : self._color.value,
}
else:
return {
"type" : self._type,
"style" : self._style,
"color" : self._color.value,
"outline" : self._outline.value
            } | gets the color value | Below is the instruction that describes the task:
### Input:
gets the color value
### Response:
def value(self):
"""gets the color value"""
if self._outline is None:
return {
"type" : self._type,
"style" : self._style,
"color" : self._color.value,
}
else:
return {
"type" : self._type,
"style" : self._style,
"color" : self._color.value,
"outline" : self._outline.value
} |
def download_needed(self, response, outfile, quiet=True):
""" determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose output (default is True)
"""
try:
remote_date = datetime.strptime(response.headers['Last-Modified'],
'%a, %d %b %Y %X %Z')
if isfile(outfile):
local_date = datetime.fromtimestamp(os.path.getmtime(outfile))
if remote_date <= local_date:
if not quiet:
print(os.path.basename(outfile) +
': Skipping, found more recently modified local '
'copy (use --force to force download)')
return False
except:
pass
return True | determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
        quiet: suppress verbose output (default is True) | Below is the instruction that describes the task:
### Input:
determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose output (default is True)
### Response:
def download_needed(self, response, outfile, quiet=True):
""" determine if a download is needed based on timestamp. Return True
if needed (remote is newer) or False if local is newest.
Parameters
==========
response: the response from the API
outfile: the output file to write to
quiet: suppress verbose output (default is True)
"""
try:
remote_date = datetime.strptime(response.headers['Last-Modified'],
'%a, %d %b %Y %X %Z')
if isfile(outfile):
local_date = datetime.fromtimestamp(os.path.getmtime(outfile))
if remote_date <= local_date:
if not quiet:
print(os.path.basename(outfile) +
': Skipping, found more recently modified local '
'copy (use --force to force download)')
return False
except:
pass
return True |
def is_denied(self, role, method, resource):
"""Check wherther role is denied to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
"""
return (role, method, resource) in self._denied | Check wherther role is denied to access resource
:param role: Role to be checked.
:param method: Method to be checked.
        :param resource: View function to be checked. | Below is the instruction that describes the task:
### Input:
Check wherther role is denied to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
### Response:
def is_denied(self, role, method, resource):
"""Check wherther role is denied to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
"""
return (role, method, resource) in self._denied |
def convertDict2Attrs(self, *args, **kwargs):
"""The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Branch object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuBranch just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuBranch, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
the list.
"""
for n,b in enumerate(self.attrs):
# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
branch = self.mambubranchclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambubranchclass = MambuBranch
branch = self.mambubranchclass(urlfunc=None, entid=None, *args, **kwargs)
branch.init(b, *args, **kwargs)
self.attrs[n] = branch | The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Branch object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuBranch just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuBranch, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
        the list. | Below is the instruction that describes the task:
### Input:
The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Branch object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuBranch just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuBranch, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
the list.
### Response:
def convertDict2Attrs(self, *args, **kwargs):
"""The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Branch object for each one, initializing them
one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuBranch just
created.
.. todo:: pass a valid (perhaps default) urlfunc, and its
corresponding id to entid to each MambuBranch, telling
MambuStruct not to connect() by default. It's desirable to
connect at any other further moment to refresh some element in
the list.
"""
for n,b in enumerate(self.attrs):
# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
branch = self.mambubranchclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambubranchclass = MambuBranch
branch = self.mambubranchclass(urlfunc=None, entid=None, *args, **kwargs)
branch.init(b, *args, **kwargs)
self.attrs[n] = branch |
def err(msg, *args, **kw):
# type: (str, *Any, **Any) -> None
""" Per step status messages
Use this locally in a command definition to highlight more important
information.
"""
if len(args) or len(kw):
msg = msg.format(*args, **kw)
shell.cprint('-- <31>{}<0>'.format(msg)) | Per step status messages
Use this locally in a command definition to highlight more important
        information. | Below is the instruction that describes the task:
### Input:
Per step status messages
Use this locally in a command definition to highlight more important
information.
### Response:
def err(msg, *args, **kw):
# type: (str, *Any, **Any) -> None
""" Per step status messages
Use this locally in a command definition to highlight more important
information.
"""
if len(args) or len(kw):
msg = msg.format(*args, **kw)
shell.cprint('-- <31>{}<0>'.format(msg)) |
def get_max_bond_distance(self, el1_sym, el2_sym):
"""
Use Jmol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
Returns: (float) max bond length
"""
return sqrt(
(self.el_radius[el1_sym] + self.el_radius[el2_sym] + self.tol) ** 2) | Use Jmol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
        Returns: (float) max bond length | Below is the instruction that describes the task:
### Input:
Use Jmol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
Returns: (float) max bond length
### Response:
def get_max_bond_distance(self, el1_sym, el2_sym):
"""
Use Jmol algorithm to determine bond length from atomic parameters
Args:
el1_sym: (str) symbol of atom 1
el2_sym: (str) symbol of atom 2
Returns: (float) max bond length
"""
return sqrt(
(self.el_radius[el1_sym] + self.el_radius[el2_sym] + self.tol) ** 2) |
def signature_str(self):
"""
str: func_name(type1,type2) returns (type3)
Return the function signature as a str (contains the return values)
"""
name, parameters, returnVars = self.signature
return name+'('+','.join(parameters)+') returns('+','.join(returnVars)+')' | str: func_name(type1,type2) returns (type3)
Return the function signature as a str (contains the return values) | Below is the the instruction that describes the task:
### Input:
str: func_name(type1,type2) returns (type3)
Return the function signature as a str (contains the return values)
### Response:
def signature_str(self):
"""
str: func_name(type1,type2) returns (type3)
Return the function signature as a str (contains the return values)
"""
name, parameters, returnVars = self.signature
return name+'('+','.join(parameters)+') returns('+','.join(returnVars)+')' |
def add(symbol: str, date, value, currency: str):
""" Add individual price """
symbol = symbol.upper()
currency = currency.upper()
app = PriceDbApplication()
price = PriceModel()
# security = SecuritySymbol("", "")
price.symbol.parse(symbol)
# price.symbol.mnemonic = price.symbol.mnemonic.upper()
# date_str = f"{date}"
# date_format = "%Y-%m-%d"
# if time:
# date_str = f"{date_str}T{time}"
# date_format += "T%H:%M:%S"
# datum.from_iso_date_string(date)
# price.datetime = datetime.strptime(date_str, date_format)
price.datum.from_iso_date_string(date)
price.value = Decimal(value)
price.currency = currency
app.add_price(price)
app.save()
click.echo("Price added.") | Add individual price | Below is the the instruction that describes the task:
### Input:
Add individual price
### Response:
def add(symbol: str, date, value, currency: str):
""" Add individual price """
symbol = symbol.upper()
currency = currency.upper()
app = PriceDbApplication()
price = PriceModel()
# security = SecuritySymbol("", "")
price.symbol.parse(symbol)
# price.symbol.mnemonic = price.symbol.mnemonic.upper()
# date_str = f"{date}"
# date_format = "%Y-%m-%d"
# if time:
# date_str = f"{date_str}T{time}"
# date_format += "T%H:%M:%S"
# datum.from_iso_date_string(date)
# price.datetime = datetime.strptime(date_str, date_format)
price.datum.from_iso_date_string(date)
price.value = Decimal(value)
price.currency = currency
app.add_price(price)
app.save()
click.echo("Price added.") |
def pivot_wavelength(self):
"""Get the bandpass' pivot wavelength.
Unlike calc_pivot_wavelength(), this function will use a cached
value if available.
"""
wl = self.registry._pivot_wavelengths.get((self.telescope, self.band))
if wl is not None:
return wl
wl = self.calc_pivot_wavelength()
self.registry.register_pivot_wavelength(self.telescope, self.band, wl)
return wl | Get the bandpass' pivot wavelength.
Unlike calc_pivot_wavelength(), this function will use a cached
value if available. | Below is the the instruction that describes the task:
### Input:
Get the bandpass' pivot wavelength.
Unlike calc_pivot_wavelength(), this function will use a cached
value if available.
### Response:
def pivot_wavelength(self):
"""Get the bandpass' pivot wavelength.
Unlike calc_pivot_wavelength(), this function will use a cached
value if available.
"""
wl = self.registry._pivot_wavelengths.get((self.telescope, self.band))
if wl is not None:
return wl
wl = self.calc_pivot_wavelength()
self.registry.register_pivot_wavelength(self.telescope, self.band, wl)
return wl |
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
s = set()
for name in names:
try:
s.add(_opcode(name))
except KeyError:
pass
return s | Return a set of opcodes by the names in `names`. | Below is the the instruction that describes the task:
### Input:
Return a set of opcodes by the names in `names`.
### Response:
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
s = set()
for name in names:
try:
s.add(_opcode(name))
except KeyError:
pass
return s |
def destroy_all(cls, objs):
"""
在一个请求中 destroy 多个 leancloud.Object 对象实例。
:param objs: 需要 destroy 的对象
:type objs: list
"""
if not objs:
return
if any(x.is_new() for x in objs):
raise ValueError("Could not destroy unsaved object")
dumped_objs = []
for obj in objs:
dumped_obj = {
'method': 'DELETE',
'path': '/{0}/classes/{1}/{2}'.format(client.SERVER_VERSION, obj._class_name, obj.id),
'body': obj._flags,
}
dumped_objs.append(dumped_obj)
response = client.post('/batch', params={'requests': dumped_objs}).json()
errors = []
for idx, obj in enumerate(objs):
content = response[idx]
error = content.get('error')
if error:
errors.append(leancloud.LeanCloudError(error.get('code'), error.get('error')))
if errors:
# TODO: how to raise list of errors?
# raise MultipleValidationErrors(errors)
# add test
raise errors[0] | 在一个请求中 destroy 多个 leancloud.Object 对象实例。
:param objs: 需要 destroy 的对象
:type objs: list | Below is the the instruction that describes the task:
### Input:
在一个请求中 destroy 多个 leancloud.Object 对象实例。
:param objs: 需要 destroy 的对象
:type objs: list
### Response:
def destroy_all(cls, objs):
"""
在一个请求中 destroy 多个 leancloud.Object 对象实例。
:param objs: 需要 destroy 的对象
:type objs: list
"""
if not objs:
return
if any(x.is_new() for x in objs):
raise ValueError("Could not destroy unsaved object")
dumped_objs = []
for obj in objs:
dumped_obj = {
'method': 'DELETE',
'path': '/{0}/classes/{1}/{2}'.format(client.SERVER_VERSION, obj._class_name, obj.id),
'body': obj._flags,
}
dumped_objs.append(dumped_obj)
response = client.post('/batch', params={'requests': dumped_objs}).json()
errors = []
for idx, obj in enumerate(objs):
content = response[idx]
error = content.get('error')
if error:
errors.append(leancloud.LeanCloudError(error.get('code'), error.get('error')))
if errors:
# TODO: how to raise list of errors?
# raise MultipleValidationErrors(errors)
# add test
raise errors[0] |
def body_processor(self, chunk):
"""
Process body of response.
"""
if self.config_nobody:
self.curl.grab_callback_interrupted = True
return 0
bytes_read = len(chunk)
self.response_body_bytes_read += bytes_read
if self.body_file:
self.body_file.write(chunk)
else:
self.response_body_chunks.append(chunk)
if self.config_body_maxsize is not None:
if self.response_body_bytes_read > self.config_body_maxsize:
logger.debug('Response body max size limit reached: %s',
self.config_body_maxsize)
self.curl.grab_callback_interrupted = True
return 0
# Returning None implies that all bytes were written
return None | Process body of response. | Below is the the instruction that describes the task:
### Input:
Process body of response.
### Response:
def body_processor(self, chunk):
"""
Process body of response.
"""
if self.config_nobody:
self.curl.grab_callback_interrupted = True
return 0
bytes_read = len(chunk)
self.response_body_bytes_read += bytes_read
if self.body_file:
self.body_file.write(chunk)
else:
self.response_body_chunks.append(chunk)
if self.config_body_maxsize is not None:
if self.response_body_bytes_read > self.config_body_maxsize:
logger.debug('Response body max size limit reached: %s',
self.config_body_maxsize)
self.curl.grab_callback_interrupted = True
return 0
# Returning None implies that all bytes were written
return None |
def json2paramater(self, ss_spec):
'''
generate all possible configs for hyperparameters from hyperparameter space.
ss_spec: hyperparameter space
'''
if isinstance(ss_spec, dict):
if '_type' in ss_spec.keys():
_type = ss_spec['_type']
_value = ss_spec['_value']
chosen_params = list()
if _type == 'choice':
for value in _value:
choice = self.json2paramater(value)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
chosen_params.append(choice)
else:
chosen_params = self.parse_qtype(_type, _value)
else:
chosen_params = dict()
for key in ss_spec.keys():
chosen_params[key] = self.json2paramater(ss_spec[key])
return self.expand_parameters(chosen_params)
elif isinstance(ss_spec, list):
chosen_params = list()
for subspec in ss_spec[1:]:
choice = self.json2paramater(subspec)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
chosen_params.append(choice)
chosen_params = list(map(lambda v: {ss_spec[0]: v}, chosen_params))
else:
chosen_params = copy.deepcopy(ss_spec)
return chosen_params | generate all possible configs for hyperparameters from hyperparameter space.
ss_spec: hyperparameter space | Below is the the instruction that describes the task:
### Input:
generate all possible configs for hyperparameters from hyperparameter space.
ss_spec: hyperparameter space
### Response:
def json2paramater(self, ss_spec):
'''
generate all possible configs for hyperparameters from hyperparameter space.
ss_spec: hyperparameter space
'''
if isinstance(ss_spec, dict):
if '_type' in ss_spec.keys():
_type = ss_spec['_type']
_value = ss_spec['_value']
chosen_params = list()
if _type == 'choice':
for value in _value:
choice = self.json2paramater(value)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
chosen_params.append(choice)
else:
chosen_params = self.parse_qtype(_type, _value)
else:
chosen_params = dict()
for key in ss_spec.keys():
chosen_params[key] = self.json2paramater(ss_spec[key])
return self.expand_parameters(chosen_params)
elif isinstance(ss_spec, list):
chosen_params = list()
for subspec in ss_spec[1:]:
choice = self.json2paramater(subspec)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
chosen_params.append(choice)
chosen_params = list(map(lambda v: {ss_spec[0]: v}, chosen_params))
else:
chosen_params = copy.deepcopy(ss_spec)
return chosen_params |
def _homogenize_waves(wave_a, wave_b):
"""
Generate combined independent variable vector.
The combination is from two waveforms and the (possibly interpolated)
dependent variable vectors of these two waveforms
"""
indep_vector = _get_indep_vector(wave_a, wave_b)
dep_vector_a = _interp_dep_vector(wave_a, indep_vector)
dep_vector_b = _interp_dep_vector(wave_b, indep_vector)
return (indep_vector, dep_vector_a, dep_vector_b) | Generate combined independent variable vector.
The combination is from two waveforms and the (possibly interpolated)
dependent variable vectors of these two waveforms | Below is the the instruction that describes the task:
### Input:
Generate combined independent variable vector.
The combination is from two waveforms and the (possibly interpolated)
dependent variable vectors of these two waveforms
### Response:
def _homogenize_waves(wave_a, wave_b):
"""
Generate combined independent variable vector.
The combination is from two waveforms and the (possibly interpolated)
dependent variable vectors of these two waveforms
"""
indep_vector = _get_indep_vector(wave_a, wave_b)
dep_vector_a = _interp_dep_vector(wave_a, indep_vector)
dep_vector_b = _interp_dep_vector(wave_b, indep_vector)
return (indep_vector, dep_vector_a, dep_vector_b) |
def prt_txt(prt, goea_results, prtfmt=None, **kws):
"""Print GOEA results in text format."""
objprt = PrtFmt()
if prtfmt is None:
flds = ['GO', 'NS', 'p_uncorrected',
'ratio_in_study', 'ratio_in_pop', 'depth', 'name', 'study_items']
prtfmt = objprt.get_prtfmt_str(flds)
prtfmt = objprt.adjust_prtfmt(prtfmt)
prt_flds = RPT.get_fmtflds(prtfmt)
data_nts = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
RPT.prt_txt(prt, data_nts, prtfmt, prt_flds, **kws)
return data_nts | Print GOEA results in text format. | Below is the the instruction that describes the task:
### Input:
Print GOEA results in text format.
### Response:
def prt_txt(prt, goea_results, prtfmt=None, **kws):
"""Print GOEA results in text format."""
objprt = PrtFmt()
if prtfmt is None:
flds = ['GO', 'NS', 'p_uncorrected',
'ratio_in_study', 'ratio_in_pop', 'depth', 'name', 'study_items']
prtfmt = objprt.get_prtfmt_str(flds)
prtfmt = objprt.adjust_prtfmt(prtfmt)
prt_flds = RPT.get_fmtflds(prtfmt)
data_nts = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
RPT.prt_txt(prt, data_nts, prtfmt, prt_flds, **kws)
return data_nts |
def _rotate_and_chop(self, verts, normal, axis=[0, 0, 1]):
r"""
Method to rotate a set of vertices (or coords) to align with an axis
points must be coplanar and normal must be given
Chops axis coord to give vertices back in 2D
Used to prepare verts for printing or calculating convex hull in order
to arrange them in hull order for calculations and printing
"""
xaxis = [1, 0, 0]
yaxis = [0, 1, 0]
zaxis = [0, 0, 1]
angle = tr.angle_between_vectors(normal, axis)
if angle == 0.0 or angle == np.pi:
# We are already aligned
facet = verts
else:
M = tr.rotation_matrix(tr.angle_between_vectors(normal, axis),
tr.vector_product(normal, axis))
try:
facet = np.dot(verts, M[:3, :3].T)
except ValueError:
pass
try:
x = facet[:, 0]
y = facet[:, 1]
z = facet[:, 2]
except IndexError:
x = facet[0]
y = facet[1]
z = facet[2]
# Work out span of points and set axes scales to cover this and be
# equal in both dimensions
if axis == xaxis:
output = np.column_stack((y, z))
elif axis == yaxis:
output = np.column_stack((x, z))
elif axis == zaxis:
output = np.column_stack((x, y))
else:
output = facet
return output | r"""
Method to rotate a set of vertices (or coords) to align with an axis
points must be coplanar and normal must be given
Chops axis coord to give vertices back in 2D
Used to prepare verts for printing or calculating convex hull in order
to arrange them in hull order for calculations and printing | Below is the the instruction that describes the task:
### Input:
r"""
Method to rotate a set of vertices (or coords) to align with an axis
points must be coplanar and normal must be given
Chops axis coord to give vertices back in 2D
Used to prepare verts for printing or calculating convex hull in order
to arrange them in hull order for calculations and printing
### Response:
def _rotate_and_chop(self, verts, normal, axis=[0, 0, 1]):
r"""
Method to rotate a set of vertices (or coords) to align with an axis
points must be coplanar and normal must be given
Chops axis coord to give vertices back in 2D
Used to prepare verts for printing or calculating convex hull in order
to arrange them in hull order for calculations and printing
"""
xaxis = [1, 0, 0]
yaxis = [0, 1, 0]
zaxis = [0, 0, 1]
angle = tr.angle_between_vectors(normal, axis)
if angle == 0.0 or angle == np.pi:
# We are already aligned
facet = verts
else:
M = tr.rotation_matrix(tr.angle_between_vectors(normal, axis),
tr.vector_product(normal, axis))
try:
facet = np.dot(verts, M[:3, :3].T)
except ValueError:
pass
try:
x = facet[:, 0]
y = facet[:, 1]
z = facet[:, 2]
except IndexError:
x = facet[0]
y = facet[1]
z = facet[2]
# Work out span of points and set axes scales to cover this and be
# equal in both dimensions
if axis == xaxis:
output = np.column_stack((y, z))
elif axis == yaxis:
output = np.column_stack((x, z))
elif axis == zaxis:
output = np.column_stack((x, y))
else:
output = facet
return output |
def assign_default_storage_policy_to_datastore(policy, datastore,
service_instance=None):
'''
Assigns a storage policy as the default policy to a datastore.
policy
Name of the policy to assign.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.assign_storage_policy_to_datastore
policy='policy name' datastore=ds1
'''
log.trace('Assigning policy %s to datastore %s', policy, datastore)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
# Find policy
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies:
raise VMwareObjectRetrievalError('Policy \'{0}\' was not found'
''.format(policy))
policy_ref = policies[0]
# Find datastore
target_ref = _get_proxy_target(service_instance)
ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
datastore_names=[datastore])
if not ds_refs:
raise VMwareObjectRetrievalError('Datastore \'{0}\' was not '
'found'.format(datastore))
ds_ref = ds_refs[0]
salt.utils.pbm.assign_default_storage_policy_to_datastore(
profile_manager, policy_ref, ds_ref)
return True | Assigns a storage policy as the default policy to a datastore.
policy
Name of the policy to assign.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.assign_storage_policy_to_datastore
policy='policy name' datastore=ds1 | Below is the the instruction that describes the task:
### Input:
Assigns a storage policy as the default policy to a datastore.
policy
Name of the policy to assign.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.assign_storage_policy_to_datastore
policy='policy name' datastore=ds1
### Response:
def assign_default_storage_policy_to_datastore(policy, datastore,
service_instance=None):
'''
Assigns a storage policy as the default policy to a datastore.
policy
Name of the policy to assign.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.assign_storage_policy_to_datastore
policy='policy name' datastore=ds1
'''
log.trace('Assigning policy %s to datastore %s', policy, datastore)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
# Find policy
policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
if not policies:
raise VMwareObjectRetrievalError('Policy \'{0}\' was not found'
''.format(policy))
policy_ref = policies[0]
# Find datastore
target_ref = _get_proxy_target(service_instance)
ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
datastore_names=[datastore])
if not ds_refs:
raise VMwareObjectRetrievalError('Datastore \'{0}\' was not '
'found'.format(datastore))
ds_ref = ds_refs[0]
salt.utils.pbm.assign_default_storage_policy_to_datastore(
profile_manager, policy_ref, ds_ref)
return True |
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return | Handle close request.
Parameters
----------
port : str
Device name/port. | Below is the the instruction that describes the task:
### Input:
Handle close request.
Parameters
----------
port : str
Device name/port.
### Response:
def _serial_close(self, port):
'''
Handle close request.
Parameters
----------
port : str
Device name/port.
'''
if port in self.open_devices:
try:
self.open_devices[port].close()
except Exception as exception:
logger.error('Error closing device `%s`: %s', port, exception)
return
else:
logger.debug('Device not connected to `%s`', port)
self._publish_status(port)
return |
def near_to_position(self, position, max_distance):
'''Returns true iff the record is within max_distance of the given position.
Note: chromosome name not checked, so that's up to you to do first.'''
end = self.ref_end_pos()
return self.POS <= position <= end or abs(position - self.POS) <= max_distance or abs(position - end) <= max_distance | Returns true iff the record is within max_distance of the given position.
Note: chromosome name not checked, so that's up to you to do first. | Below is the the instruction that describes the task:
### Input:
Returns true iff the record is within max_distance of the given position.
Note: chromosome name not checked, so that's up to you to do first.
### Response:
def near_to_position(self, position, max_distance):
'''Returns true iff the record is within max_distance of the given position.
Note: chromosome name not checked, so that's up to you to do first.'''
end = self.ref_end_pos()
return self.POS <= position <= end or abs(position - self.POS) <= max_distance or abs(position - end) <= max_distance |
def exporter(directory, method, datasets):
"""Export the results."""
if method.lower() == 'json':
# Convert json_dict to a JSON styled string
json_string = json.dumps(datasets, indent=4)
savefile = open('{}/exported.json'.format(directory), 'w+')
savefile.write(json_string)
savefile.close()
if method.lower() == 'csv':
with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
csv_writer = csv.writer(
csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, values in datasets.items():
if values is None:
csv_writer.writerow([key])
else:
csv_writer.writerow([key] + values)
csvfile.close() | Export the results. | Below is the the instruction that describes the task:
### Input:
Export the results.
### Response:
def exporter(directory, method, datasets):
"""Export the results."""
if method.lower() == 'json':
# Convert json_dict to a JSON styled string
json_string = json.dumps(datasets, indent=4)
savefile = open('{}/exported.json'.format(directory), 'w+')
savefile.write(json_string)
savefile.close()
if method.lower() == 'csv':
with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
csv_writer = csv.writer(
csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, values in datasets.items():
if values is None:
csv_writer.writerow([key])
else:
csv_writer.writerow([key] + values)
csvfile.close() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.