text stringlengths 81 112k |
|---|
Run the callback
def callback(self):
    '''Run the callback'''
    # Fire the stored callable with its captured positional and keyword
    # arguments, then stamp the time of this invocation.
    args, kwargs = self._args, self._kwargs
    self._callback(*args, **kwargs)
    self._last_checked = time.time()
Run the callback periodically
def run(self):
    '''Run the callback periodically'''
    # Loop until self.wait(...) returns truthy (i.e. we were signalled
    # to stop); otherwise fire the callback every self.delay() seconds.
    while not self.wait(self.delay()):
        try:
            logger.info('Invoking callback %s', self.callback)
            self.callback()
        except StandardError:
            # Python 2 idiom: StandardError excludes SystemExit and
            # KeyboardInterrupt, so shutdown signals still propagate.
            logger.exception('Callback failed')
Login to MediaFire account.
Keyword arguments:
email -- account email
password -- account password
app_id -- application ID
api_key -- API Key (optional)
def login(self, email=None, password=None, app_id=None, api_key=None):
    """Login to MediaFire account.

    Keyword arguments:
    email -- account email
    password -- account password
    app_id -- application ID
    api_key -- API Key (optional)
    """
    token = self.api.user_get_session_token(
        app_id=app_id,
        email=email,
        password=password,
        api_key=api_key,
    )
    # Install the session token back into the API client so that
    # subsequent calls are authenticated.
    self.api.session = token
Return resource described by MediaFire URI.
uri -- MediaFire URI
Examples:
Folder (using folderkey):
mf:r5g3p2z0sqs3j
mf:r5g3p2z0sqs3j/folder/file.ext
File (using quickkey):
mf:xkr43dadqa3o2p2
Path:
mf:///Documents/file.ext
def get_resource_by_uri(self, uri):
    """Return resource described by MediaFire URI.

    uri -- MediaFire URI

    Examples:
    Folder (using folderkey):
        mf:r5g3p2z0sqs3j
        mf:r5g3p2z0sqs3j/folder/file.ext
    File (using quickkey):
        mf:xkr43dadqa3o2p2
    Path:
        mf:///Documents/file.ext

    Raises NotAFolderError if a key-prefixed URI's key does not refer
    to a folder.
    """
    location = self._parse_uri(uri)
    if location.startswith("/"):
        # Use path lookup only, root=myfiles
        result = self.get_resource_by_path(location)
    elif "/" in location:
        # mf:abcdefjhijklm/relative/path
        # Split off the resource key from the remainder of the path.
        # maxsplit must be 1: the previous value of 2 produced three
        # items for paths containing two or more slashes (e.g. the
        # documented mf:key/folder/file.ext form), crashing the
        # two-variable unpacking below with a ValueError.
        resource_key, path = location.split('/', 1)
        parent_folder = self.get_resource_by_key(resource_key)
        if not isinstance(parent_folder, Folder):
            raise NotAFolderError(resource_key)
        # perform additional lookup by path
        result = self.get_resource_by_path(
            path, folder_key=parent_folder['folderkey'])
    else:
        # mf:abcdefjhijklm
        result = self.get_resource_by_key(location)
    return result
Return resource by quick_key/folder_key.
key -- quick_key or folder_key
def get_resource_by_key(self, resource_key):
    """Return resource by quick_key/folder_key.

    resource_key -- quick_key (file) or folder_key (folder)

    Tries both lookups — ordered by key length — and raises
    ResourceNotFoundError when neither succeeds.
    """
    # search for quick_key by default
    lookup_order = ["quick_key", "folder_key"]
    if len(resource_key) == FOLDER_KEY_LENGTH:
        # Folder keys have a fixed length, so try the folder lookup first
        lookup_order = ["folder_key", "quick_key"]
    resource = None
    for lookup_key in lookup_order:
        try:
            if lookup_key == "folder_key":
                info = self.api.folder_get_info(folder_key=resource_key)
                resource = Folder(info['folder_info'])
            elif lookup_key == "quick_key":
                info = self.api.file_get_info(quick_key=resource_key)
                resource = File(info['file_info'])
        except MediaFireApiError:
            # TODO: Check response code
            # A failed lookup of one kind simply falls through to the
            # other kind; any API error is treated as "not found".
            pass
        if resource:
            break
    if not resource:
        raise ResourceNotFoundError(resource_key)
    return resource
Return resource by remote path.
path -- remote path
Keyword arguments:
folder_key -- what to use as the root folder (None for root)
def get_resource_by_path(self, path, folder_key=None):
    """Return resource by remote path.

    path -- remote path
    Keyword arguments:
    folder_key -- what to use as the root folder (None for root)

    Walks the path one component at a time, listing each folder's
    contents. Raises NotAFolderError when an intermediate component is
    a file, ResourceNotFoundError when the path does not resolve.
    """
    logger.debug("resolving %s", path)
    # remove empty path components
    path = posixpath.normpath(path)
    components = [t for t in path.split(posixpath.sep) if t != '']
    if not components:
        # request for root
        return Folder(
            self.api.folder_get_info(folder_key)['folder_info']
        )
    resource = None
    for component in components:
        exists = False
        # Scan the current folder for an entry matching this component.
        # Folder entries carry 'name', file entries carry 'filename'.
        for item in self._folder_get_content_iter(folder_key):
            name = item['name'] if 'name' in item else item['filename']
            if name == component:
                exists = True
                if components[-1] != component:
                    # still have components to go through
                    if 'filename' in item:
                        # found a file, expected a directory
                        raise NotAFolderError(item['filename'])
                    # descend into the matched folder
                    folder_key = item['folderkey']
                else:
                    # found the leaf
                    resource = item
                break
        if resource is not None:
            break
        if not exists:
            # intermediate component does not exist - bailing out
            break
    if resource is None:
        raise ResourceNotFoundError(path)
    # Re-fetch full info for the leaf so callers get a complete record
    if "quickkey" in resource:
        file_info = self.api.file_get_info(
            resource['quickkey'])['file_info']
        result = File(file_info)
    elif "folderkey" in resource:
        folder_info = self.api.folder_get_info(
            resource['folderkey'])['folder_info']
        result = Folder(folder_info)
    # NOTE(review): if the leaf had neither 'quickkey' nor 'folderkey',
    # `result` would be unbound here — presumably unreachable; confirm.
    return result
Iterator for api.folder_get_content
def _folder_get_content_iter(self, folder_key=None):
    """Iterator for api.folder_get_content

    Yields raw folder-info dicts first, then file-info dicts,
    transparently paging through the API's chunked responses.

    folder_key -- folder to list (None presumably selects the root
    folder — confirm against api.folder_get_content)
    """
    # The API serves folders and files via separate content_type
    # queries; fetch each kind in turn.
    lookup_params = [
        {'content_type': 'folders', 'node': 'folders'},
        {'content_type': 'files', 'node': 'files'}
    ]
    for param in lookup_params:
        more_chunks = True
        chunk = 0
        while more_chunks:
            # chunks are requested 1-indexed
            chunk += 1
            content = self.api.folder_get_content(
                content_type=param['content_type'], chunk=chunk,
                folder_key=folder_key)['folder_content']
            # empty folder/file list
            if not content[param['node']]:
                break
            # no next page
            if content['more_chunks'] == 'no':
                more_chunks = False
            for resource_info in content[param['node']]:
                yield resource_info
Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
def get_folder_contents_iter(self, uri):
    """Return iterator for directory contents.

    uri -- mediafire URI

    Example:
        for item in get_folder_contents_iter('mf:///Documents'):
            print(item)

    Yields File objects for file entries and Folder objects for
    folder entries; raises NotAFolderError for non-folder URIs.
    """
    resource = self.get_resource_by_uri(uri)
    if not isinstance(resource, Folder):
        raise NotAFolderError(uri)
    for item in self._folder_get_content_iter(resource['folderkey']):
        if 'filename' in item:
            # Work around https://mediafire.mantishub.com/view.php?id=5
            # TODO: remove in 1.0
            if ".patch." in item['filename']:
                continue
            yield File(item)
        elif 'name' in item:
            yield Folder(item)
Create folder.
uri -- MediaFire URI
Keyword arguments:
recursive -- set to True to create intermediate folders.
def create_folder(self, uri, recursive=False):
    """Create folder.

    uri -- MediaFire URI
    Keyword arguments:
    recursive -- set to True to create intermediate folders.

    Returns the Folder resource (existing or newly created). Raises
    NotAFolderError if the URI (or a parent) exists but is not a
    folder; ResourceNotFoundError if the parent is missing and
    recursive is False.
    """
    logger.info("Creating %s", uri)
    # check that folder exists already
    try:
        resource = self.get_resource_by_uri(uri)
        if isinstance(resource, Folder):
            # already present — creation is idempotent
            return resource
        else:
            raise NotAFolderError(uri)
    except ResourceNotFoundError:
        pass
    location = self._parse_uri(uri)
    folder_name = posixpath.basename(location)
    parent_uri = 'mf://' + posixpath.dirname(location)
    try:
        parent_node = self.get_resource_by_uri(parent_uri)
        if not isinstance(parent_node, Folder):
            raise NotAFolderError(parent_uri)
        parent_key = parent_node['folderkey']
    except ResourceNotFoundError:
        if recursive:
            # create the missing parent chain first, then use its key
            result = self.create_folder(parent_uri, recursive=True)
            parent_key = result['folderkey']
        else:
            raise
    # We specify exact location, so don't allow duplicates
    result = self.api.folder_create(
        folder_name, parent_key=parent_key, action_on_duplicate='skip')
    logger.info("Created folder '%s' [mf:%s]",
                result['name'], result['folder_key'])
    # Re-fetch so the caller gets a fully-populated Folder resource
    return self.get_resource_by_key(result['folder_key'])
Delete folder.
uri -- MediaFire folder URI
Keyword arguments:
purge -- delete the folder without sending it to Trash
def delete_folder(self, uri, purge=False):
    """Delete folder.

    uri -- MediaFire folder URI
    Keyword arguments:
    purge -- delete the folder without sending it to Trash

    Returns the API response dict, an empty dict for the known
    delete-but-error case, or None when the folder does not exist.
    """
    try:
        resource = self.get_resource_by_uri(uri)
    except ResourceNotFoundError:
        # Nothing to remove
        return None
    if not isinstance(resource, Folder):
        raise ValueError("Folder expected, got {}".format(type(resource)))
    if purge:
        func = self.api.folder_purge
    else:
        func = self.api.folder_delete
    try:
        result = func(resource['folderkey'])
    except MediaFireApiError as err:
        # NOTE(review): the log message mentions error 900 but the code
        # checks err.code == 100 — confirm which code the API returns.
        if err.code == 100:
            logger.debug(
                "Delete folder returns error 900 but folder is deleted: "
                "http://forum.mediafiredev.com/showthread.php?129")
            result = {}
        else:
            raise
    return result
Delete file.
uri -- MediaFire file URI
Keyword arguments:
purge -- delete the file without sending it to Trash.
def delete_file(self, uri, purge=False):
    """Delete file.

    uri -- MediaFire file URI
    Keyword arguments:
    purge -- delete the file without sending it to Trash.

    Returns the API response, or None when the file does not exist.
    """
    try:
        resource = self.get_resource_by_uri(uri)
    except ResourceNotFoundError:
        # Nothing to remove
        return None
    if not isinstance(resource, File):
        raise ValueError("File expected, got {}".format(type(resource)))
    # Purge bypasses the Trash entirely
    remover = self.api.file_purge if purge else self.api.file_delete
    return remover(resource['quickkey'])
Delete file or folder
uri -- mediafire URI
Keyword arguments:
purge -- delete the resource without sending it to Trash.
def delete_resource(self, uri, purge=False):
    """Delete file or folder

    uri -- mediafire URI
    Keyword arguments:
    purge -- delete the resource without sending it to Trash.

    Dispatches to delete_file/delete_folder by resource type;
    returns None when the resource does not exist.
    """
    try:
        resource = self.get_resource_by_uri(uri)
    except ResourceNotFoundError:
        # Nothing to remove
        return None
    if isinstance(resource, File):
        return self.delete_file(uri, purge)
    if isinstance(resource, Folder):
        return self.delete_folder(uri, purge)
    raise ValueError('Unsupported resource: {}'.format(type(resource)))
Prepare Upload object, resolve paths
def _prepare_upload_info(self, source, dest_uri):
    """Prepare Upload object, resolve paths

    source -- local path or file-like object (anything with .read)
    dest_uri -- target MediaFire URI (existing file, folder, or a
                not-yet-existing path whose parent folder exists)

    Returns (folder_key, name): the parent folder key and the name
    to upload the file under.
    """
    try:
        dest_resource = self.get_resource_by_uri(dest_uri)
    except ResourceNotFoundError:
        dest_resource = None
    # A raw file handle carries no file name of its own, so dest_uri
    # must then identify the target file exactly.
    is_fh = hasattr(source, 'read')
    folder_key = None
    name = None
    if dest_resource:
        if isinstance(dest_resource, File):
            # Overwriting an existing file: reuse its location and name
            folder_key = dest_resource['parent_folderkey']
            name = dest_resource['filename']
        elif isinstance(dest_resource, Folder):
            if is_fh:
                raise ValueError("Cannot determine target file name")
            # Uploading into a folder: name comes from the source path
            basename = posixpath.basename(source)
            dest_uri = posixpath.join(dest_uri, basename)
            try:
                result = self.get_resource_by_uri(dest_uri)
                if isinstance(result, Folder):
                    raise ValueError("Target is a folder (file expected)")
                folder_key = result.get('parent_folderkey', None)
                name = result['filename']
            except ResourceNotFoundError:
                # ok, neither a file nor folder, proceed
                folder_key = dest_resource['folderkey']
                name = basename
        else:
            raise Exception("Unknown resource type")
    else:
        # get parent resource
        parent_uri = '/'.join(dest_uri.split('/')[0:-1])
        result = self.get_resource_by_uri(parent_uri)
        if not isinstance(result, Folder):
            raise NotAFolderError("Parent component is not a folder")
        folder_key = result['folderkey']
        name = posixpath.basename(dest_uri)
    return folder_key, name
Upload file to MediaFire.
source -- path to the file or a file-like object (e.g. io.BytesIO)
dest_uri -- MediaFire Resource URI
def upload_file(self, source, dest_uri):
    """Upload file to MediaFire.

    source -- path to the file or a file-like object (e.g. io.BytesIO)
    dest_uri -- MediaFire Resource URI

    Returns the result of MediaFireUploader.upload(). Existing remote
    files are overwritten (action_on_duplicate='replace').
    """
    folder_key, name = self._prepare_upload_info(source, dest_uri)
    is_fh = hasattr(source, 'read')
    fd = None
    try:
        if is_fh:
            # Re-using filehandle
            fd = source
        else:
            # Handling fs open/close
            fd = open(source, 'rb')
        return MediaFireUploader(self.api).upload(
            fd, name, folder_key=folder_key,
            action_on_duplicate='replace')
    finally:
        # Close filehandle if we opened it
        if fd and not is_fh:
            fd.close()
Download file from MediaFire.
src_uri -- MediaFire file URI to download
target -- download path or file-like object in write mode
def download_file(self, src_uri, target):
    """Download file from MediaFire.

    src_uri -- MediaFire file URI to download
    target -- download path or file-like object in write mode

    Raises MediaFireError for non-file URIs and DownloadError when the
    downloaded content does not match the server-reported hash.
    """
    resource = self.get_resource_by_uri(src_uri)
    if not isinstance(resource, File):
        raise MediaFireError("Only files can be downloaded")
    quick_key = resource['quickkey']
    result = self.api.file_get_links(quick_key=quick_key,
                                     link_type='direct_download')
    direct_download = result['links'][0]['direct_download']
    # Force download over HTTPS
    direct_download = direct_download.replace('http:', 'https:')
    name = resource['filename']
    target_is_filehandle = hasattr(target, 'write')
    if not target_is_filehandle:
        if (os.path.exists(target) and os.path.isdir(target)) or \
                target.endswith("/"):
            target = os.path.join(target, name)
        # Fix: a bare filename has an empty dirname and os.makedirs('')
        # raises; only create directories when there actually is one.
        target_dir = os.path.dirname(target)
        if target_dir and not os.path.isdir(target_dir):
            os.makedirs(target_dir)
    logger.info("Downloading %s to %s", src_uri, target)
    response = requests.get(direct_download, stream=True)
    # Fix: pre-bind out_fd so the finally block cannot hit a NameError
    # when open() fails.
    out_fd = None
    try:
        if target_is_filehandle:
            out_fd = target
        else:
            out_fd = open(target, 'wb')
        # Verify integrity while streaming
        checksum = hashlib.sha256()
        for chunk in response.iter_content(chunk_size=4096):
            if chunk:
                out_fd.write(chunk)
                checksum.update(chunk)
        checksum_hex = checksum.hexdigest().lower()
        if checksum_hex != resource['hash']:
            raise DownloadError("Hash mismatch ({} != {})".format(
                resource['hash'], checksum_hex))
        logger.info("Download completed successfully")
    finally:
        # Close only handles we opened ourselves
        if out_fd is not None and not target_is_filehandle:
            out_fd.close()
Update file metadata.
uri -- MediaFire file URI
Supplying the following keyword arguments would change the
metadata on the server side:
filename -- rename file
description -- set file description string
mtime -- set file modification time
privacy -- set file privacy - 'private' or 'public'
def update_file_metadata(self, uri, filename=None, description=None,
                         mtime=None, privacy=None):
    """Update file metadata.

    uri -- MediaFire file URI
    Supplying the following keyword arguments would change the
    metadata on the server side:
    filename -- rename file
    description -- set file description string
    mtime -- set file modification time
    privacy -- set file privacy - 'private' or 'public'
    """
    resource = self.get_resource_by_uri(uri)
    if not isinstance(resource, File):
        raise ValueError('Expected File, got {}'.format(type(resource)))
    # Pass everything straight through; the API ignores None values
    return self.api.file_update(
        resource['quickkey'],
        filename=filename,
        description=description,
        mtime=mtime,
        privacy=privacy,
    )
Update folder metadata.
uri -- MediaFire file URI
Supplying the following keyword arguments would change the
metadata on the server side:
filename -- rename file
description -- set file description string
mtime -- set file modification time
privacy -- set file privacy - 'private' or 'public'
recursive -- update folder privacy recursively
def update_folder_metadata(self, uri, foldername=None, description=None,
                           mtime=None, privacy=None,
                           privacy_recursive=None):
    """Update folder metadata.

    uri -- MediaFire folder URI
    Supplying the following keyword arguments would change the
    metadata on the server side:
    foldername -- rename folder
    description -- set folder description string
    mtime -- set folder modification time
    privacy -- set folder privacy - 'private' or 'public'
    privacy_recursive -- apply the privacy change to sub-folders
    """
    resource = self.get_resource_by_uri(uri)
    if not isinstance(resource, Folder):
        raise ValueError('Expected Folder, got {}'.format(type(resource)))
    result = self.api.folder_update(resource['folderkey'],
                                    foldername=foldername,
                                    description=description,
                                    mtime=mtime,
                                    privacy=privacy,
                                    privacy_recursive=privacy_recursive)
    return result
Parse and validate MediaFire URI.
def _parse_uri(uri):
"""Parse and validate MediaFire URI."""
tokens = urlparse(uri)
if tokens.netloc != '':
logger.error("Invalid URI: %s", uri)
raise ValueError("MediaFire URI format error: "
"host should be empty - mf:///path")
if tokens.scheme != '' and tokens.scheme != URI_SCHEME:
raise ValueError("MediaFire URI format error: "
"must start with 'mf:' or '/'")
return posixpath.normpath(tokens.path) |
The clean stats from all the hosts reporting to this host.
def merged(self):
    '''The clean stats from all the hosts reporting to this host.'''
    stats = {}
    # Walk every topic and every producer advertising it, then pull
    # each producer's cleaned stats over its HTTP interface.
    for topic in self.client.topics()['topics']:
        for producer in self.client.lookup(topic)['producers']:
            address = producer['broadcast_address']
            port = producer['http_port']
            key = '%s_%s' % (address, port)
            client = nsqd.Client('http://%s:%s/' % (address, port))
            stats[key] = client.clean_stats()
    return stats
All the raw, unaggregated stats (with duplicates).
def raw(self):
    '''All the raw, unaggregated stats (with duplicates).

    Yields (name, value, aggregated) tuples: host-qualified metrics
    with aggregated=False, cluster-level rollup names with
    aggregated=True. Missing keys yield -1.

    Fixes: removed the unused `prefix` local, and renamed the loop
    variables — the original rebound `stats` at host, topic and
    channel level, which was error-prone shadowing.
    '''
    topic_keys = (
        'message_count',
        'depth',
        'backend_depth',
        'paused'
    )
    channel_keys = (
        'in_flight_count',
        'timeout_count',
        'paused',
        'deferred_count',
        'message_count',
        'depth',
        'backend_depth',
        'requeue_count'
    )
    for host, host_stats in self.merged.items():
        for topic, topic_stats in host_stats.get('topics', {}).items():
            for key in topic_keys:
                value = int(topic_stats.get(key, -1))
                yield (
                    'host.%s.topic.%s.%s' % (host, topic, key),
                    value,
                    False
                )
                yield (
                    'topic.%s.%s' % (topic, key),
                    value,
                    True
                )
                yield (
                    'topics.%s' % key,
                    value,
                    True
                )
            for chan, chan_stats in topic_stats.get('channels', {}).items():
                data = {
                    key: int(chan_stats.get(key, -1)) for key in channel_keys
                }
                data['clients'] = len(chan_stats.get('clients', []))
                for key, value in data.items():
                    yield (
                        'host.%s.topic.%s.channel.%s.%s' % (host, topic, chan, key),
                        value,
                        False
                    )
                    yield (
                        'host.%s.topic.%s.channels.%s' % (host, topic, key),
                        value,
                        True
                    )
                    yield (
                        'topic.%s.channels.%s' % (topic, key),
                        value,
                        True
                    )
                    yield (
                        'channels.%s' % key,
                        value,
                        True
                    )
Stats that have been aggregated appropriately.
def stats(self):
    '''Stats that have been aggregated appropriately.'''
    # Counter defaults missing keys to 0, which both the max()
    # comparison and the += accumulation below rely on.
    totals = Counter()
    for name, value, aggregated in self.raw:
        if not aggregated:
            totals[name] = value
            continue
        max_key = '%s.max' % name
        totals[max_key] = max(totals[max_key], value)
        totals['%s.total' % name] += value
    return sorted(totals.items())
Return the current python source line.
def get_curline():
    """Return the current python source line.

    GDB helper: inspects the selected Python frame (via the
    python-gdb.py `Frame` helper) and returns a '-> file(line): src'
    string, or '' when no Python frame or line is available.
    """
    if Frame:
        frame = Frame.get_selected_python_frame()
        if frame:
            line = ''
            f = frame.get_pyop()
            if f and not f.is_optimized_out():
                # Strip the CWD prefix so the path prints relative
                cwd = os.path.join(os.getcwd(), '')
                fname = f.filename()
                if cwd in fname:
                    fname = fname[len(cwd):]
                try:
                    line = f.current_line()
                except IOError:
                    # source file unavailable — report location only
                    pass
                if line:
                    # Use repr(line) to avoid UnicodeDecodeError on the
                    # following print invocation.
                    line = repr(line).strip("'")
                    line = line[:-2] if line.endswith(r'\n') else line
                return ('-> %s(%s): %s' % (fname,
                                           f.current_line_num(), line))
    return ''
Subscribe connection and manipulate its RDY state
def reconnected(self, conn):
    '''Subscribe connection and manipulate its RDY state'''
    # Re-subscribe the freshly reconnected connection, then hand it a
    # minimal RDY count so it starts receiving messages again.
    conn.sub(self._topic, self._channel)
    conn.rdy(1)
Distribute the ready state across all of the connections
def distribute_ready(self):
    '''Distribute the ready state across all of the connections'''
    alive = [conn for conn in self.connections() if conn.alive()]
    if len(alive) > self._max_in_flight:
        raise NotImplementedError(
            'Max in flight must be greater than number of connections')
    # Distribute the ready count evenly among the connections
    for count, conn in distribute(self._max_in_flight, alive):
        # We cannot exceed the maximum RDY count for a connection
        if count > conn.max_rdy_count:
            logger.info(
                'Using max_rdy_count (%i) instead of %i for %s RDY',
                conn.max_rdy_count, count, conn)
            count = conn.max_rdy_count
        logger.info('Sending RDY %i to %s', count, conn)
        conn.rdy(count)
Determine whether or not we need to redistribute the ready state
def needs_distribute_ready(self):
    '''Determine whether or not we need to redistribute the ready state'''
    # Try to pre-empt starvation: a live connection whose current RDY
    # has dropped to 25% (or less) of the last value sent needs a
    # redistribution.
    for conn in self.connections():
        if not conn.alive():
            continue
        if conn.ready <= (conn.last_ready_sent * 0.25):
            return True
Read some number of messages
def read(self):
    '''Read some number of messages'''
    found = Client.read(self)
    # Rebalance RDY across connections if any has run low
    if self.needs_distribute_ready():
        self.distribute_ready()
    # Hand back everything the base client read
    return found
Profile the block
def profiler():
    '''Profile the block'''
    # Generator-based context manager body: profiling is active between
    # the yield's entry and exit, after which the stats are printed
    # sorted by total time.
    import cProfile
    import pstats
    profile = cProfile.Profile()
    profile.enable()
    yield
    profile.disable()
    stats = pstats.Stats(profile).sort_stats('tottime')
    stats.print_stats()
Generator for count messages of the provided size
def messages(count, size):
    '''Generator for count messages of the provided size

    Python 2 only: string.lowercase / string.uppercase were removed in
    Python 3 (ascii_lowercase / ascii_uppercase there).
    '''
    import string
    # Make sure we have at least 'size' letters
    letters = islice(cycle(chain(string.lowercase, string.uppercase)), size)
    # Cycle through distinct permutations so messages only repeat once
    # the permutation space is exhausted.
    return islice(cycle(''.join(l) for l in permutations(letters, size)), count)
Collect data into fixed-length chunks or blocks
def grouper(iterable, n):
    '''Collect data into fixed-length chunks or blocks

    The final chunk may be shorter than n once the padding is removed.
    (Python 2: relies on itertools.izip_longest.)
    '''
    # Classic grouper recipe: n references to one iterator, zipped
    args = [iter(iterable)] * n
    for group in izip_longest(fillvalue=None, *args):
        # Drop the padding from the final short group.
        # NOTE(review): this also drops legitimate None items — confirm
        # the iterable never contains None.
        group = [g for g in group if g != None]
        yield group
Basic benchmark
def basic(topic='topic', channel='channel', count=1e6, size=10, gevent=False,
          max_in_flight=2500, profile=False):
    '''Basic benchmark

    Publishes `count` messages of `size` characters to a local nsqd,
    then consumes and FINs them, printing the throughput.
    (Python 2 script: uses print statements.)
    '''
    if gevent:
        from gevent import monkey
        monkey.patch_all()
    # Check the types of the arguments
    count = int(count)
    size = int(size)
    max_in_flight = int(max_in_flight)
    from nsq.http import nsqd
    from nsq.reader import Reader
    print 'Publishing messages...'
    # mpub in batches of 1000 to keep request sizes bounded
    for batch in grouper(messages(count, size), 1000):
        nsqd.Client('http://localhost:4151').mpub(topic, batch)
    print 'Consuming messages'
    client = Reader(topic, channel, nsqd_tcp_addresses=['localhost:4150'],
                    max_in_flight=max_in_flight)
    with closing(client):
        # start accumulates (-t0 + t1) == elapsed seconds
        start = -time.time()
        if profile:
            with profiler():
                for message in islice(client, count):
                    message.fin()
        else:
            for message in islice(client, count):
                message.fin()
        start += time.time()
    print 'Finished %i messages in %fs (%5.2f messages / second)' % (
        count, start, count / start)
Read a stream of floats and give summary statistics
def stats():
    '''Read a stream of floats and give summary statistics

    Reads stdin, extracts every numeric token per line, and prints the
    count, mean and (population) standard deviation.
    (Python 2 script: uses a print statement.)
    '''
    import re
    import sys
    import math
    values = []
    for line in sys.stdin:
        # NOTE(review): this pattern requires at least two digits, so
        # single-digit values like '5' are skipped — confirm intended.
        values.extend(map(float, re.findall(r'\d+\.?\d+', line)))
    # NOTE(review): empty input raises ZeroDivisionError here
    mean = sum(values) / len(values)
    variance = sum((val - mean) ** 2 for val in values) / len(values)
    print '%3i items; mean: %10.5f; std-dev: %10.5f' % (
        len(values), mean, math.sqrt(variance))
Whether or not enough time has passed since the last failure
def ready(self):
    '''Whether or not enough time has passed since the last failure'''
    # No recorded failure (or a falsy timestamp) means always ready.
    if not self._last_failed:
        return True
    elapsed = time.time() - self._last_failed
    return elapsed >= self.backoff()
Print the status of the daemon.
This function displays the current status of the daemon as well
as the whole queue and all available information about every entry
in the queue.
`terminaltables` is used to format and display the queue contents.
`colorclass` is used to color format the various items in the queue.
Args:
root_dir (string): The path to the root directory the daemon is running in.
def execute_status(args, root_dir=None):
    """Print the status of the daemon.

    This function displays the current status of the daemon as well
    as the whole queue and all available information about every entry
    in the queue.
    `terminaltables` is used to format and display the queue contents.
    `colorclass` is used to color format the various items in the queue.

    Args:
        args (dict): command-line arguments (unused here).
        root_dir (string): The path to the root directory the daemon
            is running in.
    """
    status = command_factory('status')({}, root_dir=root_dir)
    # First rows, showing daemon status
    if status['status'] == 'running':
        status['status'] = Color('{autogreen}' + '{}'.format(status['status']) + '{/autogreen}')
    elif status['status'] in ['paused']:
        status['status'] = Color('{autoyellow}' + '{}'.format(status['status']) + '{/autoyellow}')
    print('Daemon: {}\n'.format(status['status']))
    # Handle queue data
    data = status['data']
    if isinstance(data, str):
        # The daemon sent a plain message (e.g. an empty queue notice)
        print(data)
    elif isinstance(data, dict):
        # Format incomming data to be compatible with Terminaltables
        formatted_data = []
        formatted_data.append(['Index', 'Status', 'Code',
                               'Command', 'Path', 'Start', 'End'])
        for key, entry in sorted(data.items(), key=operator.itemgetter(0)):
            formatted_data.append(
                [
                    '#{}'.format(key),
                    entry['status'],
                    '{}'.format(entry['returncode']),
                    entry['command'],
                    entry['path'],
                    entry['start'],
                    entry['end']
                ]
            )
        # Create AsciiTable instance and define style
        table = AsciiTable(formatted_data)
        table.outer_border = False
        table.inner_column_border = False
        terminal_width = terminal_size()
        customWidth = table.column_widths
        # If the text is wider than the actual terminal size, we
        # compute a new size for the Command and Path column.
        if (reduce(lambda a, b: a+b, table.column_widths) + 10) > terminal_width[0]:
            # We have to subtract 14 because of table paddings
            left_space = math.floor((terminal_width[0] - customWidth[0] - customWidth[1] - customWidth[2] - customWidth[5] - customWidth[6] - 14)/2)
            # Give any width one column doesn't need to the other one
            if customWidth[3] < left_space:
                customWidth[4] = 2*left_space - customWidth[3]
            elif customWidth[4] < left_space:
                customWidth[3] = 2*left_space - customWidth[4]
            else:
                customWidth[3] = left_space
                customWidth[4] = left_space
        # Format long strings to match the console width
        for i, entry in enumerate(table.table_data):
            for j, string in enumerate(entry):
                max_width = customWidth[j]
                wrapped_string = '\n'.join(wrap(string, max_width))
                if j == 1:
                    # Status column: color-code by process state
                    if wrapped_string == 'done' or wrapped_string == 'running' or wrapped_string == 'paused':
                        wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
                    elif wrapped_string in ['queued', 'stashed']:
                        wrapped_string = Color('{autoyellow}' + '{}'.format(wrapped_string) + '{/autoyellow}')
                    elif wrapped_string in ['failed', 'stopping', 'killing']:
                        wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
                elif j == 2:
                    # Return-code column: green for 0, red otherwise
                    # (the header cell 'Code' is left uncolored)
                    if wrapped_string == '0' and wrapped_string != 'Code':
                        wrapped_string = Color('{autogreen}' + '{}'.format(wrapped_string) + '{/autogreen}')
                    elif wrapped_string != '0' and wrapped_string != 'Code':
                        wrapped_string = Color('{autored}' + '{}'.format(wrapped_string) + '{/autored}')
                table.table_data[i][j] = wrapped_string
        print(table.table)
    print('')
Print the current log file.
Args:
args['keys'] (int): If given, we only look at the specified processes.
root_dir (string): The path to the root directory the daemon is running in.
def execute_log(args, root_dir):
    """Print the current log file.

    Args:
        args['keys'] (int): If given, we only look at the specified processes.
        root_dir (string): The path to the root directory the daemon is running in.
    """
    # Print the logs of all specified processes
    if args.get('keys'):
        config_dir = os.path.join(root_dir, '.config/pueue')
        queue_path = os.path.join(config_dir, 'queue')
        if os.path.exists(queue_path):
            # Fix: `with` guarantees the handle is closed even when
            # unpickling fails (the old code leaked it on that path).
            with open(queue_path, 'rb') as queue_file:
                try:
                    queue = pickle.load(queue_file)
                except Exception:
                    print('Queue log file seems to be corrupted. Aborting.')
                    return
        else:
            print('There is no queue log file. Aborting.')
            return
        for key in args.get('keys'):
            # Check if there is an entry with this key
            if queue.get(key) and queue[key]['status'] in ['failed', 'done']:
                entry = queue[key]
                print('Log of entry: {}'.format(key))
                print('Returncode: {}'.format(entry['returncode']))
                print('Command: {}'.format(entry['command']))
                print('Path: {}'.format(entry['path']))
                print('Start: {}, End: {} \n'.format(entry['start'], entry['end']))
                # Write STDERR
                if len(entry['stderr']) > 0:
                    print(Color('{autored}Stderr output: {/autored}\n ') + entry['stderr'])
                # Write STDOUT
                if len(entry['stdout']) > 0:
                    print(Color('{autogreen}Stdout output: {/autogreen}\n ') + entry['stdout'])
            else:
                print('No finished process with key {}.'.format(key))
    # Print the log of all processes
    else:
        log_path = os.path.join(root_dir, '.local/share/pueue/queue.log')
        # Fix: close the log file (the old code never closed it)
        with open(log_path, 'r') as log_file:
            print(log_file.read())
Print stderr and stdout of the current running process.
Args:
args['watch'] (bool): If True, we open a curses session and tail
the output live in the console.
root_dir (string): The path to the root directory the daemon is running in.
def execute_show(args, root_dir):
    """Print stderr and stdout of the current running process.

    Args:
        args['watch'] (bool): If True, we open a curses session and tail
                              the output live in the console.
        root_dir (string): The path to the root directory the daemon is running in.
    """
    key = None
    if args.get('key'):
        key = args['key']
        status = command_factory('status')({}, root_dir=root_dir)
        if key not in status['data'] or status['data'][key]['status'] != 'running':
            print('No running process with this key, use `log` to show finished processes.')
            return
    # In case no key provided, we take the oldest running process
    else:
        status = command_factory('status')({}, root_dir=root_dir)
        if isinstance(status['data'], str):
            print(status['data'])
            return
        for k in sorted(status['data'].keys()):
            if status['data'][k]['status'] == 'running':
                key = k
                break
        if key is None:
            print('No running process, use `log` to show finished processes.')
            return
    config_dir = os.path.join(root_dir, '.config/pueue')
    # Get current pueue stdout/stderr files from the config dir
    stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))
    stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))
    stdoutDescriptor = open(stdoutFile, 'r')
    stderrDescriptor = open(stderrFile, 'r')
    try:
        running = True
        # Continually print output with curses or just print once
        if args['watch']:
            # Initialize curses
            stdscr = curses.initscr()
            curses.noecho()
            curses.cbreak()
            curses.curs_set(2)
            stdscr.keypad(True)
            stdscr.refresh()
            try:
                # Update output every two seconds
                while running:
                    stdscr.clear()
                    stdoutDescriptor.seek(0)
                    message = stdoutDescriptor.read()
                    stdscr.addstr(0, 0, message)
                    stdscr.refresh()
                    time.sleep(2)
            except Exception:
                # Preserve old behavior: swallow errors in the loop
                pass
            finally:
                # Fix: restore the terminal unconditionally. The old
                # code only cleaned up on `except Exception`, so a
                # KeyboardInterrupt (the usual way out of this loop)
                # left the terminal in curses mode.
                curses.nocbreak()
                stdscr.keypad(False)
                curses.echo()
                curses.endwin()
        else:
            print('Stdout output:\n')
            stdoutDescriptor.seek(0)
            print(get_descriptor_output(stdoutDescriptor, key))
            print('\n\nStderr output:\n')
            stderrDescriptor.seek(0)
            print(get_descriptor_output(stderrDescriptor, key))
    finally:
        # Fix: close both descriptors (the old code leaked them)
        stdoutDescriptor.close()
        stderrDescriptor.close()
Fetches a song track by given ID.
:param track_id: the track ID.
:type track_id: str
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#tracks-track_id`.
def fetch_track(self, track_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches a song track by given ID.

    :param track_id: the track ID.
    :type track_id: str
    :param terr: territory code (defaults to KKBOXTerritory.TAIWAN).
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#tracks-track_id`.
    '''
    url = 'https://api.kkbox.com/v1.1/tracks/%s' % track_id
    url += '?' + url_parse.urlencode({'territory': terr})
    # NOTE(review): issues the request via _post_data with a None
    # payload even though this is a read endpoint — confirm the helper
    # performs the intended HTTP method.
    return self.http._post_data(url, None, self.http._headers_with_access_token())
Show a specific indicator by id
:param user: feed username
:param feed: feed name
:param id: indicator endpoint id [INT]
:return: dict
Example:
ret = Indicator.show('csirtgadgets','port-scanners', '1234')
def show(self, user, feed, id):
    """
    Show a specific indicator by id

    :param user: feed username
    :param feed: feed name
    :param id: indicator endpoint id [INT]
    :return: dict

    Example:
        ret = Indicator.show('csirtgadgets','port-scanners', '1234')
    """
    endpoint = '/users/{user}/feeds/{feed}/indicators/{id}'.format(
        user=user, feed=feed, id=id)
    return self.client.get(endpoint)
Submit action on the Indicator object
:return: Indicator Object
def create(self):
    """
    Submit action on the Indicator object

    :return: Indicator Object

    Posts the indicator (plus optional comment/content/attachment) to
    the feed's indicators endpoint. When the indicator value is absent
    but an attachment exists, the attachment's SHA1 is used as the
    indicator; raises when neither is available.
    """
    uri = '/users/{0}/feeds/{1}/indicators'\
        .format(self.user, self.feed)
    data = {
        "indicator": json.loads(str(self.indicator)),
        "comment": self.comment,
        "content": self.content
    }
    if self.attachment:
        attachment = self._file_to_attachment(
            self.attachment, filename=self.attachment_name)
        data['attachment'] = {
            'data': attachment['data'],
            'filename': attachment['filename']
        }
        # Fall back to the attachment checksum as the indicator value
        if not data['indicator'].get('indicator'):
            data['indicator']['indicator'] = attachment['sha1']
    if not data['indicator'].get('indicator'):
        raise Exception('Missing indicator')
    return self.client.post(uri, data)
Submit action against the IndicatorBulk endpoint
:param indicators: list of Indicator Objects
:param user: feed username
:param feed: feed name
:return: list of Indicator Objects submitted
from csirtgsdk.client import Client
from csirtgsdk.indicator import Indicator
remote = 'https://csirtg.io/api'
token = ''
verify_ssl = True
i = {
'indicator': 'example.com',
'feed': 'test',
'user': 'admin',
'comment': 'this is a test',
}
data = []
cli = Client(remote=remote, token=token, verify_ssl=verify_ssl)
for x in range(0, 5):
data.append(
Indicator(cli, i)
)
ret = cli.submit_bulk(data, 'csirtgadgets', 'test-feed')
def create_bulk(self, indicators, user, feed):
    """
    Submit action against the IndicatorBulk endpoint

    :param indicators: list of Indicator Objects
    :param user: feed username
    :param feed: feed name
    :return: list of Indicator Objects submitted

    from csirtgsdk.client import Client
    from csirtgsdk.indicator import Indicator

    remote = 'https://csirtg.io/api'
    token = ''
    verify_ssl = True

    i = {
        'indicator': 'example.com',
        'feed': 'test',
        'user': 'admin',
        'comment': 'this is a test',
    }

    data = []
    cli = Client(remote=remote, token=token, verify_ssl=verify_ssl)
    for x in range(0, 5):
        data.append(
            Indicator(cli, i)
        )
    ret = cli.submit_bulk(data, 'csirtgadgets', 'test-feed')
    """
    # Fix: the triple-quoted string above used to sit AFTER these
    # statements, so it was a discarded expression rather than the
    # function's docstring; it must be the first statement.
    from .constants import API_VERSION
    if API_VERSION == '1':
        print("create_bulk currently un-avail with APIv1")
        raise SystemExit
    uri = '/users/{0}/feeds/{1}/indicators_bulk'.format(user, feed)
    data = {
        'indicators': [
            {
                'indicator': i.args.indicator,
                'feed_id': i.args.feed,
                'tag_list': i.args.tags,
                "description": i.args.description,
                "portlist": i.args.portlist,
                "protocol": i.args.protocol,
                'firsttime': i.args.firsttime,
                'lasttime': i.args.lasttime,
                'portlist_src': i.args.portlist_src,
                'comment': {
                    'content': i.args.comment
                },
                'rdata': i.args.rdata,
                'rtype': i.args.rtype,
                'content': i.args.content,
                'provider': i.args.provider,
            } for i in indicators
        ]
    }
    return self.client.post(uri, data)
An alias for ``bind_key(event_type, ROOT_WINDOW, key_string, cb)``.
:param event_type: Either 'KeyPress' or 'KeyRelease'.
:type event_type: str
:param key_string: A string of the form 'Mod1-Control-a'.
Namely, a list of zero or more modifiers separated by
'-', followed by a single non-modifier key.
:type key_string: str
:param cb: A first class function with no parameters.
:type cb: function
:return: True if the binding was successful, False otherwise.
:rtype: bool
def bind_global_key(conn, event_type, key_string, cb):
    """
    Bind ``cb`` to ``key_string`` on the root window.

    Equivalent to ``bind_key(conn, event_type, root, key_string, cb)`` where
    ``root`` is the root window of the first screen.

    :param event_type: Either 'KeyPress' or 'KeyRelease'.
    :type event_type: str
    :param key_string: A string of the form 'Mod1-Control-a'.
                       Namely, a list of zero or more modifiers separated by
                       '-', followed by a single non-modifier key.
    :type key_string: str
    :param cb: A first class function with no parameters.
    :type cb: function
    :return: True if the binding was successful, False otherwise.
    :rtype: bool
    """
    first_screen = conn.get_setup().roots[0]
    return bind_key(conn, event_type, first_screen.root, key_string, cb)
Binds a function ``cb`` to a particular key press ``key_string`` on a
window ``wid``. Whether it's a key release or key press binding is
determined by ``event_type``.
``bind_key`` will automatically hook into the ``event`` module's dispatcher,
so that if you're using ``event.main()`` for your main loop, everything
will be taken care of for you.
:param event_type: Either 'KeyPress' or 'KeyRelease'.
:type event_type: str
:param wid: The window to bind the key grab to.
:type wid: int
:param key_string: A string of the form 'Mod1-Control-a'.
Namely, a list of zero or more modifiers separated by
'-', followed by a single non-modifier key.
:type key_string: str
:param cb: A first class function with no parameters.
:type cb: function
:return: True if the binding was successful, False otherwise.
:rtype: bool
def bind_key(conn, event_type, wid, key_string, cb):
    """
    Binds a function ``cb`` to a particular key press ``key_string`` on a
    window ``wid``. Whether it's a key release or key press binding is
    determined by ``event_type``.

    ``bind_key`` will automatically hook into the ``event`` module's
    dispatcher, so that if you're using ``event.main()`` for your main loop,
    everything will be taken care of for you.

    :param event_type: Either 'KeyPress' or 'KeyRelease'.
    :type event_type: str
    :param wid: The window to bind the key grab to.
    :type wid: int
    :param key_string: A string of the form 'Mod1-Control-a'.
    :type key_string: str
    :param cb: A first class function with no parameters.
    :type cb: function
    :return: True if the binding was successful, False otherwise.
    :rtype: bool
    """
    assert event_type in ('KeyPress', 'KeyRelease')

    mods, kc = parse_keystring(conn, key_string)
    if not kc:
        print("Could not find a keycode for " + key_string)
        return False

    key = (wid, mods, kc)
    # Only issue the X grab for the first binding of this (wid, mods, kc);
    # subsequent bindings just add another callback.
    if not __keygrabs[key]:
        if not grab_key(conn, wid, mods, kc):
            return False

    __keybinds[key].append(cb)
    __keygrabs[key] += 1
    return True
A utility function to turn strings like 'Mod1+Mod4+a' into a pair
corresponding to its modifiers and keycode.
:param key_string: String starting with zero or more modifiers followed
by exactly one key press.
Available modifiers: Control, Mod1, Mod2, Mod3, Mod4,
Mod5, Shift, Lock
:type key_string: str
:return: Tuple of modifier mask and keycode
:rtype: (mask, int)
def parse_keystring(conn, key_string):
    """
    A utility function to turn strings like 'Mod1+Mod4+a' into a pair
    corresponding to its modifiers and keycode.

    :param key_string: String starting with zero or more modifiers followed
                       by exactly one key press.
                       Available modifiers: Control, Mod1, Mod2, Mod3, Mod4,
                       Mod5, Shift, Lock
    :type key_string: str
    :return: Tuple of modifier mask and keycode
    :rtype: (mask, int)
    """
    # BUG FIX: everything after the return statement (an alternate '+'-split
    # parser operating on a hard-coded "Shift+Control+A") was unreachable
    # dead code and has been removed.
    # FIXME this code is temporary hack, requires better abstraction
    from PyQt5.QtGui import QKeySequence
    from PyQt5.QtCore import Qt
    from .qt_keycodes import KeyTbl, ModsTbl

    keysequence = QKeySequence(key_string)
    ks = keysequence[0]

    # Accumulate the X modifier mask for every Qt modifier present in the
    # sequence; mods/qtmods track the Qt-side view of the same information.
    mods = Qt.NoModifier
    qtmods = Qt.NoModifier
    modifiers = 0
    if (ks & Qt.ShiftModifier == Qt.ShiftModifier):
        mods |= ModsTbl.index(Qt.ShiftModifier)
        qtmods |= Qt.ShiftModifier.real
        modifiers |= getattr(xproto.KeyButMask, "Shift", 0)
    if (ks & Qt.AltModifier == Qt.AltModifier):
        mods |= ModsTbl.index(Qt.AltModifier)
        qtmods |= Qt.AltModifier.real
        modifiers |= getattr(xproto.KeyButMask, "Mod1", 0)
    if (ks & Qt.ControlModifier == Qt.ControlModifier):
        mods |= ModsTbl.index(Qt.ControlModifier)
        qtmods |= Qt.ControlModifier.real
        modifiers |= getattr(xproto.KeyButMask, "Control", 0)

    # Strip the modifier bits to recover the bare key, then resolve its
    # keycode through the X keysym table.
    qtkeys = ks ^ qtmods
    key = QKeySequence(Qt.Key(qtkeys)).toString().lower()
    keycode = lookup_string(conn, key)

    return modifiers, keycode
Finds the keycode associated with a string representation of a keysym.
:param kstr: English representation of a keysym.
:return: Keycode, if one exists.
:rtype: int
def lookup_string(conn, kstr):
    """
    Finds the keycode associated with a string representation of a keysym.

    :param kstr: English representation of a keysym.
    :return: Keycode, if one exists.
    :rtype: int
    """
    if kstr in keysyms:
        return get_keycode(conn, keysyms[kstr])

    # Multi-character names get a second chance in capitalised form
    # (e.g. 'return' -> 'Return').
    capitalised = kstr.capitalize()
    if len(kstr) > 1 and capitalised in keysyms:
        return get_keycode(conn, keysyms[capitalised])

    return None
Return a keyboard mapping cookie that can be used to fetch the table of
keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
def get_keyboard_mapping(conn):
    """
    Return a keyboard mapping cookie that can be used to fetch the table of
    keysyms in the current X environment.

    :rtype: xcb.xproto.GetKeyboardMappingCookie
    """
    first, last = get_min_max_keycode(conn)
    count = last - first + 1
    return conn.core.GetKeyboardMapping(first, count)
Return an unchecked keyboard mapping cookie that can be used to fetch the
table of keysyms in the current X environment.
:rtype: xcb.xproto.GetKeyboardMappingCookie
def get_keyboard_mapping_unchecked(conn):
    """
    Return an unchecked keyboard mapping cookie that can be used to fetch the
    table of keysyms in the current X environment.

    :rtype: xcb.xproto.GetKeyboardMappingCookie
    """
    # BUG FIX: get_min_max_keycode takes the connection as its argument
    # (cf. get_keyboard_mapping); calling it bare raised TypeError.
    mn, mx = get_min_max_keycode(conn)
    return conn.core.GetKeyboardMappingUnchecked(mn, mx - mn + 1)
Get the keysym associated with a particular keycode in the current X
environment. Although we get a list of keysyms from X in
'get_keyboard_mapping', this list is really a table with
'keysys_per_keycode' columns and ``mx - mn`` rows (where ``mx`` is the
maximum keycode and ``mn`` is the minimum keycode).
Thus, the index for a keysym given a keycode is:
``(keycode - mn) * keysyms_per_keycode + col``.
In most cases, setting ``col`` to 0 will work.
Witness the utter complexity:
http://tronche.com/gui/x/xlib/input/keyboard-encoding.html
You may also pass in your own keyboard mapping using the ``kbmap``
parameter, but xpybutil maintains an up-to-date version of this so you
shouldn't have to.
:param keycode: A physical key represented by an integer.
:type keycode: int
:param col: The column in the keysym table to use.
Unless you know what you're doing, just use 0.
:type col: int
:param kbmap: The keyboard mapping to use.
:type kbmap: xcb.xproto.GetKeyboardMapingReply
def get_keysym(conn, keycode, col=0, kbmap=None):
    """
    Get the keysym associated with a particular keycode in the current X
    environment.

    The keysym list from 'get_keyboard_mapping' is really a table with
    'keysyms_per_keycode' columns and ``mx - mn`` rows (``mx``/``mn`` being
    the maximum/minimum keycode), so the index for a given keycode is
    ``(keycode - mn) * keysyms_per_keycode + col``. In most cases ``col=0``
    is what you want. See
    http://tronche.com/gui/x/xlib/input/keyboard-encoding.html

    You may pass your own keyboard mapping via ``kbmap``, but xpybutil
    maintains an up-to-date one so you shouldn't have to.

    :param keycode: A physical key represented by an integer.
    :type keycode: int
    :param col: The column in the keysym table to use.
                Unless you know what you're doing, just use 0.
    :type col: int
    :param kbmap: The keyboard mapping to use.
    :type kbmap: xcb.xproto.GetKeyboardMapingReply
    """
    table = __kbmap if kbmap is None else kbmap
    mn, _mx = get_min_max_keycode(conn)
    index = (keycode - mn) * table.keysyms_per_keycode + col
    return table.keysyms[index]
Given a keysym, find the keycode mapped to it in the current X environment.
It is necessary to search the keysym table in order to do this, including
all columns.
:param keysym: An X keysym.
:return: A keycode or None if one could not be found.
:rtype: int
def get_keycode(conn, keysym):
    """
    Given a keysym, find the keycode mapped to it in the current X
    environment by scanning every column of the keysym table.

    :param keysym: An X keysym.
    :return: A keycode or None if one could not be found.
    :rtype: int
    """
    mn, mx = get_min_max_keycode(conn)
    columns = __kbmap.keysyms_per_keycode
    for code in range(mn, mx + 1):
        for column in range(columns):
            if get_keysym(conn, code, col=column) == keysym:
                return code
    return None
Fetches and creates the keycode -> modifier mask mapping. Typically, you
shouldn't have to use this---xpybutil will keep this up to date if it
changes.
This function may be useful in that it should closely replicate the output
of the ``xmodmap`` command. For example:
::
keymods = get_keys_to_mods()
for kc in sorted(keymods, key=lambda kc: keymods[kc]):
print keymods[kc], hex(kc), get_keysym_string(get_keysym(kc))
Which will very closely replicate ``xmodmap``. I'm not getting precise
results quite yet, but I do believe I'm getting at least most of what
matters. (i.e., ``xmodmap`` returns valid keysym strings for some that
I cannot.)
:return: A dict mapping from keycode to modifier mask.
:rtype: dict
def get_keys_to_mods(conn):
    """
    Fetches and creates the keycode -> modifier mask mapping. Typically you
    shouldn't have to use this---xpybutil keeps it up to date if it changes.

    It should closely replicate the output of the ``xmodmap`` command::

        keymods = get_keys_to_mods()
        for kc in sorted(keymods, key=lambda kc: keymods[kc]):
            print keymods[kc], hex(kc), get_keysym_string(get_keysym(kc))

    :return: A dict mapping from keycode to modifier mask.
    :rtype: dict
    """
    mm = xproto.ModMask
    # Order matters: row i of the server's modifier map corresponds to
    # modmasks[i].
    modmasks = [mm.Shift, mm.Lock, mm.Control,
                mm._1, mm._2, mm._3, mm._4, mm._5]

    reply = conn.core.GetModifierMapping().reply()
    per_mod = reply.keycodes_per_modifier

    keys_to_mods = {}
    for index, mask in enumerate(modmasks):
        start = index * per_mod
        for keycode in reply.keycodes[start:start + per_mod]:
            keys_to_mods[keycode] = mask
    return keys_to_mods
Takes a ``state`` (typically found in key press or button press events)
and returns a string list representation of the modifiers that were pressed
when generating the event.
:param state: Typically from ``some_event.state``.
:return: List of modifier string representations.
:rtype: [str]
def get_modifiers(state):
    """
    Takes a ``state`` (typically found in key press or button press events)
    and returns a string list representation of the modifiers that were
    pressed when generating the event.

    :param state: Typically from ``some_event.state``.
    :return: List of modifier string representations.
    :rtype: [str]
    """
    # Data-driven equivalent of the original if-chain; order preserved.
    mask_names = (
        ('Shift', xproto.ModMask.Shift),
        ('Lock', xproto.ModMask.Lock),
        ('Control', xproto.ModMask.Control),
        ('Mod1', xproto.ModMask._1),
        ('Mod2', xproto.ModMask._2),
        ('Mod3', xproto.ModMask._3),
        ('Mod4', xproto.ModMask._4),
        ('Mod5', xproto.ModMask._5),
        ('Button1', xproto.KeyButMask.Button1),
        ('Button2', xproto.KeyButMask.Button2),
        ('Button3', xproto.KeyButMask.Button3),
        ('Button4', xproto.KeyButMask.Button4),
        ('Button5', xproto.KeyButMask.Button5),
    )
    return [name for name, mask in mask_names if state & mask]
Grabs a key for a particular window and a modifiers/key value.
If the grab was successful, return True. Otherwise, return False.
If your client is grabbing keys, it is useful to notify the user if a
key wasn't grabbed. Keyboard shortcuts not responding is disorienting!
Also, this function will grab several keys based on varying modifiers.
Namely, this accounts for all of the "trivial" modifiers that may have
an effect on X events, but probably shouldn't effect key grabbing. (i.e.,
whether num lock or caps lock is on.)
N.B. You should probably be using 'bind_key' or 'bind_global_key' instead.
:param wid: A window identifier.
:type wid: int
:param modifiers: A modifier mask.
:type modifiers: int
:param key: A keycode.
:type key: int
:rtype: bool
def grab_key(conn, wid, modifiers, key):
    """
    Grabs a key for a particular window and a modifiers/key value.
    If the grab was successful, return True. Otherwise, return False.
    If your client is grabbing keys, it is useful to notify the user if a
    key wasn't grabbed. Keyboard shortcuts not responding is disorienting!

    Also, this function will grab several keys based on varying modifiers.
    Namely, this accounts for all of the "trivial" modifiers that may have
    an effect on X events, but probably shouldn't effect key grabbing. (i.e.,
    whether num lock or caps lock is on.)

    N.B. You should probably be using 'bind_key' or 'bind_global_key' instead.

    :param wid: A window identifier.
    :type wid: int
    :param modifiers: A modifier mask.
    :type modifiers: int
    :param key: A keycode.
    :type key: int
    :rtype: bool
    """
    try:
        # Grab once per trivial-modifier combination (caps lock, num lock,
        # ...) so the binding fires regardless of their current state.
        for mod in TRIVIAL_MODS:
            conn.core.GrabKeyChecked(True, wid, modifiers | mod, key, GM.Async,
                                     GM.Async).check()
        return True
    except xproto.BadAccess:
        # Another client already owns (at least one of) these grabs.
        return False
Ungrabs a key that was grabbed by ``grab_key``. Similarly, it will return
True on success and False on failure.
When ungrabbing a key, the parameters to this function should be
*precisely* the same as the parameters to ``grab_key``.
:param wid: A window identifier.
:type wid: int
:param modifiers: A modifier mask.
:type modifiers: int
:param key: A keycode.
:type key: int
:rtype: bool
def ungrab_key(conn, wid, modifiers, key):
    """
    Ungrabs a key that was grabbed by ``grab_key``. Returns True on success
    and False on failure.

    When ungrabbing a key, the parameters to this function should be
    *precisely* the same as the parameters to ``grab_key``.

    :param wid: A window identifier.
    :type wid: int
    :param modifiers: A modifier mask.
    :type modifiers: int
    :param key: A keycode.
    :type key: int
    :rtype: bool
    """
    try:
        # Mirror grab_key: release one grab per trivial-modifier combination.
        for trivial in TRIVIAL_MODS:
            cookie = conn.core.UngrabKeyChecked(key, wid, modifiers | trivial)
            cookie.check()
    except xproto.BadAccess:
        return False
    return True
Whenever the keyboard mapping is changed, this function needs to be called
to update xpybutil's internal representing of the current keysym table.
Indeed, xpybutil will do this for you automatically.
Moreover, if something is changed that affects the current keygrabs,
xpybutil will initiate a regrab with the changed keycode.
:param e: The MappingNotify event.
:type e: xcb.xproto.MappingNotifyEvent
:rtype: void
def update_keyboard_mapping(conn, e):
    """
    Whenever the keyboard mapping is changed, this function needs to be
    called to update xpybutil's internal representation of the current keysym
    table. Indeed, xpybutil will do this for you automatically.

    Moreover, if something is changed that affects the current keygrabs,
    xpybutil will initiate a regrab with the changed keycode.

    :param e: The MappingNotify event, or None to force a full refresh.
    :type e: xcb.xproto.MappingNotifyEvent
    :rtype: void
    """
    global __kbmap, __keysmods

    newmap = get_keyboard_mapping(conn).reply()

    if e is None:
        __kbmap = newmap
        __keysmods = get_keys_to_mods(conn)
        return

    if e.request == xproto.Mapping.Keyboard:
        # Work out which keycodes moved so existing grabs can be re-issued.
        changes = {}
        for kc in range(*get_min_max_keycode(conn)):
            # BUG FIX: get_keysym requires conn as its first argument.
            knew = get_keysym(conn, kc, kbmap=newmap)
            oldkc = get_keycode(conn, knew)
            if oldkc != kc:
                changes[oldkc] = kc
        __kbmap = newmap
        __regrab(changes)
    elif e.request == xproto.Mapping.Modifier:
        # BUG FIX: get_keys_to_mods requires the connection argument.
        __keysmods = get_keys_to_mods(conn)
A function that intercepts all key press/release events, and runs
their corresponding callback functions. Nothing much to see here, except
that we must mask out the trivial modifiers from the state in order to
find the right callback.
Callbacks are called in the order that they have been added. (FIFO.)
:param e: A Key{Press,Release} event.
:type e: xcb.xproto.Key{Press,Release}Event
:rtype: bool True if the callback was serviced
def run_keybind_callbacks(e):
    """
    A function that intercepts all key press/release events, and runs
    their corresponding callback functions. Nothing much to see here, except
    that we must mask out the trivial modifiers from the state in order to
    find the right callback.

    Callbacks are called in the order that they have been added. (FIFO.)

    :param e: A Key{Press,Release} event.
    :type e: xcb.xproto.Key{Press,Release}Event
    :rtype: bool True if a callback was serviced
    """
    kc, mods = e.detail, e.state
    # Mask out trivial modifiers (caps/num lock, ...) so the lookup matches
    # the key as it was bound.
    for mod in TRIVIAL_MODS:
        mods &= ~mod

    key = (e.event, mods, kc)
    serviced = False
    for cb in __keybinds.get(key, []):
        try:
            cb(e)
        except TypeError:
            # Callback takes no arguments.
            cb()
        # BUG FIX: previously `serviced` was only set when the callback
        # accepted the event object; zero-argument callbacks left it False.
        serviced = True
    return serviced
Takes a dictionary of changes (mapping old keycode to new keycode) and
regrabs any keys that have been changed with the updated keycode.
:param changes: Mapping of changes from old keycode to new keycode.
:type changes: dict
:rtype: void
def __regrab(changes):
    """
    Takes a dictionary of changes (mapping old keycode to new keycode) and
    regrabs any keys that have been changed with the updated keycode.

    :param changes: Mapping of changes from old keycode to new keycode.
    :type changes: dict
    :rtype: void
    """
    # BUG FIX: iterate over a snapshot -- the loop body mutates __keybinds,
    # and changing a dict's size during iteration raises RuntimeError on
    # Python 3.
    for wid, mods, kc in list(__keybinds):
        if kc in changes:
            # NOTE(review): grab_key/ungrab_key are defined with a connection
            # as their first parameter; these calls appear to be missing it.
            # Confirm against the module's connection handling before relying
            # on this path.
            ungrab_key(wid, mods, kc)
            grab_key(wid, mods, changes[kc])

            old = (wid, mods, kc)
            new = (wid, mods, changes[kc])
            __keybinds[new] = __keybinds[old]
            del __keybinds[old]
Return a list of Storage objects from the API.
Storage types: public, private, normal, backup, cdrom, template, favorite
def get_storages(self, storage_type='normal'):
    """
    Return a list of Storage objects from the API.

    Storage types: public, private, normal, backup, cdrom, template, favorite
    """
    response = self.get_request('/storage/' + storage_type)
    return Storage._create_storage_objs(response['storages'],
                                        cloud_manager=self)
Return a Storage object from the API.
def get_storage(self, storage):
    """
    Return a single Storage object from the API.

    `storage` may be a Storage object or a UUID string.
    """
    response = self.get_request('/storage/' + str(storage))
    return Storage(cloud_manager=self, **response['storage'])
Create a Storage object. Returns an object based on the API's response.
def create_storage(self, size=10, tier='maxiops', title='Storage disk',
                   zone='fi-hel1', backup_rule=None):
    """
    Create a Storage object. Returns an object based on the API's response.

    :param size: disk size in GB
    :param tier: storage tier
    :param title: human-readable title
    :param zone: zone id, e.g. 'fi-hel1'
    :param backup_rule: optional dict describing the backup rule
                        (defaults to an empty rule)
    """
    # BUG FIX: `backup_rule={}` was a mutable default shared across calls;
    # use None as the sentinel instead.
    body = {
        'storage': {
            'size': size,
            'tier': tier,
            'title': title,
            'zone': zone,
            'backup_rule': backup_rule if backup_rule is not None else {}
        }
    }
    res = self.post_request('/storage', body)
    return Storage(cloud_manager=self, **res['storage'])
Modify a Storage object. Returns an object based on the API's response.
def modify_storage(self, storage, size, title, backup_rule=None):
    """
    Modify a Storage object. Returns an object based on the API's response.

    :param storage: Storage object or UUID string
    :param size: new disk size in GB
    :param title: new title
    :param backup_rule: optional dict describing the backup rule
    """
    # BUG FIX: `backup_rule={}` was a mutable default shared across calls;
    # use None as the sentinel instead.
    if backup_rule is None:
        backup_rule = {}
    res = self._modify_storage(str(storage), size, title, backup_rule)
    return Storage(cloud_manager=self, **res['storage'])
Attach a Storage object to a Server. Return a list of the server's storages.
def attach_storage(self, server, storage, storage_type, address):
    """
    Attach a Storage object to a Server. Return a list of the server's
    storages.

    Falsy arguments are simply omitted from the request body.
    """
    storage_device = {}
    if storage:
        storage_device['storage'] = str(storage)
    if storage_type:
        storage_device['type'] = storage_type
    if address:
        storage_device['address'] = address

    res = self.post_request('/server/{0}/storage/attach'.format(server),
                            {'storage_device': storage_device})
    return Storage._create_storage_objs(res['server']['storage_devices'],
                                        cloud_manager=self)
Detach a Storage object to a Server. Return a list of the server's storages.
def detach_storage(self, server, address):
    """
    Detach a Storage object from a Server. Return a list of the server's
    remaining storages.
    """
    payload = {'storage_device': {'address': address}}
    res = self.post_request('/server/{0}/storage/detach'.format(server),
                            payload)
    return Storage._create_storage_objs(res['server']['storage_devices'],
                                        cloud_manager=self)
Reset after repopulating from API.
def _reset(self, **kwargs):
    """
    Reset after repopulating from API.

    The API is inconsistent about field names, so each attribute is taken
    from the first of its known aliases present in kwargs; all alias keys
    are then stripped before delegating to the parent's _reset.
    """
    # attribute -> candidate keys, in priority order
    # (we never want to use storage.storage internally, hence 'uuid' first)
    aliases = (
        ('uuid', ('uuid', 'storage')),
        ('title', ('title', 'storage_title')),
        ('size', ('size', 'storage_size')),
    )
    consumed = set()
    for attr, keys in aliases:
        consumed.update(keys)
        for key in keys:
            if key in kwargs:
                setattr(self, attr, kwargs[key])
                break

    remaining = dict(
        (key, val)
        for key, val in kwargs.items()
        if key not in consumed
    )
    super(Storage, self)._reset(**remaining)
Save (modify) the storage to the API.
Note: only size and title are updateable fields.
def save(self):
    """
    Persist (modify) this storage via the API.

    Note: only size and title are updateable fields.
    """
    updated = self.cloud_manager._modify_storage(self, self.size, self.title)
    self._reset(**updated['storage'])
Return a dict that can be serialised to JSON and sent to UpCloud's API.
Uses the convenience attribute `os` for determining `action` and `storage`
fields.
def to_dict(self):
    """
    Return a dict that can be serialised to JSON and sent to UpCloud's API.

    Always includes tier, title and size; address and zone are included
    only when set on this instance.
    """
    # (Corrected docstring: the previous one referred to an `os` attribute
    # and `action`/`storage` fields that this method does not use.)
    body = {
        'tier': self.tier,
        'title': self.title,
        'size': self.size,
    }

    # optionals
    if hasattr(self, 'address') and self.address:
        body['address'] = self.address
    if hasattr(self, 'zone') and self.zone:
        body['zone'] = self.zone

    return body
Fetches an album by given ID.
:param album_id: the album ID.
:type album_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#albums-album_id`.
def fetch_album(self, album_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches an album by given ID.

    :param album_id: the album ID.
    :type album_id: str
    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#albums-album_id`.
    '''
    query = url_parse.urlencode({'territory': terr})
    url = 'https://api.kkbox.com/v1.1/albums/%s?%s' % (album_id, query)
    headers = self.http._headers_with_access_token()
    return self.http._post_data(url, None, headers)
Prints name, author, size and age
def lookup(self):
    """
    Print name, author, size and age of this torrent.
    """
    # BUG FIX: was a Python-2-only `print` statement; the parenthesised form
    # is valid (and identical in output) on both Python 2 and 3.
    print("%s by %s, size: %s, uploaded %s ago" % (self.name, self.author,
                                                   self.size, self.age))
Open url and return amount of pages
def _get_max_page(self, url):
    """
    Open url and return the number of result pages.

    :raises ValueError: if the page reports no results.
    """
    html = requests.get(url).text
    pq = PyQuery(html)
    try:
        total = int(pq("h2").text().split()[-1])
    except ValueError:
        raise ValueError("No results found!")
    # 25 results per page. BUG FIX: use floor division (divmod) -- plain `/`
    # returns a float page count on Python 3; behaviour on Python 2 ints is
    # unchanged.
    pages, remainder = divmod(total, 25)
    return pages + 1 if remainder else pages
Build and return url. Also update max_page.
def build(self, update=True):
    """
    Build and return the search URL. Also refresh max_page when requested.

    :param update: when True, fetch the result page to refresh max_page.
    """
    # BUG FIX: removed `ret = self.base + self.query`, a dead assignment
    # that was immediately overwritten below.
    page = "".join(("/", str(self.page), "/"))
    category = " category:" + self.category if self.category else ""
    if self.order:
        order = "".join(("?field=", self.order[0],
                         "&sorder=", self.order[1]))
    else:
        order = ""
    ret = "".join((self.base, self.query, category, page, order))
    if update:
        self.max_page = self._get_max_page(ret)
    return ret
Build and return url. Also update max_page.
URL structure for user torrent lists differs from other result lists
as the page number is part of the query string and not the URL path
def build(self, update=True):
    """
    Build and return url. Also update max_page.

    URL structure for user torrent lists differs from other result lists
    as the page number is part of the query string and not the URL path.
    """
    query = "?page={}".format(self.page)
    if self.order:
        field, direction = self.order
        query += "&field={}&sorder={}".format(field, direction)
    url = "{}{}/uploads/{}".format(self.base, self.user, query)
    if update:
        self.max_page = self._get_max_page(url)
    return url
Parse url and yield namedtuple Torrent for every torrent on page
def _items(self):
    """
    Parse the result page and yield a Torrent namedtuple for every torrent
    row on it.
    """
    for row in self._get_rows():
        yield self._get_torrent(row)
Parse row into namedtuple
def _get_torrent(self, row):
    """
    Parse a PyQuery table row into a Torrent namedtuple.
    """
    td = row("td")
    # Normalise the site's odd spacing around dots in torrent names.
    name = td("a.cellMainLink").text()
    name = name.replace(" . ", ".").replace(" .", ".")
    author = td("a.plain").text()
    # Presence of these badge elements marks verified author/torrent.
    verified_author = True if td(".lightgrey>.ka-verify") else False
    category = td("span").find("strong").find("a").eq(0).text()
    verified_torrent = True if td(".icon16>.ka-green") else False
    comments = td(".iaconbox>.icommentjs>.iconvalue").text()
    # Detail-page link is relative; prefix the site domain.
    torrent_link = "http://" + BASE.domain
    if td("a.cellMainLink").attr("href") is not None:
        torrent_link += td("a.cellMainLink").attr("href")
    magnet_link = td("a[data-nop]").eq(1).attr("href")
    download_link = td("a[data-download]").attr("href")
    # The centered cells hold, in order: size, file count, age, seeders,
    # leechers. Age gets its internal whitespace collapsed.
    td_centers = row("td.center")
    size = td_centers.eq(0).text()
    files = td_centers.eq(1).text()
    age = " ".join(td_centers.eq(2).text().split())
    seed = td_centers.eq(3).text()
    leech = td_centers.eq(4).text()
    return Torrent(name, author, verified_author, category, size,
                   files, age, seed, leech, verified_torrent, comments,
                   torrent_link, magnet_link, download_link)
Return all rows on page
def _get_rows(self):
    """
    Return all torrent rows on the current result page, header row skipped.
    """
    html = requests.get(self.url.build()).text
    if re.search('did not match any documents', html):
        return []
    pq = PyQuery(html)
    rows = pq("table.data").find("tr")
    # BUG FIX: `map(rows.eq, ...)[1:]` relies on Python 2's list-returning
    # map(); on Python 3 a map object is not subscriptable. The list
    # comprehension is equivalent on both, and skips index 0 (the header).
    return [rows.eq(i) for i in range(1, rows.size())]
Yield torrents in range from page_from to page_to
def pages(self, page_from, page_to):
    """
    Yield torrents from page_from through page_to (inclusive).

    Pages are fetched concurrently, one thread per page. A chain of locks
    ensures results are appended to the shared list strictly in page order:
    thread i may only extend the list once thread i-1 has released its lock.

    :raises IndexError: if the requested range falls outside 1..max_page.
    """
    if not all([page_from < self.url.max_page, page_from > 0,
                page_to <= self.url.max_page, page_to > page_from]):
        raise IndexError("Invalid page numbers")

    size = (page_to + 1) - page_from
    # BUG FIX: was `threads = ret = []`, aliasing both names to one list
    # (harmless only because `threads` was reassigned later -- a latent bug).
    ret = []
    page_list = range(page_from, page_to + 1)

    # All locks except the first start acquired; each thread releases its
    # successor's lock after appending, enforcing in-order extension.
    locks = [threading.Lock() for i in range(size)]
    for lock in locks[1:]:
        lock.acquire()

    def t_function(pos):
        """Fetch one page, then append its torrents once it's our turn."""
        res = self.page(page_list[pos]).list()
        locks[pos].acquire()
        ret.extend(res)
        if pos != size - 1:
            locks[pos + 1].release()

    threads = [threading.Thread(target=t_function, args=(i,))
               for i in range(size)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    for torrent in ret:
        yield torrent
Yield torrents in range from current page to last page
def all(self):
    """
    Yield torrents in range from current page to last page
    """
    # Delegates to pages(); returns its generator directly.
    return self.pages(self.url.page, self.url.max_page)
Set field and order set by arguments
def order(self, field, order=None):
    """
    Set the sort field and direction, reset to the first page, and return
    self for chaining. Direction defaults to ORDER.DESC.
    """
    direction = order if order else ORDER.DESC
    self.url.order = (field, direction)
    self.url.set_page(1)
    return self
Change category of current search and return self
def category(self, category):
    """
    Change category of current search and return self
    """
    # Reset to the first page: the old page number may not exist in the
    # newly selected category.
    self.url.category = category
    self.url.set_page(1)
    return self
Remove this FirewallRule from the API.
This instance must be associated with a server for this method to work,
which is done by instantiating via server.get_firewall_rules().
def destroy(self):
    """
    Remove this FirewallRule from the API.

    This instance must be associated with a server for this method to work,
    which is done by instantiating via server.get_firewall_rules().

    :raises Exception: if this rule is not associated with a server.
    """
    if not hasattr(self, 'server') or not self.server:
        # BUG FIX: message previously read "please use or
        # server.get_firewall_rules()" -- garbled wording corrected.
        raise Exception(
            'FirewallRule not associated with server; '
            'please use server.get_firewall_rules() to get objects '
            'that are associated with a server.'
        )
    return self.server.cloud_manager.delete_firewall_rule(
        self.server.uuid,
        self.position
    )
Fetches new release categories by given ID.
:param category_id: the station ID.
:type category_id: str
:param terr: the current territory.
:return: API response.
:rtype: list
See `https://docs-en.kkbox.codes/v1.1/reference#newreleasecategories-category_id`
def fetch_new_release_category(self, category_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches a new release category by given ID.

    :param category_id: the category ID.
    :type category_id: str
    :param terr: the current territory.
    :return: API response.
    :rtype: list

    See `https://docs-en.kkbox.codes/v1.1/reference#newreleasecategories-category_id`
    '''
    query = url_parse.urlencode({'territory': terr})
    url = 'https://api.kkbox.com/v1.1/new-release-categories/%s?%s' % (
        category_id, query)
    headers = self.http._headers_with_access_token()
    return self.http._post_data(url, None, headers)
Fetches top tracks belonging to an artist by given ID.
:param artist_id: the artist ID.
:type artist_id: str
:param terr: the current territory.
:return: API response.
:rtype: dict
See 'https://docs-en.kkbox.codes/v1.1/reference#artists-artist_id-toptracks'
def fetch_top_tracks_of_artist(self, artist_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches top tracks belonging to an artist by given ID.

    :param artist_id: the artist ID.
    :type artist_id: str
    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See 'https://docs-en.kkbox.codes/v1.1/reference#artists-artist_id-toptracks'
    '''
    query = url_parse.urlencode({'territory': terr})
    url = 'https://api.kkbox.com/v1.1/artists/%s/top-tracks?%s' % (
        artist_id, query)
    headers = self.http._headers_with_access_token()
    return self.http._post_data(url, None, headers)
List all tags as Tag objects.
def get_tags(self):
    """List all tags as Tag objects."""
    response = self.get_request('/tag')
    tag_dicts = response['tags']['tag']
    return [Tag(cloud_manager=self, **tag) for tag in tag_dicts]
Return the tag as Tag object.
def get_tag(self, name):
    """Fetch the tag with the given name and return it as a Tag object."""
    tag_data = self.get_request('/tag/' + name)['tag']
    return Tag(cloud_manager=self, **tag_data)
Create a new Tag. Only name is mandatory.
Returns the created Tag object.
def create_tag(self, name, description=None, servers=None):
    """
    Create a new Tag. Only name is mandatory.

    - name: tag name (str)
    - description: optional tag description (str)
    - servers: optional list of Server objects or UUID strings

    Returns the created Tag object.
    """
    # Avoid a mutable default argument (servers=[]): a shared list default
    # leaks state between calls. None is the safe, equivalent default.
    if servers is None:
        servers = []
    servers = [str(server) for server in servers]
    body = {'tag': Tag(name, description, servers).to_dict()}
    res = self.request('POST', '/tag', body)
    return Tag(cloud_manager=self, **res['tag'])
PUT /tag/name. Returns a dict that can be used to create a Tag object.
Private method used by the Tag class and TagManager.modify_tag.
def _modify_tag(self, name, description, servers, new_name):
    """
    Issue PUT /tag/<name> and return the raw tag dict from the response.

    Private helper shared by the Tag class and TagManager.modify_tag; the
    caller is responsible for wrapping the dict in a Tag object.
    """
    payload = {'tag': Tag(new_name, description, servers).to_dict()}
    response = self.request('PUT', '/tag/' + name, payload)
    return response['tag']
PUT /tag/name. Returns a new Tag object based on the API response.
def modify_tag(self, name, description=None, servers=None, new_name=None):
    """
    Modify an existing tag (PUT /tag/<name>).

    Returns a new Tag object based on the API response.
    """
    # _modify_tag already unwraps the response and returns the tag dict,
    # so indexing it with ['tag'] a second time raised KeyError.
    tag_dict = self._modify_tag(name, description, servers, new_name)
    return Tag(cloud_manager=self, **tag_dict)
Remove tags from a server.
- server: Server object or UUID string
- tags: list of Tag objects or strings
def remove_tags(self, server, tags):
    """
    Remove tags from a server.

    - server: Server object or UUID string
    - tags: list of Tag objects or strings
    """
    server_uuid = str(server)
    tag_list = ','.join(str(tag) for tag in tags)
    return self.post_request('/server/{0}/untag/{1}'.format(server_uuid, tag_list))
Helper for assigning object attributes from API responses.
def assignIfExists(opts, default=None, **kwargs):
    """
    Return the value of the first key from `opts` that is present in kwargs.

    Falls back to `default` when none of the keys are present. Helper for
    assigning object attributes from API responses.
    """
    for candidate in opts:
        if candidate in kwargs:
            return kwargs[candidate]
    return default
Try a given operation (API call) n times.
Raises if the API call fails with an error_code that is not expected.
Raises if the API call has not succeeded within n attempts.
Waits 3 seconds between each attempt.
def try_it_n_times(operation, expected_error_codes, custom_error='operation failed', n=10):
    """
    Try a given operation (API call) up to n times.

    Raises immediately (with the original traceback) if the API call fails
    with an error_code that is not in expected_error_codes.
    Raises UpCloudClientError if the call has not succeeded within n attempts.
    Waits 3 seconds between each attempt.
    """
    for attempt in range(1, n + 1):
        try:
            operation()
            return
        except UpCloudAPIError as e:
            if e.error_code not in expected_error_codes:
                # bare raise preserves the original traceback (raise e did not)
                raise
        # give up after the nth attempt without a pointless final sleep
        if attempt == n:
            raise UpCloudClientError(custom_error)
        sleep(3)
Extract a pseudorandom key suitable for use with hkdf_expand
from the input_key_material and a salt using HMAC with the
provided hash (default SHA-512).
salt should be a random, application-specific byte string. If
salt is None or the empty string, an all-zeros string of the same
length as the hash's block size will be used instead per the RFC.
See the HKDF draft RFC and paper for usage notes.
def hkdf_extract(salt, input_key_material, hash=hashlib.sha512):
    '''
    Extract a pseudorandom key suitable for use with hkdf_expand
    from the input_key_material and a salt using HMAC with the
    provided hash (default SHA-512).

    salt should be a random, application-specific byte string. If
    salt is None or the empty string, an all-zeros string of the
    hash's digest length is used instead, per RFC 5869.

    See RFC 5869 and the HKDF paper for usage notes.
    '''
    hash_len = hash().digest_size
    # identity check for None; the original `salt == None` also worked but
    # is non-idiomatic
    if salt is None or len(salt) == 0:
        salt = bytearray((0,) * hash_len)
    # hmac accepts bytes-like objects directly; the Python 2-only buffer()
    # wrapper broke this function on Python 3
    return hmac.new(bytes(salt), bytes(input_key_material), hash).digest()
Expand `pseudo_random_key` and `info` into a key of length `bytes` using
HKDF's expand function based on HMAC with the provided hash (default
SHA-512). See the HKDF draft RFC and paper for usage notes.
def hkdf_expand(pseudo_random_key, info=b"", length=32, hash=hashlib.sha512):
    '''
    Expand `pseudo_random_key` and `info` into a key of `length` bytes using
    HKDF's expand function based on HMAC with the provided hash (default
    SHA-512). See RFC 5869 and the HKDF paper for usage notes.

    Raises Exception when more than 255 * HashLen bytes are requested
    (the RFC 5869 limit).
    '''
    hash_len = hash().digest_size
    length = int(length)
    if length > 255 * hash_len:
        raise Exception("Cannot expand to more than 255 * %d = %d bytes using the specified hash function" %
                        (hash_len, 255 * hash_len))
    blocks_needed = -(-length // hash_len)  # ceiling division
    okm = b""
    output_block = b""
    for counter in range(blocks_needed):
        # T(i) = HMAC(PRK, T(i-1) | info | i), with i as a single 1-based byte.
        # hmac accepts bytes-like objects directly; the Python 2-only buffer()
        # wrapper broke this function on Python 3.
        data = output_block + info + bytes(bytearray((counter + 1,)))
        output_block = hmac.new(pseudo_random_key, data, hash).digest()
        okm += output_block
    return okm[:length]
Generate output key material based on an `info` value
Arguments:
- info - context to generate the OKM
- length - length in bytes of the key to generate
See the HKDF draft RFC for guidance.
def expand(self, info=b"", length=32):
    '''
    Generate output key material from this key's PRK.

    Arguments:
    - info - application-specific context for the OKM
    - length - number of bytes of key material to generate

    See the HKDF RFC (5869) for guidance.
    '''
    okm = hkdf_expand(self._prk, info, length, self._hash)
    return okm
Helper function for creating Server.login_user blocks.
(see: https://www.upcloud.com/api/8-servers/#create-server)
def login_user_block(username, ssh_keys, create_password=True):
    """
    Helper for building Server.login_user blocks.

    (see: https://www.upcloud.com/api/8-servers/#create-server)
    """
    # The API expects 'yes'/'no' strings; only the literal True enables
    # password creation (identity check, matching original behavior).
    result = {
        'ssh_keys': {'ssh_key': ssh_keys},
        'create_password': 'yes' if create_password is True else 'no',
    }
    # a falsy username (None, '') is omitted entirely
    if username:
        result['username'] = username
    return result
Reset the server object with new values given as params.
- server: a dict representing the server. e.g the API response.
- kwargs: any meta fields such as cloud_manager and populated.
Note: storage_devices and ip_addresses may be given in server as dicts or
in kwargs as lists containing Storage and IPAddress objects.
def _reset(self, server, **kwargs):
    """
    Reset the server object with new values given as params.

    - server: a dict representing the server, e.g. the API response.
    - kwargs: any meta fields such as cloud_manager and populated.

    Note: storage_devices and ip_addresses may be given in server as dicts or
    in kwargs as lists containing Storage and IPAddress objects.
    """
    if server:
        # convert nested storage/ip/tag dicts into objects first
        Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
        for attr in server:
            object.__setattr__(self, attr, server[attr])
    for attr in kwargs:
        object.__setattr__(self, attr, kwargs[attr])
Sync changes from the API to the local object.
Note: syncs ip_addresses and storage_devices too (/server/uuid endpoint)
def populate(self):
    """
    Sync changes from the API to the local object.

    Note: also syncs ip_addresses and storage_devices (/server/uuid endpoint).
    """
    server_dict, ips, storage_list = self.cloud_manager.get_server_data(self.uuid)
    self._reset(server_dict,
                ip_addresses=ips,
                storage_devices=storage_list,
                populated=True)
    return self
Sync local changes in server's attributes to the API.
Note: DOES NOT sync IPAddresses and storage_devices,
use add_ip, add_storage, remove_ip, remove_storage instead.
def save(self):
    """
    Sync local changes in the server's attributes to the API.

    Note: does NOT sync ip_addresses and storage_devices;
    use add_ip, add_storage, remove_ip, remove_storage instead.
    """
    # collect only the updateable fields that are actually set locally
    changes = {}
    for field in self.updateable_fields:
        if hasattr(self, field):
            changes[field] = getattr(self, field)
    self.cloud_manager.modify_server(self.uuid, **changes)
    self._reset(changes)
Shutdown/stop the server. By default, issue a soft shutdown with a timeout of 30s.
After the timeout a hard shutdown is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
def shutdown(self, hard=False, timeout=30):
    """
    Shutdown/stop the server. By default, issues a soft shutdown with a
    timeout of 30s; after the timeout a hard shutdown is performed if the
    server has not stopped.

    Note: the API responds immediately (unlike in start), with state: started.
    This client will, however, set state as 'maintenance' to signal that the
    server is neither started nor stopped.
    """
    request_body = {
        'stop_server': {
            'stop_type': 'hard' if hard else 'soft',
            'timeout': '{0}'.format(timeout)
        }
    }
    self.cloud_manager.post_request('/server/{0}/stop'.format(self.uuid), request_body)
    object.__setattr__(self, 'state', 'maintenance')
Start the server. Note: slow and blocking request.
The API waits for confirmation from UpCloud's IaaS backend before responding.
def start(self, timeout=120):
    """
    Start the server. Note: slow and blocking request; the API waits for
    confirmation from UpCloud's IaaS backend before responding.
    """
    endpoint = '/server/{0}/start'.format(self.uuid)
    self.cloud_manager.post_request(endpoint, timeout=timeout)
    object.__setattr__(self, 'state', 'started')
Restart the server. By default, issue a soft restart with a timeout of 30s
and a hard restart after the timeout.
After the timeout a hard restart is performed if the server has not stopped.
Note: API responds immediately (unlike in start), with state: started.
This client will, however, set state as 'maintenance' to signal that the server is neither
started nor stopped.
def restart(self, hard=False, timeout=30, force=True):
    """
    Restart the server. By default, issues a soft restart with a timeout of
    30s; after the timeout a hard restart is performed if the server has not
    stopped.

    Note: the API responds immediately (unlike in start), with state: started.
    This client will, however, set state as 'maintenance' to signal that the
    server is neither started nor stopped.
    """
    request_body = {
        'restart_server': {
            'stop_type': 'hard' if hard else 'soft',
            'timeout': '{0}'.format(timeout),
            'timeout_action': 'destroy' if force else 'ignore'
        }
    }
    self.cloud_manager.post_request('/server/{0}/restart'.format(self.uuid), request_body)
    object.__setattr__(self, 'state', 'maintenance')
Allocate a new (random) IP-address to the Server.
def add_ip(self, family='IPv4'):
    """
    Allocate a new (random) IP address to the server and return it.
    """
    new_ip = self.cloud_manager.attach_ip(self.uuid, family)
    self.ip_addresses.append(new_ip)
    return new_ip
Release the specified IP-address from the server.
def remove_ip(self, IPAddress):
    """
    Release the given IP address from the server.
    """
    self.cloud_manager.release_ip(IPAddress.address)
    # keep the local list in sync with the API state
    self.ip_addresses.remove(IPAddress)
Attach the given storage to the Server.
Default address is next available.
def add_storage(self, storage=None, type='disk', address=None):
    """
    Attach the given Storage to the server.

    - storage: Storage object to attach (required despite the default).
    - type: storage type, e.g. 'disk' or 'cdrom'.
    - address: attachment address; default (None) lets the API choose the
      next available one.
    """
    # Fail fast with a clear error instead of an opaque AttributeError on
    # storage.uuid when no storage was given.
    if storage is None:
        raise ValueError('storage is required; pass a Storage object')
    self.cloud_manager.attach_storage(server=self.uuid,
                                      storage=storage.uuid,
                                      storage_type=type,
                                      address=address)
    # mirror the attachment locally so storage_devices stays in sync
    storage.address = address
    storage.type = type
    self.storage_devices.append(storage)
Remove Storage from a Server.
The Storage must be a reference to an object in
Server.storage_devices or the method will throw an Exception.
A Storage from get_storage(uuid) will not work as it is missing the 'address' property.
def remove_storage(self, storage):
    """
    Detach a Storage from the server.

    The Storage must be a reference to an object in Server.storage_devices,
    or the method will throw an Exception. A Storage from get_storage(uuid)
    will not work, as it is missing the 'address' attribute.
    """
    # Storages fetched outside storage_devices lack 'address'
    # (this is due to how the API handles Storages)
    if not hasattr(storage, 'address'):
        raise Exception(
            ('Storage does not have an address. '
             'Access the Storage via Server.storage_devices '
             'so they include an address. '
             '(This is due how the API handles Storages)')
        )
    self.cloud_manager.detach_storage(server=self.uuid, address=storage.address)
    self.storage_devices.remove(storage)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.