text stringlengths 81 112k |
|---|
Read content. See file.read
def read(self, limit=-1):
    """Read up to *limit* bytes from the chunk, see file.read"""
    # Bytes left between the parent's current position and the chunk end.
    available = self.len - self.parent_fd.tell() + self.offset
    if limit == -1 or limit > available:
        limit = available
    return self.parent_fd.read(limit)
Seek to position in stream, see file.seek
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to a position within the chunk, see file.seek"""
    if whence == os.SEEK_SET:
        target = self.offset + offset
    elif whence == os.SEEK_CUR:
        target = self.tell() + offset
    elif whence == os.SEEK_END:
        target = self.offset + self.len + offset
    else:
        raise ValueError("invalid whence {}".format(whence))
    # Refuse to leave the [offset, offset + len] window of the parent fd.
    if not self.offset <= target <= self.offset + self.len:
        raise ValueError("seek position beyond chunk area")
    self.parent_fd.seek(target, os.SEEK_SET)
Close file, see file.close
def close(self):
    """Close file, see file.close"""
    # Only close handles we can own a descriptor for; in-memory or
    # reused parents are left open for their owner.
    try:
        self.parent_fd.fileno()
        owns_descriptor = True
    except io.UnsupportedOperation:
        owns_descriptor = False
    if owns_descriptor:
        self.parent_fd.close()
    else:
        logger.debug("Not closing parent_fd - reusing existing")
Prepare query string
def _build_query(self, uri, params=None, action_token_type=None):
"""Prepare query string"""
if params is None:
params = QueryParams()
params['response_format'] = 'json'
session_token = None
if action_token_type in self._action_tokens:
# Favor action token
using_action_token = True
session_token = self._action_tokens[action_token_type]
else:
using_action_token = False
if self._session:
session_token = self._session['session_token']
if session_token:
params['session_token'] = session_token
# make order of parameters predictable for testing
keys = list(params.keys())
keys.sort()
query = urlencode([tuple([key, params[key]]) for key in keys])
if not using_action_token and self._session:
secret_key_mod = int(self._session['secret_key']) % 256
signature_base = (str(secret_key_mod) +
self._session['time'] +
uri + '?' + query).encode('ascii')
query += '&signature=' + hashlib.md5(signature_base).hexdigest()
return query |
Perform request to MediaFire API
action -- "category/name" of method to call
params -- dict of parameters or query string
action_token_type -- action token to use: None, "upload", "image"
upload_info -- in case of upload, dict of "fd" and "filename"
headers -- additional headers to send (used for upload)
session_token and signature generation/update is handled automatically
def request(self, action, params=None, action_token_type=None,
            upload_info=None, headers=None):
    """Perform request to MediaFire API

    action -- "category/name" of method to call
    params -- dict of parameters or query string
    action_token_type -- action token to use: None, "upload", "image"
    upload_info -- in case of upload, dict of "fd" and "filename"
    headers -- additional headers to send (used for upload)

    session_token and signature generation/update is handled automatically

    Returns the parsed 'response' node (see _process_response).
    """
    uri = self._build_uri(action)

    # A pre-built query string is passed through as-is; anything else
    # is encoded/signed by _build_query().
    if isinstance(params, six.text_type):
        query = params
    else:
        query = self._build_query(uri, params, action_token_type)

    if headers is None:
        headers = {}

    if upload_info is None:
        # Use request body for query
        data = query
        headers['Content-Type'] = FORM_MIMETYPE
    else:
        # Use query string for query since payload is file
        uri += '?' + query
        if "filename" in upload_info:
            # Multipart upload: stream the fd as a form field.
            data = MultipartEncoder(
                fields={'file': (
                    upload_info["filename"],
                    upload_info["fd"],
                    UPLOAD_MIMETYPE
                )}
            )
            headers["Content-Type"] = data.content_type
        else:
            # Raw-body upload: the fd itself is the request body.
            data = upload_info["fd"]
            headers["Content-Type"] = UPLOAD_MIMETYPE

    # For uploads the query is already embedded in the uri, so skip it.
    logger.debug("uri=%s query=%s",
                 uri, query if not upload_info else None)

    try:
        # bytes from now on
        url = (API_BASE + uri).encode('utf-8')

        if isinstance(data, six.text_type):
            # request's data is bytes, dict, or filehandle
            data = data.encode('utf-8')

        response = self.http.post(url, data=data,
                                  headers=headers, stream=True)
    except RequestException as ex:
        logger.exception("HTTP request failed")
        raise MediaFireConnectionError(
            "RequestException: {}".format(ex))

    return self._process_response(response)
Parse response
def _process_response(self, response):
    """Parse an API response and return its inner 'response' node.

    Non-JSON payloads are returned as the raw requests response.
    Raises MediaFireApiError when the API reports a failure.
    """
    forward_raw = False
    content_type = response.headers['Content-Type']
    if content_type != 'application/json':
        logger.debug("headers: %s", response.headers)
        # API BUG: text/xml content-type with json payload
        # http://forum.mediafiredev.com/showthread.php?136
        if content_type == 'text/xml':
            # we never request xml, so check it quacks like JSON
            if not response.text.lstrip().startswith('{'):
                forward_raw = True
        else:
            # _process_response can't deal with non-json,
            # return response as is
            forward_raw = True

    if forward_raw:
        # Surface HTTP-level errors first, then hand back the raw response.
        response.raise_for_status()
        return response

    logger.debug("response: %s", response.text)

    # if we are here, then most likely have json
    try:
        response_node = response.json()['response']
    except ValueError:
        # promised JSON but failed
        raise MediaFireApiError("JSON decode failure")

    # Server may instruct us to rotate the session secret key.
    if response_node.get('new_key', 'no') == 'yes':
        self._regenerate_secret_key()

    # check for errors
    if response_node['result'] != 'Success':
        raise MediaFireApiError(response_node['message'],
                                response_node['error'])

    return response_node
Regenerate secret key
http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature
def _regenerate_secret_key(self):
"""Regenerate secret key
http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature
"""
# Don't regenerate the key if we have none
if self._session and 'secret_key' in self._session:
self._session['secret_key'] = (
int(self._session['secret_key']) * 16807) % 2147483647 |
Set session token
value -- dict returned by user/get_session_token
def session(self, value):
    """Set (or clear) the session token info.

    value -- dict returned by user/get_session_token, or None to unset
    """
    if value is None:
        # unset session token
        self._session = None
        return

    if not isinstance(value, dict):
        raise ValueError("session info is required")

    parsed = {}
    # Mandatory fields -- refuse the session without them.
    for key in ("session_token", "time", "secret_key"):
        if key not in value:
            raise ValueError("Missing parameter: {}".format(key))
        parsed[key] = value[key]

    # Optional extras, copied only when present.
    for key in ("ekey", "pkey"):
        if key in value:
            parsed[key] = value[key]

    self._session = parsed
Set action tokens
type_ -- either "upload" or "image"
action_token -- string obtained from user/get_action_token,
set None to remove the token
def set_action_token(self, type_=None, action_token=None):
    """Set or clear an action token.

    type_ -- either "upload" or "image"
    action_token -- string obtained from user/get_action_token,
                    set None to remove the token
    """
    if action_token is None:
        # BUG FIX: clearing a token that was never set used to raise
        # KeyError (`del self._action_tokens[type_]`); make it a no-op.
        self._action_tokens.pop(type_, None)
    else:
        self._action_tokens[type_] = action_token
user/get_session_token
http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token
def user_get_session_token(self, app_id=None, email=None, password=None,
                           ekey=None, fb_access_token=None,
                           tw_oauth_token=None,
                           tw_oauth_token_secret=None, api_key=None):
    """user/get_session_token

    http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token

    Exactly one credential set must be supplied: a facebook token, a
    twitter token pair, or email/ekey + password.  The SHA1 call
    signature concatenates the credential values (in the order they
    were appended), then application_id, then optionally api_key.
    """
    if app_id is None:
        raise ValueError("app_id must be defined")

    params = QueryParams({
        'application_id': str(app_id),
        'token_version': 2,
        'response_format': 'json'
    })

    if fb_access_token:
        params['fb_access_token'] = fb_access_token
        signature_keys = ['fb_access_token']
    elif tw_oauth_token and tw_oauth_token_secret:
        params['tw_oauth_token'] = tw_oauth_token
        params['tw_oauth_token_secret'] = tw_oauth_token_secret
        signature_keys = ['tw_oauth_token',
                          'tw_oauth_token_secret']
    elif (email or ekey) and password:
        signature_keys = []
        if email:
            signature_keys.append('email')
            params['email'] = email
        if ekey:
            signature_keys.append('ekey')
            params['ekey'] = ekey
        params['password'] = password
        signature_keys.append('password')
    else:
        raise ValueError("Credentials not provided")

    signature_keys.append('application_id')

    # Signature = SHA1 over the values of signature_keys, in order.
    signature = hashlib.sha1()
    for key in signature_keys:
        signature.update(str(params[key]).encode('ascii'))

    # Note: If the app uses a callback URL to provide its API key,
    # or if it does not have the "Require Secret Key" option checked,
    # then the API key may be omitted from the signature
    if api_key:
        signature.update(api_key.encode('ascii'))

    query = urlencode(params)
    query += '&signature=' + signature.hexdigest()

    return self.request('user/get_session_token', params=query)
user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
def user_set_avatar(self, action=None, quick_key=None, url=None):
    """user/set_avatar

    http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
    """
    params = QueryParams(dict(
        action=action,
        quick_key=quick_key,
        url=url,
    ))
    return self.request("user/set_avatar", params)
user/update
http://www.mediafire.com/developers/core_api/1.3/user/#update
def user_update(self, display_name=None, first_name=None, last_name=None,
                email=None, password=None, current_password=None,
                birth_date=None, gender=None, website=None, subdomain=None,
                location=None, newsletter=None, primary_usage=None,
                timezone=None):
    """user/update

    http://www.mediafire.com/developers/core_api/1.3/user/#update
    """
    params = QueryParams(dict(
        display_name=display_name,
        first_name=first_name,
        last_name=last_name,
        email=email,
        password=password,
        current_password=current_password,
        birth_date=birth_date,
        gender=gender,
        website=website,
        subdomain=subdomain,
        location=location,
        newsletter=newsletter,
        primary_usage=primary_usage,
        timezone=timezone,
    ))
    return self.request("user/update", params)
folder/get_info
http://www.mediafire.com/developers/core_api/1.3/folder/#get_info
def folder_get_info(self, folder_key=None, device_id=None, details=None):
    """folder/get_info

    http://www.mediafire.com/developers/core_api/1.3/folder/#get_info
    """
    params = QueryParams(dict(
        folder_key=folder_key,
        device_id=device_id,
        details=details,
    ))
    return self.request('folder/get_info', params)
folder/get_content
http://www.mediafire.com/developers/core_api/1.3/folder/#get_content
def folder_get_content(self, folder_key=None, content_type=None,
                       filter_=None, device_id=None, order_by=None,
                       order_direction=None, chunk=None, details=None,
                       chunk_size=None):
    """folder/get_content

    http://www.mediafire.com/developers/core_api/1.3/folder/#get_content
    """
    # API parameter is 'filter'; the trailing underscore only avoids
    # shadowing the builtin.
    params = QueryParams(dict(
        folder_key=folder_key,
        content_type=content_type,
        filter=filter_,
        device_id=device_id,
        order_by=order_by,
        order_direction=order_direction,
        chunk=chunk,
        details=details,
        chunk_size=chunk_size,
    ))
    return self.request('folder/get_content', params)
folder/update
http://www.mediafire.com/developers/core_api/1.3/folder/#update
def folder_update(self, folder_key, foldername=None, description=None,
                  privacy=None, privacy_recursive=None, mtime=None):
    """folder/update

    http://www.mediafire.com/developers/core_api/1.3/folder/#update
    """
    params = QueryParams(dict(
        folder_key=folder_key,
        foldername=foldername,
        description=description,
        privacy=privacy,
        privacy_recursive=privacy_recursive,
        mtime=mtime,
    ))
    return self.request('folder/update', params)
folder/create
http://www.mediafire.com/developers/core_api/1.3/folder/#create
def folder_create(self, foldername=None, parent_key=None,
                  action_on_duplicate=None, mtime=None):
    """folder/create

    http://www.mediafire.com/developers/core_api/1.3/folder/#create
    """
    params = QueryParams(dict(
        foldername=foldername,
        parent_key=parent_key,
        action_on_duplicate=action_on_duplicate,
        mtime=mtime,
    ))
    return self.request('folder/create', params)
upload/check
http://www.mediafire.com/developers/core_api/1.3/upload/#check
def upload_check(self, filename=None, folder_key=None, filedrop_key=None,
                 size=None, hash_=None, path=None, resumable=None):
    """upload/check

    http://www.mediafire.com/developers/core_api/1.3/upload/#check
    """
    # API parameter is 'hash'; the trailing underscore only avoids
    # shadowing the builtin.
    params = QueryParams(dict(
        filename=filename,
        folder_key=folder_key,
        filedrop_key=filedrop_key,
        size=size,
        hash=hash_,
        path=path,
        resumable=resumable,
    ))
    return self.request('upload/check', params)
upload/simple
http://www.mediafire.com/developers/core_api/1.3/upload/#simple
def upload_simple(self, fd, filename, folder_key=None, path=None,
                  filedrop_key=None, action_on_duplicate=None,
                  mtime=None, file_size=None, file_hash=None):
    """upload/simple

    http://www.mediafire.com/developers/core_api/1.3/upload/#simple

    fd -- file-like object with the payload, sent as the raw request
    body; file metadata travels in the X-File* headers.
    """
    action = 'upload/simple'

    params = QueryParams({
        'folder_key': folder_key,
        'path': path,
        'filedrop_key': filedrop_key,
        'action_on_duplicate': action_on_duplicate,
        'mtime': mtime
    })

    headers = QueryParams({
        # NOTE(review): str(None) yields "None" when file_size is
        # omitted -- presumably QueryParams drops None-ish values;
        # confirm.
        'X-Filesize': str(file_size),
        'X-Filehash': file_hash,
        # Header value is bytes after .encode('utf-8').
        'X-Filename': filename.encode('utf-8')
    })

    upload_info = {
        "fd": fd,
    }

    # No "filename" key in upload_info, so request() sends fd as the
    # raw body (see request()'s upload branch), not multipart.
    return self.request(action, params, action_token_type="upload",
                        upload_info=upload_info, headers=headers)
upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
                     unit_size, quick_key=None, action_on_duplicate=None,
                     mtime=None, version_control=None, folder_key=None,
                     filedrop_key=None, path=None, previous_hash=None):
    """upload/resumable

    http://www.mediafire.com/developers/core_api/1.3/upload/#resumable

    fd -- file-like object positioned at the unit (chunk) to upload.
    Unit metadata travels in the x-* headers; the chunk payload is
    sent multipart with the filename "chunk" (see request()).
    """
    action = 'upload/resumable'

    headers = {
        'x-filesize': str(filesize),
        'x-filehash': filehash,
        'x-unit-hash': unit_hash,
        'x-unit-id': str(unit_id),
        'x-unit-size': str(unit_size)
    }

    params = QueryParams({
        'quick_key': quick_key,
        'action_on_duplicate': action_on_duplicate,
        'mtime': mtime,
        'version_control': version_control,
        'folder_key': folder_key,
        'filedrop_key': filedrop_key,
        'path': path,
        'previous_hash': previous_hash
    })

    # The "filename" key makes request() use the multipart encoder.
    upload_info = {
        "fd": fd,
        "filename": "chunk"
    }

    return self.request(action, params, action_token_type="upload",
                        upload_info=upload_info, headers=headers)
upload/instant
http://www.mediafire.com/developers/core_api/1.3/upload/#instant
def upload_instant(self, filename, size, hash_, quick_key=None,
                   folder_key=None, filedrop_key=None, path=None,
                   action_on_duplicate=None, mtime=None,
                   version_control=None, previous_hash=None):
    """upload/instant

    http://www.mediafire.com/developers/core_api/1.3/upload/#instant
    """
    params = QueryParams(dict(
        filename=filename,
        size=size,
        hash=hash_,
        quick_key=quick_key,
        folder_key=folder_key,
        filedrop_key=filedrop_key,
        path=path,
        action_on_duplicate=action_on_duplicate,
        mtime=mtime,
        version_control=version_control,
        previous_hash=previous_hash,
    ))
    return self.request('upload/instant', params)
file/update
http://www.mediafire.com/developers/core_api/1.3/file/#update
def file_update(self, quick_key, filename=None, description=None,
                mtime=None, privacy=None):
    """file/update

    http://www.mediafire.com/developers/core_api/1.3/file/#update
    """
    params = QueryParams(dict(
        quick_key=quick_key,
        filename=filename,
        description=description,
        mtime=mtime,
        privacy=privacy,
    ))
    return self.request('file/update', params)
file/update_file
http://www.mediafire.com/developers/core_api/1.3/file/#update_file
def file_update_file(self, quick_key, file_extension=None, filename=None,
                     description=None, mtime=None, privacy=None,
                     timezone=None):
    """file/update_file

    http://www.mediafire.com/developers/core_api/1.3/file/#update_file
    """
    # BUG FIX: this method previously posted to 'file/update', silently
    # invoking the wrong API method for file_update_file callers.
    return self.request('file/update_file', QueryParams({
        'quick_key': quick_key,
        'file_extension': file_extension,
        'filename': filename,
        'description': description,
        'mtime': mtime,
        'privacy': privacy,
        'timezone': timezone
    }))
file/zip
http://www.mediafire.com/developers/core_api/1.3/file/#zip
def file_zip(self, keys, confirm_download=None, meta_only=None):
    """file/zip

    http://www.mediafire.com/developers/core_api/1.3/file/#zip
    """
    params = QueryParams(dict(
        keys=keys,
        confirm_download=confirm_download,
        meta_only=meta_only,
    ))
    return self.request('file/zip', params)
Reset all of our stateful variables
def _reset(self):
'''Reset all of our stateful variables'''
self._socket = None
# The pending messages we have to send, and the current buffer we're
# sending
self._pending = deque()
self._out_buffer = ''
# Our read buffer
self._buffer = ''
# The identify response we last received from the server
self._identify_response = {}
# Our ready state
self.last_ready_sent = 0
self.ready = 0 |
Establish a connection
def connect(self, force=False):
    '''Establish a connection; returns True on success, False otherwise'''
    # Don't re-establish existing connections
    if not force and self.alive():
        return True
    self._reset()
    # Otherwise, try to connect
    with self._socket_lock:
        try:
            logger.info('Creating socket...')
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.settimeout(self._timeout)
            logger.info('Connecting to %s, %s', self.host, self.port)
            self._socket.connect((self.host, self.port))
            # Set our socket's blocking state to whatever ours is
            self._socket.setblocking(self._blocking)
            # Safely write our magic
            self._pending.append(constants.MAGIC_V2)
            while self.pending():
                self.flush()
            # And send our identify command
            self.identify(self._identify_options)
            while self.pending():
                self.flush()
            self._reconnnection_counter.success()
            # Wait until we've gotten a response to IDENTIFY, try to read
            # one. Also, only spend up to the provided timeout waiting to
            # establish the connection.
            limit = time.time() + self._timeout
            responses = self._read(1)
            while (not responses) and (time.time() < limit):
                responses = self._read(1)
            if not responses:
                raise ConnectionTimeoutException(
                    'Read identify response timed out (%ss)' % self._timeout)
            self.identified(responses[0])
            return True
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt raised during connect.
            logger.exception('Failed to connect')
            if self._socket:
                self._socket.close()
            self._reconnnection_counter.failed()
            self._reset()
            return False
Close our connection
def close(self):
    '''Close our connection'''
    try:
        # Drain anything still queued before tearing down; a dead
        # socket just means there is nothing left to drain.
        while self.pending():
            self.flush()
    except socket.error:
        pass
    with self._socket_lock:
        sock = self._socket
        try:
            if sock:
                sock.close()
        finally:
            self._reset()
Blockingly yield the socket
def socket(self, blocking=True):
    '''Yield the socket while holding its lock; yields nothing if busy'''
    if not self._socket_lock.acquire(blocking):
        # Lock unavailable in non-blocking mode: yield nothing.
        return
    try:
        yield self._socket
    finally:
        self._socket_lock.release()
Handle a response to our 'identify' command. Returns response
def identified(self, res):
    '''Handle a response to our 'identify' command. Returns response'''
    # If they support it, they should give us a JSON blob which we should
    # inspect.
    try:
        res.data = json.loads(res.data)
        self._identify_response = res.data
        logger.info('Got identify response: %s', res.data)
    except:
        # NOTE(review): bare except -- any failure here, not just a
        # JSON parse error, is treated as "no feature negotiation".
        logger.warn('Server does not support feature negotiation')
        self._identify_response = {}
    # Save our max ready count unless it's not provided
    self.max_rdy_count = self._identify_response.get(
        'max_rdy_count', self.max_rdy_count)
    if self._identify_options.get('tls_v1', False):
        # We asked for TLS; the server must agree or we bail out.
        if not self._identify_response.get('tls_v1', False):
            raise UnsupportedException(
                'NSQd instance does not support TLS')
        else:
            # Upgrade the existing socket in place.
            self._socket = TLSSocket.wrap_socket(self._socket)
    # Now is the appropriate time to send auth
    if self._identify_response.get('auth_required', False):
        if not self._auth_secret:
            raise UnsupportedException(
                'Auth required but not provided')
        else:
            self.auth(self._auth_secret)
            # If we're not talking over TLS, warn the user
            if not self._identify_response.get('tls_v1', False):
                logger.warn('Using AUTH without TLS')
    elif self._auth_secret:
        logger.warn('Authentication secret provided but not required')
    return res
Set whether or not this message is blocking
def setblocking(self, blocking):
    '''Set whether or not this connection's socket is blocking'''
    # Apply to the live socket (under lock), then remember the mode so
    # later reconnects restore it.
    for live_socket in self.socket():
        live_socket.setblocking(blocking)
    self._blocking = blocking
Flush some of the waiting messages, returns count written
def flush(self):
    '''Flush some of the waiting messages, returns count written'''
    # When profiling, we found that while there was some efficiency to be
    # gained elsewhere, the big performance hit is sending lots of small
    # messages at a time. In particular, consumers send many 'FIN' messages
    # which are very small indeed and the cost of dispatching so many system
    # calls is very high. Instead, we prefer to glom together many messages
    # into a single string to send at once.
    total = 0
    for sock in self.socket(blocking=False):
        # If there's nothing left in the out buffer, take whatever's in the
        # pending queue.
        #
        # When using SSL, if the socket throws 'SSL_WANT_WRITE', then the
        # subsequent send requests have to send the same buffer.
        pending = self._pending
        data = self._out_buffer or ''.join(
            pending.popleft() for _ in xrange(len(pending)))
        try:
            # Try to send as much of the first message as possible
            total = sock.send(data)
        except socket.error as exc:
            # Catch (errno, message)-type socket.errors
            if exc.args[0] not in self.WOULD_BLOCK_ERRS:
                raise
            # Would-block: keep the exact same buffer for the retry.
            self._out_buffer = data
        else:
            self._out_buffer = None
        finally:
            if total < len(data):
                # Save the rest of the message that could not be sent
                self._pending.appendleft(data[total:])
    return total
Send a command over the socket with length encoded
def send(self, command, message=None):
    '''Send a command over the socket with length encoded'''
    frame = command + constants.NL
    if message:
        frame += util.pack(message)
    if self._blocking:
        # Write synchronously while holding the socket lock.
        for sock in self.socket():
            sock.sendall(frame)
    else:
        # Queue for the next flush() pass.
        self._pending.append(frame)
Send an identification message
def identify(self, data):
    '''Send an identification message'''
    payload = json.dumps(data)
    return self.send(constants.IDENTIFY, payload)
Subscribe to a topic/channel
def sub(self, topic, channel):
    '''Subscribe to a topic/channel'''
    command = ' '.join((constants.SUB, topic, channel))
    return self.send(command)
Publish to a topic
def pub(self, topic, message):
    '''Publish to a topic'''
    return self.send(constants.PUB + ' ' + topic, message)
Publish multiple messages to a topic
def mpub(self, topic, *messages):
    '''Publish multiple messages to a topic'''
    command = ' '.join((constants.MPUB, topic))
    return self.send(command, messages)
Indicate that you're ready to receive
def rdy(self, count):
    '''Indicate that you're ready to receive'''
    # Track the count locally before telling the server.
    self.ready = count
    self.last_ready_sent = count
    return self.send('{} {}'.format(constants.RDY, count))
Re-queue a message
def req(self, message_id, timeout):
    '''Re-queue a message'''
    command = '{} {} {}'.format(constants.REQ, message_id, timeout)
    return self.send(command)
Return all the responses read
def _read(self, limit=1000):
    '''Return all the responses read (up to *limit*)'''
    # It's important to know that it may return no responses or multiple
    # responses. It depends on how the buffering works out. First, read from
    # the socket
    for sock in self.socket():
        if sock is None:
            # Race condition. Connection has been closed.
            return []
        try:
            packet = sock.recv(4096)
        except socket.timeout:
            # If the socket times out, return nothing
            return []
        except socket.error as exc:
            # Catch (errno, message)-type socket.errors
            if exc.args[0] in self.WOULD_BLOCK_ERRS:
                return []
            else:
                raise
        # Append our newly-read data to our buffer
        self._buffer += packet
    responses = []
    total = 0
    buf = self._buffer
    remaining = len(buf)
    # Frames are length-prefixed: a 4-byte big-endian size followed by
    # `size` bytes of payload.
    while limit and (remaining >= 4):
        size = struct.unpack('>l', buf[total:(total + 4)])[0]
        # Now check to see if there's enough left in the buffer to read
        # the message.
        if (remaining - 4) >= size:
            responses.append(Response.from_raw(
                self, buf[(total + 4):(total + size + 4)]))
            total += (size + 4)
            remaining -= (size + 4)
            limit -= 1
        else:
            break
    # Keep any incomplete trailing frame for the next read.
    self._buffer = self._buffer[total:]
    return responses
Responses from an established socket
def read(self):
    '''Responses from an established socket'''
    responses = self._read()
    # Each actual message consumes one unit of our ready count.
    consumed = sum(
        1 for res in responses if res.frame_type == Message.FRAME_TYPE)
    self.ready -= consumed
    return responses
Run the discovery mechanism
def discover(self, topic):
    '''Run the discovery mechanism; returns the newly made connections'''
    logger.info('Discovering on topic %s', topic)
    producers = []
    for lookupd in self._lookupd:
        logger.info('Discovering on %s', lookupd)
        try:
            # Find all the current producers on this instance
            for producer in lookupd.lookup(topic)['producers']:
                logger.info('Found producer %s on %s', producer, lookupd)
                producers.append(
                    (producer['broadcast_address'], producer['tcp_port']))
        except ClientException:
            # One failed lookupd should not stop discovery on the rest.
            logger.exception('Failed to query %s', lookupd)
    new = []
    for host, port in producers:
        conn = self._connections.get((host, port))
        if not conn:
            # Never seen before: open a fresh connection.
            logger.info('Discovered %s:%s', host, port)
            new.append(self.connect(host, port))
        elif not conn.alive():
            # Known but dead: try to revive it in non-blocking mode.
            logger.info('Reconnecting to %s:%s', host, port)
            if conn.connect():
                conn.setblocking(0)
                self.reconnected(conn)
        else:
            logger.debug('Connection to %s:%s still alive', host, port)
    # And return all the new connections
    return [conn for conn in new if conn]
Connect to all the appropriate instances
def check_connections(self):
    '''Connect to all the appropriate instances'''
    logger.info('Checking connections')
    if self._lookupd:
        self.discover(self._topic)
    # Make sure we're connected to all the prescribed hosts
    for hostspec in self._nsqd_tcp_addresses:
        logger.debug('Checking nsqd instance %s', hostspec)
        host, port = hostspec.split(':')
        port = int(port)
        conn = self._connections.get((host, port), None)
        # If there is no connection to it, we have to try to connect
        if not conn:
            logger.info('Connecting to %s:%s', host, port)
            self.connect(host, port)
        elif not conn.alive():
            # If we've connected to it before, but it's no longer alive,
            # we'll have to make a decision about when to try to reconnect
            # to it, if we need to reconnect to it at all
            if conn.ready_to_reconnect():
                logger.info('Reconnecting to %s:%s', host, port)
                if conn.connect():
                    conn.setblocking(0)
                    self.reconnected(conn)
        else:
            logger.debug('Checking freshness')
            # Treat a connection as stale when nothing has been received
            # for two heartbeat intervals (heartbeat_interval is in ms).
            now = time.time()
            time_check = math.ceil(now - self.last_recv_timestamp)
            if time_check >= ((self.heartbeat_interval * 2) / 1000.0):
                if conn.ready_to_reconnect():
                    logger.info('Reconnecting to %s:%s', host, port)
                    if conn.connect():
                        conn.setblocking(0)
                        self.reconnected(conn)
Run periodic reconnection checks
def connection_checker(self):
    '''Run periodic reconnection checks'''
    checker = ConnectionChecker(self)
    logger.info('Starting connection-checker thread')
    checker.start()
    try:
        yield checker
    finally:
        # Always stop and join the thread, even if the caller raised.
        logger.info('Stopping connection-checker')
        checker.stop()
        logger.info('Joining connection-checker')
        checker.join()
Connect to the provided host, port
def connect(self, host, port):
    '''Connect to the provided host, port'''
    conn = connection.Connection(
        host, port,
        reconnection_backoff=self._reconnection_backoff,
        auth_secret=self._auth_secret,
        timeout=self._connect_timeout,
        **self._identify_options)
    if not conn.alive():
        # Failed connections are still returned so callers can inspect.
        return conn
    conn.setblocking(0)
    self.add(conn)
    return conn
Add a connection
def add(self, connection):
    '''Add a connection; returns it if new, else None'''
    key = (connection.host, connection.port)
    with self._lock:
        if key in self._connections:
            # Already tracked -- signal the caller with None.
            return None
        self._connections[key] = connection
        self.added(connection)
        return connection
Remove a connection
def remove(self, connection):
    '''Remove a connection; returns it (or None if untracked)'''
    key = (connection.host, connection.port)
    with self._lock:
        found = self._connections.pop(key, None)
        try:
            # Best effort -- removal succeeds even if close fails.
            self.close_connection(found)
        except Exception as exc:
            logger.warn('Failed to close %s: %s', connection, exc)
        return found
Read from any of the connections that need it
def read(self):
    '''Read from any of the connections that need it'''
    # We'll check all living connections
    connections = [c for c in self.connections() if c.alive()]
    if not connections:
        # If there are no connections, obviously we return no messages, but
        # we should wait the duration of the timeout
        time.sleep(self._timeout)
        return []
    # Not all connections need to be written to, so we'll only concern
    # ourselves with those that require writes
    writes = [c for c in connections if c.pending()]
    try:
        readable, writable, exceptable = select.select(
            connections, writes, connections, self._timeout)
    except exceptions.ConnectionClosedException:
        logger.exception('Tried selecting on closed client')
        return []
    except select.error:
        logger.exception('Error running select')
        return []
    # If we returned because the timeout interval passed, log it and return
    if not (readable or writable or exceptable):
        logger.debug('Timed out...')
        return []
    responses = []
    # For each readable socket, we'll try to read some responses
    for conn in readable:
        try:
            for res in conn.read():
                # We'll capture heartbeats and respond to them automatically
                if (isinstance(res, Response) and res.data == HEARTBEAT):
                    logger.info('Sending heartbeat to %s', conn)
                    conn.nop()
                    logger.debug('Setting last_recv_timestamp')
                    self.last_recv_timestamp = time.time()
                    # Heartbeats are consumed here, not returned.
                    continue
                elif isinstance(res, Error):
                    # Only a handful of error types are survivable;
                    # anything else tears the connection down.
                    nonfatal = (
                        exceptions.FinFailedException,
                        exceptions.ReqFailedException,
                        exceptions.TouchFailedException
                    )
                    if not isinstance(res.exception(), nonfatal):
                        # If it's not any of the non-fatal exceptions, then
                        # we have to close this connection
                        logger.error(
                            'Closing %s: %s', conn, res.exception())
                        self.close_connection(conn)
                responses.append(res)
                logger.debug('Setting last_recv_timestamp')
                self.last_recv_timestamp = time.time()
        except exceptions.NSQException:
            logger.exception('Failed to read from %s', conn)
            self.close_connection(conn)
        except socket.error:
            logger.exception('Failed to read from %s', conn)
            self.close_connection(conn)
    # For each writable socket, flush some data out
    for conn in writable:
        try:
            conn.flush()
        except socket.error:
            logger.exception('Failed to flush %s', conn)
            self.close_connection(conn)
    # For each connection with an exception, try to close it and remove it
    # from our connections
    for conn in exceptable:
        self.close_connection(conn)
    return responses
Pick a random living connection
def random_connection(self):
    '''Pick a random living connection'''
    # While at the moment there's no need for this to be a context manager
    # per se, I would like to use that interface since I anticipate
    # adding some wrapping around it at some point.
    living = [conn for conn in self.connections() if conn.alive()]
    yield random.choice(living)
Wait for a response
def wait_response(self):
    '''Spin on read() until at least one response arrives'''
    while True:
        responses = self.read()
        if responses:
            return responses
Publish the provided message to the provided topic
def pub(self, topic, message):
    '''Publish the provided message to the provided topic'''
    # Send on one living connection, then wait for the acknowledgement.
    with self.random_connection() as client:
        client.pub(topic, message)
        return self.wait_response()
Publish messages to a topic
def mpub(self, topic, *messages):
    '''Publish messages to a topic'''
    # Send on one living connection, then wait for the acknowledgement.
    with self.random_connection() as client:
        client.mpub(topic, *messages)
        return self.wait_response()
Create a socket for the daemon, depending on the directory location.
Args:
config_dir (str): The absolute path to the config directory used by the daemon.
Returns:
socket.socket: The daemon socket. Clients connect to this socket.
def create_socket(self):
    """Create the unix domain socket the daemon listens on.

    The socket lives at `<config_dir>/pueue.sock`; a stale socket file
    from a previous run is removed first.  Exits the process with
    status 1 on failure.

    Returns:
        socket.socket: The daemon socket. Clients connect to this socket.
    """
    socket_path = os.path.join(self.config_dir, 'pueue.sock')

    # Create Socket and exit with 1, if socket can't be created
    try:
        if os.path.exists(socket_path):
            os.remove(socket_path)
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(socket_path)
        self.socket.setblocking(0)
        self.socket.listen(0)
        # Set file permissions: owner-only, the socket is a control channel
        os.chmod(socket_path, stat.S_IRWXU)
    except Exception:
        # BUG FIX: message previously read "Daemon couldn't socket."
        self.logger.error("Daemon couldn't create socket. Aborting")
        # NOTE(review): exception() is called without a message; stdlib
        # logging requires one -- presumably self.logger is pueue's own
        # wrapper. Confirm.
        self.logger.exception()
        sys.exit(1)
    return self.socket
Create all directories needed for logs and configs.
def initialize_directories(self, root_dir):
    """Create all directories needed for logs and configs.

    root_dir -- base directory; falls back to the user's home when falsy.
    Sets self.config_dir as a side effect.
    """
    if not root_dir:
        root_dir = os.path.expanduser('~')

    self.config_dir = os.path.join(root_dir, '.config/pueue')
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard.
    os.makedirs(self.config_dir, exist_ok=True)
Send an answer to the client.
def respond_client(self, answer, socket):
    """Pickle *answer*, send it to the client and drop the connection."""
    payload = pickle.dumps(answer, -1)
    socket.sendall(payload)
    # Once answered, this client socket is done: stop polling it and close.
    self.read_list.remove(socket)
    socket.close()
Read a previous configuration file or create a new with default values.
def read_config(self):
    """Read a previous configuration file or create a new one with default values."""
    config_file = os.path.join(self.config_dir, 'pueue.ini')
    self.config = configparser.ConfigParser()
    # Try to get the configuration file and return it.
    # If this doesn't work, a new default config file will be created.
    if os.path.exists(config_file):
        try:
            self.config.read(config_file)
            return
        except Exception:
            self.logger.error('Error while parsing config file. Deleting old config')
            self.logger.exception()
            # BUGFIX: a failed read() can leave partially parsed sections
            # behind; start from a clean parser before applying defaults.
            self.config = configparser.ConfigParser()
    # Default configuration (configparser stringifies the values on assignment).
    self.config['default'] = {
        'resumeAfterStart': False,
        'maxProcesses': 1,
        'customShell': 'default',
    }
    self.config['log'] = {
        'logTime': 60*60*24*14,
    }
    self.write_config()
Write the current configuration to the config file.
def write_config(self):
    """Persist the current configuration to the config file."""
    path = os.path.join(self.config_dir, 'pueue.ini')
    with open(path, 'w') as handle:
        self.config.write(handle)
The main function containing the loop for communication and process management.
This function is the heart of the daemon.
It is responsible for:
- Client communication
- Executing commands from clients
- Update the status of processes by polling the ProcessHandler.
- Logging
- Cleanup on exit
def main(self):
    """The main function containing the loop for communication and process management.

    This function is the heart of the daemon.
    It is responsible for:
    - Client communication
    - Executing commands from clients
    - Updating the status of processes by polling the ProcessHandler.
    - Logging
    - Cleanup on exit
    """
    try:
        while self.running:
            # Trigger the processing of finished processes by the ProcessHandler.
            # If there are finished processes we write the log to keep it up to date.
            if self.process_handler.check_finished():
                self.logger.write(self.queue)

            # A requested reset is only executed once every process has finished.
            if self.reset and self.process_handler.all_finished():
                # Rotate log and reset queue
                self.logger.rotate(self.queue)
                self.queue.reset()
                self.reset = False

            # Check if the ProcessHandler has any free slots to spawn a new process
            if not self.paused and not self.reset and self.running:
                self.process_handler.check_for_new()

            # This is the communication section of the daemon.
            # 1. Receive message from the client
            # 2. Check payload and call respective function with payload as parameter.
            # 3. Execute logic
            # 4. Return payload with response to client
            # The 1-second timeout doubles as the polling interval of this loop.
            readable, writable, failed = select.select(self.read_list, [], [], 1)
            for waiting_socket in readable:
                if waiting_socket is self.socket:
                    # Listening for clients to connect.
                    # Client sockets are added to readlist to be processed.
                    try:
                        client_socket, client_address = self.socket.accept()
                        self.read_list.append(client_socket)
                    except Exception:
                        self.logger.warning('Daemon rejected client')
                else:
                    # Trying to receive instruction from client socket
                    try:
                        instruction = waiting_socket.recv(1048576)
                    except (EOFError, OSError):
                        self.logger.warning('Client died while sending message, dropping received data.')
                        # Remove client socket
                        self.read_list.remove(waiting_socket)
                        waiting_socket.close()
                        instruction = None

                    # Check for valid instruction
                    if instruction is not None:
                        # Check if received data can be unpickled.
                        try:
                            payload = pickle.loads(instruction)
                        except EOFError:
                            # Instruction is ignored if it can't be unpickled
                            self.logger.error('Received message is incomplete, dropping received data.')
                            self.read_list.remove(waiting_socket)
                            waiting_socket.close()
                            # BUGFIX: previously a dummy payload ({'mode': ''})
                            # was dispatched here, which tried to respond on the
                            # socket that was just closed and removed, crashing
                            # the main loop. Skip to the next client instead.
                            continue

                        functions = {
                            'add': self.add,
                            'remove': self.remove,
                            'edit': self.edit_command,
                            'switch': self.switch,
                            'send': self.pipe_to_process,
                            'status': self.send_status,
                            'start': self.start,
                            'pause': self.pause,
                            'stash': self.stash,
                            'enqueue': self.enqueue,
                            'restart': self.restart,
                            'kill': self.kill_process,
                            'reset': self.reset_everything,
                            'clear': self.clear,
                            'config': self.set_config,
                            'STOPDAEMON': self.stop_daemon,
                        }

                        if payload['mode'] in functions:
                            self.logger.debug('Payload received:')
                            self.logger.debug(payload)
                            response = functions[payload['mode']](payload)

                            self.logger.debug('Sending payload:')
                            self.logger.debug(response)
                            try:
                                self.respond_client(response, waiting_socket)
                            except BrokenPipeError:
                                self.logger.warning('Client disconnected during message dispatching. Function successfully executed anyway.')
                                # respond_client failed before its own cleanup;
                                # remove and close the client socket here.
                                self.read_list.remove(waiting_socket)
                                waiting_socket.close()
                                instruction = None
                        else:
                            self.respond_client({'message': 'Unknown Command',
                                                 'status': 'error'}, waiting_socket)
    except Exception:
        self.logger.exception()

    # Wait for killed or stopped processes to finish (cleanup)
    self.process_handler.wait_for_finish()
    # Close socket, clean everything up and exit
    self.socket.close()
    cleanup(self.config_dir)
    sys.exit(0)
Kill current processes and initiate daemon shutdown.
The daemon will shut down after a last check on all killed processes.
def stop_daemon(self, payload=None):
    """Kill current processes and initiate daemon shutdown.

    The daemon will shut down after a last check on all killed processes.
    """
    self.process_handler.kill_all(signals['9'], True)
    self.running = False
    return {'message': 'Pueue daemon shutting down',
            'status': 'success'}
Update the current config depending on the payload and save it.
def set_config(self, payload):
    """Update the current config depending on the payload and save it.

    Args:
        payload (dict): Needs 'option' (config key) and 'value'.

    Returns:
        dict: Response with 'message' and 'status' for the client.
    """
    option = payload['option']
    value = payload['value']
    # BUGFIX: validate customShell *before* touching the config, so an
    # invalid path no longer leaves a bogus value in the in-memory config.
    if option == 'customShell':
        if value == 'default':
            self.process_handler.set_shell()
        elif os.path.isfile(value) and os.access(value, os.X_OK):
            self.process_handler.set_shell(value)
        else:
            return {'message': "File in path doesn't exist or is not executable.",
                    'status': 'error'}
    self.config['default'][option] = str(value)
    if option == 'maxProcesses':
        self.process_handler.set_max(value)
    self.write_config()
    return {'message': 'Configuration successfully updated.',
            'status': 'success'}
Send something to stdin of a specific process.
def pipe_to_process(self, payload):
    """Send something to stdin of a specific process."""
    key = payload['key']
    # A message can only be delivered to a currently running process.
    if not self.process_handler.is_running(key):
        return {'message': 'No running process for this key',
                'status': 'error'}
    self.process_handler.send_to_process(payload['input'], key)
    return {'message': 'Message sent',
            'status': 'success'}
Send the daemon status and the current queue for displaying.
def send_status(self, payload):
    """Send the daemon status and the current queue for displaying."""
    answer = {'status': 'paused' if self.paused else 'running'}
    if len(self.queue) > 0:
        # Work on a deep copy and strip captured output before transfer:
        # stdout/stderr can exceed the socket buffer and the client
        # doesn't need them for the status view.
        data = deepcopy(self.queue.queue)
        for entry in data.values():
            entry.pop('stderr', None)
            entry.pop('stdout', None)
    else:
        data = 'Queue is empty'
    answer['data'] = data
    return answer
Kill all processes, delete the queue and clean everything up.
def reset_everything(self, payload):
    """Kill all processes, delete the queue and clean everything up."""
    self.process_handler.kill_all(signals['9'], True)
    self.process_handler.wait_for_finish()
    # The actual queue reset happens in the main loop once everything stopped.
    self.reset = True
    return {'message': 'Resetting current queue', 'status': 'success'}
Clear queue from any `done` or `failed` entries.
The log will be rotated once. Otherwise we would loose all logs from
thoes finished processes.
def clear(self, payload):
    """Remove all `done` or `failed` entries from the queue.

    The log is rotated once beforehand; otherwise the logs of those
    finished processes would be lost.
    """
    self.logger.rotate(self.queue)
    self.queue.clear()
    self.logger.write(self.queue)
    return {'message': 'Finished entries have been removed.', 'status': 'success'}
Start the daemon and all processes or only specific processes.
def start(self, payload):
    """Start the daemon and all processes or only specific processes."""
    keys = payload.get('keys')
    # With explicit keys only those entries are started.
    if keys:
        started, rejected = [], []
        for key in keys:
            if self.process_handler.start_process(key):
                started.append(str(key))
            else:
                rejected.append(str(key))
        message = ''
        if started:
            message += 'Started processes: {}.'.format(', '.join(started))
            status = 'success'
        if rejected:
            message += '\nNo paused, queued or stashed process for keys: {}'.format(', '.join(rejected))
            status = 'error'
        answer = {'message': message.strip(), 'status': status}
    # Without keys, everything is started, including the daemon itself.
    else:
        self.process_handler.start_all()
        if self.paused:
            self.paused = False
            answer = {'message': 'Daemon and all processes started.',
                      'status': 'success'}
        else:
            answer = {'message': 'Daemon already running, starting all processes.',
                      'status': 'success'}
    return answer
Start the daemon and all processes or only specific processes.
def pause(self, payload):
    """Pause the daemon and all processes or only specific processes."""
    keys = payload.get('keys')
    # With explicit keys only those entries are paused.
    if keys:
        paused_keys, rejected = [], []
        for key in keys:
            if self.process_handler.pause_process(key):
                paused_keys.append(str(key))
            else:
                rejected.append(str(key))
        message = ''
        if paused_keys:
            message += 'Paused processes: {}.'.format(', '.join(paused_keys))
            status = 'success'
        if rejected:
            message += '\nNo running process for keys: {}'.format(', '.join(rejected))
            status = 'error'
        answer = {'message': message.strip(), 'status': status}
    # Without keys, the daemon (and possibly all processes) is paused.
    else:
        if payload.get('wait'):
            # Only stop accepting new work; running processes may finish.
            self.paused = True
            answer = {'message': 'Pausing daemon, but waiting for processes to finish.',
                      'status': 'success'}
        else:
            self.process_handler.pause_all()
            if not self.paused:
                self.paused = True
                answer = {'message': 'Daemon and all processes paused.',
                          'status': 'success'}
            else:
                answer = {'message': 'Daemon already paused, pausing all processes anyway.',
                          'status': 'success'}
    return answer
Edit the command of a specific entry.
def edit_command(self, payload):
    """Edit the command of a specific queue entry.

    Only entries that are still 'queued' or 'stashed' may be edited.

    Args:
        payload (dict): Needs 'key' (entry key) and 'command' (new command).

    Returns:
        dict: Response with 'message' and 'status' for the client.
    """
    key = payload['key']
    command = payload['command']
    if self.queue[key]:
        if self.queue[key]['status'] in ['queued', 'stashed']:
            self.queue[key]['command'] = command
            # BUGFIX: this is the success path; it previously reported
            # status 'error' despite the update having been applied.
            answer = {'message': 'Command updated', 'status': 'success'}
        else:
            answer = {'message': "Entry is not 'queued' or 'stashed'",
                      'status': 'error'}
    else:
        answer = {'message': 'No entry with this key', 'status': 'error'}
    return answer
Stash the specified processes.
def stash(self, payload):
    """Stash the specified processes."""
    stashed, rejected = [], []
    for key in payload['keys']:
        entry = self.queue.get(key)
        # Only entries that are still queued can be stashed.
        if entry is not None and entry['status'] == 'queued':
            self.queue[key]['status'] = 'stashed'
            stashed.append(str(key))
        else:
            rejected.append(str(key))
    message = ''
    if stashed:
        message += 'Stashed entries: {}.'.format(', '.join(stashed))
        status = 'success'
    if rejected:
        message += '\nNo queued entry for keys: {}'.format(', '.join(rejected))
        status = 'error'
    return {'message': message.strip(), 'status': status}
Pause the daemon and kill all processes or kill a specific process.
def kill_process(self, payload):
    """Pause the daemon and kill all processes or kill a specific process."""
    kill_signal = signals[payload['signal'].lower()]
    kill_shell = payload.get('all', False)
    keys = payload.get('keys')
    # With explicit keys only those processes are signalled.
    if keys:
        signalled, rejected = [], []
        for key in keys:
            if self.process_handler.kill_process(key, kill_signal, kill_shell):
                signalled.append(str(key))
            else:
                rejected.append(str(key))
        message = ''
        if signalled:
            message += "Signal '{}' sent to processes: {}.".format(payload['signal'], ', '.join(signalled))
            status = 'success'
        if rejected:
            message += '\nNo running process for keys: {}'.format(', '.join(rejected))
            status = 'error'
        answer = {'message': message.strip(), 'status': status}
    # Without keys every process (and possibly the daemon) is signalled.
    else:
        self.process_handler.kill_all(kill_signal, kill_shell)
        # A terminating signal also pauses the daemon so nothing new spawns.
        if kill_signal in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
            self.paused = True
        answer = {'message': 'Signal send to all processes.',
                  'status': 'success'}
    return answer
Remove specified entries from the queue.
def remove(self, payload):
    """Remove specified entries from the queue."""
    removed_keys, rejected = [], []
    for key in payload['keys']:
        # Running entries must be stopped before they can be removed.
        if self.process_handler.is_running(key):
            rejected.append(str(key))
        elif self.queue.remove(key):
            removed_keys.append(str(key))
        else:
            rejected.append(str(key))
    message = ''
    if removed_keys:
        message += 'Removed entries: {}.'.format(', '.join(removed_keys))
        status = 'success'
    if rejected:
        message += '\nRunning or non-existing entry for keys: {}'.format(', '.join(rejected))
        status = 'error'
    return {'message': message.strip(), 'status': status}
Switch the two specified entry positions in the queue.
def switch(self, payload):
    """Switch the two specified entry positions in the queue."""
    first = payload['first']
    second = payload['second']
    # Entries attached to a live process cannot be moved.
    if self.process_handler.is_running(first) or self.process_handler.is_running(second):
        return {
            'message': "Can't switch running processes, "
                       "please stop the processes before switching them.",
            'status': 'error'
        }
    if self.queue.switch(first, second):
        return {
            'message': 'Entries #{} and #{} switched'.format(first, second),
            'status': 'success'
        }
    return {'message': "One or both entries do not exist or are not queued/stashed.",
            'status': 'error'}
Restart the specified entries.
def restart(self, payload):
    """Restart the specified entries."""
    restarted, rejected = [], []
    for key in payload['keys']:
        if self.queue.restart(key):
            restarted.append(str(key))
        else:
            rejected.append(str(key))
    message = ''
    if restarted:
        message += 'Restarted entries: {}.'.format(', '.join(restarted))
        status = 'success'
    if rejected:
        message += '\nNo finished entry for keys: {}'.format(', '.join(rejected))
        status = 'error'
    return {'message': message.strip(), 'status': status}
Same as socket.sendall
def sendall(self, data, flags=0):
    '''Same as socket.sendall'''
    remaining = len(data)
    while remaining:
        # send() may write only part of the buffer; retry with the rest.
        written = self.send(data, flags)
        data = data[written:]
        remaining -= written
List directory
def do_ls(client, args):
    """List directory"""
    for entry in client.get_folder_contents_iter(args.uri):
        # privacy flag: '@' marks public items
        entry['pf'] = '@' if entry['privacy'] == 'public' else '-'
        if isinstance(entry, Folder):
            # type flag: 'd' for directories
            entry['tf'] = 'd'
            entry['key'] = entry['folderkey']
            entry['size'] = ''
        else:
            entry['tf'] = '-'
            entry['key'] = entry['quickkey']
            entry['name'] = entry['filename']
        print("{tf}{pf} {key:>15} {size:>10} {created} {name}".format(**entry))
    return True
Upload files
def do_file_upload(client, args):
    """Upload files"""
    # Sanity check: with several sources the destination must be a directory.
    if len(args.paths) > 1:
        try:
            resource = client.get_resource_by_uri(args.dest_uri)
        except ResourceNotFoundError:
            resource = None
        if resource and not isinstance(resource, Folder):
            print("file-upload: "
                  "target '{}' is not a directory".format(args.dest_uri))
            return None
    with client.upload_session():
        for src_path in args.paths:
            print("Uploading {} to {}".format(src_path, args.dest_uri))
            result = client.upload_file(src_path, args.dest_uri)
            print("Uploaded {}, result={}".format(src_path, result))
    return True
Download file
def do_file_download(client, args):
    """Download file"""
    dest = args.dest_path
    # Sanity check: the destination must be an existing directory or
    # end with '/' (which requests creation of one).
    if not os.path.isdir(dest) and not dest.endswith('/'):
        print("file-download: "
              "target '{}' is not a directory".format(dest))
        if not os.path.exists(dest):
            print("\tHint: add trailing / to create one")
        return None
    for src_uri in args.uris:
        print("Downloading {} to {}".format(src_uri, dest))
        client.download_file(src_uri, dest)
        print("Downloaded {}".format(src_uri))
    return True
Output file contents to stdout
def do_file_show(client, args):
    """Output file contents to stdout"""
    # Write raw bytes to the underlying buffer to avoid text re-encoding.
    for uri in args.uris:
        client.download_file(uri, sys.stdout.buffer)
    return True
Create directory
def do_folder_create(client, args):
    """Create directory"""
    # Create intermediate folders as needed (recursive).
    for uri in args.uris:
        client.create_folder(uri, recursive=True)
    return True
Remove resource
def do_resource_delete(client, args):
    """Remove resource"""
    for uri in args.uris:
        # --purge skips the trash and deletes permanently.
        client.delete_resource(uri, purge=args.purge)
        print("Deleted {}".format(uri))
    return True
Update file metadata
def do_file_update_metadata(client, args):
    """Update file metadata"""
    client.update_file_metadata(
        args.uri,
        filename=args.filename,
        description=args.description,
        mtime=args.mtime,
        privacy=args.privacy,
    )
    return True
Update file metadata
def do_folder_update_metadata(client, args):
    """Update folder metadata"""
    client.update_folder_metadata(
        args.uri,
        foldername=args.foldername,
        description=args.description,
        mtime=args.mtime,
        privacy=args.privacy,
        privacy_recursive=args.recursive,
    )
    return True
Main entry point
def main():  # pylint: disable=too-many-statements
    """Main entry point.

    Builds the argparse CLI (one subcommand per do_* handler), optionally
    logs in with credentials taken from --email/--password or the
    MEDIAFIRE_EMAIL/MEDIAFIRE_PASSWORD environment variables, then routes
    the chosen action to its handler. Exits with status 1 when the handler
    returns a falsy result or the action is unknown.
    """
    parser = argparse.ArgumentParser(prog='mediafire-cli',
                                     description=__doc__)
    parser.add_argument('--debug', dest='debug', action='store_true',
                        default=False, help='Enable debug output')
    # Credentials default to environment variables so they can be kept
    # out of the shell history.
    parser.add_argument('--email', dest='email', required=False,
                        default=os.environ.get('MEDIAFIRE_EMAIL', None))
    parser.add_argument('--password', dest='password', required=False,
                        default=os.environ.get('MEDIAFIRE_PASSWORD', None))
    actions = parser.add_subparsers(title='Actions', dest='action')
    # Workaround: subparsers are optional by default on some Python versions.
    # http://bugs.python.org/issue9253#msg186387
    actions.required = True
    # ls
    subparser = actions.add_parser('ls',
                                   help=do_ls.__doc__)
    subparser.add_argument('uri', nargs='?',
                           help='MediaFire URI',
                           default='mf:///')
    # file-upload
    subparser = actions.add_parser('file-upload',
                                   help=do_file_upload.__doc__)
    subparser.add_argument('paths', nargs='+',
                           help='Path[s] to upload')
    subparser.add_argument('dest_uri', help='Destination MediaFire URI')
    # file-download
    subparser = actions.add_parser('file-download',
                                   help=do_file_download.__doc__)
    subparser.add_argument('uris', nargs='+',
                           help='MediaFire File URI[s] to download')
    subparser.add_argument('dest_path', help='Destination path')
    # file-show
    subparser = actions.add_parser('file-show',
                                   help=do_file_show.__doc__)
    subparser.add_argument('uris', nargs='+',
                           help='MediaFire File URI[s] to print out')
    # folder-create
    subparser = actions.add_parser('folder-create',
                                   help=do_folder_create.__doc__)
    subparser.add_argument('uris', nargs='+',
                           help='MediaFire folder path URI[s]')
    # resource-delete
    subparser = actions.add_parser('resource-delete',
                                   help=do_resource_delete.__doc__)
    subparser.add_argument('uris', nargs='+',
                           help='MediaFire resource URI[s]')
    subparser.add_argument('--purge', help="Purge, don't send to trash",
                           dest="purge", action="store_true", default=False)
    # file-update-metadata
    subparser = actions.add_parser('file-update-metadata',
                                   help=do_file_update_metadata.__doc__)
    subparser.add_argument('uri', help='MediaFire file URI')
    subparser.add_argument('--filename', help='Set file name',
                           default=None, dest='filename')
    subparser.add_argument('--privacy', help='Set file privacy',
                           choices=['public', 'private'],
                           default=None, dest='privacy')
    subparser.add_argument('--description',
                           help='Set file description',
                           dest='description', default=None)
    subparser.add_argument('--mtime', help="Set file modification time",
                           dest='mtime', default=None)
    # folder-update-metadata
    subparser = actions.add_parser('folder-update-metadata',
                                   help=do_folder_update_metadata.__doc__)
    subparser.add_argument('uri', help='MediaFire folder URI')
    subparser.add_argument('--foldername', help='Set folder name',
                           default=None, dest='foldername')
    subparser.add_argument('--privacy', help='Set folder privacy',
                           choices=['public', 'private'],
                           default=None, dest='privacy')
    subparser.add_argument('--recursive', help='Set privacy recursively',
                           action='store_true', default=None,
                           dest='recursive')
    subparser.add_argument('--description',
                           help='Set folder description',
                           dest='description', default=None)
    subparser.add_argument('--mtime', help='Set folder mtime',
                           default=None, dest='mtime')
    # debug-get-resource
    subparser = actions.add_parser('debug-get-resource',
                                   help=do_debug_get_resource.__doc__)
    subparser.add_argument('uri', help='MediaFire resource URI',
                           default='mediafire:/', nargs='?')
    args = parser.parse_args()
    if args.debug:
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        logging.getLogger("mediafire.client").setLevel(logging.DEBUG)
    client = MediaFireClient()
    # Anonymous use is allowed; login only when both credentials are given.
    if args.email and args.password:
        client.login(args.email, args.password, app_id=APP_ID)
    # Dispatch table: maps the subcommand name to its handler.
    router = {
        "file-upload": do_file_upload,
        "file-download": do_file_download,
        "file-show": do_file_show,
        "ls": do_ls,
        "folder-create": do_folder_create,
        "resource-delete": do_resource_delete,
        "file-update-metadata": do_file_update_metadata,
        "folder-update-metadata": do_folder_update_metadata,
        "debug-get-resource": do_debug_get_resource
    }
    if args.action in router:
        result = router[args.action](client, args)
        # Handlers return True on success, None/False on failure.
        if not result:
            sys.exit(1)
    else:
        print('Unsupported action: {}'.format(args.action))
        sys.exit(1)
Publish a message to a topic
def pub(self, topic, message):
    """Publish a single message to a topic."""
    params = {'topic': topic}
    return self.post('pub', params=params, data=message)
Send multiple messages to a topic. Optionally pack the messages
def mpub(self, topic, messages, binary=True):
    '''Send multiple messages to a topic. Optionally pack the messages.'''
    if binary:
        # Pack and ship the data; strip the leading 4-byte size prefix,
        # the HTTP endpoint gets the body length from the request itself.
        return self.post('mpub', data=pack(messages)[4:],
                         params={'topic': topic, 'binary': True})
    elif any('\n' in m for m in messages):
        # Newlines delimit messages in the text format, so messages
        # containing them must use the binary calling format.
        raise ClientException(
            'Use `binary` flag in mpub for messages with newlines')
    else:
        # BUGFIX: endpoint was '/mpub', inconsistent with the binary branch
        # (and with pub), which use the bare 'mpub' path.
        return self.post(
            'mpub', params={'topic': topic}, data='\n'.join(messages))
Stats with topics and channels keyed on topic and channel names
def clean_stats(self):
    '''Stats with topics and channels keyed on topic and channel names'''
    stats = self.stats()
    if 'topics' in stats:  # pragma: no branch
        # Re-key the topic list on topic_name (popped from each entry).
        by_topic = {t.pop('topic_name'): t for t in stats['topics']}
        for info in by_topic.values():
            if 'channels' in info:  # pragma: no branch
                # Likewise re-key channels on channel_name.
                info['channels'] = {
                    c.pop('channel_name'): c for c in info['channels']}
        stats['topics'] = by_topic
    return stats
Add a new command to the daemon queue.
Args:
args['command'] (list(str)): The actual programm call. Something like ['ls', '-a'] or ['ls -al']
root_dir (string): The path to the root directory the daemon is running in.
def execute_add(args, root_dir=None):
    """Add a new command to the daemon queue.

    Args:
        args['command'] (list(str)): The actual program call, e.g. ['ls', '-a'] or ['ls -al'].
        root_dir (str): The path to the root directory the daemon is running in.
    """
    # Argparse hands us a list of strings for a nicer command line
    # experience; join it into a single shell command.
    payload = {
        'command': ' '.join(args['command']),
        'path': os.getcwd(),
    }
    # Send the new instruction to the daemon.
    print_command_factory('add')(payload, root_dir)
Edit a existing queue command in the daemon.
Args:
args['key'] int: The key of the queue entry to be edited
root_dir (string): The path to the root directory the daemon is running in.
def execute_edit(args, root_dir=None):
    """Edit an existing queue command in the daemon.

    Args:
        args['key'] (int): The key of the queue entry to be edited.
        root_dir (str): The path to the root directory the daemon is running in.
    """
    editor = os.environ.get('EDITOR', 'vim')
    key = args['key']
    # Fetch the current queue from the daemon.
    status = command_factory('status')({}, root_dir=root_dir)
    data = status['data']
    # The queue must be non-empty and contain an editable entry.
    if isinstance(data, str) or key not in data:
        print('No entry with this key')
        sys.exit(1)
    if data[key]['status'] not in ['queued', 'stashed']:
        print("Entry is not 'queued' or 'stashed'")
        sys.exit(1)
    command = data[key]['command']
    # Let the user edit the command in a temporary file.
    with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
        tf.write(command.encode('utf-8'))
        tf.flush()
        call([editor, tf.name])
        # Read back whatever the editor wrote.
        tf.seek(0)
        edited_command = tf.read().decode('utf-8')
    print_command_factory('edit')({
        'key': key,
        'command': edited_command,
    }, root_dir=root_dir)
A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
command (string): The type of payload this should be. This determines
as what kind of instruction this will be interpreted by the daemon.
Returns:
function: The created function.
def command_factory(command):
    """A factory which returns functions for direct daemon communication.

    This factory will create a function which sends a payload to the daemon
    and returns the unpickled object which is returned by the daemon.

    Args:
        command (string): The type of payload this should be. This determines
            as what kind of instruction this will be interpreted by the daemon.

    Returns:
        function: The created function.
    """
    def communicate(body=None, root_dir=None):
        """Communicate with the daemon.

        This function sends a payload to the daemon and returns the unpickled
        object sent by the daemon.

        Args:
            body (dict): Any other arguments that should be put into the payload.
            root_dir (str): The root directory in which we expect the daemon.
                We need this to connect to the daemon's socket.

        Returns:
            The unpickled response payload.
        """
        # BUGFIX: `body={}` was a mutable default that this function mutates
        # below; the dict would be shared (and grow) across calls.
        if body is None:
            body = {}
        client = connect_socket(root_dir)
        body['mode'] = command
        # Delete the func entry we use to call the correct function with argparse
        # as functions can't be pickled and this shouldn't be sent to the daemon.
        if 'func' in body:
            del body['func']
        data_string = pickle.dumps(body, -1)
        client.send(data_string)
        # Receive message, unpickle and return it
        response = receive_data(client)
        return response
    return communicate
Create file descriptors for process output.
def get_descriptor(self, number):
    """Create stdout/stderr capture files for a process and return their handles."""
    entry = {}
    for stream in ('stdout', 'stderr'):
        path = os.path.join(self.config_dir,
                            'pueue_process_{}.{}'.format(number, stream))
        # Start from a fresh file for every spawn.
        if os.path.exists(path):
            os.remove(path)
        entry[stream] = open(path, 'w+')
        entry[stream + '_path'] = path
    self.descriptors[number] = entry
    return entry['stdout'], entry['stderr']
Close file descriptor and remove underlying files.
def clean_descriptor(self, number):
    """Close a process's output handles and remove the backing files."""
    entry = self.descriptors[number]
    entry['stdout'].close()
    entry['stderr'].close()
    for path_key in ('stdout_path', 'stderr_path'):
        if os.path.exists(entry[path_key]):
            os.remove(entry[path_key])
Poll all processes and handle any finished processes.
def check_finished(self):
    """Poll all processes and handle any finished processes.

    Returns:
        bool: True when at least one entry finished normally (i.e. the
            queue changed and the caller should write the log).
    """
    changed = False
    # Snapshot the keys: finished entries are deleted from self.processes below.
    for key in list(self.processes.keys()):
        # Poll the process and check if it finished.
        process = self.processes[key]
        process.poll()
        if process.returncode is not None:
            # If a process is terminated by `stop` or `kill`
            # we want to queue it again instead closing it as failed.
            if key not in self.stopping:
                # communicate() reaps the child and closes its pipes; its
                # return value is immediately replaced below by the real
                # output read from the descriptor files — presumably because
                # stdout/stderr were redirected to those files at spawn time
                # (see get_descriptor), so communicate() itself captures
                # nothing. TODO confirm.
                output, error_output = process.communicate()
                descriptor = self.descriptors[key]
                # Rewind the capture files so the whole output can be read.
                descriptor['stdout'].seek(0)
                descriptor['stderr'].seek(0)
                output = get_descriptor_output(descriptor['stdout'], key, handler=self)
                error_output = get_descriptor_output(descriptor['stderr'], key, handler=self)

                # Mark queue entry as finished and save returncode
                self.queue[key]['returncode'] = process.returncode
                if process.returncode != 0:
                    self.queue[key]['status'] = 'failed'
                else:
                    self.queue[key]['status'] = 'done'

                # Add outputs to queue
                self.queue[key]['stdout'] = output
                self.queue[key]['stderr'] = error_output
                self.queue[key]['end'] = str(datetime.now().strftime("%H:%M"))
                self.queue.write()
                changed = True
            else:
                # Deliberately stopped/killed entry: don't mark it failed.
                self.stopping.remove(key)
                if key in self.to_remove:
                    # Entry was stopped to be removed entirely.
                    self.to_remove.remove(key)
                    del self.queue[key]
                else:
                    # Re-queue (or stash) the entry instead.
                    if key in self.to_stash:
                        self.to_stash.remove(key)
                        self.queue[key]['status'] = 'stashed'
                    else:
                        self.queue[key]['status'] = 'queued'
                    self.queue[key]['start'] = ''
                    self.queue[key]['end'] = ''
                self.queue.write()
            self.clean_descriptor(key)
            del self.processes[key]
    # If anything should be logged we return True
    return changed
Check if we can start a new process.
def check_for_new(self):
    """Spawn new processes for queued entries while slots are free."""
    for _ in range(self.max_processes - len(self.processes)):
        key = self.queue.next()
        # next() returns None when nothing is queued.
        if key is not None:
            self.spawn_new(key)
Spawn a new task and save it to the queue.
def spawn_new(self, key):
    """Spawn a new task and save it to the queue.

    The entry's command is started as a subprocess with its stdout/stderr
    redirected to per-process capture files. If the entry's working
    directory no longer exists, the entry is marked `failed` instead.
    """
    # Check if the entry's working directory still exists.
    if not os.path.exists(self.queue[key]['path']):
        self.queue[key]['status'] = 'failed'
        error_msg = "The directory for this command doesn't exist anymore: {}".format(self.queue[key]['path'])
        self.logger.error(error_msg)
        self.queue[key]['stdout'] = ''
        self.queue[key]['stderr'] = error_msg
    else:
        # Get file descriptors for the capture files (see get_descriptor).
        stdout, stderr = self.get_descriptor(key)
        if self.custom_shell != 'default':
            # A custom shell is configured: run it interactively (-i) with
            # the command passed via -c.
            self.processes[key] = subprocess.Popen(
                [
                    self.custom_shell,
                    '-i',
                    '-c',
                    self.queue[key]['command'],
                ],
                stdout=stdout,
                stderr=stderr,
                stdin=subprocess.PIPE,
                universal_newlines=True,
                # setsid puts the child into its own session/process group,
                # matching the os.killpg() calls used elsewhere to signal it.
                preexec_fn=os.setsid,
                cwd=self.queue[key]['path']
            )
        else:
            # Default shell: let Popen run the command string via the
            # system shell.
            self.processes[key] = subprocess.Popen(
                self.queue[key]['command'],
                shell=True,
                stdout=stdout,
                stderr=stderr,
                stdin=subprocess.PIPE,
                universal_newlines=True,
                preexec_fn=os.setsid,
                cwd=self.queue[key]['path']
            )
        self.queue[key]['status'] = 'running'
        self.queue[key]['start'] = str(datetime.now().strftime("%H:%M"))
        self.queue.write()
Kill all running processes.
def kill_all(self, kill_signal, kill_shell=False):
    """Send `kill_signal` to every running process.

    Args:
        kill_signal: The signal to deliver to each process.
        kill_shell (bool): Forwarded to `kill_process`; whether the
            wrapping shell should be signalled as well.
    """
    # Iterate over a snapshot: kill_process may indirectly mutate
    # self.processes, which would break iteration over the live view
    # (check_finished takes the same precaution).
    for key in list(self.processes.keys()):
        self.kill_process(key, kill_signal, kill_shell)
Start a specific processes.
def start_process(self, key):
    """Resume a paused process or spawn a queued/stashed entry.

    Returns:
        bool: True when something was resumed or spawned, False otherwise.
    """
    if key in self.processes:
        # Existing process: resume it only if it is currently paused.
        if key in self.paused:
            os.killpg(os.getpgid(self.processes[key].pid), signal.SIGCONT)
            self.queue[key]['status'] = 'running'
            self.paused.remove(key)
            return True
        return False
    # No process yet: spawn the entry if it is waiting to run.
    if self.queue[key]['status'] in ['queued', 'stashed']:
        self.spawn_new(key)
        return True
    return False
Pause a specific processes.
def pause_process(self, key):
    """SIGSTOP a running, not-yet-paused process.

    Returns:
        bool: True when the process was paused, False otherwise.
    """
    if key not in self.processes or key in self.paused:
        return False
    # Stop the whole process group (the child runs in its own session).
    os.killpg(os.getpgid(self.processes[key].pid), signal.SIGSTOP)
    self.queue[key]['status'] = 'paused'
    self.paused.append(key)
    return True
Create a closure which creates a running daemon.
We need to create a closure that contains the correct path the daemon should
be started with. This is needed as the `Daemonize` library
requires a callable function for daemonization and doesn't accept any arguments.
This function cleans up sockets and output files in case we encounter any exceptions.
def daemon_factory(path):
    """Create a closure which creates a running daemon.

    We need to create a closure that contains the correct path the daemon
    should be started with. This is needed as the `Daemonize` library
    requires a callable function for daemonization and doesn't accept any
    arguments.

    This function cleans up sockets and output files in case we encounter
    any exceptions.

    Args:
        path (str): Root directory the daemon should run in.

    Returns:
        callable: Zero-argument function that starts the daemon.
    """
    def start_daemon():
        root_dir = path
        config_dir = os.path.join(root_dir, '.config/pueue')
        # Pre-bind so the handlers below never hit an unbound name when
        # Daemon(...) itself raises before the assignment completes.
        daemon = None
        try:
            daemon = Daemon(root_dir=root_dir)
            daemon.main()
        except KeyboardInterrupt:
            print('Keyboard interrupt. Shutting down')
            if daemon is not None:
                daemon.stop_daemon()
        except Exception:
            if daemon is not None:
                # Best-effort shutdown; the original error is re-raised.
                try:
                    daemon.stop_daemon()
                except Exception:
                    pass
            cleanup(config_dir)
            raise
    return start_daemon
Execute entry function.
def main():
    """Execute entry function.

    Parses CLI arguments, resolves the root directory, and dispatches to
    the daemon (foreground or daemonized), a stop command, or whatever
    subcommand handler argparse attached as `args.func`.
    """
    args = parser.parse_args()
    args_dict = vars(args)
    # dict.get keeps the original semantics ("root" absent -> None) while
    # avoiding the mixed Namespace-membership / dict-lookup idiom.
    root_dir = args_dict.get('root')

    # If a root directory is specified, get the absolute path and
    # check if it exists. Abort if it doesn't exist!
    if root_dir:
        root_dir = os.path.abspath(root_dir)
        if not os.path.exists(root_dir):
            print("The specified directory doesn't exist!")
            sys.exit(1)
    # Default to home directory if no root is specified
    else:
        root_dir = os.path.expanduser('~')

    if args.stopdaemon:
        print_command_factory('STOPDAEMON')(vars(args), root_dir)
    elif args.nodaemon:
        # Run the daemon in the foreground (useful for debugging).
        daemon_factory(root_dir)()
    elif args.daemon:
        config_dir = os.path.join(root_dir, '.config/pueue')
        os.makedirs(config_dir, exist_ok=True)
        daemon = Daemonize(app='pueue', pid=os.path.join(config_dir, 'pueue.pid'),
                           action=daemon_factory(root_dir), chdir=root_dir)
        daemon.start()
    elif hasattr(args, 'func'):
        try:
            args.func(args_dict, root_dir)
        except EOFError:
            # The daemon closed the socket mid-conversation.
            print('Apparently the daemon just died. Sorry for that :/')
    else:
        print('Invalid Command. Please check -h')
Register a pdb handler for signal 'signum'.
The handler sets pdb to listen on the ('host', 'port') internet address
and to start a remote debugging session on accepting a socket connection.
def register(host=DFLT_ADDRESS[0], port=DFLT_ADDRESS[1],
             signum=signal.SIGUSR1):
    """Register a pdb handler for signal 'signum'.

    The handler sets pdb to listen on the ('host', 'port') internet address
    and to start a remote debugging session on accepting a socket connection.

    Args:
        host: Interface to listen on; defaults to DFLT_ADDRESS[0].
        port: TCP port to listen on; defaults to DFLT_ADDRESS[1].
        signum: Signal that triggers the handler (default SIGUSR1).
    """
    # Thin wrapper: all the work happens in the C-extension/private module.
    _pdbhandler._register(host, port, signum)
Return the handler as a named tuple.
The named tuple attributes are 'host', 'port', 'signum'.
Return None when no handler has been registered.
def get_handler():
    """Return the handler as a named tuple.

    The named tuple attributes are 'host', 'port', 'signum'.
    Return None when no handler has been registered.
    """
    host, port, signum = _pdbhandler._registered()
    if not signum:
        # No handler registered.
        return None
    # Fall back to the defaults for any unset field.
    return Handler(host or DFLT_ADDRESS[0].encode(),
                   port or DFLT_ADDRESS[1],
                   signum)
Wait for the provided time to elapse
def wait(self, timeout):
    '''Block on the internal event for up to `timeout` seconds, see file.wait'''
    logger.debug('Waiting for %fs', timeout)
    signalled = self._event.wait(timeout)
    return signalled
How long to wait before the next check
def delay(self):
    '''How long to wait before the next check'''
    # Never checked before: wait the full interval.
    if not self._last_checked:
        return self._interval
    elapsed = time.time() - self._last_checked
    return self._interval - elapsed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.