def add_tags(self, tags):
    """
    Add tags to a server. Accepts tags as strings or Tag objects.
    """
    if self.cloud_manager.assign_tags(self.uuid, tags):
        tags = self.tags + [str(tag) for tag in tags]
        object.__setattr__(self, 'tags', tags)
def remove_tags(self, tags):
    """
    Remove tags from a server. Accepts tags as strings or Tag objects.
    """
    if self.cloud_manager.remove_tags(self, tags):
        new_tags = [tag for tag in self.tags if tag not in tags]
        object.__setattr__(self, 'tags', new_tags)
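A minimal usage sketch for the two tag helpers above, assuming the upcloud-api client's public interface; the credentials and UUID are placeholders.

import upcloud_api

manager = upcloud_api.CloudManager('username', 'password')
server = manager.get_server('00000000-0000-0000-0000-000000000000')
server.add_tags(['production', 'web'])  # tags may also be Tag objects
server.remove_tags(['web'])
print(server.tags)                      # ['production', ...]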
def configure_firewall(self, firewall_rules):
    """
    Helper function for automatically adding several FirewallRules in series.
    """
    # note: the parameter and loop variable are lowercased so they do not
    # shadow the FirewallRule class
    firewall_rule_bodies = [
        firewall_rule.to_dict() for firewall_rule in firewall_rules
    ]
    return self.cloud_manager.configure_firewall(self, firewall_rule_bodies)
def prepare_post_body(self):
    """
    Prepare a JSON serializable dict from a Server instance with nested
    Storage instances.
    """
    body = dict()

    # mandatory fields
    body['server'] = {
        'hostname': self.hostname,
        'zone': self.zone,
        'title': self.title,
        'storage_devices': {}
    }

    # optional fields
    for optional_field in self.optional_fields:
        if hasattr(self, optional_field):
            body['server'][optional_field] = getattr(self, optional_field)

    # set password_delivery default as 'none' to prevent the API from sending
    # emails (with credentials) about each created server
    if not hasattr(self, 'password_delivery'):
        body['server']['password_delivery'] = 'none'

    # collect storage devices and create a unique title for each of them
    # (see: Storage.title in the API doc)
    body['server']['storage_devices'] = {'storage_device': []}
    storage_title_id = 0  # running number for unique storage titles
    for storage in self.storage_devices:
        if not hasattr(storage, 'os') or storage.os is None:
            storage_title_id += 1
        storage_body = storage.to_dict()

        # set up default titles for storages unless the user has specified
        # them at storage.title
        if not hasattr(storage, 'title') or not storage.title:
            if hasattr(storage, 'os') and storage.os:
                storage_body['title'] = self.hostname + ' OS disk'
            else:
                storage_body['title'] = (self.hostname + ' storage disk ' +
                                         str(storage_title_id))

        # figure out the storage `action` parameter:
        # public template
        if hasattr(storage, 'os') and storage.os:
            storage_body['action'] = 'clone'
            storage_body['storage'] = OperatingSystems.get_OS_UUID(storage.os)
        # private template
        elif hasattr(storage, 'uuid'):
            storage_body['action'] = 'clone'
            storage_body['storage'] = storage.uuid
        # create a new storage
        else:
            storage_body['action'] = 'create'

        body['server']['storage_devices']['storage_device'].append(storage_body)

    if hasattr(self, 'ip_addresses') and self.ip_addresses:
        body['server']['ip_addresses'] = {
            'ip_address': [ip.to_dict() for ip in self.ip_addresses]
        }

    return body
def to_dict(self):
    """
    Prepare a JSON serializable dict for read-only purposes.

    Includes storages and IP-addresses.
    Use prepare_post_body for POST and .save() for PUT.
    """
    fields = dict(vars(self).items())

    if self.populated:
        fields['ip_addresses'] = []
        fields['storage_devices'] = []

        for ip in self.ip_addresses:
            fields['ip_addresses'].append({
                'address': ip.address,
                'access': ip.access,
                'family': ip.family
            })

        for storage in self.storage_devices:
            fields['storage_devices'].append({
                'address': storage.address,
                'storage': storage.uuid,
                'storage_size': storage.size,
                'storage_title': storage.title,
                'type': storage.type,
            })

    del fields['populated']
    del fields['cloud_manager']
    return fields
def get_ip(self, access='public', addr_family=None, strict=None):
    """
    Return the server's IP address.

    Params:
    - addr_family: 'IPv4', 'IPv6' or None. None prefers IPv4 but will
      return an IPv6 address if no IPv4 address is available.
    - access: 'public' or 'private'
    """
    if addr_family not in ['IPv4', 'IPv6', None]:
        raise Exception("`addr_family` must be 'IPv4', 'IPv6' or None")
    if access not in ['private', 'public']:
        raise Exception("`access` must be 'public' or 'private'")

    if not hasattr(self, 'ip_addresses'):
        self.populate()

    # a server can have several public or private IPs
    ip_addrs = [
        ip_addr for ip_addr in self.ip_addresses
        if ip_addr.access == access
    ]

    # prefer addr_family (or IPv4 if none given)
    preferred_family = addr_family if addr_family else 'IPv4'
    for ip_addr in ip_addrs:
        if ip_addr.family == preferred_family:
            return ip_addr.address

    # if addr_family is None, any IP (of the right access) will do
    return ip_addrs[0].address if ip_addrs and not addr_family else None
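How get_ip might be called in practice; a sketch using the server object from the earlier example.

ip = server.get_ip()                           # public IPv4, if available
ip_v6 = server.get_ip(addr_family='IPv6')      # public IPv6, or None
private_ip = server.get_ip(access='private')   # private IPv4, if available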
def get_public_ip(self, addr_family=None, *args, **kwargs):
    """Alias for get_ip('public')."""
    return self.get_ip('public', addr_family, *args, **kwargs)
def get_private_ip(self, addr_family=None, *args, **kwargs):
    """Alias for get_ip('private')."""
    return self.get_ip('private', addr_family, *args, **kwargs)
def _wait_for_state_change(self, target_states, update_interval=10):
    """
    Blocking wait until one of target_states is reached.
    update_interval is in seconds.

    Warning: the state change must begin before calling this method.
    """
    while self.state not in target_states:
        if self.state == 'error':
            raise Exception('server is in error state')
        # poll the server state every update_interval seconds
        sleep(update_interval)
        self.populate()
def ensure_started(self):
    """
    Start a server and wait (blocking) until it is fully started.
    """
    # server is either starting or stopping (or in error)
    if self.state in ['maintenance', 'error']:
        self._wait_for_state_change(['stopped', 'started'])

    if self.state == 'stopped':
        self.start()
        self._wait_for_state_change(['started'])

    if self.state == 'started':
        return True
    else:
        # something went wrong, fail explicitly
        raise Exception('unknown server state: ' + self.state)
def stop_and_destroy(self, sync=True):
    """
    Destroy a server and its storages. Stops the server before destroying.

    Syncs the server state from the API; use sync=False to disable.
    """
    def _self_destruct():
        """Destroy the server and all storages attached to it."""
        # The try_it_n_times util is used as a convenience because
        # Servers and Storages can fluctuate between "maintenance" and their
        # original state for several different reasons, especially when
        # destroying infrastructure.

        # first destroy the server
        try_it_n_times(operation=self.destroy,
                       expected_error_codes=['SERVER_STATE_ILLEGAL'],
                       custom_error='destroying server failed')

        # storages may be deleted instantly after the server DELETE
        for storage in self.storage_devices:
            try_it_n_times(operation=storage.destroy,
                           expected_error_codes=['STORAGE_STATE_ILLEGAL'],
                           custom_error='destroying storage failed')

    if sync:
        self.populate()

    # server is either starting or stopping (or in error)
    if self.state in ['maintenance', 'error']:
        self._wait_for_state_change(['stopped', 'started'])

    if self.state == 'started':
        try_it_n_times(operation=self.stop,
                       expected_error_codes=['SERVER_STATE_ILLEGAL'],
                       custom_error='stopping server failed')
        self._wait_for_state_change(['stopped'])

    if self.state == 'stopped':
        _self_destruct()
    else:
        raise Exception('unknown server state: ' + self.state)
def revert(self):
    """Revert the state to the version stored on disc."""
    if self.filepath:
        if path.isfile(self.filepath):
            serialised_file = open(self.filepath, "r")
            try:
                self.state = json.load(serialised_file)
            except ValueError:
                print("No JSON information could be read from the "
                      "persistence file - it could be empty: %s"
                      % self.filepath)
                self.state = {}
            finally:
                serialised_file.close()
        else:
            print("The persistence file has not yet been created or does "
                  "not exist, so the state cannot be read from it yet.")
    else:
        print("Filepath to the persistence file is not set. "
              "State cannot be read.")
        return False
def sync(self):
    """Synchronise and update the stored state to the in-memory state."""
    if self.filepath:
        serialised_file = open(self.filepath, "w")
        json.dump(self.state, serialised_file)
        serialised_file.close()
    else:
        print("Filepath to the persistence file is not set. "
              "State cannot be synced to disc.")
def _reset(self, **kwargs):
    """
    Reset after repopulating from the API (or when initializing).
    """
    # set object attributes from params
    for key in kwargs:
        setattr(self, key, kwargs[key])

    # set defaults (where needed) when the default is not None
    for attr in self.ATTRIBUTES:
        if not hasattr(self, attr) and self.ATTRIBUTES[attr] is not None:
            setattr(self, attr, self.ATTRIBUTES[attr])
def to_dict(self):
    """
    Return a dict that can be serialised to JSON and sent to UpCloud's API.
    """
    return dict(
        (attr, getattr(self, attr))
        for attr in self.ATTRIBUTES if hasattr(self, attr)
    )
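A sketch of the ATTRIBUTES contract that _reset and to_dict rely on; the Resource base class name is hypothetical and stands for whatever class defines those two methods.

class IPAddress(Resource):
    ATTRIBUTES = {'access': 'public', 'family': 'IPv4', 'address': None}

ip = IPAddress()              # assumes Resource.__init__ calls _reset(**kwargs)
ip._reset(address='10.0.0.1')
print(ip.to_dict())
# {'access': 'public', 'family': 'IPv4', 'address': '10.0.0.1'}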
def _require_bucket(self, bucket_name):
    """
    Return the named bucket, trying to create it if it does not exist.
    """
    if not self.exists(bucket_name) and not self.claim_bucket(bucket_name):
        raise OFSException("Invalid bucket: %s" % bucket_name)
    return self._get_bucket(bucket_name)
def del_stream(self, bucket, label):
    """
    Delete a stream. Will fail if the bucket or label don't exist.
    """
    bucket = self._require_bucket(bucket)
    key = self._require_key(bucket, label)
    key.delete()
def authenticate_request(self, method, bucket='', key='', headers=None):
    '''Authenticate an HTTP request by filling in the Authorization header.

    :param method: HTTP method (e.g. GET, PUT, POST)
    :param bucket: name of the bucket.
    :param key: name of key within bucket.
    :param headers: dictionary of additional HTTP headers.

    :return: boto.connection.HTTPRequest object with the Authorization
        header filled (NB: it will also gain a Date field if it had none,
        and the User-Agent field will be set to Boto).
    '''
    # The following is extracted from S3Connection.make_request and the
    # method it calls: AWSAuthConnection.make_request
    path = self.conn.calling_format.build_path_base(bucket, key)
    auth_path = self.conn.calling_format.build_auth_path(bucket, key)
    http_request = boto.connection.AWSAuthConnection.build_base_http_request(
        self.conn, method, path, auth_path, {}, headers
    )
    http_request.authorize(connection=self.conn)
    return http_request
def get_resources_to_check(client_site_url, apikey):
    """Return a list of resource IDs to check for broken links.

    Calls the client site's API to get a list of resource IDs.

    :raises CouldNotGetResourceIDsError: if getting the resource IDs fails
        for any reason
    """
    url = client_site_url + u"deadoralive/get_resources_to_check"
    response = requests.get(url, headers=dict(Authorization=apikey))
    if not response.ok:
        raise CouldNotGetResourceIDsError(
            u"Couldn't get resource IDs to check: {code} {reason}".format(
                code=response.status_code, reason=response.reason))
    return response.json()
def get_url_for_id(client_site_url, apikey, resource_id):
    """Return the URL for the given resource ID.

    Contacts the client site's API to get the URL for the ID and returns it.

    :raises CouldNotGetURLError: if getting the URL fails for any reason
    """
    # TODO: Handle invalid responses from the client site.
    url = client_site_url + u"deadoralive/get_url_for_resource_id"
    params = {"resource_id": resource_id}
    response = requests.get(url, headers=dict(Authorization=apikey),
                            params=params)
    if not response.ok:
        raise CouldNotGetURLError(
            u"Couldn't get URL for resource {id}: {code} {reason}".format(
                id=resource_id, code=response.status_code,
                reason=response.reason))
    return response.json()
def check_url(url):
    """Check whether the given URL is dead or alive.

    Returns a dict with four keys:

    "url": The URL that was checked (string)
    "alive": Whether the URL was working, True or False
    "status": The HTTP status code of the response from the URL,
        e.g. 200, 401, 500 (int)
    "reason": The reason for the success or failure of the check,
        e.g. "OK", "Unauthorized", "Internal Server Error" (string)

    The "status" may be None if we did not get a valid HTTP response, e.g.
    in the event of a timeout, DNS failure or invalid HTTP response.

    The "reason" will always be a string, but may be a requests library
    exception string rather than an HTTP reason string if we did not get a
    valid HTTP response.
    """
    result = {"url": url}
    try:
        response = requests.get(url)
        result["status"] = response.status_code
        result["reason"] = response.reason
        response.raise_for_status()  # Raise if status_code is not OK.
        result["alive"] = True
    except AttributeError as err:
        if err.message == "'NoneType' object has no attribute 'encode'":
            # requests seems to throw these for some invalid URLs.
            result["alive"] = False
            result["reason"] = "Invalid URL"
            result["status"] = None
        else:
            raise
    except requests.exceptions.RequestException as err:
        result["alive"] = False
        if "reason" not in result:
            result["reason"] = str(err)
        if "status" not in result:
            # This can happen if the response is invalid HTTP, if we get a
            # DNS failure, or a timeout, etc.
            result["status"] = None

    # We should always have these four fields in the result.
    assert "url" in result
    assert result.get("alive") in (True, False)
    assert "status" in result
    assert "reason" in result

    return result
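check_url in action; the URL is a placeholder and the output shape matches the contract described in the docstring.

result = check_url("http://example.com/dataset.csv")
if result["alive"]:
    print("OK: {0} {1}".format(result["status"], result["reason"]))
else:
    print("broken: {0}".format(result["reason"]))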
def upsert_result(client_site_url, apikey, resource_id, result):
    """Post the given link check result to the client site."""
    # TODO: Handle exceptions and unexpected results.
    url = client_site_url + u"deadoralive/upsert"
    params = result.copy()
    params["resource_id"] = resource_id
    requests.post(url, headers=dict(Authorization=apikey), params=params)
def get_check_and_report(client_site_url, apikey, get_resource_ids_to_check,
                         get_url_for_id, check_url, upsert_result):
    """Get links from the client site, check them, and post the results back.

    Get resource IDs from the client site, get the URL for each resource ID
    from the client site, check each URL, and post the results back to the
    client site.

    This function can be called repeatedly to keep on getting more links
    from the client site and checking them.

    The functions that this function calls to carry out the various tasks
    are taken as parameters to this function for testing purposes - it makes
    it easy for tests to pass in mock functions. It also decouples the code
    nicely. A wiring example follows this function.

    :param client_site_url: the base URL of the client site
    :type client_site_url: string

    :param apikey: the API key to use when making requests to the client site
    :type apikey: string or None

    :param get_resource_ids_to_check: The function to call to get the list
        of resource IDs to be checked from the client site. See
        get_resources_to_check() above for the interface that this function
        should implement.
    :type get_resource_ids_to_check: callable

    :param get_url_for_id: The function to call to get the URL for a given
        resource ID from the client site. See get_url_for_id() above for the
        interface that this function should implement.
    :type get_url_for_id: callable

    :param check_url: The function to call to check whether a URL is dead or
        alive. See check_url() above for the interface that this function
        should implement.
    :type check_url: callable

    :param upsert_result: The function to call to post a link check result
        to the client site. See upsert_result() above for the interface that
        this function should implement.
    :type upsert_result: callable
    """
    logger = _get_logger()
    resource_ids = get_resource_ids_to_check(client_site_url, apikey)
    for resource_id in resource_ids:
        try:
            url = get_url_for_id(client_site_url, apikey, resource_id)
        except CouldNotGetURLError:
            logger.info(u"This link checker was not authorized to access "
                        "resource {0}, skipping.".format(resource_id))
            continue
        result = check_url(url)
        status = result["status"]
        reason = result["reason"]
        if result["alive"]:
            logger.info(u"Checking URL {0} of resource {1} succeeded with "
                        "status {2}:".format(url, resource_id, status))
        else:
            logger.info(u"Checking URL {0} of resource {1} failed with error "
                        "{2}:".format(url, resource_id, reason))
        upsert_result(client_site_url, apikey, resource_id=resource_id,
                      result=result)
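Wiring get_check_and_report together with the module's own helpers might look like this; the client site URL and API key are placeholders.

get_check_and_report(
    client_site_url="http://demo.ckan.org/",
    apikey="my-api-key",
    get_resource_ids_to_check=get_resources_to_check,
    get_url_for_id=get_url_for_id,
    check_url=check_url,
    upsert_result=upsert_result,
)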
def peek(self, n=1):
    """Return buffered bytes without advancing the position."""
    if n > len(self._readbuffer) - self._offset:
        chunk = self.read(n)
        self._offset -= len(chunk)

    # Return up to 512 bytes to reduce allocation overhead for tight loops.
    return self._readbuffer[self._offset: self._offset + 512]
def read(self, n=-1):
    """Read and return up to n bytes.

    If the argument is omitted, None, or negative, data is read and
    returned until EOF is reached.
    """
    buf = b''
    while n < 0 or n is None or n > len(buf):
        data = self.read1(n)
        if len(data) == 0:
            return buf
        buf += data
    return buf
def _RealGetContents(self):
    """Read in the table of contents for the ZIP file."""
    fp = self.fp
    endrec = _EndRecData(fp)
    if not endrec:
        raise BadZipfile("File is not a zip file")
    if self.debug > 1:
        print(endrec)
    size_cd = endrec[_ECD_SIZE]            # bytes in central directory
    offset_cd = endrec[_ECD_OFFSET]        # offset of central directory
    self.comment = endrec[_ECD_COMMENT]    # archive comment

    # "concat" is zero, unless zip was concatenated to another file
    concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
    if endrec[_ECD_SIGNATURE] == stringEndArchive64:
        # If Zip64 extension structures are present, account for them
        concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

    if self.debug > 2:
        inferred = concat + offset_cd
        print("given, inferred, offset", offset_cd, inferred, concat)
    # self.start_dir: Position of start of central directory
    self.start_dir = offset_cd + concat
    fp.seek(self.start_dir, 0)
    data = fp.read(size_cd)
    fp = cStringIO.StringIO(data)
    total = 0
    while total < size_cd:
        centdir = fp.read(sizeCentralDir)
        if centdir[0:4] != stringCentralDir:
            raise BadZipfile("Bad magic number for central directory")
        centdir = struct.unpack(structCentralDir, centdir)
        if self.debug > 2:
            print(centdir)
        filename = fp.read(centdir[_CD_FILENAME_LENGTH])
        # Create ZipInfo instance to store file information
        x = ZipInfo(filename)
        x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
        x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
        x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
        (x.create_version, x.create_system, x.extract_version, x.reserved,
         x.flag_bits, x.compress_type, t, d,
         x.CRC, x.compress_size, x.file_size) = centdir[1:12]
        x.volume, x.internal_attr, x.external_attr = centdir[15:18]
        # Convert date/time code to (year, month, day, hour, min, sec)
        x._raw_time = t
        x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
                       t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)

        x._decodeExtra()
        x.header_offset = x.header_offset + concat
        x.filename = x._decodeFilename()
        self.filelist.append(x)
        self.NameToInfo[x.filename] = x

        # update total bytes read from central directory
        total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                 + centdir[_CD_EXTRA_FIELD_LENGTH]
                 + centdir[_CD_COMMENT_LENGTH])

    if self.debug > 2:
        print("total", total)
def open(self, name, mode="r", pwd=None):
    """Return a file-like object for 'name'."""
    if mode not in ("r", "U", "rU"):
        raise RuntimeError('open() requires mode "r", "U", or "rU"')
    if not self.fp:
        raise RuntimeError(
            "Attempt to read ZIP archive that was already closed")

    # Only open a new file for instances where we were not
    # given a file object in the constructor
    if self._filePassed:
        zef_file = self.fp
    else:
        zef_file = open(self.filename, 'rb')

    # Make sure we have an info object
    if isinstance(name, ZipInfo):
        # 'name' is already an info object
        zinfo = name
    else:
        # Get info object for name
        zinfo = self.getinfo(name)

    zef_file.seek(zinfo.header_offset, 0)

    # Skip the file header:
    fheader = zef_file.read(sizeFileHeader)
    if fheader[0:4] != stringFileHeader:
        raise BadZipfile("Bad magic number for file header")

    fheader = struct.unpack(structFileHeader, fheader)
    fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
    if fheader[_FH_EXTRA_FIELD_LENGTH]:
        zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

    if fname != zinfo.orig_filename.encode('utf-8'):
        raise BadZipfile(
            'File name in directory "%s" and header "%s" differ.' % (
                zinfo.orig_filename, fname))

    # check for encrypted flag & handle password
    is_encrypted = zinfo.flag_bits & 0x1
    zd = None
    if is_encrypted:
        if not pwd:
            pwd = self.pwd
        if not pwd:
            raise RuntimeError("File %s is encrypted, "
                               "password required for extraction" % name)

        zd = _ZipDecrypter(pwd)
        # The first 12 bytes in the cypher stream is an encryption header
        # used to strengthen the algorithm. The first 11 bytes are
        # completely random, while the 12th contains the MSB of the CRC,
        # or the MSB of the file time depending on the header type
        # and is used to check the correctness of the password.
        bytes = zef_file.read(12)
        h = map(zd, bytes[0:12])
        if zinfo.flag_bits & 0x8:
            # compare against the file type from extended local headers
            check_byte = (zinfo._raw_time >> 8) & 0xff
        else:
            # compare against the CRC otherwise
            check_byte = (zinfo.CRC >> 24) & 0xff
        if ord(h[11]) != check_byte:
            raise RuntimeError("Bad password for file", name)

    return ZipExtFile(zef_file, mode, zinfo, zd)
def remove(self, member):
    """Remove a member from the archive."""
    # Make sure we have an info object
    if isinstance(member, ZipInfo):
        # 'member' is already an info object
        zinfo = member
    else:
        # Get info object for name
        zinfo = self.getinfo(member)

    # compute the location of the file data in the local file header,
    # by adding the lengths of the records before it
    zlen = len(zinfo.FileHeader()) + zinfo.compress_size
    fileidx = self.filelist.index(zinfo)
    fileofs = sum(
        [len(self.filelist[f].FileHeader()) + self.filelist[f].compress_size
         for f in xrange(0, fileidx)]
    )

    self.fp.seek(fileofs + zlen)
    after = self.fp.read()
    self.fp.seek(fileofs)
    self.fp.write(after)
    self.fp.seek(-zlen, 2)
    self.fp.truncate()
    self._didModify = True
    self.filelist.remove(zinfo)
    del self.NameToInfo[member]
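remove() is not part of the standard library's zipfile module, so this sketch assumes the patched ZipFile class above; the archive and member names are placeholders.

zf = ZipFile('archive.zip', 'a')
zf.remove('obsolete/member.txt')  # shifts later data down and truncates
zf.close()                        # close() rewrites the central directory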
def _get_codename(self, pathname, basename):
    """Return (filename, archivename) for the path.

    Given a module name path, return the correct file path and archive
    name, compiling if necessary. For example, given /python/lib/string,
    return (/python/lib/string.pyc, string).
    """
    file_py = pathname + ".py"
    file_pyc = pathname + ".pyc"
    file_pyo = pathname + ".pyo"
    if os.path.isfile(file_pyo) and \
            os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
        fname = file_pyo    # Use .pyo file
    elif not os.path.isfile(file_pyc) or \
            os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
        import py_compile
        if self.debug:
            print("Compiling", file_py)
        try:
            py_compile.compile(file_py, file_pyc, None, True)
        except py_compile.PyCompileError as err:
            print(err.msg)
        fname = file_pyc
    else:
        fname = file_pyc
    archivename = os.path.split(fname)[1]
    if basename:
        archivename = "%s/%s" % (basename, archivename)
    return (fname, archivename)
def import_class(class_path):
    '''Import and return the class for the given dotted class path.'''
    module_name, class_name = class_path.rsplit(".", 1)
    module = import_module(module_name)
    claz = getattr(module, class_name)
    return claz
def _executor(self):
    '''
    Return the executor instance.

    Creating an executor pool is a costly operation, so the executor needs
    to be instantiated only once.
    '''
    if self.EXECUTE_PARALLEL is False:
        executor_path = "batch_requests.concurrent.executor.SequentialExecutor"
        executor_class = import_class(executor_path)
        return executor_class()
    else:
        executor_path = self.CONCURRENT_EXECUTOR
        executor_class = import_class(executor_path)
        return executor_class(self.NUM_WORKERS)
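import_class resolving a dotted path at runtime; the example uses a standard-library class so it runs anywhere.

from collections import OrderedDict

cls = import_class('collections.OrderedDict')
assert cls is OrderedDict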
def make_label(self, path):
    """Create an empty key (label) at the given path if none exists.

    This borrows too much from the internals of ofs; maybe expose
    different parts of the API?
    """
    from datetime import datetime
    from StringIO import StringIO
    path = path.lstrip("/")
    bucket, label = path.split("/", 1)
    bucket = self.ofs._require_bucket(bucket)
    key = self.ofs._get_key(bucket, label)
    if key is None:
        key = bucket.new_key(label)
        self.ofs._update_key_metadata(key, {
            '_creation_time': str(datetime.utcnow())
        })
        key.set_contents_from_file(StringIO(''))
        key.close()
def get_proxy_config(self, headers, path):
    """
    Stub. This really needs to be a call to the remote RESTful interface
    to get the appropriate host and headers to use for this upload.
    """
    self.ofs.conn.add_aws_auth_header(headers, 'PUT', path)
    from pprint import pprint
    pprint(headers)
    host = self.ofs.conn.server_name()
    return host, headers
def proxy_upload(self, path, filename, content_type=None,
                 content_encoding=None, cb=None, num_cb=None):
    """
    This is the main function that uploads. We assume the bucket and key
    (== path) exist. What we do here is simple: calculate the headers we
    will need (e.g. md5, content-type, etc.), then ask the
    self.get_proxy_config method to fill in the authentication information
    and tell us which remote host we should talk to for the upload. From
    there, the rest is ripped from boto.key.Key.send_file.
    """
    from boto.connection import AWSAuthConnection
    import mimetypes
    from hashlib import md5
    import base64

    BufferSize = 65536  # set to something very small to make sure
                        # chunking is working properly

    fp = open(filename)

    headers = {'Content-Type': content_type}
    if content_type is None:
        content_type = mimetypes.guess_type(filename)[0] or "text/plain"
        headers['Content-Type'] = content_type
    if content_encoding is not None:
        headers['Content-Encoding'] = content_encoding

    m = md5()
    fp.seek(0)
    s = fp.read(BufferSize)
    while s:
        m.update(s)
        s = fp.read(BufferSize)
    self.size = fp.tell()
    fp.seek(0)

    self.md5 = m.hexdigest()
    headers['Content-MD5'] = base64.encodestring(m.digest()).rstrip('\n')
    headers['Content-Length'] = str(self.size)
    headers['Expect'] = '100-Continue'

    host, headers = self.get_proxy_config(headers, path)

    # how to do this same thing with curl instead...
    print("curl -i --trace-ascii foo.log -T %s -H %s https://%s%s" % (
        filename,
        " -H ".join("'%s: %s'" % (k, v) for k, v in headers.items()),
        host, path))

    def sender(http_conn, method, path, data, headers):
        http_conn.putrequest(method, path)
        for key in headers:
            http_conn.putheader(key, headers[key])
        http_conn.endheaders()
        fp.seek(0)
        http_conn.set_debuglevel(0)  # XXX set to e.g. 4 to see what's going on
        if cb:
            if num_cb > 2:
                cb_count = self.size / BufferSize / (num_cb - 2)
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = total_bytes = 0
            cb(total_bytes, self.size)
        l = fp.read(BufferSize)
        while len(l) > 0:
            http_conn.send(l)
            if cb:
                total_bytes += len(l)
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(total_bytes, self.size)
                    i = 0
            l = fp.read(BufferSize)
        if cb:
            cb(total_bytes, self.size)
        response = http_conn.getresponse()
        body = response.read()
        fp.seek(0)
        if response.status == 500 or response.status == 503 or \
                response.getheader('location'):
            # we'll try again
            return response
        elif response.status >= 200 and response.status <= 299:
            self.etag = response.getheader('etag')
            if self.etag != '"%s"' % self.md5:
                raise Exception('ETag from S3 did not match computed MD5')
            return response
        else:
            #raise provider.storage_response_error(
            #    response.status, response.reason, body)
            raise Exception(response.status, response.reason, body)

    awsc = AWSAuthConnection(host, aws_access_key_id="key_id",
                             aws_secret_access_key="secret")
    awsc._mexe('PUT', path, None, headers, sender=sender)
def fetch_all_mood_stations(self, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches all mood stations.

    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#moodstations`.
    '''
    url = 'https://api.kkbox.com/v1.1/mood-stations'
    url += '?' + url_parse.urlencode({'territory': terr})
    return self.http._post_data(url, None,
                                self.http._headers_with_access_token())
def fetch_mood_station(self, station_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches a mood station by given ID.

    :param station_id: the station ID
    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#moodstations-station_id`.
    '''
    url = 'https://api.kkbox.com/v1.1/mood-stations/%s' % station_id
    url += '?' + url_parse.urlencode({'territory': terr})
    return self.http._post_data(url, None,
                                self.http._headers_with_access_token())
def fetch_next_page(self, data):
    '''
    Fetches the next page based on previously fetched data.

    Gets the next page URL from data['paging']['next'].

    :param data: previously fetched API response.
    :type data: dict
    :return: API response, or None if there is no next page.
    :rtype: dict
    '''
    next_url = data['paging']['next']
    if next_url is not None:
        next_data = self.http._post_data(
            next_url, None, self.http._headers_with_access_token())
        return next_data
    else:
        return None
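A sketch of paging through an entire result set with fetch_next_page; `api` stands for whichever fetcher object exposes these methods, and `handle` is a hypothetical consumer.

data = api.fetch_all_mood_stations()
while data is not None:
    handle(data)                     # process the current page
    data = api.fetch_next_page(data)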
def fetch_data(self, url):
    '''
    Fetches data from the specified URL.

    :return: The response.
    :rtype: dict
    '''
    return self.http._post_data(url, None,
                                self.http._headers_with_access_token())
def fetch_shared_playlist(self, playlist_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetches a shared playlist by given ID.

    :param playlist_id: the playlist ID.
    :type playlist_id: str
    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#sharedplaylists-playlist_id`.
    '''
    url = 'https://api.kkbox.com/v1.1/shared-playlists/%s' % playlist_id
    url += '?' + url_parse.urlencode({'territory': terr})
    return self.http._post_data(url, None,
                                self.http._headers_with_access_token())
def get_firewall_rule(self, server_uuid, firewall_rule_position,
                      server_instance=None):
    """
    Return a FirewallRule object based on server uuid and rule position.
    """
    url = '/server/{0}/firewall_rule/{1}'.format(server_uuid,
                                                 firewall_rule_position)
    res = self.get_request(url)
    return FirewallRule(**res['firewall_rule'])
def get_firewall_rules(self, server):
    """
    Return all FirewallRule objects based on a server instance or uuid.
    """
    server_uuid, server_instance = uuid_and_instance(server)

    url = '/server/{0}/firewall_rule'.format(server_uuid)
    res = self.get_request(url)

    return [
        FirewallRule(server=server_instance, **firewall_rule)
        for firewall_rule in res['firewall_rules']['firewall_rule']
    ]
def create_firewall_rule(self, server, firewall_rule_body):
    """
    Create a new firewall rule for a given server uuid.

    The rule can be given as a dict or with FirewallRule.prepare_post_body().
    Returns a FirewallRule object.
    """
    server_uuid, server_instance = uuid_and_instance(server)

    url = '/server/{0}/firewall_rule'.format(server_uuid)
    body = {'firewall_rule': firewall_rule_body}
    res = self.post_request(url, body)

    return FirewallRule(server=server_instance, **res['firewall_rule'])
def delete_firewall_rule(self, server_uuid, firewall_rule_position):
    """
    Delete a firewall rule based on a server uuid and rule position.
    """
    url = '/server/{0}/firewall_rule/{1}'.format(server_uuid,
                                                 firewall_rule_position)
    return self.request('DELETE', url)
def configure_firewall(self, server, firewall_rule_bodies):
    """
    Helper for calling create_firewall_rule in series for a list of
    firewall_rule_bodies.
    """
    server_uuid, server_instance = uuid_and_instance(server)

    return [
        self.create_firewall_rule(server_uuid, rule)
        for rule in firewall_rule_bodies
    ]
def post(self, data):
    """
    POSTs a raw SMTP message to the Sinkhole API.

    :param data: raw content to be submitted [STRING]
    :return: { list of predictions }
    """
    uri = '{}/sinkhole'.format(self.client.remote)
    self.logger.debug(uri)

    if PYVERSION == 2:
        try:
            data = data.decode('utf-8')
        except Exception:
            data = data.decode('latin-1')

    data = {'message': data}
    body = self.client.post(uri, data)
    return body
def pre_process_method_headers(method, headers):
    '''
    Return the lowered method and the transformed headers: headers are
    upper-cased, - is changed to _, and HTTP_ is prepended to any header
    that is not a standard WSGI variable.
    '''
    method = method.lower()

    # Standard WSGI supported headers
    _wsgi_headers = ["content_length", "content_type", "query_string",
                     "remote_addr", "remote_host", "remote_user",
                     "request_method", "server_name", "server_port"]
    _transformed_headers = {}

    # For every header, replace - with _, prepend http_ if necessary and
    # convert to upper case.
    for header, value in headers.items():
        header = header.replace("-", "_")
        header = "http_{header}".format(header=header) \
            if header.lower() not in _wsgi_headers else header
        _transformed_headers.update({header.upper(): value})

    return method, _transformed_headers
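What pre_process_method_headers does to a typical header dict:

method, headers = pre_process_method_headers(
    'GET', {'Content-Type': 'application/json', 'X-Request-Id': '42'})
# method  == 'get'
# headers == {'CONTENT_TYPE': 'application/json',
#             'HTTP_X_REQUEST_ID': '42'}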
def headers_to_include_from_request(curr_request):
    '''
    Return the headers that need to be included from the current request.
    '''
    return {h: v for h, v in curr_request.META.items()
            if h in _settings.HEADERS_TO_INCLUDE}
def get_wsgi_request_object(curr_request, method, url, headers, body):
    '''
    Based on the given request parameters, construct and return the WSGI
    request object.
    '''
    x_headers = headers_to_include_from_request(curr_request)
    method, t_headers = pre_process_method_headers(method, headers)

    # Add default content type.
    if "CONTENT_TYPE" not in t_headers:
        t_headers.update({"CONTENT_TYPE": _settings.DEFAULT_CONTENT_TYPE})

    # Override existing batch request headers with the new headers passed
    # for this request.
    x_headers.update(t_headers)
    content_type = x_headers.get("CONTENT_TYPE",
                                 _settings.DEFAULT_CONTENT_TYPE)

    # Get hold of the request factory to construct the request.
    _request_factory = BatchRequestFactory()
    _request_provider = getattr(_request_factory, method)
    secure = _settings.USE_HTTPS
    request = _request_provider(url, data=body, secure=secure,
                                content_type=content_type, **x_headers)
    return request
def _base_environ(self, **request):
    '''
    Override the default values for the WSGI environment variables.
    '''
    # This is a minimal valid WSGI environ dictionary, plus:
    # - HTTP_COOKIE: for cookie support,
    # - REMOTE_ADDR: often useful, see #8551.
    # See http://www.python.org/dev/peps/pep-3333/#environ-variables
    environ = {
        'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
        'PATH_INFO': str('/'),
        'REMOTE_ADDR': str('127.0.0.1'),
        'REQUEST_METHOD': str('GET'),
        'SCRIPT_NAME': str(''),
        'SERVER_NAME': str('localhost'),
        'SERVER_PORT': str('8000'),
        'SERVER_PROTOCOL': str('HTTP/1.1'),
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': str('http'),
        'wsgi.input': FakePayload(b''),
        'wsgi.errors': self.errors,
        'wsgi.multiprocess': True,
        'wsgi.multithread': True,
        'wsgi.run_once': False,
    }
    environ.update(self.defaults)
    environ.update(request)
    return environ
def request(self, method, endpoint, body=None, timeout=-1):
    """
    Perform a request with a given body to a given endpoint in UpCloud's API.

    Handles errors with __error_middleware.
    """
    if method not in set(['GET', 'POST', 'PUT', 'DELETE']):
        raise Exception('Invalid/Forbidden HTTP method')

    url = '/' + self.api_v + endpoint
    headers = {
        'Authorization': self.token,
        'Content-Type': 'application/json'
    }

    if body:
        json_body_or_None = json.dumps(body)
    else:
        json_body_or_None = None

    call_timeout = timeout if timeout != -1 else self.timeout

    APIcall = getattr(requests, method.lower())
    res = APIcall('https://api.upcloud.com' + url,
                  data=json_body_or_None,
                  headers=headers,
                  timeout=call_timeout)

    if res.text:
        res_json = res.json()
    else:
        res_json = {}

    return self.__error_middleware(res, res_json)
def post_request(self, endpoint, body=None, timeout=-1):
    """
    Perform a POST request to a given endpoint in UpCloud's API.
    """
    return self.request('POST', endpoint, body, timeout)
def __error_middleware(self, res, res_json):
    """
    Middleware that raises an exception when the HTTP status code is an
    error code.
    """
    if res.status_code in [400, 401, 402, 403, 404, 405, 406, 409]:
        err_dict = res_json.get('error', {})
        raise UpCloudAPIError(error_code=err_dict.get('error_code'),
                              error_message=err_dict.get('error_message'))

    return res_json
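A sketch of calling the request helper with the error middleware in play; the manager construction is abbreviated and the endpoint is illustrative.

try:
    servers = manager.request('GET', '/server')
except UpCloudAPIError as exc:
    print(exc.error_code, exc.error_message)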
def put_stream(self, bucket, label, stream_object, params={}):
    '''
    Create a new file in Swift object storage.
    '''
    self.claim_bucket(bucket)
    self.connection.put_object(bucket, label, stream_object,
                               headers=self._convert_to_meta(params))
def h(gbm, array_or_frame, indices_or_columns='all'):
    """
    PURPOSE

    Compute Friedman and Popescu's H statistic, in order to look for an
    interaction in the passed gradient-boosting model among the variables
    represented by the elements of the passed array or frame and specified
    by the passed indices or columns.

    See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning
    via rule ensembles", Ann. Appl. Stat. 2:916-954,
    http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046,
    s. 8.1.

    ARGUMENTS

    gbm should be a scikit-learn gradient-boosting model (instance of
    sklearn.ensemble.GradientBoostingClassifier or
    sklearn.ensemble.GradientBoostingRegressor) that has been fitted to
    array_or_frame (and a target, not used here).

    array_or_frame should be a two-dimensional NumPy array or a pandas data
    frame (instance of numpy.ndarray or pandas.DataFrame).

    indices_or_columns is optional, with default value 'all'. It should be
    'all', a list of indices of columns of array_or_frame if array_or_frame
    is a NumPy array, or a list of columns of array_or_frame if
    array_or_frame is a pandas data frame. If it is 'all', then all columns
    of array_or_frame are used.

    RETURNS

    The H statistic of the variables, or NaN if the computation is spoiled
    by weak main effects and rounding errors.

    H varies from 0 to 1. The larger H, the stronger the evidence for an
    interaction among the variables.

    EXAMPLES

    Friedman and Popescu's (2008) formulas (44) and (46) correspond to
    h(F, x, [j, k]) and h(F, x, [j, k, l]) respectively.

    NOTES

    1. Per Friedman and Popescu, only variables with strong main effects
    should be examined for interactions. Strengths of main effects are
    available as gbm.feature_importances_ once gbm has been fitted.

    2. Per Friedman and Popescu, collinearity among variables can lead to
    interactions in gbm that are not present in the target function. To
    forestall such spurious interactions, check for strong correlations
    among variables before fitting gbm.
    """
    if indices_or_columns == 'all':
        if gbm.max_depth < array_or_frame.shape[1]:
            raise Exception(
                "gbm.max_depth == {} < array_or_frame.shape[1] == {}, "
                "so indices_or_columns must not be 'all'."
                .format(gbm.max_depth, array_or_frame.shape[1])
            )
    else:
        if gbm.max_depth < len(indices_or_columns):
            raise Exception(
                "gbm.max_depth == {}, so indices_or_columns must contain "
                "at most {} {}."
                .format(gbm.max_depth, gbm.max_depth,
                        "element" if gbm.max_depth == 1 else "elements")
            )
    check_args_contd(array_or_frame, indices_or_columns)

    arr, model_inds = get_arr_and_model_inds(array_or_frame,
                                             indices_or_columns)

    width = arr.shape[1]
    f_vals = {}
    for n in range(width, 0, -1):
        for inds in itertools.combinations(range(width), n):
            f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds)

    return compute_h_val(f_vals, arr, tuple(range(width)))
def h_all_pairs(gbm, array_or_frame, indices_or_columns='all'):
    """
    PURPOSE

    Compute Friedman and Popescu's two-variable H statistic, in order to
    look for an interaction in the passed gradient-boosting model between
    each pair of variables represented by the elements of the passed array
    or frame and specified by the passed indices or columns.

    See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning
    via rule ensembles", Ann. Appl. Stat. 2:916-954,
    http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046,
    s. 8.1.

    ARGUMENTS

    gbm should be a scikit-learn gradient-boosting model (instance of
    sklearn.ensemble.GradientBoostingClassifier or
    sklearn.ensemble.GradientBoostingRegressor) that has been fitted to
    array_or_frame (and a target, not used here).

    array_or_frame should be a two-dimensional NumPy array or a pandas data
    frame (instance of numpy.ndarray or pandas.DataFrame).

    indices_or_columns is optional, with default value 'all'. It should be
    'all', a list of indices of columns of array_or_frame if array_or_frame
    is a NumPy array, or a list of columns of array_or_frame if
    array_or_frame is a pandas data frame. If it is 'all', then all columns
    of array_or_frame are used.

    RETURNS

    A dict whose keys are pairs (2-tuples) of indices or columns and whose
    values are the H statistic of the pairs of variables, or NaN if a
    computation is spoiled by weak main effects and rounding errors.

    H varies from 0 to 1. The larger H, the stronger the evidence for an
    interaction between a pair of variables.

    EXAMPLE

    Friedman and Popescu's (2008) formula (44) for every j and k
    corresponds to h_all_pairs(F, x).

    NOTES

    1. Per Friedman and Popescu, only variables with strong main effects
    should be examined for interactions. Strengths of main effects are
    available as gbm.feature_importances_ once gbm has been fitted.

    2. Per Friedman and Popescu, collinearity among variables can lead to
    interactions in gbm that are not present in the target function. To
    forestall such spurious interactions, check for strong correlations
    among variables before fitting gbm.
    """
    if gbm.max_depth < 2:
        raise Exception("gbm.max_depth must be at least 2.")
    check_args_contd(array_or_frame, indices_or_columns)

    arr, model_inds = get_arr_and_model_inds(array_or_frame,
                                             indices_or_columns)

    width = arr.shape[1]
    f_vals = {}
    for n in [2, 1]:
        for inds in itertools.combinations(range(width), n):
            f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds)

    h_vals = {}
    for inds in itertools.combinations(range(width), 2):
        h_vals[inds] = compute_h_val(f_vals, arr, inds)
    if indices_or_columns != 'all':
        h_vals = {tuple(model_inds[(inds,)]): h_vals[inds]
                  for inds in h_vals.keys()}
    if not isinstance(array_or_frame, np.ndarray):
        all_cols = array_or_frame.columns.values
        h_vals = {tuple(all_cols[(inds,)]): h_vals[inds]
                  for inds in h_vals.keys()}

    return h_vals
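A quick check of h_all_pairs on synthetic data with a known interaction; the pair (0, 1) should receive the largest H value.

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(500, 3))
y = X[:, 0] * X[:, 1] + X[:, 2]   # true interaction between columns 0 and 1
gbm = GradientBoostingRegressor(max_depth=2, random_state=0).fit(X, y)
print(h_all_pairs(gbm, X))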
def get(self, q, limit=None):
    """
    Performs a search against the predict endpoint.

    :param q: query to be searched for [STRING]
    :return: { score: [0|1] }
    """
    uri = '{}/predict?q={}'.format(self.client.remote, q)
    self.logger.debug(uri)
    body = self.client.get(uri)
    return body['score']
def exists(self, bucket, label):
    '''Whether a given bucket:label object already exists.'''
    fn = self._zf(bucket, label)
    try:
        self.z.getinfo(fn)
        return True
    except KeyError:
        return False
def list_labels(self, bucket):
    '''List labels for the given bucket.

    Due to the zipfile's inherently arbitrary ordering, this is an
    expensive operation, as it walks the entire archive searching for
    individual 'buckets'.

    :param bucket: bucket to list labels for.
    :return: iterator for the labels in the specified bucket.
    '''
    for name in self.z.namelist():
        container, label = self._nf(name.encode("utf-8"))
        if container == bucket and label != MD_FILE:
            yield label
def list_buckets(self):
    '''List all buckets managed by this OFS instance.

    Like list_labels, this also walks the entire archive, yielding the
    bucket names. A local set is retained so that duplicates aren't
    returned, so this will temporarily pull the entire list into memory;
    even though this is a generator, it will slow down as more buckets are
    added to the set.

    :return: iterator for the buckets.
    '''
    buckets = set()
    for name in self.z.namelist():
        bucket, _ = self._nf(name)
        if bucket not in buckets:
            buckets.add(bucket)
            yield bucket
def get_stream(self, bucket, label, as_stream=True):
    '''Get a bitstream for the given bucket:label combination.

    :param bucket: the bucket to use.
    :return: bitstream as a file-like object
    '''
    if self.mode == "w":
        raise OFSException("Cannot read from archive in 'w' mode")
    elif self.exists(bucket, label):
        fn = self._zf(bucket, label)
        if as_stream:
            return self.z.open(fn)
        else:
            return self.z.read(fn)
    else:
        raise OFSFileNotFound
def get_url(self, bucket, label):
    '''Get a URL that should point at the bucket:labelled resource.

    Aimed to aid web apps by allowing them to redirect to an open resource,
    rather than proxying the bitstream.

    :param bucket: the bucket to use.
    :param label: the label of the resource to get
    :return: a string URI - eg 'zip:file:///home/.../foo.zip!/bucket/label'
    '''
    if self.exists(bucket, label):
        # note: str.join takes a single iterable, so the parts are wrapped
        # in a list
        root = "zip:file://%s" % os.path.abspath(self.zipfile)
        fn = self._zf(bucket, label)
        return "!/".join([root, fn])
    else:
        raise OFSFileNotFound
def put_stream(self, bucket, label, stream_object, params=None,
               replace=True, add_md=True):
    '''Put a bitstream (stream_object) for the specified bucket:label
    identifier.

    :param bucket: as standard
    :param label: as standard
    :param stream_object: file-like object to read from or bytestring.
    :param params: update metadata with these params (see `update_metadata`)
    '''
    if self.mode == "r":
        raise OFSException("Cannot write into archive in 'r' mode")
    else:
        params = params or {}
        fn = self._zf(bucket, label)
        # e.g. '2010-07-08T19:56:47'
        params['_creation_date'] = datetime.now().isoformat().split(".")[0]
        params['_label'] = label
        if self.exists(bucket, label) and replace == True:
            # Add then replace? Let's see if that works...
            #z = ZipFile(self.zipfile, self.mode, self.compression,
            #            self.allowZip64)
            zinfo = self.z.getinfo(fn)
            size, chksum = self._write(self.z, bucket, label, stream_object)
            self._del_stream(zinfo)
            #z.close()
            params['_content_length'] = size
            if chksum:
                params['_checksum'] = chksum
        else:
            #z = ZipFile(self.zipfile, self.mode, self.compression,
            #            self.allowZip64)
            size, chksum = self._write(self.z, bucket, label, stream_object)
            #z.close()
            params['_content_length'] = size
            if chksum:
                params['_checksum'] = chksum
        if add_md:
            params = self.update_metadata(bucket, label, params)
        return params
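A round trip with the zip-backed OFS above; the ZOFS constructor name and the close() call are assumptions about how the containing class is instantiated and shut down.

store = ZOFS('archive.zip', mode='a')
store.put_stream('bucket1', 'notes.txt', b'hello world')
print(store.get_stream('bucket1', 'notes.txt', as_stream=False))  # b'hello world'
store.close()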
Delete a bitstream. This needs more testing - file deletion in a zipfile is problematic. The alternative is to create a second zipfile without the files in question, which is not a nice method for large zip archives.
def del_stream(self, bucket, label):
    '''Delete a bitstream.

    This needs more testing - file deletion in a zipfile is problematic.
    The alternative is to create a second zipfile without the files in
    question, which is not a nice method for large zip archives.
    '''
    if self.exists(bucket, label):
        name = self._zf(bucket, label)
        self._del_stream(name)
Get the metadata for this bucket:label identifier.
def get_metadata(self, bucket, label):
    '''Get the metadata for this bucket:label identifier.
    '''
    if self.mode == "w":
        raise OFSException("Cannot read md from archive in 'w' mode")
    try:
        jsn = self._get_bucket_md(bucket)
    except OFSFileNotFound:
        # No MD found...
        return {}
    return jsn.get(label, {})
Update the metadata with the provided dictionary of params. :param params: dictionary of key values (json serializable).
def update_metadata(self, bucket, label, params):
    '''Update the metadata with the provided dictionary of params.

    :param params: dictionary of key values (json serializable).
    '''
    if self.mode == "r":
        raise OFSException("Cannot update MD in archive in 'r' mode")
    try:
        payload = self._get_bucket_md(bucket)
    except OFSFileNotFound:
        # No MD found... create it
        payload = {}
        for l in self.list_labels(bucket):
            payload[l] = {}
            payload[l]['_label'] = l
        if not self.quiet:
            print("Had to create md file for %s" % bucket)
    if label not in payload:
        payload[label] = {}
    payload[label].update(params)
    self.put_stream(bucket, MD_FILE, json.dumps(payload).encode('utf-8'),
                    params={}, replace=True, add_md=False)
    return payload[label]
Delete the metadata corresponding to the specified keys.
def del_metadata_keys(self, bucket, label, keys):
    '''Delete the metadata corresponding to the specified keys.
    '''
    if self.mode == "r":
        raise OFSException("Cannot update MD in archive in 'r' mode")
    try:
        payload = self._get_bucket_md(bucket)
    except OFSFileNotFound:
        # No MD found...
        raise OFSFileNotFound("Couldn't find a md file for %s bucket" % bucket)
    if label in payload:
        # dict.has_key was removed in Python 3; use the `in` operator.
        for key in [x for x in keys if x in payload[label]]:
            del payload[label][key]
        self.put_stream(bucket, MD_FILE, json.dumps(payload).encode('utf-8'),
                        params={}, replace=True, add_md=False)
Given a WSGI request, makes a call to the corresponding view function and returns the response.
def get_response(wsgi_request):
    '''
    Given a WSGI request, makes a call to the corresponding view function
    and returns the response.
    '''
    service_start_time = datetime.now()
    # Get the view / handler for this request
    view, args, kwargs = resolve(wsgi_request.path_info)
    kwargs.update({"request": wsgi_request})
    # Let the view do its task.
    try:
        resp = view(*args, **kwargs)
    except Exception as exc:
        # exc.message is Python 2 only; str(exc) works on both versions.
        resp = HttpResponseServerError(content=str(exc))
    headers = dict(resp._headers.values())
    # Convert HTTP response into simple dict type.
    d_resp = {"status_code": resp.status_code,
              "reason_phrase": resp.reason_phrase,
              "headers": headers}
    try:
        d_resp.update({"body": resp.content})
    except ContentNotRenderedError:
        resp.render()
        d_resp.update({"body": resp.content})
    # Check if we need to send across the duration header.
    if _settings.ADD_DURATION_HEADER:
        d_resp['headers'].update(
            {_settings.DURATION_HEADER_NAME: (datetime.now() - service_start_time).seconds})
    return d_resp
For the given batch request, extract the individual requests and create a WSGIRequest object for each.
def get_wsgi_requests(request):
    '''
    For the given batch request, extract the individual requests and create
    a WSGIRequest object for each.
    '''
    valid_http_methods = ["get", "post", "put", "patch", "delete", "head",
                          "options", "connect", "trace"]
    requests = json.loads(request.body)

    if not isinstance(requests, (list, tuple)):
        raise BadBatchRequest("The body of a batch request must be a list!")

    # Max limit check.
    no_requests = len(requests)
    if no_requests > _settings.MAX_LIMIT:
        raise BadBatchRequest("You can batch a maximum of %d requests." % (_settings.MAX_LIMIT))

    # We could mutate the current request with the respective parameters, but mutation is
    # a ghost in the dark, so let's avoid it. Construct a new WSGI request object for each request.
    def construct_wsgi_from_data(data):
        '''
        Given the data in the format of url, method, body and headers,
        construct a new WSGIRequest object.
        '''
        url = data.get("url", None)
        method = data.get("method", None)

        if url is None or method is None:
            raise BadBatchRequest("Request definition should have url, method defined.")

        if method.lower() not in valid_http_methods:
            raise BadBatchRequest("Invalid request method.")

        body = data.get("body", "")
        headers = data.get("headers", {})
        return get_wsgi_request_object(request, method, url, headers, body)

    return [construct_wsgi_from_data(data) for data in requests]
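For reference, a request body this parser accepts could look like the following; the URLs are illustrative placeholders:

import json

batch_body = json.dumps([
    {"url": "/api/users/", "method": "get"},
    {"url": "/api/users/", "method": "post",
     "body": json.dumps({"name": "alice"}),
     "headers": {"Content-Type": "application/json"}},
])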
A view function to handle the overall processing of batch requests.
def handle_batch_requests(request, *args, **kwargs):
    '''
    A view function to handle the overall processing of batch requests.
    '''
    batch_start_time = datetime.now()
    try:
        # Get the individual WSGI requests.
        wsgi_requests = get_wsgi_requests(request)
    except BadBatchRequest as brx:
        # brx.message is Python 2 only; str(brx) works on both versions.
        return HttpResponseBadRequest(content=str(brx))

    # Fire these WSGI requests, and collect the responses.
    response = execute_requests(wsgi_requests)

    # Everything's done, return the response.
    resp = HttpResponse(content=json.dumps(response), content_type="application/json")
    if _settings.ADD_DURATION_HEADER:
        resp[_settings.DURATION_HEADER_NAME] = str((datetime.now() - batch_start_time).seconds)
    return resp
Searches within KKBOX's database. :param keyword: the keyword. :type keyword: str :param types: the search types. :type types: list :param terr: the current territory. :return: API response. :rtype: dict See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
def search(self, keyword, types=[], terr=KKBOXTerritory.TAIWAN):
    '''
    Searches within KKBOX's database.

    :param keyword: the keyword.
    :type keyword: str
    :param types: the search types.
    :type types: list
    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#search_1`.
    '''
    url = 'https://api.kkbox.com/v1.1/search'
    url += '?' + url_parse.urlencode({'q': keyword, 'territory': terr})
    if len(types) > 0:
        url += '&type=' + ','.join(types)
    return self.http._post_data(url, None, self.http._headers_with_access_token())
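A call sketch, assuming `api` is an authenticated client instance exposing this method; the response layout used below is an assumption based on the documented API:

results = api.search('lost stars', types=['track', 'album'])
for track in results.get('tracks', {}).get('data', []):  # response shape assumed
    print(track.get('name'))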
IPAddress can only change its PTR record. Saves the current state, PUT /ip_address/uuid. def save(self): """ IPAddress can only change its PTR record. Saves the current state, PUT /ip_address/uuid. """ body = {'ip_address': {'ptr_record': self.ptr_record}} data = self.cloud_manager.request('PUT', '/ip_address/' + self.address, body) self._reset(**data['ip_address'])
Create IPAddress objects from API response data. Also associates CloudManager with the objects. def _create_ip_address_objs(ip_addresses, cloud_manager): """ Create IPAddress objects from API response data. Also associates CloudManager with the objects. """ # ip-addresses might be provided as a flat array or as a following dict: # {'ip_addresses': {'ip_address': [...]}} || {'ip_address': [...]} if 'ip_addresses' in ip_addresses: ip_addresses = ip_addresses['ip_addresses'] if 'ip_address' in ip_addresses: ip_addresses = ip_addresses['ip_address'] return [ IPAddress(cloud_manager=cloud_manager, **ip_addr) for ip_addr in ip_addresses ]
Reset the object's attributes. Accepts servers either as the API's nested dict ({'servers': {'server': [...]}}) or as a flat list; elements may be UUID strings or Server objects.
def _reset(self, **kwargs):
    """
    Reset the object's attributes.

    Accepts servers either as the API's nested dict
    ({'servers': {'server': [...]}}) or as a flat list; elements may be
    UUID strings or Server objects.
    """
    super(Tag, self)._reset(**kwargs)

    # backup name for changing it (look: Tag.save)
    self._api_name = self.name

    # flatten { servers: { server: [] } }; read from self.servers so this
    # works however the attribute was populated.
    if 'server' in self.servers:
        self.servers = self.servers['server']

    # convert UUIDs into server objects
    if self.servers and isinstance(self.servers[0], six.string_types):
        self.servers = [Server(uuid=server, populated=False) for server in self.servers]
HTTP GET function :param uri: REST endpoint :param params: optional HTTP params to pass to the endpoint :return: list of results (usually a list of dicts) Example: ret = cli.get('/search', params={ 'q': 'example.org' })
def _get(self, uri, params=None):
    """
    HTTP GET function

    :param uri: REST endpoint
    :param params: optional HTTP params to pass to the endpoint
    :return: list of results (usually a list of dicts)

    Example:
        ret = cli.get('/search', params={ 'q': 'example.org' })
    """
    # Avoid a mutable default argument; fall back to an empty dict here.
    params = params or {}
    if not uri.startswith(self.remote):
        uri = '{}{}'.format(self.remote, uri)

    return self._make_request(uri, params)
HTTP POST function :param uri: REST endpoint to POST to :param data: list of dicts to be passed to the endpoint :return: list of dicts, usually will be a list of objects or id's Example: ret = cli.post('/indicators', { 'indicator': 'example.com' }) def _post(self, uri, data): """ HTTP POST function :param uri: REST endpoint to POST to :param data: list of dicts to be passed to the endpoint :return: list of dicts, usually will be a list of objects or id's Example: ret = cli.post('/indicators', { 'indicator': 'example.com' }) """ if not uri.startswith(self.remote): uri = '{}/{}'.format(self.remote, uri) self.logger.debug(uri) return self._make_request(uri, data=data)
Return a list of (populated or unpopulated) Server instances. - populate = False (default) => 1 API request, returns unpopulated Server instances. - populate = True => Does 1 + n API requests (n = # of servers), returns populated Server instances. New in 0.3.0: the list can be filtered with tags: - tags_has_one: list of Tag objects or strings returns servers that have at least one of the given tags - tags_has_all: list of Tag objects or strings returns servers that have all of the tags def get_servers(self, populate=False, tags_has_one=None, tags_has_all=None): """ Return a list of (populated or unpopulated) Server instances. - populate = False (default) => 1 API request, returns unpopulated Server instances. - populate = True => Does 1 + n API requests (n = # of servers), returns populated Server instances. New in 0.3.0: the list can be filtered with tags: - tags_has_one: list of Tag objects or strings returns servers that have at least one of the given tags - tags_has_all: list of Tag objects or strings returns servers that have all of the tags """ if tags_has_all and tags_has_one: raise Exception('only one of (tags_has_all, tags_has_one) is allowed.') request = '/server' if tags_has_all: tags_has_all = [str(tag) for tag in tags_has_all] taglist = ':'.join(tags_has_all) request = '/server/tag/{0}'.format(taglist) if tags_has_one: tags_has_one = [str(tag) for tag in tags_has_one] taglist = ','.join(tags_has_one) request = '/server/tag/{0}'.format(taglist) servers = self.get_request(request)['servers']['server'] server_list = list() for server in servers: server_list.append(Server(server, cloud_manager=self)) if populate: for server_instance in server_list: server_instance.populate() return server_list
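A filtering sketch; the CloudManager constructor arguments are assumptions for illustration:

manager = CloudManager('api_user', 'password')  # credentials assumed
prod_web = manager.get_servers(tags_has_all=['production', 'web'])
any_db = manager.get_servers(tags_has_one=['mysql', 'postgres'])
for s in prod_web:
    print(s.hostname)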
Return a (populated) Server instance. def get_server(self, UUID): """ Return a (populated) Server instance. """ server, IPAddresses, storages = self.get_server_data(UUID) return Server( server, ip_addresses=IPAddresses, storage_devices=storages, populated=True, cloud_manager=self )
Return a (populated) Server instance by its IP. Uses GET '/ip_address/x.x.x.x' to retrieve machine UUID using IP-address. def get_server_by_ip(self, ip_address): """ Return a (populated) Server instance by its IP. Uses GET '/ip_address/x.x.x.x' to retrieve machine UUID using IP-address. """ data = self.get_request('/ip_address/{0}'.format(ip_address)) UUID = data['ip_address']['server'] return self.get_server(UUID)
Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for the correct format. Example: server1 = Server( core_number=1, memory_amount=1024, hostname="my.example.1", zone=ZONE.London, storage_devices=[ Storage(os="Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() ], title="My Example Server") manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running number starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0", "Debian 7.8", "Ubuntu 12.04", "Ubuntu 14.04", "Windows 2003", "Windows 2008", "Windows 2012"
def create_server(self, server):
    """
    Create a server and its storages based on a (locally created) Server object.
    Populates the given Server instance with the API response.

    0.3.0: also supports giving the entire POST body as a dict that is
    directly serialised into JSON. Refer to the REST API documentation for
    the correct format.

    Example:
    server1 = Server(
        core_number=1,
        memory_amount=1024,
        hostname="my.example.1",
        zone=ZONE.London,
        storage_devices=[
            Storage(os="Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'),
            Storage(size=10),
            Storage()
        ],
        title="My Example Server")

    manager.create_server(server1)

    One storage should contain an OS. Otherwise storage fields are optional.
    - size defaults to 10,
    - title defaults to hostname + " OS disk" and hostname + " storage disk id"
      (id is a running number starting from 1)
    - tier defaults to maxiops
    - valid operating systems are:
      "CentOS 6.5", "CentOS 7.0",
      "Debian 7.8",
      "Ubuntu 12.04", "Ubuntu 14.04",
      "Windows 2003", "Windows 2008", "Windows 2012"
    """
    if not isinstance(server, Server):
        server = Server._create_server_obj(server, cloud_manager=self)
    body = server.prepare_post_body()

    res = self.post_request('/server', body)

    server._reset(res['server'], cloud_manager=self, populated=True)
    return server
modify_server allows updating the server's updateable_fields. Note: Server's IP-addresses and Storages are managed by their own add/remove methods.
def modify_server(self, UUID, **kwargs):
    """
    modify_server allows updating the server's updateable_fields.
    Note: Server's IP-addresses and Storages are managed by their own add/remove methods.
    """
    body = dict()
    body['server'] = {}
    for arg in kwargs:
        if arg not in Server.updateable_fields:
            # The original built the Exception without raising it.
            raise Exception('{0} is not an updateable field'.format(arg))
        body['server'][arg] = kwargs[arg]

    res = self.request('PUT', '/server/{0}'.format(UUID), body)
    server = res['server']

    # Populate subobjects
    IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'),
                                                    cloud_manager=self)
    storages = Storage._create_storage_objs(server.pop('storage_devices'),
                                            cloud_manager=self)
    return Server(
        server,
        ip_addresses=IPAddresses,
        storage_devices=storages,
        populated=True,
        cloud_manager=self
    )
Return '/server/uuid' data in Python dict. Creates object representations of any IP-address and Storage. def get_server_data(self, UUID): """ Return '/server/uuid' data in Python dict. Creates object representations of any IP-address and Storage. """ data = self.get_request('/server/{0}'.format(UUID)) server = data['server'] # Populate subobjects IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'), cloud_manager=self) storages = Storage._create_storage_objs(server.pop('storage_devices'), cloud_manager=self) return server, IPAddresses, storages
Pull a feed :param f: feed name (eg: csirtgadgets/correlated) :param limit: return value limit (default 25) :return: Feed dict
def feed(f, limit=25):
    """
    Pull a feed

    :param f: feed name (eg: csirtgadgets/correlated)
    :param limit: return value limit (default 25)
    :return: Feed dict
    """
    if '/' not in f:
        raise ValueError('feed name must be formatted like: '
                         'csirtgadgets/scanners')

    user, f = f.split('/')
    return Feed().show(user, f, limit=limit)
Create an indicator in a feed :param f: feed name (eg: wes/test) :param i: indicator dict (eg: {'indicator': 'example.com', 'tags': ['ssh'], 'description': 'this is a test'}) :return: dict of indicator def indicator_create(f, i): """ Create an indicator in a feed :param f: feed name (eg: wes/test) :param i: indicator dict (eg: {'indicator': 'example.com', 'tags': ['ssh'], 'description': 'this is a test'}) :return: dict of indicator """ if '/' not in f: raise ValueError('feed name must be formatted like: ' 'csirtgadgets/scanners') if not i: raise ValueError('missing indicator dict') u, f = f.split('/') i['user'] = u i['feed'] = f ret = Indicator(i).submit() return ret
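A submission sketch; the feed name and indicator values are illustrative:

ret = indicator_create('csirtgadgets/scanners', {
    'indicator': '93.184.216.34',
    'tags': ['scanner', 'ssh'],
    'description': 'repeated ssh brute-force attempts',
})
print(ret)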
Reads the content of file in IDX format, converts it into numpy.ndarray and returns it. file is a file-like object (with read() method) or a file name. def convert_from_file(file): """ Reads the content of file in IDX format, converts it into numpy.ndarray and returns it. file is a file-like object (with read() method) or a file name. """ if isinstance(file, six_string_types): with open(file, 'rb') as f: return _internal_convert(f) else: return _internal_convert(file)
Converts file in IDX format provided by file-like input into numpy.ndarray and returns it.
def _internal_convert(inp):
    """
    Converts file in IDX format provided by file-like input into
    numpy.ndarray and returns it.
    """
    # Read the "magic number" - 4 bytes.
    try:
        mn = struct.unpack('>BBBB', inp.read(4))
    except struct.error as e:
        raise FormatError(e)

    # First two bytes are always zero, check it.
    if mn[0] != 0 or mn[1] != 0:
        msg = ("Incorrect first two bytes of the magic number: " +
               "0x{0:02X} 0x{1:02X}".format(mn[0], mn[1]))
        raise FormatError(msg)

    # 3rd byte is the data type code.
    dtype_code = mn[2]
    if dtype_code not in _DATA_TYPES_IDX:
        msg = "Incorrect data type code: 0x{0:02X}".format(dtype_code)
        raise FormatError(msg)

    # 4th byte is the number of dimensions.
    dims = int(mn[3])

    # See possible data types description.
    dtype, dtype_s, el_size = _DATA_TYPES_IDX[dtype_code]

    # 4-byte integer for length of each dimension.
    try:
        dims_sizes = struct.unpack('>' + 'I' * dims, inp.read(4 * dims))
    except struct.error as e:
        raise FormatError('Dims sizes: {0}'.format(e))

    # Full length of data.
    full_length = reduce(operator.mul, dims_sizes, 1)

    # Create a numpy array from the data
    try:
        result_array = numpy.frombuffer(
            inp.read(full_length * el_size),
            dtype=numpy.dtype(dtype)
        ).reshape(dims_sizes)
    except ValueError as e:
        raise FormatError('Error creating numpy array: {0}'.format(e))

    # Check for superfluous data.
    if len(inp.read(1)) > 0:
        raise FormatError('Superfluous data detected.')

    return result_array
Writes the contents of the numpy.ndarray ndarr to file in IDX format. file is a file-like object (with write() method) or a file name. def convert_to_file(file, ndarr): """ Writes the contents of the numpy.ndarray ndarr to file in IDX format. file is a file-like object (with write() method) or a file name. """ if isinstance(file, six_string_types): with open(file, 'wb') as fp: _internal_write(fp, ndarr) else: _internal_write(file, ndarr)
Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and returns it. def convert_to_string(ndarr): """ Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and returns it. """ with contextlib.closing(BytesIO()) as bytesio: _internal_write(bytesio, ndarr) return bytesio.getvalue()
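A quick round trip through the encoder and decoder, using only functions defined in this module (uint8 is one of the dtypes the IDX format defines):

from io import BytesIO
import numpy

arr = numpy.arange(12, dtype='uint8').reshape(3, 4)
encoded = convert_to_string(arr)               # IDX-encoded bytes
decoded = convert_from_file(BytesIO(encoded))  # back to an ndarray
assert (arr == decoded).all()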
Writes numpy.ndarray arr to a file-like object (with write() method) in IDX format. def _internal_write(out_stream, arr): """ Writes numpy.ndarray arr to a file-like object (with write() method) in IDX format. """ if arr.size == 0: raise FormatError('Cannot encode empty array.') try: type_byte, struct_lib_type = _DATA_TYPES_NUMPY[str(arr.dtype)] except KeyError: raise FormatError('numpy ndarray type not supported by IDX format.') if arr.ndim > _MAX_IDX_DIMENSIONS: raise FormatError( 'IDX format cannot encode array with dimensions > 255') if max(arr.shape) > _MAX_AXIS_LENGTH: raise FormatError('IDX format cannot encode array with more than ' + str(_MAX_AXIS_LENGTH) + ' elements along any axis') # Write magic number out_stream.write(struct.pack('BBBB', 0, 0, type_byte, arr.ndim)) # Write array dimensions out_stream.write(struct.pack('>' + 'I' * arr.ndim, *arr.shape)) # Horrible hack to deal with horrible bug when using struct.pack to encode # unsigned ints in 2.7 and lower, see http://bugs.python.org/issue2263 if sys.version_info < (2, 7) and str(arr.dtype) == 'uint8': arr_as_list = [int(i) for i in arr.reshape(-1)] out_stream.write(struct.pack('>' + struct_lib_type * arr.size, *arr_as_list)) else: # Write array contents - note that the limit to number of arguments # doesn't apply to unrolled arguments out_stream.write(struct.pack('>' + struct_lib_type * arr.size, *arr.reshape(-1)))
There are three ways to start using KKBOX's Open/Partner API. The first is to generate a client credential and fetch an access token that lets KKBOX identify you. It allows you to access public data from KKBOX such as public albums, playlists and so on. However, you cannot use client credentials to access a user's private data; you have to let users log in to KKBOX and grant you permission to do so. Nor can you use client credentials for media playback, since that requires a Premium Membership. :return: an access token :rtype: :class:`kkbox_sdk.KKBOXAccessToken` See `https://docs-en.kkbox.codes/docs/appendix-a`.
def fetch_access_token_by_client_credentials(self):
    '''
    There are three ways to start using KKBOX's Open/Partner API. The
    first is to generate a client credential and fetch an access token
    that lets KKBOX identify you. It allows you to access public data
    from KKBOX such as public albums, playlists and so on.

    However, you cannot use client credentials to access a user's
    private data; you have to let users log in to KKBOX and grant you
    permission to do so. Nor can you use client credentials for media
    playback, since that requires a Premium Membership.

    :return: an access token
    :rtype: :class:`kkbox_sdk.KKBOXAccessToken`

    See `https://docs-en.kkbox.codes/docs/appendix-a`.
    '''
    client_credential_base = '%s:%s' % (self.client_id, self.client_secret)
    try:
        client_credentials = base64.b64encode(
            bytes(client_credential_base, 'utf-8'))
    except TypeError:
        # Python 2: bytes() takes no encoding argument.
        client_credentials = base64.b64encode(client_credential_base)
    client_credentials = client_credentials.decode('utf-8')
    headers = {'Authorization': 'Basic ' + client_credentials,
               'Content-type': 'application/x-www-form-urlencoded'}
    post_parameters = {'grant_type': 'client_credentials',
                       'scope': 'user_profile user_territory'}
    json_object = self.http._post_data(KKBOXOAuth.OAUTH_TOKEN_URL,
                                       post_parameters, headers)
    self.access_token = KKBOXAccessToken(**json_object)
    return self.access_token
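A token-fetch sketch; the constructor arguments are assumed from the attribute names used above, and the access_token attribute from the standard OAuth response fields:

auth = KKBOXOAuth(client_id='YOUR_ID', client_secret='YOUR_SECRET')  # signature assumed
token = auth.fetch_access_token_by_client_credentials()
print(token.access_token)  # attribute name assumed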
Validate Storage OS and its UUID. If the OS is a custom OS UUID, don't validate against templates.
def get_OS_UUID(cls, os):
    """
    Validate Storage OS and its UUID.
    If the OS is a custom OS UUID, don't validate against templates.
    """
    if os in cls.templates:
        return cls.templates[os]

    # UUIDs consist of hexadecimal digits, so match [0-9a-f] per group.
    uuid_regexp = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
    if re.search(uuid_regexp, os):
        return os

    raise Exception((
        "Invalid OS -- valid options are: 'CentOS 6.5', 'CentOS 7.0', "
        "'Debian 7.8', 'Debian 8.0', 'Ubuntu 12.04', 'Ubuntu 14.04', 'Ubuntu 16.04', "
        "'Windows 2008', 'Windows 2012'"
    ))
Calls the resp_generator for all the requests in parallel in an asynchronous way. def execute(self, requests, resp_generator, *args, **kwargs): ''' Calls the resp_generator for all the requests in parallel in an asynchronous way. ''' result_futures = [self.executor_pool.submit(resp_generator, req, *args, **kwargs) for req in requests] resp = [res_future.result() for res_future in result_futures] return resp
Calls the resp_generator for all the requests in sequential order.
def execute(self, requests, resp_generator, *args, **kwargs):
    '''
    Calls the resp_generator for all the requests in sequential order.
    '''
    # Forward *args/**kwargs so the sequential executor matches the
    # parallel executor's interface.
    return [resp_generator(request, *args, **kwargs) for request in requests]
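Since both execute implementations share a signature, callers can swap strategies freely; the executor class names below are assumptions for illustration:

executor = SequentialExecutor()  # or ParallelExecutor(num_workers) - names assumed
responses = executor.execute(wsgi_requests, get_response)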
Sets up basic logging :param args: ArgParse arguments :return: nothing. sets logger up globally def setup_logging(args): """ Sets up basic logging :param args: ArgParse arguments :return: nothing. sets logger up globally """ loglevel = logging.WARNING if args.verbose: loglevel = logging.INFO if args.debug: loglevel = logging.DEBUG console = logging.StreamHandler() logging.getLogger('').setLevel(loglevel) console.setFormatter(logging.Formatter(LOG_FORMAT)) logging.getLogger('').addHandler(console)
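A minimal driver for this helper, defining only the two flags it reads:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
setup_logging(parser.parse_args())
logging.getLogger(__name__).info('logging configured')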
Get an IPAddress object with the IP address (string) from the API. e.g manager.get_ip('80.69.175.210') def get_ip(self, address): """ Get an IPAddress object with the IP address (string) from the API. e.g manager.get_ip('80.69.175.210') """ res = self.get_request('/ip_address/' + address) return IPAddress(cloud_manager=self, **res['ip_address'])
Get all IPAddress objects from the API. def get_ips(self): """ Get all IPAddress objects from the API. """ res = self.get_request('/ip_address') IPs = IPAddress._create_ip_address_objs(res['ip_addresses'], cloud_manager=self) return IPs
Attach a new (random) IPAddress to the given server (object or UUID). def attach_ip(self, server, family='IPv4'): """ Attach a new (random) IPAddress to the given server (object or UUID). """ body = { 'ip_address': { 'server': str(server), 'family': family } } res = self.request('POST', '/ip_address', body) return IPAddress(cloud_manager=self, **res['ip_address'])
Modify an IP address' ptr-record (Reverse DNS). Accepts an IPAddress instance (object) or its address (string). def modify_ip(self, ip_addr, ptr_record): """ Modify an IP address' ptr-record (Reverse DNS). Accepts an IPAddress instance (object) or its address (string). """ body = { 'ip_address': { 'ptr_record': ptr_record } } res = self.request('PUT', '/ip_address/' + str(ip_addr), body) return IPAddress(cloud_manager=self, **res['ip_address'])
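Putting attach_ip and modify_ip together; the server UUID below is a placeholder:

ip = manager.attach_ip('009d64ef-31d1-4684-a26b-c86c955cbf46')  # placeholder UUID
updated = manager.modify_ip(ip, 'host1.example.com')  # set reverse DNS
print(updated.ptr_record)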
Creates a new Feed object :param user: feed username :param name: feed name :param description: feed description :return: dict def new(self, user, name, description=None): """ Creates a new Feed object :param user: feed username :param name: feed name :param description: feed description :return: dict """ uri = self.client.remote + '/users/{0}/feeds'.format(user) data = { 'feed': { 'name': name, 'description': description } } resp = self.client.post(uri, data) return resp
Removes a feed :param user: feed username :param name: feed name :return: HTTP status code of the delete request
def delete(self, user, name):
    """
    Removes a feed

    :param user: feed username
    :param name: feed name
    :return: HTTP status code of the delete request
    """
    uri = self.client.remote + '/users/{}/feeds/{}'.format(user, name)
    resp = self.client.session.delete(uri)
    return resp.status_code
Returns a list of Feeds from the API :param user: feed username :return: list Example: ret = feed.index('csirtgadgets') def index(self, user): """ Returns a list of Feeds from the API :param user: feed username :return: list Example: ret = feed.index('csirtgadgets') """ uri = self.client.remote + '/users/{0}/feeds'.format(user) return self.client.get(uri)