def create_zip_dir(zipfile_path, *file_list):
    """ This function creates a zipfile located at zipfile_path containing
    the files in file_list. file_list can be passed either as separate
    arguments or as a single list/tuple. """
    try:
        if isinstance(file_list, (list, tuple)):
            # unfolding list of list or tuple
            if len(file_list) == 1:
                if isinstance(file_list[0], (list, tuple)):
                    file_list = file_list[0]
        # converting string to iterable list
        if isinstance(file_list, str):
            file_list = [file_list]
        if file_list:
            with ZipFile(zipfile_path, 'w') as zf:
                for cur_file in file_list:
                    if '/' in cur_file:
                        os.chdir('/'.join(cur_file.split('/')[:-1]))
                    elif '/' in zipfile_path:
                        os.chdir('/'.join(zipfile_path.split('/')[:-1]))
                    zf.write(cur_file.split('/')[-1])
        else:
            debug.log('Error: No Files in list!',
                      zipfile_path + ' was not created!')
    except Exception as e:
        debug.log('Error: Could not create zip dir! argtype: ' + str(type(file_list)),
                  "FileList: " + str(file_list),
                  "Errormessage: " + str(e))
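A possible usage sketch for create_zip_dir above. The function relies on module-level imports (os and zipfile.ZipFile) plus the project's own `debug` logger, so it is not standalone; the paths below are hypothetical.

create_zip_dir('/tmp/out.zip', '/tmp/a.txt', '/tmp/b.txt')    # varargs form
create_zip_dir('/tmp/out.zip', ['/tmp/a.txt', '/tmp/b.txt'])  # single-list form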
def file_zipper(root_dir):
    """ This function will zip the files created in the runroot directory
    and subdirectories """
    # FINDING AND ZIPPING UNZIPPED FILES
    for root, dirs, files in os.walk(root_dir, topdown=False):
        if root != "":
            if root[-1] != '/':
                root += '/'
            for current_file in files:
                filepath = "%s/%s" % (root, current_file)
                try:
                    file_size = os.path.getsize(filepath)
                except Exception as e:
                    file_size = 0
                    debug.log('Error: file_zipper failed to zip following file ' + filepath, e)
                # Excluding small files, gzipped files and links
                if (file_size > 50
                        and current_file[-3:] != ".gz"
                        and not os.path.islink(filepath)):
                    if current_file[-4:] == ".zip":
                        # Unzip file
                        ec = Popen('unzip -qq "%s" -d %s > /dev/null 2>&1' % (filepath, root),
                                   shell=True).wait()
                        if ec > 0:
                            debug.log('Error: file_zipper failed to unzip following file %s' % filepath)
                            continue
                        else:
                            ec = Popen('rm -f "%s" > /dev/null 2>&1' % (filepath),
                                       shell=True).wait()
                            if ec > 0:
                                debug.log('Error: file_zipper failed to delete the original zip file (%s)' % filepath)
                            filepath = filepath[:-4]
                    # Saving a gzipped version
                    with open_(filepath, 'rb') as f, open_(filepath + ".gz", 'wb', 9) as gz:
                        gz.writelines(f)
                    # Deleting old (non-zipped) file
                    try:
                        os.remove(filepath)
                    except OSError as e:
                        debug.log(("WARNING! The file %s could not be "
                                   "removed!\n%s") % (current_file, e))
def file_unzipper(directory):
    """ This function will unzip all files in the runroot directory and
    subdirectories """
    debug.log("Unzipping directory (%s)..." % directory)
    # FINDING AND UNZIPPING ZIPPED FILES
    for root, dirs, files in os.walk(directory, topdown=False):
        if root != "":
            orig_dir = os.getcwd()
            os.chdir(directory)
            Popen('gunzip -q -f *.gz > /dev/null 2>&1', shell=True).wait()
            Popen('unzip -qq -o "*.zip" > /dev/null 2>&1', shell=True).wait()
            Popen('rm -f *.zip > /dev/null 2>&1', shell=True).wait()
            os.chdir(orig_dir)
def move_file(src, dst):
    """ This function will simply move the file from the source path to the
    destination path given as input """
    # Sanity checkpoint
    src = re.sub('[^\w/\-\.\*]', '', src)
    dst = re.sub('[^\w/\-\.\*]', '', dst)
    if len(re.sub('[\W]', '', src)) < 5 or len(re.sub('[\W]', '', dst)) < 5:
        debug.log("Error: Moving file failed. Provided paths are invalid! src='%s' dst='%s'" % (src, dst))
    else:
        # Check destination
        check = False
        if dst[-1] == '/':
            if os.path.exists(dst):
                check = True  # Valid Dir
            else:
                debug.log("Error: Moving file failed. Destination directory does not exist (%s)" % (dst))  # DEBUG
        elif os.path.exists(dst):
            if os.path.isdir(dst):
                check = True  # Valid Dir
                dst += '/'  # Add missing slash
            else:
                debug.log("Error: Moving file failed. %s exists!" % dst)
        elif os.path.exists(os.path.dirname(dst)):
            check = True  # Valid file path
        else:
            debug.log("Error: Moving file failed. %s is an invalid destination!" % dst)
        if check:
            # Check source
            files = glob.glob(src)
            if len(files) != 0:
                debug.log("Moving File(s)...", "Move from %s" % src, "to %s" % dst)
                for file_ in files:
                    # Check if file contains invalid symbols:
                    invalid_chars = re.findall('[^\w/\-\.\*]', os.path.basename(file_))
                    if invalid_chars:
                        debug.graceful_exit(("Error: File %s contains invalid "
                                             "characters %s!") % (os.path.basename(file_), invalid_chars))
                        continue
                    # Check file exists
                    if os.path.isfile(file_):
                        debug.log("Moving file: %s" % file_)
                        shutil.move(file_, dst)
                    else:
                        debug.log("Error: Moving file failed. %s is not a regular file!" % file_)
            else:
                debug.log("Error: Moving file failed. No files were found! (%s)" % src)
def copy_file(src, dst, ignore=None):
    """ This function will simply copy the file from the source path to the
    destination path given as input """
    # Sanity checkpoint
    src = re.sub('[^\w/\-\.\*]', '', src)
    dst = re.sub('[^\w/\-\.\*]', '', dst)
    if len(re.sub('[\W]', '', src)) < 5 or len(re.sub('[\W]', '', dst)) < 5:
        debug.log("Error: Copying file failed. Provided paths are invalid! src='%s' dst='%s'" % (src, dst))
    else:
        # Check destination
        check = False
        if dst[-1] == '/':
            if os.path.exists(dst):
                check = True  # Valid Dir
            else:
                debug.log("Error: Copying file failed. Destination directory does not exist (%s)" % (dst))  # DEBUG
        elif os.path.exists(dst):
            if os.path.isdir(dst):
                check = True  # Valid Dir
                dst += '/'  # Add missing slash
            else:
                debug.log("Error: Copying file failed. %s exists!" % dst)
        elif os.path.exists(os.path.dirname(dst)):
            check = True  # Valid file path
        else:
            debug.log("Error: Copying file failed. %s is an invalid destination!" % dst)
        if check:
            # Check source
            files = glob.glob(src)
            if ignore is not None:
                files = [fil for fil in files if ignore not in fil]
            if len(files) != 0:
                debug.log("Copying File(s)...", "Copy from %s" % src, "to %s" % dst)  # DEBUG
                for file_ in files:
                    # Check file exists
                    if os.path.isfile(file_):
                        debug.log("Copying file: %s" % file_)  # DEBUG
                        shutil.copy(file_, dst)
                    else:
                        debug.log("Error: Copying file failed. %s is not a regular file!" % file_)  # DEBUG
            else:
                debug.log("Error: Copying file failed. No files were found! (%s)" % src)
def copy_dir(src, dst):
    """ This function will simply copy the directory tree from the source
    path to the destination path given as input """
    try:
        debug.log("copy dir from " + src, "to " + dst)
        shutil.copytree(src, dst)
    except Exception as e:
        debug.log("Error: happened while copying!\n%s\n" % e)
def print_out(self, *lst):
    """ Print list of strings to the predefined stdout. """
    self.print2file(self.stdout, True, True, *lst)
def print_err(self, *lst):
    """ Print list of strings to the predefined stderr. """
    self.print2file(self.stderr, False, True, *lst)
def print2file(self, logfile, print2screen, addLineFeed, *lst):
    """ This function prints all the given strings to the screen and/or
    logs them to a file. print2screen is a boolean (e.g. True); *lst is a
    comma-separated list of strings. """
    if addLineFeed:
        linefeed = '\n'
    else:
        linefeed = ''
    if print2screen:
        print(linefeed.join(str(string) for string in lst))
    try:
        file_instance = isinstance(logfile, file)
    except NameError as e:
        from io import IOBase
        try:
            file_instance = isinstance(logfile, IOBase)
        except:
            raise e
    if file_instance:
        logfile.write(linefeed.join(str(string) for string in lst) + linefeed)
    elif isinstance(logfile, str) and os.path.exists(logfile):
        with open_(logfile, 'a') as f:
            f.write(linefeed.join(str(string) for string in lst) + linefeed)
    elif not print2screen:
        # Print to screen if there is no output file
        print(linefeed.join(str(string) for string in lst))
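The `isinstance(logfile, file)` fallback above is a Python 2/3 compatibility trick: the `file` builtin only exists on Python 2, while `io.IOBase` covers open file objects on both. A minimal standalone sketch of the same check:

import sys

try:
    file_types = (file,)  # Python 2 builtin; raises NameError on Python 3
except NameError:
    from io import IOBase
    file_types = (IOBase,)

print(isinstance(sys.stdout, file_types))  # True for a real stream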
def log(self, *lst):
    """ Print list of strings to the predefined logfile if debug is set,
    and set the caught_error message if an error is found """
    self.print2file(self.logfile, self.debug, True, *lst)
    if 'Error' in '\n'.join([str(x) for x in lst]):
        self.caught_error = '\n'.join([str(x) for x in lst])
def log_no_newline(self, msg):
    """ Print the message to the predefined log file without a newline """
    self.print2file(self.logfile, False, False, msg)
def graceful_exit(self, msg):
    """ This function prints any stored errors to stderr, logs the given
    message and exits with an error code. """
    # Print stored errors to stderr
    if self.caught_error:
        self.print2file(self.stderr, False, False, self.caught_error)
    # Kill process with error message
    self.log(msg)
    sys.exit(1)
def get_tree(self, list_of_keys):
    """ get_tree will extract the value from a nested tree

    INPUT
        list_of_keys: a list of keys ie. ['key1', 'key2']
    USAGE
        >>> # Access the value for key2 within the nested dictionary
        >>> adv_dict({'key1': {'key2': 'value'}}).get_tree(['key1', 'key2'])
        'value'
    """
    cur_obj = self
    for key in list_of_keys:
        cur_obj = cur_obj.get(key)
        if not cur_obj:
            break
    return cur_obj
def invert(self):
    ''' Return inverse mapping of dictionary with sorted values.

    USAGE
        >>> # Switch the keys and values
        >>> adv_dict({
        ...     'A': [1, 2, 3],
        ...     'B': [4, 2],
        ...     'C': [1, 4],
        ... }).invert()
        {1: ['A', 'C'], 2: ['A', 'B'], 3: ['A'], 4: ['B', 'C']}
    '''
    inv_map = {}
    for k, v in self.items():
        if sys.version_info < (3, 0):
            acceptable_v_instance = isinstance(v, (str, int, float, long))
        else:
            acceptable_v_instance = isinstance(v, (str, int, float))
        if acceptable_v_instance:
            v = [v]
        elif not isinstance(v, list):
            raise Exception('Error: Non supported value format! Values may only'
                            ' be numerical, strings, or lists of numbers and '
                            'strings.')
        for val in v:
            inv_map[val] = inv_map.get(val, [])
            inv_map[val].append(k)
            inv_map[val].sort()
    return inv_map
def sub(self, replace, string, count=0):
    """ Return a new string where the matching cases (limited by the count)
    in the string are replaced. """
    return self.re.sub(replace, string, count)
def match(self, s):
    """ Match the string against the stored regular expression and store
    all groups in matches. Returns None on a negative match. """
    self.matches = self.re.search(s)
    return self.matches
def match(self, s):
    """ Match the pattern against the input string; return True/False and
    save the matched string in the internal list """
    if self.re.match(s):
        self.list.append(s)
        return True
    else:
        return False
def reporter(self):
    """ Create the MASH report """
    logging.info('Creating {} report'.format(self.analysistype))
    make_path(self.reportpath)
    header = 'Strain,ReferenceGenus,ReferenceFile,ReferenceGenomeMashDistance,Pvalue,NumMatchingHashes\n'
    data = ''
    for sample in self.metadata:
        try:
            data += '{},{},{},{},{},{}\n'.format(sample.name,
                                                 sample[self.analysistype].closestrefseqgenus,
                                                 sample[self.analysistype].closestrefseq,
                                                 sample[self.analysistype].mashdistance,
                                                 sample[self.analysistype].pvalue,
                                                 sample[self.analysistype].nummatches)
        except AttributeError:
            data += '{}\n'.format(sample.name)
    # Create the report file
    reportfile = os.path.join(self.reportpath, 'mash.csv')
    with open(reportfile, 'w') as report:
        report.write(header)
        report.write(data)
def get_function(pkgpath):
    """Take a full path to a python method or class, for example
    mypkg.subpkg.method, and return the method or class (after importing
    the required packages)
    """
    # Extract the module and function name from pkgpath
    elems = pkgpath.split('.')
    if len(elems) <= 1:
        raise PyMacaronCoreException("Path %s is too short. Should be at least module.func." % pkgpath)
    func_name = elems[-1]
    func_module = '.'.join(elems[0:-1])
    # Load the function's module and get the function
    try:
        m = import_module(func_module)
        f = getattr(m, func_name)
        return f
    except Exception as e:
        t = traceback.format_exc()
        raise PyMacaronCoreException("Failed to import %s: %s\nTrace:\n%s" % (pkgpath, str(e), t))
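get_function is a thin wrapper around importlib; a minimal standalone sketch of the same split-import-getattr pattern using only the standard library (load_callable is a hypothetical name, not part of the original API):

from importlib import import_module

def load_callable(pkgpath):
    # Split "pkg.module.attr" into module path and attribute name,
    # import the module, then fetch the attribute.
    module_path, _, attr = pkgpath.rpartition('.')
    return getattr(import_module(module_path), attr)

join = load_callable('os.path.join')
print(join('tmp', 'file.txt'))  # tmp/file.txt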
def runner(self):
    """ Run the necessary methods in the correct order """
    printtime('Starting {} analysis pipeline'.format(self.analysistype), self.starttime)
    # Create the objects to be used in the analyses
    objects = Objectprep(self)
    objects.objectprep()
    self.runmetadata = objects.samples
    # Run the analyses
    sippr = Sippr(self, self.cutoff)
    sippr.clear()
    # Print the metadata
    printer = MetadataPrinter(self)
    printer.printmetadata()
def syllabify(word):
    '''Syllabify the given word, whether simplex or complex.'''
    word = split(word)  # detect any non-delimited compounds
    compound = True if re.search(r'-| |\.', word) else False
    syllabify = _syllabify_compound if compound else _syllabify
    syll, rules = syllabify(word)
    yield syll, rules
    n = 3
    if 'T4' in rules:
        yield syllabify(word, T4=False)
        n -= 1
    if 'e' in rules:
        yield syllabify(word, T1E=False)
        n -= 1
    if 'e' in rules and 'T4' in rules:
        yield syllabify(word, T4=False, T1E=False)
        n -= 1
    # pad with empty syllabifications and rules so that callers always
    # receive the same number of variants
    for _ in range(n):
        yield '', ''
def edges(self):
    """ Return the edge characters of this node. """
    edge_str = ctypes.create_string_buffer(MAX_CHARS)
    cgaddag.gdg_edges(self.gdg, self.node, edge_str)
    return [char for char in edge_str.value.decode("ascii")]
def letter_set(self):
    """ Return the letter set of this node. """
    end_str = ctypes.create_string_buffer(MAX_CHARS)
    cgaddag.gdg_letter_set(self.gdg, self.node, end_str)
    return [char for char in end_str.value.decode("ascii")]
def is_end(self, char):
    """ Return `True` if this `char` is part of this node's letter set,
    `False` otherwise. """
    char = char.lower()
    return bool(cgaddag.gdg_is_end(self.gdg, self.node, char.encode("ascii")))
def follow(self, chars):
    """ Traverse the GADDAG to the node at the end of the given characters.

    Args:
        chars: A string of characters to traverse in the GADDAG.

    Returns:
        The Node which is found by traversing the tree.
    """
    chars = chars.lower()
    node = self.node
    for char in chars:
        node = cgaddag.gdg_follow_edge(self.gdg, node, char.encode("ascii"))
        if not node:
            raise KeyError(char)
    return Node(self.gdg, node)
def _split_docker_link(alias_name):
    """ Split a docker link string into a list of 3 items
    (protocol, host, port). Assumes IPv4 Docker links.

    ex: _split_docker_link('DB') -> ['tcp', '172.17.0.82', '8080']
    """
    sanitized_name = alias_name.strip().upper()
    split_list = re.split(r':|//', core.str('{0}_PORT'.format(sanitized_name)))
    # filter out empty '' vals from the list with filter and
    # cast to list (required for python3)
    return list(filter(None, split_list))
def read(alias_name, allow_none=False):
    """Get the raw docker link value.

    Get the raw environment variable for the docker link.

    Args:
        alias_name: The environment variable name
        allow_none: If the return value can be `None` (i.e. optional)
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    return core.read('{0}_PORT'.format(alias_name), default=None, allow_none=allow_none)
def isset(alias_name):
    """Return a boolean indicating whether the docker link is set and is a
    valid-looking docker link value.

    Args:
        alias_name: The link alias name
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    raw_value = read(alias_name, allow_none=True)
    if raw_value:
        if re.compile(r'.+://.+:\d+').match(raw_value):
            return True
        else:
            warnings.warn('"{0}_PORT={1}" does not look like a docker link.'.format(alias_name, raw_value),
                          stacklevel=2)
            return False
    return False
def protocol(alias_name, default=None, allow_none=False):
    """Get the protocol from the docker link alias or return the default.

    Args:
        alias_name: The docker link alias
        default: The default value if the link isn't available
        allow_none: If the return value can be `None` (i.e. optional)

    Examples:
        Assuming a Docker link was created with ``docker --link postgres:db``
        and the resulting environment variable is
        ``DB_PORT=tcp://172.17.0.82:5432``.

        >>> envitro.docker.protocol('DB')
        tcp
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    try:
        return _split_docker_link(alias_name)[0]
    except KeyError as err:
        if default or allow_none:
            return default
        else:
            raise err
def port(alias_name, default=None, allow_none=False):
    """Get the port from the docker link alias or return the default.

    Args:
        alias_name: The docker link alias
        default: The default value if the link isn't available
        allow_none: If the return value can be `None` (i.e. optional)

    Examples:
        Assuming a Docker link was created with ``docker --link postgres:db``
        and the resulting environment variable is
        ``DB_PORT=tcp://172.17.0.82:5432``.

        >>> envitro.docker.port('DB')
        5432
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    try:
        return int(_split_docker_link(alias_name)[2])
    except KeyError as err:
        if default or allow_none:
            return default
        else:
            raise err
def runner(self):
    """ Run the necessary methods in the correct order """
    printtime('Starting {} analysis pipeline'.format(self.analysistype), self.starttime)
    if not self.pipeline:
        # If the metadata has been passed from the method script,
        # self.pipeline must still be false in order to get Sippr() to
        # function correctly, but the metadata shouldn't be recreated
        try:
            _ = vars(self.runmetadata)['samples']
        except KeyError:
            # Create the objects to be used in the analyses
            objects = Objectprep(self)
            objects.objectprep()
            self.runmetadata = objects.samples
    # Run the analyses
    Sippr(self, self.cutoff)
    # self.attributer()
    # Create the reports
    self.reporter()
    # Print the metadata
    printer = MetadataPrinter(self)
    printer.printmetadata()
def attributer(self):
    """ Parses the 16S target files to link accession numbers stored in the
    .fai and metadata files to the genera stored in the target file """
    from Bio import SeqIO
    import operator
    for sample in self.runmetadata.samples:
        # Load the records from the target file into a dictionary
        record_dict = SeqIO.to_dict(SeqIO.parse(sample[self.analysistype].baitfile, "fasta"))
        sample[self.analysistype].classification = set()
        sample[self.analysistype].genera = dict()
        # Add all the genera with hits into the set of genera
        for result in sample[self.analysistype].results:
            genus, species = record_dict[result].description.split('|')[-1].split()[:2]
            sample[self.analysistype].classification.add(genus)
            sample[self.analysistype].genera[result] = genus
        # Convert the set to a list for easier JSON serialisation
        sample[self.analysistype].classification = list(sample[self.analysistype].classification)
        # If there is a mixed sample, then further analyses will be complicated
        if len(sample[self.analysistype].classification) > 1:
            # print('multiple: ', sample.name, sample[self.analysistype].classification)
            sample.general.closestrefseqgenus = sample[self.analysistype].classification
            # sample.general.bestassemblyfile = 'NA'
            sample[self.analysistype].multiple = True
        else:
            sample[self.analysistype].multiple = False
            try:
                # Recreate the results dictionary with the percent identity
                # as a float rather than a string
                sample[self.analysistype].intresults = \
                    {key: float(value) for key, value in sample[self.analysistype].results.items()}
                # Set the best hit to be the top entry from the sorted results
                sample[self.analysistype].besthit = sorted(sample[self.analysistype].intresults.items(),
                                                           key=operator.itemgetter(1),
                                                           reverse=True)[0]
                sample.general.closestrefseqgenus = sample[self.analysistype].classification[0]
            except IndexError:
                sample.general.bestassemblyfile = 'NA'
def reporter(self):
    """ Creates a report of the results """
    # Create the path in which the reports are stored
    make_path(self.reportpath)
    header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
    data = ''
    with open(os.path.join(self.reportpath, self.analysistype + '.csv'), 'w') as report:
        for sample in self.runmetadata.samples:
            data += sample.name + ','
            if sample[self.analysistype].results:
                if not sample[self.analysistype].multiple:
                    for name, identity in sample[self.analysistype].results.items():
                        if name == sample[self.analysistype].besthit[0]:
                            data += '{},{},{},{}\n'.format(name, identity,
                                                           sample[self.analysistype].genera[name],
                                                           sample[self.analysistype].avgdepth[name])
                else:
                    data += '{},{},{},{}\n'.format('multiple', 'NA',
                                                   ';'.join(sample[self.analysistype].classification),
                                                   'NA')
            else:
                data += '\n'
        report.write(header)
        report.write(data)
def spawn_server_api(api_name, app, api_spec, error_callback, decorator):
    """Take a Flask app and a swagger file in YAML format describing a REST
    API, and populate the app with routes handling all the paths and methods
    declared in the swagger file.

    Also handle marshaling and unmarshaling between json and object instances
    representing the definitions from the swagger file.
    """
    def mycallback(endpoint):
        handler_func = get_function(endpoint.handler_server)
        # Generate api endpoint around that handler
        handler_wrapper = _generate_handler_wrapper(api_name, api_spec, endpoint,
                                                    handler_func, error_callback, decorator)
        # Bind handler to the API path
        log.info("Binding %s %s ==> %s" % (endpoint.method, endpoint.path, endpoint.handler_server))
        endpoint_name = '_'.join([endpoint.method, endpoint.path]).replace('/', '_')
        app.add_url_rule(endpoint.path, endpoint_name, handler_wrapper, methods=[endpoint.method])

    api_spec.call_on_each_endpoint(mycallback)
    # Add custom error handlers to the app
    add_error_handlers(app)
def _responsify(api_spec, error, status):
    """Take a bravado-core model representing an error, and return a Flask
    Response with the given error code and error instance as body"""
    result_json = api_spec.model_to_json(error)
    r = jsonify(result_json)
    r.status_code = status
    return r
def _generate_handler_wrapper(api_name, api_spec, endpoint, handler_func, error_callback, global_decorator):
    """Generate a handler method for the given url method+path and operation"""

    # Decorate the handler function, if Swagger spec tells us to
    if endpoint.decorate_server:
        endpoint_decorator = get_function(endpoint.decorate_server)
        handler_func = endpoint_decorator(handler_func)

    @wraps(handler_func)
    def handler_wrapper(**path_params):
        log.info(" ")
        log.info(" ")
        log.info("=> INCOMING REQUEST %s %s -> %s" % (endpoint.method, endpoint.path, handler_func.__name__))
        log.info(" ")
        log.info(" ")

        # Get caller's pym-call-id or generate one
        call_id = request.headers.get('PymCallID', None)
        if not call_id:
            call_id = str(uuid.uuid4())
        stack.top.call_id = call_id

        # Append current server to call path, or start one
        call_path = request.headers.get('PymCallPath', None)
        if call_path:
            call_path = "%s.%s" % (call_path, api_name)
        else:
            call_path = api_name
        stack.top.call_path = call_path

        if endpoint.param_in_body or endpoint.param_in_query:
            # Turn the flask request into something bravado-core can process...
            try:
                req = FlaskRequestProxy(request, endpoint.param_in_body)
            except BadRequest:
                ee = error_callback(ValidationError("Cannot parse json data: have you set 'Content-Type' to 'application/json'?"))
                return _responsify(api_spec, ee, 400)

            try:
                # Note: unmarshall validates parameters but does not fail
                # if extra unknown parameters are submitted
                parameters = unmarshal_request(req, endpoint.operation)
                # Example of parameters: {'body': RegisterCredentials()}
            except jsonschema.exceptions.ValidationError as e:
                ee = error_callback(ValidationError(str(e)))
                return _responsify(api_spec, ee, 400)

        # Call the endpoint, with proper parameters depending on whether
        # parameters are in body, query or url
        args = []
        kwargs = {}

        if endpoint.param_in_path:
            kwargs = path_params

        if endpoint.param_in_body:
            # Remove the parameters already defined in path_params
            for k in list(path_params.keys()):
                del parameters[k]
            lst = list(parameters.values())
            assert len(lst) == 1
            args.append(lst[0])

        if endpoint.param_in_query:
            kwargs.update(parameters)

        result = handler_func(*args, **kwargs)

        if not result:
            e = error_callback(PyMacaronCoreException("Have nothing to send in response"))
            return _responsify(api_spec, e, 500)

        # Did we get the expected response?
        if endpoint.produces_html:
            if type(result) is not tuple:
                e = error_callback(PyMacaronCoreException("Method %s should return %s but returned %s" %
                                                          (endpoint.handler_server, endpoint.produces, type(result))))
                return _responsify(api_spec, e, 500)
            # Return an html page
            return result

        elif endpoint.produces_json:
            if not hasattr(result, '__module__') or not hasattr(result, '__class__'):
                e = error_callback(PyMacaronCoreException("Method %s did not return a class instance but a %s" %
                                                          (endpoint.handler_server, type(result))))
                return _responsify(api_spec, e, 500)

            # If it's already a flask Response, just pass it through.
            # Errors in particular may be either passed back as flask
            # Responses, or raised as exceptions to be caught and formatted
            # by the error_callback
            result_type = result.__module__ + "." + result.__class__.__name__
            if result_type == 'flask.wrappers.Response':
                return result

            # We may have got a pymacaron Error instance, in which case
            # it has a http_reply() method...
            if hasattr(result, 'http_reply'):
                # Let's transform this Error into a flask Response
                log.info("Looks like a pymacaron error instance - calling .http_reply()")
                return result.http_reply()

            # Otherwise, assume no error occurred and make a flask Response
            # out of the result.

            # TODO: check that result is an instance of a model expected as
            # response from this endpoint
            result_json = api_spec.model_to_json(result)

            # Send a Flask Response with code 200 and result_json
            r = jsonify(result_json)
            r.status_code = 200
            return r

    handler_wrapper = cross_origin(headers=['Content-Type', 'Authorization'])(handler_wrapper)

    # And encapsulate all in a global decorator, if given one
    if global_decorator:
        handler_wrapper = global_decorator(handler_wrapper)

    return handler_wrapper
def format_escape( foreground=None, background=None, bold=False, faint=False,
                   italic=False, underline=False, blink=False, inverted=False ):
    """Returns the ANSI escape sequence to set character formatting.

    foreground
        Foreground colour to use. Accepted types: None, int (xterm
        palette ID), tuple (RGB, RGBA), Colour
    background
        Background colour to use. Accepted types: None, int (xterm
        palette ID), tuple (RGB, RGBA), Colour
    bold
        Enable bold text (default: False)
    faint
        Enable faint text (default: False)
    italic
        Enable italic text (default: False)
    underline
        Enable underlined text (default: False)
    blink
        Enable blinky text (default: False)
    inverted
        Enable inverted text (default: False)
    """
    fg_format = None
    if isinstance( foreground, int ):
        fg_format = ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( foreground )
    else:
        fg_rgba = colour.normalise_rgba( foreground )
        if fg_rgba[3] != 0:
            fg_format = ANSI_FORMAT_FOREGROUND_CMD.format( *fg_rgba[:3] )
    bg_format = None
    if isinstance( background, int ):
        bg_format = ANSI_FORMAT_BACKGROUND_XTERM_CMD.format( background )
    else:
        bg_rgba = colour.normalise_rgba( background )
        if bg_rgba[3] != 0:
            bg_format = ANSI_FORMAT_BACKGROUND_CMD.format( *bg_rgba[:3] )
    colour_format = []
    if fg_format is not None:
        colour_format.append( fg_format )
    if bg_format is not None:
        colour_format.append( bg_format )
    if bold:
        colour_format.append( ANSI_FORMAT_BOLD_CMD )
    if faint:
        colour_format.append( ANSI_FORMAT_FAINT_CMD )
    if italic:
        colour_format.append( ANSI_FORMAT_ITALIC_CMD )
    if underline:
        colour_format.append( ANSI_FORMAT_UNDERLINE_CMD )
    if blink:
        colour_format.append( ANSI_FORMAT_BLINK_CMD )
    if inverted:
        colour_format.append( ANSI_FORMAT_INVERTED_CMD )
    colour_format = ANSI_FORMAT_BASE.format( ';'.join( colour_format ) )
    return colour_format
def format_string( string, foreground=None, background=None, reset=True,
                   bold=False, faint=False, italic=False, underline=False,
                   blink=False, inverted=False ):
    """Returns a Unicode string formatted with an ANSI escape sequence.

    string
        String to format
    foreground
        Foreground colour to use. Accepted types: None, int (xterm
        palette ID), tuple (RGB, RGBA), Colour
    background
        Background colour to use. Accepted types: None, int (xterm
        palette ID), tuple (RGB, RGBA), Colour
    reset
        Reset the formatting at the end (default: True)
    bold
        Enable bold text (default: False)
    faint
        Enable faint text (default: False)
    italic
        Enable italic text (default: False)
    underline
        Enable underlined text (default: False)
    blink
        Enable blinky text (default: False)
    inverted
        Enable inverted text (default: False)
    """
    colour_format = format_escape( foreground, background, bold, faint,
                                   italic, underline, blink, inverted )
    reset_format = '' if not reset else ANSI_FORMAT_RESET
    return '{}{}{}'.format( colour_format, string, reset_format )
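The ANSI_FORMAT_* constants used above are module-level templates not shown in these rows. A minimal standalone sketch of the same SGR escape-sequence idea with hard-coded codes (CSI, RESET and sgr are hypothetical names):

CSI = '\x1b['          # Control Sequence Introducer
RESET = CSI + '0m'     # reset all attributes

def sgr(text, *codes):
    # Join SGR parameter codes with ';' and wrap the text, the same way
    # format_string wraps its string between escape and reset sequences.
    return '{}{}m{}{}'.format(CSI, ';'.join(str(c) for c in codes), text, RESET)

print(sgr('warning', 1, 31))  # bold red on a VT100-compatible terminal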
def format_pixels( top, bottom, reset=True, repeat=1 ):
    """Return the ANSI escape sequence to render two vertically-stacked
    pixels as a single monospace character.

    top
        Top colour to use. Accepted types: None, int (xterm palette ID),
        tuple (RGB, RGBA), Colour
    bottom
        Bottom colour to use. Accepted types: None, int (xterm palette ID),
        tuple (RGB, RGBA), Colour
    reset
        Reset the formatting at the end (default: True)
    repeat
        Number of horizontal pixels to render (default: 1)
    """
    top_src = None
    if isinstance( top, int ):
        top_src = top
    else:
        top_rgba = colour.normalise_rgba( top )
        if top_rgba[3] != 0:
            top_src = top_rgba
    bottom_src = None
    if isinstance( bottom, int ):
        bottom_src = bottom
    else:
        bottom_rgba = colour.normalise_rgba( bottom )
        if bottom_rgba[3] != 0:
            bottom_src = bottom_rgba
    # short circuit for empty pixel
    if (top_src is None) and (bottom_src is None):
        return ' '*repeat
    string = '▀'*repeat
    colour_format = []
    if top_src == bottom_src:
        string = '█'*repeat
    elif (top_src is None) and (bottom_src is not None):
        string = '▄'*repeat
    if (top_src is None) and (bottom_src is not None):
        if isinstance( bottom_src, int ):
            colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( bottom_src ) )
        else:
            colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *bottom_src[:3] ) )
    else:
        if isinstance( top_src, int ):
            colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( top_src ) )
        else:
            colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *top_src[:3] ) )
    if top_src is not None and bottom_src is not None and top_src != bottom_src:
        if isinstance( top_src, int ):
            colour_format.append( ANSI_FORMAT_BACKGROUND_XTERM_CMD.format( bottom_src ) )
        else:
            colour_format.append( ANSI_FORMAT_BACKGROUND_CMD.format( *bottom_src[:3] ) )
    colour_format = ANSI_FORMAT_BASE.format( ';'.join( colour_format ) )
    reset_format = '' if not reset else ANSI_FORMAT_RESET
    return '{}{}{}'.format( colour_format, string, reset_format )
def format_image_iter( data_fetch, x_start=0, y_start=0, width=32, height=32,
                       frame=0, columns=1, downsample=1 ):
    """Yield ANSI escape sequences that render a bitmap image, one output
    line at a time.

    data_fetch
        Function that takes three arguments (x position, y position, and
        frame) and returns a Colour corresponding to the pixel stored
        there, or Transparent if the requested pixel is out of bounds.
    x_start
        Offset from the left of the image data to render from. Defaults to 0.
    y_start
        Offset from the top of the image data to render from. Defaults to 0.
    width
        Width of the image data to render. Defaults to 32.
    height
        Height of the image data to render. Defaults to 32.
    frame
        Single frame number/object, or a list to render in sequence.
        Defaults to frame 0.
    columns
        Number of frames to render per line (useful for printing
        tilemaps!). Defaults to 1.
    downsample
        Shrink larger images by printing every nth pixel only.
        Defaults to 1.
    """
    frames = []
    try:
        frame_iter = iter( frame )
        frames = [f for f in frame_iter]
    except TypeError:
        frames = [frame]
    rows = math.ceil( len( frames )/columns )
    for r in range( rows ):
        for y in range( 0, height, 2*downsample ):
            result = []
            for c in range( min( (len( frames )-r*columns), columns ) ):
                row = []
                for x in range( 0, width, downsample ):
                    fr = frames[r*columns + c]
                    c1 = data_fetch( x_start+x, y_start+y, fr )
                    c2 = data_fetch( x_start+x, y_start+y+downsample, fr )
                    row.append( (c1, c2) )
                prev_pixel = None
                pointer = 0
                while pointer < len( row ):
                    start = pointer
                    pixel = row[pointer]
                    while pointer < len( row ) and (row[pointer] == pixel):
                        pointer += 1
                    result.append( format_pixels( pixel[0], pixel[1], repeat=pointer-start ) )
            yield ''.join( result )
    return
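A hedged usage sketch for format_image_iter, assuming colour.normalise_rgba accepts plain RGBA tuples as the docstrings suggest; the checkerboard fetch function is hypothetical:

def fetch(x, y, frame):
    # 2x2 checkerboard: opaque white / opaque black, frame ignored
    return (255, 255, 255, 255) if (x + y) % 2 == 0 else (0, 0, 0, 255)

for line in format_image_iter(fetch, width=2, height=2):
    print(line)  # one half-block-rendered row per pair of pixel rows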
def update_buffer_with_value( self, value, buffer, parent=None ):
    """Write a Python object into a byte array, using the field definition.

    value
        Input Python object to process.
    buffer
        Output byte array to encode value into.
    parent
        Parent block object where this Field is defined. Used for e.g.
        evaluating Refs.
    """
    assert common.is_bytes( buffer )
    self.validate( value, parent )
    return
def get_end_offset( self, value, parent=None, index=None ):
    """Return the end offset of the Field's data. Useful for chainloading.

    value
        Input Python object to process.
    parent
        Parent block object where this Field is defined. Used for e.g.
        evaluating Refs.
    index
        Index of the Python object to measure from. Used if the Field
        takes a list of objects.
    """
    return self.get_start_offset( value, parent, index ) + self.get_size( value, parent, index )
def nonalpha_split(string):
    '''Split 'string' along any punctuation or whitespace.'''
    return re.findall(r'[%s]+|[^%s]+' % (A, A), string, flags=FLAGS)
def syllable_split(string):
    '''Split 'string' into (stressed) syllables and punctuation/whitespace.'''
    p = r'\'[%s]+|`[%s]+|[%s]+|[^%s\'`\.]+|[^\.]{1}' % (A, A, A, A)
    return re.findall(p, string, flags=FLAGS)
def extract_words(string):
    '''Extract all alphabetic syllabified forms from 'string'.'''
    return re.findall(r'[%s]+[%s\.]*[%s]+' % (A, A, A), string, flags=FLAGS)
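The three helpers above rely on module-level A (a character class of word letters) and FLAGS that are not shown in these rows; a standalone sketch of the splitting pattern with assumed values:

import re

A = 'a-zA-Z'       # assumed alphabetic character class
FLAGS = re.UNICODE  # assumed flags

print(re.findall(r'[%s]+|[^%s]+' % (A, A), 'hello, world!'))
# ['hello', ', ', 'world', '!']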
def init_threads(t=None, s=None):
    """Install the thread and signal classes to use globally, defaulting to
    the dummyThread and dummySignal classes."""
    global THREAD, SIGNAL
    THREAD = t or dummyThread
    SIGNAL = s or dummySignal
def thread_with_callback(on_error, on_done, requete_with_callback):
    """ Return a thread emitting `state_changed` between each sub-request.

    :param on_error: callback str -> None
    :param on_done: callback object -> None
    :param requete_with_callback: Job to execute. monitor_callable -> None
    :return: Non-started thread
    """
    class C(THREAD):
        error = SIGNAL(str)
        done = SIGNAL(object)
        state_changed = SIGNAL(int, int)

        def __del__(self):
            self.wait()

        def run(self):
            try:
                r = requete_with_callback(self.state_changed.emit)
            except (ConnexionError, StructureError) as e:
                self.error.emit(str(e))
            else:
                self.done.emit(r)

    th = C()
    th.error.connect(on_error)
    th.done.connect(on_done)
    return th
def protege_data(datas_str, sens):
    """ Used to encrypt/decrypt data before saving locally.
    Override if security is needed.

    bytes -> str when decrypting
    str -> bytes when encrypting

    :param datas_str: str when encrypting, bytes when decrypting
    :param sens: True to encrypt, False to decrypt
    """
    return bytes(datas_str, encoding="utf8") if sens else str(datas_str, encoding="utf8")
def build_parser(parser: argparse.ArgumentParser) -> None:
    """Build a parser for CLI arguments and options."""
    parser.add_argument(
        '--delimiter',
        help='a delimiter for the samples (teeth) in the key',
        default=' ',
    )
    parser.add_argument(
        '--encoding',
        help='the encoding of the population file',
        default='utf-8',
    )
    parser.add_argument(
        '--nsamples', '-n',
        help='the number of random samples to take',
        type=int,
        default=6,
        dest='nteeth',
    )
    parser.add_argument(
        '--population', '-p',
        help='{0}, or a path to a file of line-delimited items'.format(
            ', '.join(POPULATIONS.keys()),
        ),
        default='/usr/share/dict/words',
    )
    parser.add_argument(
        '--stats',
        help='show statistics for the key',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {0}'.format(__version__),
    )
def default_parser() -> argparse.ArgumentParser:
    """Create a parser for CLI arguments and options."""
    parser = argparse.ArgumentParser(
        prog=CONSOLE_SCRIPT,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    build_parser(parser)
    return parser
def key(
    seq: Sequence,
    tooth: Callable[[Sequence], str] = (
        lambda seq: str(random.SystemRandom().choice(seq)).strip()
    ),
    nteeth: int = 6,
    delimiter: str = ' ',
) -> str:
    """Concatenate strings generated by the tooth function."""
    return delimiter.join(tooth(seq) for _ in range(nteeth))
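A usage sketch for key: with the default tooth, each sample is drawn with random.SystemRandom from the given population. The word list below is hypothetical; key itself needs its module's imports (random, and Sequence/Callable from typing).

words = ['alpha', 'bravo', 'charlie', 'delta', 'echo']
print(key(words, nteeth=4))                 # e.g. "echo alpha delta bravo"
print(key(words, nteeth=4, delimiter='-'))  # e.g. "bravo-echo-alpha-delta"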
def main(argv: Sequence[str] = SYS_ARGV) -> int:
    """Execute CLI commands."""
    args = default_parser().parse_args(argv)
    try:
        seq = POPULATIONS[args.population]  # type: Sequence
    except KeyError:
        try:
            with open(args.population, 'r', encoding=args.encoding) as file_:
                seq = list(file_)
        except (OSError, UnicodeError) as ex:
            print(ex, file=sys.stderr)
            return 1
    main_key = key(seq=seq, nteeth=args.nteeth, delimiter=args.delimiter)
    print(main_key)
    if args.stats:
        print('*', len(main_key), 'characters')
        print('*', args.nteeth, 'samples from a population of', len(seq))
        print(
            '* entropy {sign} {nbits} bits'.format(
                sign='~' if args.delimiter else '<',
                nbits=round(math.log(len(seq), 2) * args.nteeth, 2),
            ),
        )
    return 0
def add_firewalld_service(service, permanent=True):
    """ adds a firewall rule """
    yum_install(packages=['firewalld'])
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        p = ''
        if permanent:
            p = '--permanent'
        sudo('firewall-cmd --add-service %s %s' % (service, p))
        sudo('systemctl reload firewalld')
def add_firewalld_port(port, permanent=True):
    """ adds a firewall rule """
    yum_install(packages=['firewalld'])
    log_green('adding a new fw rule: %s' % port)
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        p = ''
        if permanent:
            p = '--permanent'
        sudo('firewall-cmd --add-port %s %s' % (port, p))
        sudo('systemctl restart firewalld')
def apt_add_repository_from_apt_string(apt_string, apt_file):
    """ adds a new repository file for apt """
    apt_file_path = '/etc/apt/sources.list.d/%s' % apt_file
    if not file_contains(apt_file_path, apt_string.lower(), use_sudo=True):
        file_append(apt_file_path, apt_string.lower(), use_sudo=True)
        with hide('running', 'stdout'):
            sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
def arch():
    """ returns the current cpu architecture """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        result = sudo('rpm -E %dist').strip()
    return result
def disable_openssh_rdns(distribution):
    """ Set 'UseDNS no' in openssh config to disable rDNS lookups

    On each request for a new channel openssh defaults to an rDNS lookup on
    the client IP. This can be slow, if it fails for instance, adding 10s of
    overhead to every request for a new channel (not connection). This can
    add a lot of time to a process that opens lots of channels (e.g. running
    several commands via fabric.)

    This function will disable rDNS lookups in the openssh config and reload
    ssh to adjust the running instance.

    :param bytes distribution: the name of the distribution running on the
        node.
    """
    log_green('Disabling openssh reverse dns lookups')
    openssh_config_file = '/etc/ssh/sshd_config'
    dns_config = 'UseDNS no'
    if not file_contains(openssh_config_file, dns_config, use_sudo=True):
        file_append(openssh_config_file, dns_config, use_sudo=True)
        service_name = 'sshd'
        if 'ubuntu' in distribution:
            service_name = 'ssh'
        sudo('service {} reload'.format(service_name))
def connect_to_ec2(region, access_key_id, secret_access_key):
    """ returns a connection object to AWS EC2 """
    conn = boto.ec2.connect_to_region(region,
                                      aws_access_key_id=access_key_id,
                                      aws_secret_access_key=secret_access_key)
    return conn
def connect_to_rackspace(region, access_key_id, secret_access_key):
    """ returns a connection object to Rackspace """
    pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_default_region(region)
    pyrax.set_credentials(access_key_id, secret_access_key)
    nova = pyrax.connect_to_cloudservers(region=region)
    return nova
def create_gce_image(zone, project, instance_name, name, description):
    """ Shuts down the instance and creates an image from the disk.

    Assumes that the disk name is the same as the instance_name (this is the
    default behavior for boot disks on GCE).
    """
    disk_name = instance_name
    try:
        down_gce(instance_name=instance_name, project=project, zone=zone)
    except HttpError as e:
        if e.resp.status == 404:
            log_yellow("the instance {} is already down".format(instance_name))
        else:
            raise e
    body = {
        "rawDisk": {},
        "name": name,
        "sourceDisk": "projects/{}/zones/{}/disks/{}".format(
            project, zone, disk_name
        ),
        "description": description
    }
    compute = _get_gce_compute()
    gce_wait_until_done(
        compute.images().insert(project=project, body=body).execute()
    )
    return name
def create_image(cloud, **kwargs):
    """ proxy call for ec2, rackspace create ami backend functions """
    if cloud == 'ec2':
        return create_ami(**kwargs)
    if cloud == 'rackspace':
        return create_rackspace_image(**kwargs)
    if cloud == 'gce':
        return create_gce_image(**kwargs)
def create_server(cloud, **kwargs):
    """ Create a new instance """
    if cloud == 'ec2':
        _create_server_ec2(**kwargs)
    elif cloud == 'rackspace':
        _create_server_rackspace(**kwargs)
    elif cloud == 'gce':
        _create_server_gce(**kwargs)
    else:
        raise ValueError("Unknown cloud type: {}".format(cloud))
def gce_wait_until_done(operation):
    """ Perform a GCE operation, blocking until the operation completes.

    This function will then poll the operation until it reaches state
    'DONE' or times out, and then returns the final operation resource dict.

    :param operation: A dict representing a pending GCE operation resource.
    :returns dict: A dict representing the concluded GCE operation resource.
    """
    operation_name = operation['name']
    if 'zone' in operation:
        zone_url_parts = operation['zone'].split('/')
        project = zone_url_parts[-3]
        zone = zone_url_parts[-1]

        def get_zone_operation():
            return _get_gce_compute().zoneOperations().get(
                project=project,
                zone=zone,
                operation=operation_name
            )
        update = get_zone_operation
    else:
        project = operation['selfLink'].split('/')[-4]

        def get_global_operation():
            return _get_gce_compute().globalOperations().get(
                project=project,
                operation=operation_name
            )
        update = get_global_operation
    done = False
    latest_operation = None
    start = time()
    timeout = 5*60  # seconds
    while not done:
        latest_operation = update().execute()
        log_yellow("waiting for operation")
        if (latest_operation['status'] == 'DONE' or
                time() - start > timeout):
            done = True
        else:
            sleep(10)
            print("waiting for operation")
    return latest_operation
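gce_wait_until_done is an instance of the generic poll-until-done pattern; a minimal standalone sketch of that shape (wait_until is a hypothetical helper, not part of the original code):

import time

def wait_until(predicate, timeout=300, interval=10):
    # Poll predicate() until it is truthy or the timeout expires,
    # mirroring the status/timeout loop above.
    start = time.time()
    while True:
        if predicate():
            return True
        if time.time() - start > timeout:
            return False
        time.sleep(interval)

print(wait_until(lambda: True))  # trivially True on the first poll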
def startup_gce_instance(instance_name, project, zone, username, machine_type,
                         image, public_key, disk_name=None):
    """ For now, jclouds is broken for GCE and we will have static slaves in
    Jenkins. Use this to boot them. """
    log_green("Started...")
    log_yellow("...Creating GCE Jenkins Slave Instance...")
    instance_config = get_gce_instance_config(
        instance_name, project, zone, machine_type, image,
        username, public_key, disk_name
    )
    operation = _get_gce_compute().instances().insert(
        project=project,
        zone=zone,
        body=instance_config
    ).execute()
    result = gce_wait_until_done(operation)
    if not result:
        raise RuntimeError("Creation of VM timed out or returned no result")
    log_green("Instance has booted")
def _create_server_ec2(region, access_key_id, secret_access_key, disk_name,
                       disk_size, ami, key_pair, instance_type, username,
                       tags={}, security_groups=None):
    """ Creates an EC2 Instance and saves its state in a local json file """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    log_green("Started...")
    log_yellow("...Creating EC2 instance...")
    # we need a larger boot device to store our cached images
    ebs_volume = EBSBlockDeviceType()
    ebs_volume.size = disk_size
    bdm = BlockDeviceMapping()
    bdm[disk_name] = ebs_volume
    # get an ec2 ami image object with our chosen ami
    image = conn.get_all_images(ami)[0]
    # start a new instance
    reservation = image.run(1, 1,
                            key_name=key_pair,
                            security_groups=security_groups,
                            block_device_map=bdm,
                            instance_type=instance_type)
    # and get our instance_id
    instance = reservation.instances[0]
    # and loop and wait until ssh is available
    while instance.state == u'pending':
        log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    log_green("Instance state: %s" % instance.state)
    wait_for_ssh(instance.public_dns_name)
    # update the EBS volumes to be deleted on instance termination
    for dev, bd in instance.block_device_mapping.items():
        instance.modify_attribute('BlockDeviceMapping', ["%s=%d" % (dev, 1)])
    # add a tag to our instance
    conn.create_tags([instance.id], tags)
    log_green("Public dns: %s" % instance.public_dns_name)
    # finally save the details of our new instance into the local state file
    save_ec2_state_locally(instance_id=instance.id,
                           region=region,
                           username=username,
                           access_key_id=access_key_id,
                           secret_access_key=secret_access_key)
def _create_server_rackspace(region, access_key_id, secret_access_key,
                             disk_name, disk_size, ami, key_pair,
                             instance_type, username, instance_name,
                             tags={}, security_groups=None):
    """ Creates a Rackspace Instance and saves its state in a local json file """
    nova = connect_to_rackspace(region, access_key_id, secret_access_key)
    log_yellow("Creating Rackspace instance...")
    flavor = nova.flavors.find(name=instance_type)
    image = nova.images.find(name=ami)
    server = nova.servers.create(name=instance_name,
                                 flavor=flavor.id,
                                 image=image.id,
                                 region=region,
                                 availability_zone=region,
                                 key_name=key_pair)
    while server.status == 'BUILD':
        log_yellow("Waiting for build to finish...")
        sleep(5)
        server = nova.servers.get(server.id)
    # check for errors
    if server.status != 'ACTIVE':
        log_red("Error creating rackspace instance")
        exit(1)
    # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address
    ip_address = server.accessIPv4
    if ip_address is None:
        log_red('No IP address assigned')
        exit(1)
    wait_for_ssh(ip_address)
    log_green('New server with IP address {0}.'.format(ip_address))
    # finally save the details of our new instance into the local state file
    save_rackspace_state_locally(instance_id=server.id,
                                 region=region,
                                 username=username,
                                 access_key_id=access_key_id,
                                 secret_access_key=secret_access_key)
def dir_attribs(location, mode=None, owner=None, group=None, recursive=False,
                use_sudo=False):
    """ cuisine's dir_attribs doesn't do sudo, so we implement our own.
    Updates the mode/owner/group for the given remote directory."""
    recursive = recursive and "-R " or ""
    if mode:
        if use_sudo:
            sudo('chmod %s %s %s' % (recursive, mode, location))
        else:
            run('chmod %s %s %s' % (recursive, mode, location))
    if owner:
        if use_sudo:
            sudo('chown %s %s %s' % (recursive, owner, location))
        else:
            run('chown %s %s %s' % (recursive, owner, location))
    if group:
        if use_sudo:
            sudo('chgrp %s %s %s' % (recursive, group, location))
        else:
            run('chgrp %s %s %s' % (recursive, group, location))
def disable_selinux():
    """ disables selinux """
    if contains(filename='/etc/selinux/config', text='SELINUX=enforcing'):
        sed('/etc/selinux/config', 'SELINUX=enforcing', 'SELINUX=disabled',
            use_sudo=True)
    if contains(filename='/etc/selinux/config', text='SELINUX=permissive'):
        sed('/etc/selinux/config', 'SELINUX=permissive', 'SELINUX=disabled',
            use_sudo=True)
    if sudo('getenforce').lower() != 'disabled':
        with settings(warn_only=True, capture=True):
            sudo('/sbin/reboot')
        sleep_for_one_minute()
def destroy_ebs_volume(region, volume_id, access_key_id, secret_access_key):
    """ destroys an ebs volume """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    if ebs_volume_exists(region, volume_id, access_key_id, secret_access_key):
        log_yellow('destroying EBS volume ...')
        conn.delete_volume(volume_id)
def destroy_ec2(region, instance_id, access_key_id, secret_access_key):
    """ terminates the instance """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    data = get_ec2_info(instance_id=instance_id,
                        region=region,
                        access_key_id=access_key_id,
                        secret_access_key=secret_access_key,
                        username=None)
    instance = conn.terminate_instances(instance_ids=[data['id']])[0]
    log_yellow('destroying instance ...')
    while instance.state != "terminated":
        log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    volume_id = data['volume']
    if volume_id:
        destroy_ebs_volume(region, volume_id, access_key_id, secret_access_key)
    os.unlink('data.json')
def destroy_rackspace(region, instance_id, access_key_id, secret_access_key):
    """ terminates the instance """
    nova = connect_to_rackspace(region, access_key_id, secret_access_key)
    server = nova.servers.get(instance_id)
    log_yellow('deleting rackspace instance ...')
    server.delete()
    # wait for server to be deleted
    try:
        while True:
            server = nova.servers.get(server.id)
            log_yellow('waiting for deletion ...')
            sleep(5)
    except:
        pass
    log_green('The server has been deleted')
    os.unlink('data.json')
def down_ec2(instance_id, region, access_key_id, secret_access_key):
    """ shuts down an existing EC2 instance """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    # get the instance_id from the state file, and stop the instance
    instance = conn.stop_instances(instance_ids=instance_id)[0]
    while instance.state != "stopped":
        log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    log_green('Instance state: %s' % instance.state)
def ebs_volume_exists(region, volume_id, access_key_id, secret_access_key):
    """ finds out if an ebs volume exists """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    for vol in conn.get_all_volumes():
        if vol.id == volume_id:
            return True
def enable_marathon_basic_authentication(principal, password):
    """ configures marathon to start with authentication """
    upstart_file = '/etc/init/marathon.conf'
    with hide('running', 'stdout'):
        sudo('echo -n "{}" > /etc/marathon-mesos.credentials'.format(password))
    boot_args = ' '.join(['exec', '/usr/bin/marathon',
                          '--http_credentials',
                          '"{}:{}"'.format(principal, password),
                          '--mesos_authentication_principal', principal,
                          '--mesos_authentication_secret_file',
                          '/etc/marathon-mesos.credentials'])
    # check if the init conf file contains the exact user and password
    if not file_contains(upstart_file, boot_args, use_sudo=True):
        sed(upstart_file, 'exec /usr/bin/marathon.*', boot_args, use_sudo=True)
        file_attribs(upstart_file, mode=700, sudo=True)
        restart_service('marathon')
def enable_mesos_basic_authentication(principal, password):
    """ enables and adds a new authorized principal """
    restart = False
    secrets_file = '/etc/mesos/secrets'
    secrets_entry = '%s %s' % (principal, password)
    if not file_contains(filename=secrets_file, text=secrets_entry, use_sudo=True):
        file_append(filename=secrets_file, text=secrets_entry, use_sudo=True)
        file_attribs(secrets_file, mode=700, sudo=True)
        restart = True
    # set new startup parameters for mesos-master
    with quiet():
        if secrets_file not in sudo('cat /etc/mesos-master/credentials'):
            sudo('echo %s > /etc/mesos-master/credentials' % secrets_file)
            restart = True
        if not exists('/etc/mesos-master/\?authenticate', use_sudo=True):
            sudo('touch /etc/mesos-master/\?authenticate')
            file_attribs('/etc/mesos-master/\?authenticate', mode=700, sudo=True)
            restart = True
    if restart:
        restart_service('mesos-master')
def file_attribs(location, mode=None, owner=None, group=None, sudo=False):
    """Updates the mode/owner/group for the remote file at the given
    location."""
    return dir_attribs(location, mode, owner, group, False, sudo)
def get_ec2_info(instance_id, region, access_key_id, secret_access_key, username):
    """ queries EC2 for details about a particular instance_id """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    instance = conn.get_only_instances(
        filters={'instance_id': instance_id}
    )[0]
    data = {}
    data['public_dns_name'] = instance.public_dns_name
    data['id'] = instance.id
    data['instance_type'] = instance.instance_type
    data['ip_address'] = instance.ip_address
    data['architecture'] = instance.architecture
    data['state'] = instance.state
    data['region'] = region
    data['cloud_type'] = 'ec2'
    data['username'] = username
    # find out the distribution running on the instance
    if username is not None:
        wait_for_ssh(data['ip_address'])
        with settings(host_string=username + '@' + data['ip_address']):
            data['distribution'] = linux_distribution(username, data['ip_address'])
            data['os_release'] = os_release(username, data['ip_address'])
    try:
        volume = conn.get_all_volumes(
            filters={'attachment.instance-id': instance.id})[0].id
        data['volume'] = volume
    except:
        data['volume'] = ''
    return data
def get_ip_address_from_rackspace_server(server_id):
    """ returns an ip address for a rackspace instance """
    nova = connect_to_rackspace()
    server = nova.servers.get(server_id)
    # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address
    ip_address = None
    for network in server.networks['public']:
        if re.match('\d+\.\d+\.\d+\.\d+', network):
            ip_address = network
            break
    # find out if we have an ip address
    if ip_address is None:
        log_red('No IP address assigned')
        return False
    else:
        return ip_address
def get_rackspace_info(server_id, region, access_key_id, secret_access_key, username):
    """ queries Rackspace for details about a particular server id """
    nova = connect_to_rackspace(region, access_key_id, secret_access_key)
    server = nova.servers.get(server_id)
    data = {}
    data['id'] = server.id
    # this needs to be tackled
    data['ip_address'] = server.accessIPv4
    data['state'] = server.status
    data['region'] = region
    data['cloud_type'] = 'rackspace'
    data['username'] = username
    # find out the distribution running on the instance
    if username is not None:
        wait_for_ssh(data['ip_address'])
        with settings(host_string=username + '@' + data['ip_address']):
            data['distribution'] = linux_distribution(username, data['ip_address'])
            data['os_release'] = os_release(username, data['ip_address'])
    data['volume'] = ''
    return data
def install_oracle_java(distribution, java_version):
    """ installs oracle java """
    if 'ubuntu' in distribution:
        accept_oracle_license = ('echo '
                                 'oracle-java' + java_version + '-installer '
                                 'shared/accepted-oracle-license-v1-1 '
                                 'select true | '
                                 '/usr/bin/debconf-set-selections')
        with settings(hide('running', 'stdout')):
            sudo(accept_oracle_license)
        with settings(hide('running', 'stdout'),
                      prompts={"Press [ENTER] to continue or ctrl-c to cancel adding it": "yes"}):  # noqa
            sudo("yes | add-apt-repository ppa:webupd8team/java")
        with settings(hide('running', 'stdout')):
            sudo('DEBIAN_FRONTEND=noninteractive apt-get update')
        apt_install(packages=['oracle-java8-installer',
                              'oracle-java8-set-default'])
def install_mesos_single_box_mode(distribution):
    """ install mesos (all of it) on a single node """
    if 'ubuntu' in distribution:
        log_green('adding mesosphere apt-key')
        apt_add_key(keyid='E56151BF')
        os = lsb_release()
        apt_string = 'deb http://repos.mesosphere.io/%s %s main' % (
            os['DISTRIB_ID'], os['DISTRIB_CODENAME'])
        log_green('adding mesosphere apt repository')
        apt_add_repository_from_apt_string(apt_string, 'mesosphere.list')
        log_green('installing ubuntu development tools')
        install_ubuntu_development_tools()
        install_oracle_java(distribution, '8')
        log_green('installing mesos and marathon')
        apt_install(packages=['mesos', 'marathon'])
        if not file_contains('/etc/default/mesos-master',
                             'MESOS_QUORUM=1', use_sudo=True):
            file_append('/etc/default/mesos-master',
                        'MESOS_QUORUM=1', use_sudo=True)
            log_green('restarting services...')
            for svc in ['zookeeper', 'mesos-master', 'mesos-slave', 'marathon']:
                restart_service(svc)
        if not file_contains('/etc/mesos-slave/work_dir',
                             '/data/mesos', use_sudo=True):
            file_append('/etc/mesos-slave/work_dir',
                        '/data/mesos', use_sudo=True)
            log_green('restarting services...')
            for svc in ['mesos-slave']:
                restart_service(svc)
        log_green('enabling nginx autoindex on /...')
        with quiet():
            cmd = 'cat /etc/nginx/sites-available/default'
            contents = sudo(cmd).replace('\n', ' ').replace('\r', '')
        if not bool(re.search('.*#*location \/ {.*autoindex on;.*', contents)):
            insert_line_in_file_after_regex(
                path='/etc/nginx/sites-available/default',
                line=' autoindex on;',
                after_regex='^[^#]*location \/ {',
                use_sudo=True)
        log_green('restarting nginx')
        restart_service('nginx')
def insert_line_in_file_after_regex(path, line, after_regex, use_sudo=False):
    """ inserts a line in the middle of a file """
    tmpfile = str(uuid.uuid4())
    get_file(path, tmpfile, use_sudo=use_sudo)
    with open(tmpfile) as f:
        original = f.read()
    if line not in original:
        outfile = str(uuid.uuid4())
        with open(outfile, 'w') as output:
            for l in original.split('\n'):
                output.write(l + '\n')
                if re.match(after_regex, l) is not None:
                    output.write(line + '\n')
        upload_file(local_path=outfile, remote_path=path, use_sudo=use_sudo)
        os.unlink(outfile)
    os.unlink(tmpfile)
def install_python_module(name):
    """ installs a python module using pip """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=False, capture=True):
        run('pip --quiet install %s' % name)
def install_python_module_locally(name):
    """ installs a python module using pip """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=False, capture=True):
        local('pip --quiet install %s' % name)
def install_system_gem(gem):
    """ install a particular gem """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=False, capture=True):
        sudo("gem install %s --no-rdoc --no-ri" % gem)
def is_vagrant_plugin_installed(plugin, use_sudo=False):
    """ checks which vagrant plugins are installed, returning them as a
    list of {'name': ..., 'version': ...} dictionaries """
    cmd = 'vagrant plugin list'
    if use_sudo:
        results = sudo(cmd)
    else:
        results = run(cmd)
    installed_plugins = []
    for line in results.split('\n'):
        match = re.search(r'^(\S.*) \((.*)\)$', line)
        if match:
            installed_plugins.append({'name': match.group(1),
                                      'version': match.group(2)})
    return installed_plugins
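A standalone sketch of the parsing above on canned `vagrant plugin list` output; the plugin names and versions are hypothetical:

import re

output = "vagrant-share (1.1.9, system)\nvagrant-vbguest (0.30.0)"
plugins = []
for line in output.split('\n'):
    match = re.search(r'^(\S.*) \((.*)\)$', line)
    if match:
        plugins.append({'name': match.group(1), 'version': match.group(2)})
print(plugins)
# [{'name': 'vagrant-share', 'version': '1.1.9, system'},
#  {'name': 'vagrant-vbguest', 'version': '0.30.0'}]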
def is_deb_package_installed(pkg):
    """ checks if a particular deb package is installed """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        result = sudo('dpkg-query -l "%s" | grep -q ^.i' % pkg)
    return not bool(result.return_code)
def is_ssh_available(host, port=22):
    """ checks if ssh port is open """
    s = socket.socket()
    try:
        s.connect((host, port))
        return True
    except:
        return False
def os_release(username, ip_address):
    """ returns /etc/os-release in a dictionary """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        _os_release = {}
        with settings(host_string=username + '@' + ip_address):
            data = run('cat /etc/os-release')
        for line in data.split('\n'):
            if not line:
                continue
            parts = line.split('=')
            if len(parts) == 2:
                _os_release[parts[0]] = parts[1].strip('\n\r"')
    return _os_release
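A standalone sketch of the same /etc/os-release parsing on canned data (the values are hypothetical):

sample = 'NAME="Ubuntu"\nVERSION_ID="20.04"\nID=ubuntu\n'
info = {}
for line in sample.split('\n'):
    if not line:
        continue
    parts = line.split('=')
    if len(parts) == 2:
        info[parts[0]] = parts[1].strip('\n\r"')
print(info)  # {'NAME': 'Ubuntu', 'VERSION_ID': '20.04', 'ID': 'ubuntu'}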
def load_state_from_disk():
    """ loads the state from a local data.json file """
    if is_there_state():
        with open('data.json', 'r') as f:
            data = json.load(f)
        return data
    else:
        return False
def print_ec2_info(region, instance_id, access_key_id, secret_access_key, username):
    """ outputs information about our EC2 instance """
    data = get_ec2_info(instance_id=instance_id,
                        region=region,
                        access_key_id=access_key_id,
                        secret_access_key=secret_access_key,
                        username=username)
    log_green("region: %s" % data['region'])
    log_green("Instance_type: %s" % data['instance_type'])
    log_green("Instance state: %s" % data['state'])
    log_green("Public dns: %s" % data['public_dns_name'])
    log_green("Ip address: %s" % data['ip_address'])
    log_green("volume: %s" % data['volume'])
    log_green("user: %s" % data['username'])
    log_green("ssh -i %s %s@%s" % (env.key_filename, username, data['ip_address']))
def print_gce_info(zone, project, instance_name, data):
    """ outputs information about our GCE instance """
    try:
        instance_info = _get_gce_compute().instances().get(
            project=project,
            zone=zone,
            instance=instance_name
        ).execute()
        log_yellow(pformat(instance_info))
        log_green("Instance state: %s" % instance_info['status'])
        log_green("Ip address: %s" % data['ip_address'])
    except HttpError as e:
        if e.resp.status != 404:
            raise e
        log_yellow("Instance state: DOWN")
    log_green("project: %s" % project)
    log_green("zone: %s" % zone)
    log_green("disk_name: %s" % instance_name)
    log_green("user: %s" % data['username'])
    log_green("ssh -i %s %s@%s" % (env.key_filename, data['username'], data['ip_address']))
def print_rackspace_info(region, instance_id, access_key_id, secret_access_key, username):
    """ outputs information about our Rackspace instance """
    data = get_rackspace_info(server_id=instance_id,
                              region=region,
                              access_key_id=access_key_id,
                              secret_access_key=secret_access_key,
                              username=username)
    log_green("region: %s" % data['region'])
    log_green("Instance state: %s" % data['state'])
    log_green("Ip address: %s" % data['ip_address'])
    log_green("volume: %s" % data['volume'])
    log_green("user: %s" % data['username'])
    log_green("ssh -i %s %s@%s" % (env.key_filename, username, data['ip_address']))
def restart_service(service):
    """ restarts a service """
    with settings(hide('running', 'stdout'), warn_only=True):
        log_yellow('stopping service %s' % service)
        sudo('service %s stop' % service)
        log_yellow('starting service %s' % service)
        sudo('service %s start' % service)
def rsync():
    """ syncs the src code to the remote box """
    log_green('syncing code to remote box...')
    data = load_state_from_disk()
    if 'SOURCE_PATH' in os.environ:
        with lcd(os.environ['SOURCE_PATH']):
            local("rsync -a "
                  "--info=progress2 "
                  "--exclude .git "
                  "--exclude .tox "
                  "--exclude .vagrant "
                  "--exclude venv "
                  ". "
                  "-e 'ssh -C -i " + env.ec2_key_filename + "' "
                  "%s@%s:" % (env.user, data['ip_address']))
    else:
        print('please export SOURCE_PATH before running rsync')
        exit(1)
def save_ec2_state_locally(instance_id, region, username, access_key_id, secret_access_key):
    """ queries EC2 for details about a particular instance_id and
    stores those details locally """
    # retrieve the IP information from the instance
    data = get_ec2_info(instance_id, region, access_key_id, secret_access_key, username)
    return _save_state_locally(data)
def ssh_session(key_filename, username, ip_address, *cli):
    """ opens an ssh shell to the host """
    local('ssh -t -i %s %s@%s %s' % (key_filename, username, ip_address,
                                     "".join(chain.from_iterable(cli))))
def up_ec2(region, access_key_id, secret_access_key, instance_id, username):
    """ boots an existing ec2 instance """
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    # boot the ec2 instance
    instance = conn.start_instances(instance_ids=instance_id)[0]
    while instance.state != "running":
        log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    # the ip_address has changed so we need to get the latest data from ec2
    data = get_ec2_info(instance_id=instance_id,
                        region=region,
                        access_key_id=access_key_id,
                        secret_access_key=secret_access_key,
                        username=username)
    # and make sure we don't return until the instance is fully up
    wait_for_ssh(data['ip_address'])
    # lets update our local state file with the new ip_address
    save_ec2_state_locally(instance_id=instance_id,
                           region=region,
                           username=username,
                           access_key_id=access_key_id,
                           secret_access_key=secret_access_key)
    env.hosts = data['ip_address']
    print_ec2_info(region, instance_id, access_key_id, secret_access_key, username)
def wait_for_ssh(host, port=22, timeout=600):
    """ probes the ssh port and waits until it is available """
    log_yellow('waiting for ssh...')
    for iteration in xrange(1, timeout):  # noqa
        sleep(1)
        if is_ssh_available(host, port):
            return True
        else:
            log_yellow('waiting for ssh...')
def _get_visuals(user):
    """ Return the graphical elements for a user.

    :param user: dict of user information
    :return QPixmap, QLabel: avatar image and name label
    """
    pixmap = SuperUserAvatar() if user["status"] == "admin" else UserAvatar()
    label = user["label"]
    return pixmap, QLabel(label)