Given a URI, detects plugin and encoding and imports into a `rows.Table`
def import_from_uri(
    uri, default_encoding="utf-8", verify_ssl=True, progress=False, *args, **kwargs
):
    "Given a URI, detects plugin and encoding and imports into a `rows.Table`"
    # TODO: support '-' also
    # TODO: (optimization) if `kwargs.get('encoding', None) is not None` we can
    # skip encoding detection.
    source = detect_source(uri, verify_ssl=verify_ssl, progress=progress)
    return import_from_source(source, default_encoding, *args, **kwargs)
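A hedged usage sketch of the two URI helpers above (the file names are illustrative, and it assumes these helpers are exposed under `rows.utils`, as in the rows library):

import rows.utils

# "data.csv" and "data.sqlite" are hypothetical local files/URIs
table = rows.utils.import_from_uri("data.csv")      # plugin and encoding detected from the URI
rows.utils.export_to_uri(table, "data.sqlite")      # export plugin detected from the extension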
Given a `rows.Table` and a URI, detects plugin (from URI) and exports
def export_to_uri(table, uri, *args, **kwargs):
    "Given a `rows.Table` and a URI, detects plugin (from URI) and exports"
    # TODO: support '-' also
    plugin_name = plugin_name_by_uri(uri)
    try:
        export_function = getattr(rows, "export_to_{}".format(plugin_name))
    except AttributeError:
        raise ValueError('Plugin (export) "{}" not found'.format(plugin_name))
    return export_function(table, uri, *args, **kwargs)
Return a text-based file object from a filename, even if compressed
def open_compressed(filename, mode="r", encoding=None):
    "Return a text-based file object from a filename, even if compressed"
    # TODO: integrate this function in the library itself, using
    # get_filename_and_fobj
    binary_mode = "b" in mode
    extension = str(filename).split(".")[-1].lower()

    if binary_mode and encoding:
        raise ValueError("encoding should not be specified in binary mode")

    if extension == "xz":
        if lzma is None:
            raise RuntimeError("lzma support is not installed")
        fobj = lzma.open(filename, mode=mode)
        if binary_mode:
            return fobj
        else:
            return io.TextIOWrapper(fobj, encoding=encoding)

    elif extension == "gz":
        fobj = gzip.GzipFile(filename, mode=mode)
        if binary_mode:
            return fobj
        else:
            return io.TextIOWrapper(fobj, encoding=encoding)

    elif extension == "bz2":
        if bz2 is None:
            raise RuntimeError("bzip2 support is not installed")
        if binary_mode:  # ignore encoding
            return bz2.open(filename, mode=mode)
        else:
            if "t" not in mode:
                # For some reason, passing only mode='r' to bzip2 is equivalent
                # to 'rb', not 'rt', so we force it here.
                mode += "t"
            return bz2.open(filename, mode=mode, encoding=encoding)

    else:
        if binary_mode:
            return open(filename, mode=mode)
        else:
            return open(filename, mode=mode, encoding=encoding)
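A minimal usage sketch for open_compressed (the file name is hypothetical; the ".gz" suffix selects the gzip branch above):

# Hypothetical file name - the ".gz" extension triggers the gzip code path
fobj = open_compressed("data.csv.gz", mode="r", encoding="utf-8")
print(fobj.readline())  # first line of the decompressed text
fobj.close()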
Export a CSV file to SQLite, based on field type detection from samples
def csv_to_sqlite(
    input_filename,
    output_filename,
    samples=None,
    dialect=None,
    batch_size=10000,
    encoding="utf-8",
    callback=None,
    force_types=None,
    chunk_size=8388608,
    table_name="table1",
    schema=None,
):
    "Export a CSV file to SQLite, based on field type detection from samples"
    # TODO: automatically detect encoding if encoding == `None`
    # TODO: should be able to specify fields
    # TODO: if table_name is "2019" the final name will be "field_2019" - must
    # be "table_2019"
    # TODO: if schema is provided and the names are in uppercase, this function
    # will fail

    if dialect is None:  # Get a sample to detect dialect
        fobj = open_compressed(input_filename, mode="rb")
        sample = fobj.read(chunk_size)
        dialect = rows.plugins.csv.discover_dialect(sample, encoding=encoding)
    elif isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    if schema is None:  # Identify data types
        fobj = open_compressed(input_filename, encoding=encoding)
        data = list(islice(csv.DictReader(fobj, dialect=dialect), samples))
        schema = rows.import_from_dicts(data).fields
        if force_types is not None:
            schema.update(force_types)

    # Create lazy table object to be converted
    # TODO: this laziness feature will be incorporated into the library soon so
    # we can call `rows.import_from_csv` here instead of `csv.reader`.
    reader = csv.reader(
        open_compressed(input_filename, encoding=encoding), dialect=dialect
    )
    header = make_header(next(reader))  # skip header
    table = rows.Table(fields=OrderedDict([(field, schema[field]) for field in header]))
    table._rows = reader

    # Export to SQLite
    return rows.export_to_sqlite(
        table,
        output_filename,
        table_name=table_name,
        batch_size=batch_size,
        callback=callback,
    )
Export a table inside a SQLite database to CSV
def sqlite_to_csv(
    input_filename,
    table_name,
    output_filename,
    dialect=csv.excel,
    batch_size=10000,
    encoding="utf-8",
    callback=None,
    query=None,
):
    """Export a table inside a SQLite database to CSV"""
    # TODO: should be able to specify fields
    # TODO: should be able to specify custom query

    if isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    if query is None:
        query = "SELECT * FROM {}".format(table_name)

    connection = sqlite3.Connection(input_filename)
    cursor = connection.cursor()
    result = cursor.execute(query)
    header = [item[0] for item in cursor.description]
    fobj = open_compressed(output_filename, mode="w", encoding=encoding)
    writer = csv.writer(fobj, dialect=dialect)
    writer.writerow(header)
    total_written = 0
    for batch in rows.plugins.utils.ipartition(result, batch_size):
        writer.writerows(batch)
        written = len(batch)
        total_written += written
        if callback:
            callback(written, total_written)
    fobj.close()
Execute a command and return its output
def execute_command(command):
    """Execute a command and return its output"""
    command = shlex.split(command)
    try:
        process = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except FileNotFoundError:
        raise RuntimeError("Command not found: {}".format(repr(command)))
    process.wait()
    # TODO: may use another codec to decode
    if process.returncode > 0:
        stderr = process.stderr.read().decode("utf-8")
        raise ValueError("Error executing command: {}".format(repr(stderr)))
    return process.stdout.read().decode("utf-8")
Return the uncompressed size for a file by executing commands

Note: due to a limitation in the gzip format, uncompressed files greater than 4GiB will have a wrong value.
def uncompressed_size(filename):
    """Return the uncompressed size for a file by executing commands

    Note: due to a limitation in the gzip format, uncompressed files greater
    than 4GiB will have a wrong value.
    """
    quoted_filename = shlex.quote(filename)

    # TODO: get filetype from file-magic, if available
    if str(filename).lower().endswith(".xz"):
        output = execute_command('xz --list "{}"'.format(quoted_filename))
        compressed, uncompressed = regexp_sizes.findall(output)
        value, unit = uncompressed.split()
        value = float(value.replace(",", ""))
        return int(value * MULTIPLIERS[unit])

    elif str(filename).lower().endswith(".gz"):
        # XXX: gzip only uses 32 bits to store the uncompressed size, so if the
        # uncompressed size is greater than 4GiB, the value returned will be
        # incorrect.
        output = execute_command('gzip --list "{}"'.format(quoted_filename))
        lines = [line.split() for line in output.splitlines()]
        header, data = lines[0], lines[1]
        gzip_data = dict(zip(header, data))
        return int(gzip_data["uncompressed"])

    else:
        raise ValueError('Unrecognized file type for "{}".'.format(filename))
Import data from CSV into PostgreSQL using the fastest method

Required: psql command
def pgimport(
    filename,
    database_uri,
    table_name,
    encoding="utf-8",
    dialect=None,
    create_table=True,
    schema=None,
    callback=None,
    timeout=0.1,
    chunk_size=8388608,
    max_samples=10000,
):
    """Import data from CSV into PostgreSQL using the fastest method

    Required: psql command
    """
    fobj = open_compressed(filename, mode="r", encoding=encoding)
    sample = fobj.read(chunk_size)

    if dialect is None:  # Detect dialect
        dialect = rows.plugins.csv.discover_dialect(
            sample.encode(encoding), encoding=encoding
        )
    elif isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    if schema is None:  # Detect field names
        reader = csv.reader(io.StringIO(sample), dialect=dialect)
        field_names = [slug(field_name) for field_name in next(reader)]
    else:
        field_names = list(schema.keys())

    if create_table:
        if schema is None:
            data = [
                dict(zip(field_names, row))
                for row in itertools.islice(reader, max_samples)
            ]
            table = rows.import_from_dicts(data)
            field_types = [table.fields[field_name] for field_name in field_names]
        else:
            field_types = list(schema.values())

        columns = [
            "{} {}".format(name, POSTGRESQL_TYPES.get(type_, DEFAULT_POSTGRESQL_TYPE))
            for name, type_ in zip(field_names, field_types)
        ]
        create_table = SQL_CREATE_TABLE.format(
            table_name=table_name, field_types=", ".join(columns)
        )
        execute_command(get_psql_command(create_table, database_uri=database_uri))

    # Prepare the `psql` command to be executed based on collected metadata
    command = get_psql_copy_command(
        database_uri=database_uri,
        dialect=dialect,
        direction="FROM",
        encoding=encoding,
        header=field_names,
        table_name=table_name,
    )
    rows_imported, error = 0, None
    fobj = open_compressed(filename, mode="rb")
    try:
        process = subprocess.Popen(
            shlex.split(command),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data = fobj.read(chunk_size)
        total_written = 0
        while data != b"":
            written = process.stdin.write(data)
            total_written += written
            if callback:
                callback(written, total_written)
            data = fobj.read(chunk_size)
        stdout, stderr = process.communicate()
        if stderr != b"":
            raise RuntimeError(stderr.decode("utf-8"))
        rows_imported = int(stdout.replace(b"COPY ", b"").strip())

    except FileNotFoundError:
        raise RuntimeError("Command `psql` not found")

    except BrokenPipeError:
        raise RuntimeError(process.stderr.read().decode("utf-8"))

    return {"bytes_written": total_written, "rows_imported": rows_imported}
Export data from PostgreSQL into a CSV file using the fastest method

Required: psql command
def pgexport(
    database_uri,
    table_name,
    filename,
    encoding="utf-8",
    dialect=csv.excel,
    callback=None,
    timeout=0.1,
    chunk_size=8388608,
):
    """Export data from PostgreSQL into a CSV file using the fastest method

    Required: psql command
    """
    if isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    # Prepare the `psql` command to be executed to export data
    command = get_psql_copy_command(
        database_uri=database_uri,
        direction="TO",
        encoding=encoding,
        header=None,  # Needed when direction = 'TO'
        table_name=table_name,
        dialect=dialect,
    )
    fobj = open_compressed(filename, mode="wb")
    try:
        process = subprocess.Popen(
            shlex.split(command),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        total_written = 0
        data = process.stdout.read(chunk_size)
        while data != b"":
            written = fobj.write(data)
            total_written += written
            if callback:
                callback(written, total_written)
            data = process.stdout.read(chunk_size)
        stdout, stderr = process.communicate()
        if stderr != b"":
            raise RuntimeError(stderr.decode("utf-8"))

    except FileNotFoundError:
        raise RuntimeError("Command `psql` not found")

    except BrokenPipeError:
        raise RuntimeError(process.stderr.read().decode("utf-8"))

    return {"bytes_written": total_written}
Generate a table schema for a specific output format and write it

Currently supported output formats: 'txt', 'csv', 'sql' and 'django'. The table name and all field names go through a slugifying process (the table name is taken from the file name).
def generate_schema(table, export_fields, output_format, output_fobj):
    """Generate a table schema for a specific output format and write it

    Currently supported output formats: 'txt', 'csv', 'sql' and 'django'.
    The table name and all field names go through a slugifying process (the
    table name is taken from the file name).
    """
    if output_format in ("csv", "txt"):
        from rows import plugins

        data = [
            {
                "field_name": fieldname,
                "field_type": fieldtype.__name__.replace("Field", "").lower(),
            }
            for fieldname, fieldtype in table.fields.items()
            if fieldname in export_fields
        ]
        table = plugins.dicts.import_from_dicts(
            data, import_fields=["field_name", "field_type"]
        )
        if output_format == "txt":
            plugins.txt.export_to_txt(table, output_fobj)
        elif output_format == "csv":
            plugins.csv.export_to_csv(table, output_fobj)

    elif output_format == "sql":
        # TODO: may use dict from rows.plugins.sqlite or postgresql
        sql_fields = {
            rows.fields.BinaryField: "BLOB",
            rows.fields.BoolField: "BOOL",
            rows.fields.IntegerField: "INT",
            rows.fields.FloatField: "FLOAT",
            rows.fields.PercentField: "FLOAT",
            rows.fields.DateField: "DATE",
            rows.fields.DatetimeField: "DATETIME",
            rows.fields.TextField: "TEXT",
            rows.fields.DecimalField: "FLOAT",
            rows.fields.EmailField: "TEXT",
            rows.fields.JSONField: "TEXT",
        }
        fields = [
            " {} {}".format(field_name, sql_fields[field_type])
            for field_name, field_type in table.fields.items()
            if field_name in export_fields
        ]
        sql = (
            dedent(
                """
                CREATE TABLE IF NOT EXISTS {name} (
                {fields}
                );
                """
            )
            .strip()
            .format(name=table.name, fields=",\n".join(fields))
            + "\n"
        )
        output_fobj.write(sql)

    elif output_format == "django":
        django_fields = {
            rows.fields.BinaryField: "BinaryField",
            rows.fields.BoolField: "BooleanField",
            rows.fields.IntegerField: "IntegerField",
            rows.fields.FloatField: "FloatField",
            rows.fields.PercentField: "DecimalField",
            rows.fields.DateField: "DateField",
            rows.fields.DatetimeField: "DateTimeField",
            rows.fields.TextField: "TextField",
            rows.fields.DecimalField: "DecimalField",
            rows.fields.EmailField: "EmailField",
            rows.fields.JSONField: "JSONField",
        }
        table_name = "".join(word.capitalize() for word in table.name.split("_"))
        lines = ["from django.db import models"]
        if rows.fields.JSONField in [
            table.fields[field_name] for field_name in export_fields
        ]:
            lines.append("from django.contrib.postgres.fields import JSONField")
        lines.append("")
        lines.append("class {}(models.Model):".format(table_name))
        for field_name, field_type in table.fields.items():
            if field_name not in export_fields:
                continue
            if field_type is not rows.fields.JSONField:
                django_type = "models.{}()".format(django_fields[field_type])
            else:
                django_type = "JSONField()"
            lines.append("    {} = {}".format(field_name, django_type))

        result = "\n".join(lines) + "\n"
        output_fobj.write(result)
Load schema from file in any of the supported formats

The table must have at least the fields `field_name` and `field_type`.

`context` is a `dict` with field_type as key pointing to field class, like:
    {"text": rows.fields.TextField, "value": MyCustomField}
def load_schema(filename, context=None):
    """Load schema from file in any of the supported formats

    The table must have at least the fields `field_name` and `field_type`.
    `context` is a `dict` with field_type as key pointing to field class, like:
        {"text": rows.fields.TextField, "value": MyCustomField}
    """
    table = import_from_uri(filename)
    field_names = table.field_names
    assert "field_name" in field_names
    assert "field_type" in field_names

    context = context or {
        key.replace("Field", "").lower(): getattr(rows.fields, key)
        for key in dir(rows.fields)
        if "Field" in key and key != "Field"
    }
    return OrderedDict(
        [(row.field_name, context[row.field_type]) for row in table]
    )
Return a callable that fetches the given indexes of an object

Always return a tuple even when len(indexes) == 1.

Similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError).
def get_items(*indexes):
    """Return a callable that fetches the given indexes of an object

    Always return a tuple even when len(indexes) == 1.

    Similar to `operator.itemgetter`, but will insert `None` when the object
    does not have the desired index (instead of raising IndexError).
    """
    return lambda obj: tuple(
        obj[index] if len(obj) > index else None for index in indexes
    )
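get_items is pure Python, so its behaviour can be shown directly:

get_cols = get_items(0, 2)
print(get_cols(["a", "b", "c"]))  # ('a', 'c')
print(get_cols(["a", "b"]))       # ('a', None) - a missing index becomes None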
Generate a slug for the `text`.

>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
def slug(text, separator="_", permitted_chars=SLUG_CHARS):
    """Generate a slug for the `text`.

    >>> slug(' ÁLVARO justen% ')
    'alvaro_justen'
    >>> slug(' ÁLVARO justen% ', separator='-')
    'alvaro-justen'
    """
    text = six.text_type(text or "")

    # Strip non-ASCII characters
    # Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
    text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")

    # Replace word boundaries with separator
    text = REGEXP_WORD_BOUNDARY.sub("\\1" + re.escape(separator), text)

    # Remove non-permitted characters and put everything to lowercase
    # Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
    allowed_chars = list(permitted_chars) + [separator]
    text = "".join(char for char in text if char in allowed_chars).lower()

    # Remove double occurrences of separator
    # Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
    text = (
        REGEXP_SEPARATOR
        if separator == "_"
        else re.compile("(" + re.escape(separator) + "+)")
    ).sub(separator, text)

    # Strip separators
    # Example: u'_alvaro_justen_' -> u'alvaro_justen'
    return text.strip(separator)
Return a unique name based on `name_format` and `name`.
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
    """Return a unique name based on `name_format` and `name`."""
    index = start
    new_name = name
    while new_name in existing_names:
        new_name = name_format.format(name=name, index=index)
        index += 1
    return new_name
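A quick sketch of make_unique_name, again pure Python:

print(make_unique_name("field", existing_names=["field", "field_2"]))  # 'field_3'
print(make_unique_name("other", existing_names=["field"]))             # 'other' (already unique)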
Return unique and slugged field names.
def make_header(field_names, permit_not=False):
    """Return unique and slugged field names."""
    slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^"

    header = [
        slug(field_name, permitted_chars=slug_chars) for field_name in field_names
    ]
    result = []
    for index, field_name in enumerate(header):
        if not field_name:
            field_name = "field_{}".format(index)
        elif field_name[0].isdigit():
            field_name = "field_{}".format(field_name)

        if field_name in result:
            field_name = make_unique_name(
                name=field_name, existing_names=result, start=2
            )
        result.append(field_name)

    return result
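A hedged example of what make_header produces (the exact slugs depend on SLUG_CHARS and the slug() function above):

print(make_header(["Name", "Name", "2019", ""]))
# Expected to be something like: ['name', 'name_2', 'field_2019', 'field_3']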
Detect column types (or "where the magic happens")
def detect_types(
    field_names,
    field_values,
    field_types=DEFAULT_TYPES,
    skip_indexes=None,
    type_detector=TypeDetector,
    fallback_type=TextField,
    *args,
    **kwargs
):
    """Detect column types (or "where the magic happens")"""
    # TODO: look at the strategy of csv.Sniffer.has_header
    # TODO: may receive 'type hints'
    detector = type_detector(
        field_names,
        field_types=field_types,
        fallback_type=fallback_type,
        skip_indexes=skip_indexes,
    )
    detector.feed(field_values)
    return detector.fields
Deserialize a value just after importing it

`cls.deserialize` should always return a value of type `cls.TYPE` or `None`.
def deserialize(cls, value, *args, **kwargs):
    """Deserialize a value just after importing it

    `cls.deserialize` should always return a value of type `cls.TYPE` or
    `None`.
    """
    if isinstance(value, cls.TYPE):
        return value
    elif is_null(value):
        return None
    else:
        return value
Filter out objects outside table boundaries
def selected_objects(self):
    """Filter out objects outside table boundaries"""
    return [
        obj
        for obj in self.text_objects
        if contains_or_overlap(self.table_bbox, obj.bbox)
    ]
Extract links from "project" field and remove HTML from all
def transform(row, table):
    'Extract links from "project" field and remove HTML from all'
    data = row._asdict()
    data["links"] = " ".join(extract_links(row.project))
    for key, value in data.items():
        if isinstance(value, six.text_type):
            data[key] = extract_text(value)
    return data
Transform row "link" into full URL and add "state" based on "name"
def transform(row, table):
    'Transform row "link" into full URL and add "state" based on "name"'
    data = row._asdict()
    data["link"] = urljoin("https://pt.wikipedia.org", data["link"])
    data["name"], data["state"] = regexp_city_state.findall(data["name"])[0]
    return data
Import data from a Parquet file and return a rows.Table.
def import_from_parquet(filename_or_fobj, *args, **kwargs):
    """Import data from a Parquet file and return a rows.Table."""
    source = Source.from_file(filename_or_fobj, plugin_name="parquet", mode="rb")

    # TODO: should look into `schema.converted_type` also
    types = OrderedDict(
        [
            (schema.name, PARQUET_TO_ROWS[schema.type])
            for schema in parquet._read_footer(source.fobj).schema
            if schema.type is not None
        ]
    )
    header = list(types.keys())
    table_rows = list(parquet.reader(source.fobj))  # TODO: be lazy

    meta = {"imported_from": "parquet", "source": source}
    return create_table(
        [header] + table_rows, meta=meta, force_types=types, *args, **kwargs
    )
Import data from an iterable of dicts

The algorithm will use the first `samples` dicts to determine the field names (if `samples` is `None`, all dicts will be used).
def import_from_dicts(data, samples=None, *args, **kwargs):
    """Import data from an iterable of dicts

    The algorithm will use the first `samples` dicts to determine the field
    names (if `samples` is `None`, all dicts will be used).
    """
    data = iter(data)

    cached_rows, headers = [], []
    for index, row in enumerate(data, start=1):
        cached_rows.append(row)

        for key in row.keys():
            if key not in headers:
                headers.append(key)

        if samples and index == samples:
            break

    data_rows = (
        [row.get(header, None) for header in headers]
        for row in chain(cached_rows, data)
    )

    kwargs["samples"] = samples
    meta = {"imported_from": "dicts"}
    return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs)
Export a `rows.Table` to a list of dicts
def export_to_dicts(table, *args, **kwargs):
    """Export a `rows.Table` to a list of dicts"""
    field_names = table.field_names
    return [{key: getattr(row, key) for key in field_names} for row in table]
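A hedged round-trip sketch combining import_from_dicts and export_to_dicts (both defined above; type detection is expected to keep the integer values as ints):

table = import_from_dicts([{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}])
print(export_to_dicts(table))
# [{'name': 'Alice', 'age': 30}, {'name': 'Bob', 'age': 25}]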
>>> extract_intervals("1,2,3")
[1, 2, 3]
>>> extract_intervals("1,2,5-10")
[1, 2, 5, 6, 7, 8, 9, 10]
>>> extract_intervals("1,2,5-10,3")
[1, 2, 3, 5, 6, 7, 8, 9, 10]
>>> extract_intervals("1,2,5-10,6,7")
[1, 2, 5, 6, 7, 8, 9, 10]
def extract_intervals(text, repeat=False, sort=True):
    """
    >>> extract_intervals("1,2,3")
    [1, 2, 3]
    >>> extract_intervals("1,2,5-10")
    [1, 2, 5, 6, 7, 8, 9, 10]
    >>> extract_intervals("1,2,5-10,3")
    [1, 2, 3, 5, 6, 7, 8, 9, 10]
    >>> extract_intervals("1,2,5-10,6,7")
    [1, 2, 5, 6, 7, 8, 9, 10]
    """
    result = []
    for value in text.split(","):
        value = value.strip()
        if "-" in value:
            start_value, end_value = value.split("-")
            start_value = int(start_value.strip())
            end_value = int(end_value.strip())
            result.extend(range(start_value, end_value + 1))
        else:
            result.append(int(value.strip()))

    if not repeat:
        result = list(set(result))
    if sort:
        result.sort()

    return result
Return the cell value of the sheet passed by argument, based on row and column.
def cell_value(sheet, row, col):
    """Return the cell value of the sheet passed by argument, based on row and column."""
    cell = sheet.cell(row, col)
    field_type = CELL_TYPES[cell.ctype]

    # TODO: this approach will not work if using locale
    value = cell.value

    if field_type is None:
        return None

    elif field_type is fields.TextField:
        if cell.ctype != xlrd.XL_CELL_BLANK:
            return value
        else:
            return ""

    elif field_type is fields.DatetimeField:
        if value == 0.0:
            return None

        try:
            time_tuple = xlrd.xldate_as_tuple(value, sheet.book.datemode)
        except xlrd.xldate.XLDateTooLarge:
            return None
        value = field_type.serialize(datetime.datetime(*time_tuple))
        return value.split("T00:00:00")[0]

    elif field_type is fields.BoolField:
        if value == 0:
            return False
        elif value == 1:
            return True

    elif cell.xf_index is None:
        return value  # TODO: test

    else:
        book = sheet.book
        xf = book.xf_list[cell.xf_index]
        fmt = book.format_map[xf.format_key]

        if fmt.format_str.endswith("%"):
            # TODO: we may optimize this approach: we're converting to string
            # and the library is detecting the type when we could just say to
            # the library this value is a PercentField
            if value is not None:
                try:
                    decimal_places = len(fmt.format_str[:-1].split(".")[-1])
                except IndexError:
                    decimal_places = 2
                return "{}%".format(str(round(value * 100, decimal_places)))
            else:
                return None

        elif type(value) == float and int(value) == value:
            return int(value)

        else:
            return value
Return a rows.Table created from imported XLS file.
def import_from_xls(
    filename_or_fobj,
    sheet_name=None,
    sheet_index=0,
    start_row=None,
    start_column=None,
    end_row=None,
    end_column=None,
    *args,
    **kwargs
):
    """Return a rows.Table created from imported XLS file."""
    source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="xls")
    source.fobj.close()

    book = xlrd.open_workbook(
        source.uri, formatting_info=True, logfile=open(os.devnull, mode="w")
    )
    if sheet_name is not None:
        sheet = book.sheet_by_name(sheet_name)
    else:
        sheet = book.sheet_by_index(sheet_index)
    # TODO: may re-use Excel data types

    # Get header and rows
    # The xlrd library reads rows and columns starting from 0 and ending on
    # sheet.nrows/ncols - 1. rows also uses 0-based indexes, so no
    # transformation is needed
    min_row, min_column = get_table_start(sheet)
    max_row, max_column = sheet.nrows - 1, sheet.ncols - 1
    # TODO: consider adding a parameter `ignore_padding=True` and when it's
    # True, consider `start_row` starting from `min_row` and `start_column`
    # starting from `min_col`.
    start_row = max(start_row if start_row is not None else min_row, min_row)
    end_row = min(end_row if end_row is not None else max_row, max_row)
    start_column = max(
        start_column if start_column is not None else min_column, min_column
    )
    end_column = min(end_column if end_column is not None else max_column, max_column)
    table_rows = [
        [
            cell_value(sheet, row_index, column_index)
            for column_index in range(start_column, end_column + 1)
        ]
        for row_index in range(start_row, end_row + 1)
    ]

    meta = {"imported_from": "xls", "source": source, "name": sheet.name}
    return create_table(table_rows, meta=meta, *args, **kwargs)
Export the rows.Table to XLS file and return the saved file.
def export_to_xls(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
    """Export the rows.Table to XLS file and return the saved file."""
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet(sheet_name)

    prepared_table = prepare_to_export(table, *args, **kwargs)

    field_names = next(prepared_table)
    for column_index, field_name in enumerate(field_names):
        sheet.write(0, column_index, field_name)

    _convert_row = _python_to_xls([table.fields.get(field) for field in field_names])
    for row_index, row in enumerate(prepared_table, start=1):
        for column_index, (value, data) in enumerate(_convert_row(row)):
            sheet.write(row_index, column_index, value, **data)

    return_result = False
    if filename_or_fobj is None:
        filename_or_fobj = BytesIO()
        return_result = True

    source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xls")
    workbook.save(source.fobj)
    source.fobj.flush()

    if return_result:
        source.fobj.seek(0)
        result = source.fobj.read()
    else:
        result = source.fobj

    if source.should_close:
        source.fobj.close()

    return result
Verify if a given table name is valid for `rows`

Rules:
- Should start with a letter or '_'
- Letters can be capitalized or not
- Accepts letters, numbers and _
def _valid_table_name(name):
    """Verify if a given table name is valid for `rows`

    Rules:
    - Should start with a letter or '_'
    - Letters can be capitalized or not
    - Accepts letters, numbers and _
    """
    if name[0] not in "_" + string.ascii_letters or not set(name).issubset(
        "_" + string.ascii_letters + string.digits
    ):
        return False
    else:
        return True
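_valid_table_name is self-contained, so a quick check illustrates the rules:

print(_valid_table_name("my_table_1"))  # True
print(_valid_table_name("1table"))      # False - starts with a digit
print(_valid_table_name("my-table"))    # False - '-' is not an accepted character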
Find the position for each column separator in the given line

If frame_style is 'None', this won't work for column names that _start_ with whitespace (which includes non-left-aligned column titles)
def _parse_col_positions(frame_style, header_line):
    """Find the position for each column separator in the given line

    If frame_style is 'None', this won't work for column names that _start_
    with whitespace (which includes non-left-aligned column titles)
    """
    separator = re.escape(FRAMES[frame_style.lower()]["VERTICAL"])
    if frame_style == "None":
        separator = r"[\s]{2}[^\s]"
        # Matches two whitespaces followed by a non-whitespace.
        # Our column headers are separated by 3 spaces by default.
    col_positions = []
    # Abuse the regexp engine to annotate vertical-separator positions:
    re.sub(separator, lambda group: col_positions.append(group.start()), header_line)
    if frame_style == "None":
        col_positions.append(len(header_line) - 1)
    return col_positions
Return a rows.Table created from imported TXT file.
def import_from_txt(
    filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs
):
    """Return a rows.Table created from imported TXT file."""
    # TODO: (maybe) enable parsing of non-fixed-width columns with the old
    # algorithm - that would just split columns at the vertical separator
    # character for the frame (if doing so, include an optional parameter).
    # Also, this fixes an outstanding unreported issue: trying to parse tables
    # whose field values included a pipe char - "|" - would silently yield bad
    # results.
    source = Source.from_file(
        filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding
    )
    raw_contents = source.fobj.read().decode(encoding).rstrip("\n")

    if frame_style is FRAME_SENTINEL:
        frame_style = _guess_frame_style(raw_contents)
    else:
        frame_style = _parse_frame_style(frame_style)

    contents = raw_contents.splitlines()
    del raw_contents

    if frame_style != "None":
        contents = contents[1:-1]
        del contents[1]
    else:
        # the table was possibly generated from another source.
        # check if the line we reserve as a separator is really empty.
        if not contents[1].strip():
            del contents[1]
    col_positions = _parse_col_positions(frame_style, contents[0])

    table_rows = [
        [
            row[start + 1 : end].strip()
            for start, end in zip(col_positions, col_positions[1:])
        ]
        for row in contents
    ]

    meta = {
        "imported_from": "txt",
        "source": source,
        "frame_style": frame_style,
    }
    return create_table(table_rows, meta=meta, *args, **kwargs)
Export a `rows.Table` to text.

This function can return the result as a string or save into a file (via filename or file-like object).

`encoding` could be `None` if no filename/file-like object is specified, then the return type will be `six.text_type`.

`frame_style`: will select the frame style to be printed around data. Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is default. Warning: no checks are made to verify that the desired encoding allows the characters needed by single and double frame styles.

`safe_none_frame`: bool, defaults to True. Affects only output with frame_style == "None": column titles are left-aligned and have whitespace replaced with "_". This enables the output to be parseable. Otherwise, the generated table will look prettier but cannot be imported back.
def export_to_txt(
    table,
    filename_or_fobj=None,
    encoding=None,
    frame_style="ASCII",
    safe_none_frame=True,
    *args,
    **kwargs
):
    """Export a `rows.Table` to text.

    This function can return the result as a string or save into a file (via
    filename or file-like object).

    `encoding` could be `None` if no filename/file-like object is specified,
    then the return type will be `six.text_type`.

    `frame_style`: will select the frame style to be printed around data.
    Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is default.
    Warning: no checks are made to verify that the desired encoding allows the
    characters needed by single and double frame styles.

    `safe_none_frame`: bool, defaults to True. Affects only output with
    frame_style == "None": column titles are left-aligned and have whitespace
    replaced with "_". This enables the output to be parseable. Otherwise, the
    generated table will look prettier but cannot be imported back.
    """
    # TODO: will work only if table.fields is OrderedDict
    frame_style = _parse_frame_style(frame_style)
    frame = FRAMES[frame_style.lower()]

    serialized_table = serialize(table, *args, **kwargs)
    field_names = next(serialized_table)
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)

    dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names]

    if frame_style != "None" or not safe_none_frame:
        header = [field.center(max_sizes[field]) for field in field_names]
    else:
        header = [
            field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names
        ]
    header = "{0} {1} {0}".format(
        frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header)
    )
    top_split_line = (
        frame["DOWN AND RIGHT"]
        + frame["DOWN AND HORIZONTAL"].join(dashes)
        + frame["DOWN AND LEFT"]
    )
    body_split_line = (
        frame["VERTICAL AND RIGHT"]
        + frame["VERTICAL AND HORIZONTAL"].join(dashes)
        + frame["VERTICAL AND LEFT"]
    )
    bottom_split_line = (
        frame["UP AND RIGHT"]
        + frame["UP AND HORIZONTAL"].join(dashes)
        + frame["UP AND LEFT"]
    )

    result = []
    if frame_style != "None":
        result += [top_split_line]
    result += [header, body_split_line]
    for row in table_rows:
        values = [
            value.rjust(max_sizes[field_name])
            for field_name, value in zip(field_names, row)
        ]
        row_data = " {} ".format(frame["VERTICAL"]).join(values)
        result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data))

    if frame_style != "None":
        result.append(bottom_split_line)
    result.append("")
    data = "\n".join(result)

    if encoding is not None:
        data = data.encode(encoding)

    return export_data(filename_or_fobj, data, mode="wb")
Return a rows.Table with data from SQLite database.
def import_from_sqlite(
    filename_or_connection,
    table_name="table1",
    query=None,
    query_args=None,
    *args,
    **kwargs
):
    """Return a rows.Table with data from SQLite database."""
    source = get_source(filename_or_connection)
    connection = source.fobj
    cursor = connection.cursor()

    if query is None:
        if not _valid_table_name(table_name):
            raise ValueError("Invalid table name: {}".format(table_name))
        query = SQL_SELECT_ALL.format(table_name=table_name)

    if query_args is None:
        query_args = tuple()

    table_rows = list(cursor.execute(query, query_args))  # TODO: may be lazy
    header = [six.text_type(info[0]) for info in cursor.description]
    cursor.close()
    # TODO: should close connection also?

    meta = {"imported_from": "sqlite", "source": source}
    return create_table([header] + table_rows, meta=meta, *args, **kwargs)
Convert an openpyxl `Cell` object to the corresponding Python object.
def _cell_to_python(cell):
    """Convert an openpyxl `Cell` object to the corresponding Python object."""
    data_type, value = cell.data_type, cell.value

    if type(cell) is EmptyCell:
        return None
    elif data_type == "f" and value == "=TRUE()":
        return True
    elif data_type == "f" and value == "=FALSE()":
        return False
    elif cell.number_format.lower() == "yyyy-mm-dd":
        return str(value).split(" 00:00:00")[0]
    elif cell.number_format.lower() == "yyyy-mm-dd hh:mm:ss":
        return str(value).split(".")[0]
    elif cell.number_format.endswith("%") and isinstance(value, Number):
        value = Decimal(str(value))
        return "{:%}".format(value)
    elif value is None:
        return ""
    else:
        return value
Return a rows.Table created from imported XLSX file.

workbook_kwargs will be passed to openpyxl.load_workbook
def import_from_xlsx(
    filename_or_fobj,
    sheet_name=None,
    sheet_index=0,
    start_row=None,
    start_column=None,
    end_row=None,
    end_column=None,
    workbook_kwargs=None,
    *args,
    **kwargs
):
    """Return a rows.Table created from imported XLSX file.

    workbook_kwargs will be passed to openpyxl.load_workbook
    """
    workbook_kwargs = workbook_kwargs or {}
    if "read_only" not in workbook_kwargs:
        workbook_kwargs["read_only"] = True
    workbook = load_workbook(filename_or_fobj, **workbook_kwargs)

    if sheet_name is None:
        sheet_name = workbook.sheetnames[sheet_index]
    sheet = workbook[sheet_name]

    # The openpyxl library reads rows and columns starting from 1 and ending on
    # sheet.max_row/max_col. rows uses 0-based indexes (from 0 to N - 1), so we
    # need to adjust the ranges accordingly.
    min_row, min_column = sheet.min_row - 1, sheet.min_column - 1
    max_row, max_column = sheet.max_row - 1, sheet.max_column - 1
    # TODO: consider adding a parameter `ignore_padding=True` and when it's
    # True, consider `start_row` starting from `sheet.min_row` and
    # `start_column` starting from `sheet.min_col`.
    start_row = start_row if start_row is not None else min_row
    end_row = end_row if end_row is not None else max_row
    start_column = start_column if start_column is not None else min_column
    end_column = end_column if end_column is not None else max_column

    table_rows = []
    is_empty = lambda row: all(cell is None for cell in row)
    selected_rows = sheet.iter_rows(
        min_row=start_row + 1,
        max_row=end_row + 1,
        min_col=start_column + 1,
        max_col=end_column + 1,
    )
    for row in selected_rows:
        row = [_cell_to_python(cell) for cell in row]
        if not is_empty(row):
            table_rows.append(row)

    source = Source.from_file(filename_or_fobj, plugin_name="xlsx")
    source.fobj.close()
    # TODO: pass a parameter to Source.from_file so it won't open the file

    metadata = {"imported_from": "xlsx", "source": source, "name": sheet_name}
    return create_table(table_rows, meta=metadata, *args, **kwargs)
Export the rows.Table to XLSX file and return the saved file.
def export_to_xlsx(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
    """Export the rows.Table to XLSX file and return the saved file."""
    workbook = Workbook()
    sheet = workbook.active
    sheet.title = sheet_name

    prepared_table = prepare_to_export(table, *args, **kwargs)

    # Write header
    field_names = next(prepared_table)
    for col_index, field_name in enumerate(field_names):
        cell = sheet.cell(row=1, column=col_index + 1)
        cell.value = field_name

    # Write sheet rows
    _convert_row = _python_to_cell(list(map(table.fields.get, field_names)))
    for row_index, row in enumerate(prepared_table, start=1):
        for col_index, (value, number_format) in enumerate(_convert_row(row)):
            cell = sheet.cell(row=row_index + 1, column=col_index + 1)
            cell.value = value
            if number_format is not None:
                cell.number_format = number_format

    return_result = False
    if filename_or_fobj is None:
        filename_or_fobj = BytesIO()
        return_result = True

    source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xlsx")
    workbook.save(source.fobj)
    source.fobj.flush()

    if return_result:
        source.fobj.seek(0)
        result = source.fobj.read()
    else:
        result = source.fobj

    if source.should_close:
        source.fobj.close()

    return result
Download organizations JSON and extract its properties
def download_organizations():
    "Download organizations JSON and extract its properties"
    response = requests.get(URL)
    data = response.json()
    organizations = [organization["properties"] for organization in data["features"]]
    return rows.import_from_dicts(organizations)
Define table name based on its metadata (filename used on import)

If `filename` is not available, return `table1`.
def name(self):
    """Define table name based on its metadata (filename used on import)

    If `filename` is not available, return `table1`.
    """
    from rows.plugins.utils import slug

    name = self.meta.get("name", None)
    if name is not None:
        return slug(name)

    source = self.meta.get("source", None)
    if source and source.uri:
        return slug(os.path.splitext(Path(source.uri).name)[0])

    return "table1"
Return rows.Table from HTML file.
def import_from_html(
    filename_or_fobj,
    encoding="utf-8",
    index=0,
    ignore_colspan=True,
    preserve_html=False,
    properties=False,
    table_tag="table",
    row_tag="tr",
    column_tag="td|th",
    *args,
    **kwargs
):
    """Return rows.Table from HTML file."""
    source = Source.from_file(
        filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding
    )
    html = source.fobj.read().decode(source.encoding)
    html_tree = document_fromstring(html)
    tables = html_tree.xpath("//{}".format(table_tag))
    table = tables[index]
    # TODO: set meta's "name" from @id or @name (if available)

    strip_tags(table, "thead")
    strip_tags(table, "tbody")
    row_elements = table.xpath(row_tag)

    table_rows = [
        _get_row(
            row,
            column_tag=column_tag,
            preserve_html=preserve_html,
            properties=properties,
        )
        for row in row_elements
    ]

    if properties:
        table_rows[0][-1] = "properties"

    if preserve_html and kwargs.get("fields", None) is None:
        # The field names will be the first table row, so we need to strip HTML
        # from it even if `preserve_html` is `True` (it's `True` only for rows,
        # not for the header).
        table_rows[0] = list(map(_extract_node_text, row_elements[0]))

    if ignore_colspan:
        max_columns = max(map(len, table_rows))
        table_rows = [row for row in table_rows if len(row) == max_columns]

    meta = {"imported_from": "html", "source": source}
    return create_table(table_rows, meta=meta, *args, **kwargs)
Export and return rows.Table data to HTML file.
def export_to_html(
    table, filename_or_fobj=None, encoding="utf-8", caption=False, *args, **kwargs
):
    """Export and return rows.Table data to HTML file."""
    serialized_table = serialize(table, *args, **kwargs)
    fields = next(serialized_table)
    result = ["<table>\n\n"]
    if caption and table.name:
        result.extend([" <caption>", table.name, "</caption>\n\n"])
    result.extend([" <thead>\n", " <tr>\n"])
    # TODO: set @name/@id if self.meta["name"] is set
    header = [" <th> {} </th>\n".format(field) for field in fields]
    result.extend(header)
    result.extend([" </tr>\n", " </thead>\n", "\n", " <tbody>\n", "\n"])
    for index, row in enumerate(serialized_table, start=1):
        css_class = "odd" if index % 2 == 1 else "even"
        result.append(' <tr class="{}">\n'.format(css_class))
        for value in row:
            result.extend([" <td> ", escape(value), " </td>\n"])
        result.append(" </tr>\n\n")
    result.append(" </tbody>\n\n</table>\n")
    html = "".join(result).encode(encoding)

    return export_data(filename_or_fobj, html, mode="wb")
Extract text from a given lxml node.
def _extract_node_text(node):
    """Extract text from a given lxml node."""
    texts = map(
        six.text_type.strip, map(six.text_type, map(unescape, node.xpath(".//text()")))
    )
    return " ".join(text for text in texts if text)
Read a file passed by argument and return its HTML table tag count.
def count_tables(filename_or_fobj, encoding="utf-8", table_tag="table"):
    """Read a file passed by argument and return its HTML table tag count."""
    source = Source.from_file(
        filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding
    )
    html = source.fobj.read().decode(source.encoding)
    html_tree = document_fromstring(html)
    tables = html_tree.xpath("//{}".format(table_tag))
    result = len(tables)
    if source.should_close:
        source.fobj.close()
    return result
Extract tag's attributes into a `dict`.
def tag_to_dict(html):
    """Extract tag's attributes into a `dict`."""
    element = document_fromstring(html).xpath("//html/body/child::*")[0]
    attributes = dict(element.attrib)
    attributes["text"] = element.text_content()
    return attributes
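A hedged usage sketch (document_fromstring comes from lxml.html, so lxml must be installed; the exact key order may vary):

print(tag_to_dict('<div class="header" id="main">Hello</div>'))
# Expected to be something like: {'class': 'header', 'id': 'main', 'text': 'Hello'}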
Create a rows.Table object based on data rows and some configurations

- `skip_header` is only used if `fields` is set
- `samples` is only used if `fields` is `None`. If samples=None, all data is filled in memory - use with caution.
- `force_types` is only used if `fields` is `None`
- `import_fields` can be used either if `fields` is set or not, the resulting fields will follow its order
- `fields` must always be in the same order as the data
def create_table(
    data,
    meta=None,
    fields=None,
    skip_header=True,
    import_fields=None,
    samples=None,
    force_types=None,
    max_rows=None,
    *args,
    **kwargs
):
    """Create a rows.Table object based on data rows and some configurations

    - `skip_header` is only used if `fields` is set
    - `samples` is only used if `fields` is `None`. If samples=None, all data
      is filled in memory - use with caution.
    - `force_types` is only used if `fields` is `None`
    - `import_fields` can be used either if `fields` is set or not, the
      resulting fields will follow its order
    - `fields` must always be in the same order as the data
    """
    table_rows = iter(data)
    force_types = force_types or {}
    if import_fields is not None:
        import_fields = make_header(import_fields)

    # TODO: test max_rows
    if fields is None:  # autodetect field types
        # TODO: may add `type_hints` parameter so autodetection can be easier
        # (plugins may specify some possible field types).
        header = make_header(next(table_rows))

        if samples is not None:
            sample_rows = list(islice(table_rows, 0, samples))
            table_rows = chain(sample_rows, table_rows)
        else:
            if max_rows is not None and max_rows > 0:
                sample_rows = table_rows = list(islice(table_rows, max_rows))
            else:
                sample_rows = table_rows = list(table_rows)

        # Detect field types using only the desired columns
        detected_fields = detect_types(
            header,
            sample_rows,
            skip_indexes=[
                index
                for index, field in enumerate(header)
                if field in force_types or field not in (import_fields or header)
            ],
            *args,
            **kwargs
        )
        # Check if any field was added during the detection process
        new_fields = [
            field_name
            for field_name in detected_fields.keys()
            if field_name not in header
        ]
        # Finally create the `fields` with both header and new field names,
        # based on detected fields and `force_types`
        fields = OrderedDict(
            [
                (field_name, detected_fields.get(field_name, TextField))
                for field_name in header + new_fields
            ]
        )
        fields.update(force_types)

        # Update `header` and `import_fields` based on new `fields`
        header = list(fields.keys())
        if import_fields is None:
            import_fields = header

    else:  # using provided field types
        if not isinstance(fields, OrderedDict):
            raise ValueError("`fields` must be an `OrderedDict`")

        if skip_header:
            # If we're skipping the header probably this row is not trustable
            # (can be data or garbage).
            next(table_rows)

        header = make_header(list(fields.keys()))
        if import_fields is None:
            import_fields = header

        fields = OrderedDict(
            [(field_name, fields[key]) for field_name, key in zip(header, fields)]
        )

    diff = set(import_fields) - set(header)
    if diff:
        field_names = ", ".join('"{}"'.format(field) for field in diff)
        raise ValueError("Invalid field names: {}".format(field_names))
    fields = OrderedDict(
        [(field_name, fields[field_name]) for field_name in import_fields]
    )

    get_row = get_items(*map(header.index, import_fields))
    table = Table(fields=fields, meta=meta)
    if max_rows is not None and max_rows > 0:
        table_rows = islice(table_rows, max_rows)
    table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows)

    source = table.meta.get("source", None)
    if source is not None:
        if source.should_close:
            source.fobj.close()
        if source.should_delete and Path(source.uri).exists():
            unlink(source.uri)

    return table
Return the object ready to be exported or only data if filename_or_fobj is not passed.
def export_data(filename_or_fobj, data, mode="w"):
    """Return the object ready to be exported or only data if filename_or_fobj is not passed."""
    if filename_or_fobj is None:
        return data

    _, fobj = get_filename_and_fobj(filename_or_fobj, mode=mode)
    source = Source.from_file(filename_or_fobj, mode=mode, plugin_name=None)
    source.fobj.write(data)
    source.fobj.flush()
    return source.fobj
Read `sample` bytes from `fobj` and return the cursor to where it was.
def read_sample(fobj, sample):
    """Read `sample` bytes from `fobj` and return the cursor to where it was."""
    cursor = fobj.tell()
    data = fobj.read(sample)
    fobj.seek(cursor)
    return data
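A minimal sketch of read_sample using an in-memory binary stream:

import io

fobj = io.BytesIO(b"id,name\n1,Alice\n")
print(read_sample(fobj, 7))  # b'id,name'
print(fobj.tell())           # 0 - the cursor is back where it started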
Import data from a CSV file (automatically detects dialect).

If a file-like object is provided it MUST be in binary mode, like in `open(filename, mode='rb')`.
def import_from_csv(
    filename_or_fobj,
    encoding="utf-8",
    dialect=None,
    sample_size=262144,
    *args,
    **kwargs
):
    """Import data from a CSV file (automatically detects dialect).

    If a file-like object is provided it MUST be in binary mode, like in
    `open(filename, mode='rb')`.
    """
    source = Source.from_file(
        filename_or_fobj, plugin_name="csv", mode="rb", encoding=encoding
    )

    if dialect is None:
        dialect = discover_dialect(
            sample=read_sample(source.fobj, sample_size), encoding=source.encoding
        )

    reader = unicodecsv.reader(source.fobj, encoding=encoding, dialect=dialect)

    meta = {"imported_from": "csv", "source": source}
    return create_table(reader, meta=meta, *args, **kwargs)
Export a `rows.Table` to a CSV file.

If a file-like object is provided it MUST be in binary mode, like in `open(filename, mode='wb')`.

If no filename/fobj is provided, the function returns a string with the CSV contents.
def export_to_csv(
    table,
    filename_or_fobj=None,
    encoding="utf-8",
    dialect=unicodecsv.excel,
    batch_size=100,
    callback=None,
    *args,
    **kwargs
):
    """Export a `rows.Table` to a CSV file.

    If a file-like object is provided it MUST be in binary mode, like in
    `open(filename, mode='wb')`.
    If no filename/fobj is provided, the function returns a string with the
    CSV contents.
    """
    # TODO: will work only if table.fields is OrderedDict
    # TODO: should use fobj? What about creating a method like json.dumps?

    return_data, should_close = False, None
    if filename_or_fobj is None:
        filename_or_fobj = BytesIO()
        return_data = should_close = True

    source = Source.from_file(
        filename_or_fobj,
        plugin_name="csv",
        mode="wb",
        encoding=encoding,
        should_close=should_close,
    )

    # TODO: may use `io.BufferedWriter` instead of `ipartition` so the user can
    # choose the real size (in bytes) to flush to the file system, instead of
    # the number of rows
    writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect)

    if callback is None:
        for batch in ipartition(serialize(table, *args, **kwargs), batch_size):
            writer.writerows(batch)
    else:
        serialized = serialize(table, *args, **kwargs)
        writer.writerow(next(serialized))  # First, write the header
        total = 0
        for batch in ipartition(serialized, batch_size):
            writer.writerows(batch)
            total += len(batch)
            callback(total)

    if return_data:
        source.fobj.seek(0)
        result = source.fobj.read()
    else:
        source.fobj.flush()
        result = source.fobj

    if source.should_close:
        source.fobj.close()

    return result
Merge a list of `Table` objects using `keys` to group rows
def join(keys, tables):
    """Merge a list of `Table` objects using `keys` to group rows"""
    # Make new (merged) Table fields
    fields = OrderedDict()
    for table in tables:
        fields.update(table.fields)
    # TODO: may raise an error if a same field is different in some tables

    # Check if all keys are inside merged Table's fields
    fields_keys = set(fields.keys())
    for key in keys:
        if key not in fields_keys:
            raise ValueError('Invalid key: "{}"'.format(key))

    # Group rows by key, without missing ordering
    none_fields = lambda: OrderedDict({field: None for field in fields.keys()})
    data = OrderedDict()
    for table in tables:
        for row in table:
            row_key = tuple([getattr(row, key) for key in keys])
            if row_key not in data:
                data[row_key] = none_fields()
            data[row_key].update(row._asdict())

    merged = Table(fields=fields)
    merged.extend(data.values())
    return merged
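A hedged sketch of join, reusing import_from_dicts and export_to_dicts from earlier in this section (rows missing from one of the tables get None for its fields):

people = import_from_dicts([{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}])
ages = import_from_dicts([{"id": 1, "age": 30}])
merged = join(keys=["id"], tables=[people, ages])
print(export_to_dicts(merged))
# [{'id': 1, 'name': 'Alice', 'age': 30}, {'id': 2, 'name': 'Bob', 'age': None}]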
Return a new table based on other tables and a transformation function
def transform(fields, function, *tables):
    "Return a new table based on other tables and a transformation function"
    new_table = Table(fields=fields)

    for table in tables:
        for row in filter(bool, map(lambda row: function(row, table), table)):
            new_table.append(row)

    return new_table
Initiate a sniffing task. Make sure we only have one sniff request running at any given time. If a finished sniffing request is around, collect its result (which can raise its exception).
def initiate_sniff(self, initial=False):
    """
    Initiate a sniffing task. Make sure we only have one sniff request
    running at any given time. If a finished sniffing request is around,
    collect its result (which can raise its exception).
    """
    if self.sniffing_task and self.sniffing_task.done():
        try:
            if self.sniffing_task is not None:
                self.sniffing_task.result()
        except:
            if self.raise_on_sniff_error:
                raise
        finally:
            self.sniffing_task = None

    if self.sniffing_task is None:
        self.sniffing_task = ensure_future(self.sniff_hosts(initial), loop=self.loop)
Obtain a list of nodes from the cluster and create a new connection pool using the information retrieved. To extract the node connection parameters use the ``nodes_to_host_callback``. :arg initial: flag indicating if this is during startup (``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
def sniff_hosts(self, initial=False):
    """
    Obtain a list of nodes from the cluster and create a new connection pool
    using the information retrieved.

    To extract the node connection parameters use the
    ``nodes_to_host_callback``.

    :arg initial: flag indicating if this is during startup
        (``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
    """
    node_info = yield from self._get_sniff_data(initial)

    hosts = list(filter(None, (self._get_host_info(n) for n in node_info)))

    # we weren't able to get any nodes, maybe using an incompatible
    # transport_schema or host_info_callback blocked all - raise error.
    if not hosts:
        raise TransportError("N/A", "Unable to sniff hosts - no viable hosts found.")

    # remember current live connections
    orig_connections = self.connection_pool.connections[:]

    self.set_connections(hosts)
    # close those connections that are not in use any more
    for c in orig_connections:
        if c not in self.connection_pool.connections:
            yield from c.close()
Make a k8s pod specification for running a user notebook.

Parameters
----------
name:
    Name of pod. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label.
image:
    Image specification - usually an image name and tag in the form of image_name:tag. Same thing you would use with docker commandline arguments.
image_pull_policy:
    Image pull policy - one of 'Always', 'IfNotPresent' or 'Never'. Decides when kubernetes will check for a newer version of image and pull it when running a pod.
image_pull_secret:
    Image pull secret - Default is None -- set to your secret name to pull from private docker registry.
port:
    Port the notebook server is going to be listening on.
cmd:
    The command used to execute the singleuser server.
node_selector:
    Dictionary Selector to match nodes where to launch the Pods.
run_as_uid:
    The UID used to run single-user pods. The default is to run as the user specified in the Dockerfile, if this is set to None.
run_as_gid:
    The GID used to run single-user pods. The default is to run as the primary group of the user specified in the Dockerfile, if this is set to None. Setting this parameter requires that *feature-gate* **RunAsGroup** be enabled, otherwise the effective GID of the pod will be 0 (root). In addition, not setting `run_as_gid` once feature-gate RunAsGroup is enabled will also result in an effective GID of 0 (root).
fs_gid:
    The gid that will own any fresh volumes mounted into this pod, if using volume types that support this (such as GCE). This should be a group that the uid the process is running as should be a member of, so that it can read / write to the volumes mounted.
supplemental_gids:
    A list of GIDs that should be set as additional supplemental groups to the user that the container runs as. You may have to set this if you are deploying to an environment with RBAC/SCC enforced and pods run with a 'restricted' SCC which results in the image being run as an assigned user ID. The supplemental group IDs would need to include the corresponding group ID of the user ID the image normally would run as. The image must set up all directories/files any application needs access to, as group writable.
run_privileged:
    Whether the container should be run in privileged mode.
env:
    Dictionary of environment variables.
volumes:
    List of dictionaries containing the volumes of various types this pod will be using. See k8s documentation about volumes on how to specify these.
volume_mounts:
    List of dictionaries mapping paths in the container and the volume (specified in volumes) that should be mounted on them. See the k8s documentation for more details.
working_dir:
    String specifying the working directory for the notebook container.
labels:
    Labels to add to the spawned pod.
annotations:
    Annotations to add to the spawned pod.
cpu_limit:
    Float specifying the max number of CPU cores the user's pod is allowed to use.
cpu_guarantee:
    Float specifying the max number of CPU cores the user's pod is guaranteed to have access to, by the scheduler.
mem_limit:
    String specifying the max amount of RAM the user's pod is allowed to use. String instead of float/int since common suffixes are allowed.
mem_guarantee:
    String specifying the max amount of RAM the user's pod is guaranteed to have access to. String instead of float/int since common suffixes are allowed.
lifecycle_hooks:
    Dictionary of lifecycle hooks.
init_containers:
    List of initialization containers belonging to the pod.
service_account:
    Service account to mount on the pod. None disables mounting.
extra_container_config:
    Extra configuration (e.g. envFrom) for notebook container which is not covered by parameters above.
extra_pod_config:
    Extra configuration (e.g. tolerations) for pod which is not covered by parameters above.
extra_containers:
    Extra containers besides notebook container. Used for some housekeeping jobs (e.g. crontab).
scheduler_name:
    The pod's scheduler explicitly named.
tolerations:
    Tolerations can allow a pod to schedule or execute on a tainted node. To learn more about pod tolerations, see https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/.
    Pass this field an array of "Toleration" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#nodeselectorterm-v1-core
node_affinity_preferred:
    Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    Pass this field an array of "PreferredSchedulingTerm" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#preferredschedulingterm-v1-core
node_affinity_required:
    Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    Pass this field an array of "NodeSelectorTerm" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#nodeselectorterm-v1-core
pod_affinity_preferred:
    Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    Pass this field an array of "WeightedPodAffinityTerm" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core
pod_affinity_required:
    Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    Pass this field an array of "PodAffinityTerm" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core
pod_anti_affinity_preferred:
    Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    Pass this field an array of "WeightedPodAffinityTerm" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core
pod_anti_affinity_required:
    Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
    Pass this field an array of "PodAffinityTerm" objects.*
    * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core
priority_class_name:
    The name of the PriorityClass to be assigned the pod. This feature is Beta available in K8s 1.11.
def make_pod( name, cmd, port, image, image_pull_policy, image_pull_secret=None, node_selector=None, run_as_uid=None, run_as_gid=None, fs_gid=None, supplemental_gids=None, run_privileged=False, env=None, working_dir=None, volumes=None, volume_mounts=None, labels=None, annotations=None, cpu_limit=None, cpu_guarantee=None, mem_limit=None, mem_guarantee=None, extra_resource_limits=None, extra_resource_guarantees=None, lifecycle_hooks=None, init_containers=None, service_account=None, extra_container_config=None, extra_pod_config=None, extra_containers=None, scheduler_name=None, tolerations=None, node_affinity_preferred=None, node_affinity_required=None, pod_affinity_preferred=None, pod_affinity_required=None, pod_anti_affinity_preferred=None, pod_anti_affinity_required=None, priority_class_name=None, logger=None, ): """ Make a k8s pod specification for running a user notebook. Parameters ---------- name: Name of pod. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label. image: Image specification - usually a image name and tag in the form of image_name:tag. Same thing you would use with docker commandline arguments image_pull_policy: Image pull policy - one of 'Always', 'IfNotPresent' or 'Never'. Decides when kubernetes will check for a newer version of image and pull it when running a pod. image_pull_secret: Image pull secret - Default is None -- set to your secret name to pull from private docker registry. port: Port the notebook server is going to be listening on cmd: The command used to execute the singleuser server. node_selector: Dictionary Selector to match nodes where to launch the Pods run_as_uid: The UID used to run single-user pods. The default is to run as the user specified in the Dockerfile, if this is set to None. run_as_gid: The GID used to run single-user pods. The default is to run as the primary group of the user specified in the Dockerfile, if this is set to None. Setting this parameter requires that *feature-gate* **RunAsGroup** be enabled, otherwise the effective GID of the pod will be 0 (root). In addition, not setting `run_as_gid` once feature-gate RunAsGroup is enabled will also result in an effective GID of 0 (root). fs_gid The gid that will own any fresh volumes mounted into this pod, if using volume types that support this (such as GCE). This should be a group that the uid the process is running as should be a member of, so that it can read / write to the volumes mounted. supplemental_gids: A list of GIDs that should be set as additional supplemental groups to the user that the container runs as. You may have to set this if you are deploying to an environment with RBAC/SCC enforced and pods run with a 'restricted' SCC which results in the image being run as an assigned user ID. The supplemental group IDs would need to include the corresponding group ID of the user ID the image normally would run as. The image must setup all directories/files any application needs access to, as group writable. run_privileged: Whether the container should be run in privileged mode. env: Dictionary of environment variables. volumes: List of dictionaries containing the volumes of various types this pod will be using. See k8s documentation about volumes on how to specify these volume_mounts: List of dictionaries mapping paths in the container and the volume( specified in volumes) that should be mounted on them. 
See the k8s documentaiton for more details working_dir: String specifying the working directory for the notebook container labels: Labels to add to the spawned pod. annotations: Annotations to add to the spawned pod. cpu_limit: Float specifying the max number of CPU cores the user's pod is allowed to use. cpu_guarentee: Float specifying the max number of CPU cores the user's pod is guaranteed to have access to, by the scheduler. mem_limit: String specifying the max amount of RAM the user's pod is allowed to use. String instead of float/int since common suffixes are allowed mem_guarantee: String specifying the max amount of RAM the user's pod is guaranteed to have access to. String ins loat/int since common suffixes are allowed lifecycle_hooks: Dictionary of lifecycle hooks init_containers: List of initialization containers belonging to the pod. service_account: Service account to mount on the pod. None disables mounting extra_container_config: Extra configuration (e.g. envFrom) for notebook container which is not covered by parameters above. extra_pod_config: Extra configuration (e.g. tolerations) for pod which is not covered by parameters above. extra_containers: Extra containers besides notebook container. Used for some housekeeping jobs (e.g. crontab). scheduler_name: The pod's scheduler explicitly named. tolerations: Tolerations can allow a pod to schedule or execute on a tainted node. To learn more about pod tolerations, see https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/. Pass this field an array of "Toleration" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#nodeselectorterm-v1-core node_affinity_preferred: Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Pass this field an array of "PreferredSchedulingTerm" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#preferredschedulingterm-v1-core node_affinity_required: Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Pass this field an array of "NodeSelectorTerm" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#nodeselectorterm-v1-core pod_affinity_preferred: Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Pass this field an array of "WeightedPodAffinityTerm" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core pod_affinity_required: Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. 
To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Pass this field an array of "PodAffinityTerm" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core pod_anti_affinity_preferred: Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Pass this field an array of "WeightedPodAffinityTerm" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#weightedpodaffinityterm-v1-core pod_anti_affinity_required: Affinities describe where pods prefer or require to be scheduled, they may prefer or require a node to have a certain label or be in proximity / remoteness to another pod. To learn more visit https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ Pass this field an array of "PodAffinityTerm" objects.* * https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podaffinityterm-v1-core priority_class_name: The name of the PriorityClass to be assigned the pod. This feature is Beta available in K8s 1.11. """ pod = V1Pod() pod.kind = "Pod" pod.api_version = "v1" pod.metadata = V1ObjectMeta( name=name, labels=(labels or {}).copy(), annotations=(annotations or {}).copy() ) pod.spec = V1PodSpec(containers=[]) pod.spec.restart_policy = 'OnFailure' security_context = V1PodSecurityContext() if fs_gid is not None: security_context.fs_group = int(fs_gid) if supplemental_gids is not None and supplemental_gids: security_context.supplemental_groups = [int(gid) for gid in supplemental_gids] if run_as_uid is not None: security_context.run_as_user = int(run_as_uid) if run_as_gid is not None: security_context.run_as_group = int(run_as_gid) pod.spec.security_context = security_context if image_pull_secret is not None: pod.spec.image_pull_secrets = [] image_secret = V1LocalObjectReference() image_secret.name = image_pull_secret pod.spec.image_pull_secrets.append(image_secret) if node_selector: pod.spec.node_selector = node_selector if lifecycle_hooks: lifecycle_hooks = get_k8s_model(V1Lifecycle, lifecycle_hooks) notebook_container = V1Container( name='notebook', image=image, working_dir=working_dir, ports=[V1ContainerPort(name='notebook-port', container_port=port)], env=[V1EnvVar(k, v) for k, v in (env or {}).items()], args=cmd, image_pull_policy=image_pull_policy, lifecycle=lifecycle_hooks, resources=V1ResourceRequirements(), volume_mounts=[get_k8s_model(V1VolumeMount, obj) for obj in (volume_mounts or [])], ) if service_account is None: # This makes sure that we don't accidentally give access to the whole # kubernetes API to the users in the spawned pods. 
pod.spec.automount_service_account_token = False else: pod.spec.service_account_name = service_account if run_privileged: notebook_container.security_context = V1SecurityContext(privileged=True) notebook_container.resources.requests = {} if cpu_guarantee: notebook_container.resources.requests['cpu'] = cpu_guarantee if mem_guarantee: notebook_container.resources.requests['memory'] = mem_guarantee if extra_resource_guarantees: notebook_container.resources.requests.update(extra_resource_guarantees) notebook_container.resources.limits = {} if cpu_limit: notebook_container.resources.limits['cpu'] = cpu_limit if mem_limit: notebook_container.resources.limits['memory'] = mem_limit if extra_resource_limits: notebook_container.resources.limits.update(extra_resource_limits) if extra_container_config: notebook_container = update_k8s_model( target=notebook_container, changes=extra_container_config, logger=logger, target_name="notebook_container", changes_name="extra_container_config", ) pod.spec.containers.append(notebook_container) if extra_containers: pod.spec.containers.extend([get_k8s_model(V1Container, obj) for obj in extra_containers]) if tolerations: pod.spec.tolerations = [get_k8s_model(V1Toleration, obj) for obj in tolerations] if init_containers: pod.spec.init_containers = [get_k8s_model(V1Container, obj) for obj in init_containers] if volumes: pod.spec.volumes = [get_k8s_model(V1Volume, obj) for obj in volumes] else: # Keep behaving exactly like before by not cleaning up generated pod # spec by setting the volumes field even though it is an empty list. pod.spec.volumes = [] if scheduler_name: pod.spec.scheduler_name = scheduler_name node_affinity = None if node_affinity_preferred or node_affinity_required: node_selector = None if node_affinity_required: node_selector = V1NodeSelector( node_selector_terms=[get_k8s_model(V1NodeSelectorTerm, obj) for obj in node_affinity_required], ) preferred_scheduling_terms = None if node_affinity_preferred: preferred_scheduling_terms = [get_k8s_model(V1PreferredSchedulingTerm, obj) for obj in node_affinity_preferred] node_affinity = V1NodeAffinity( preferred_during_scheduling_ignored_during_execution=preferred_scheduling_terms, required_during_scheduling_ignored_during_execution=node_selector, ) pod_affinity = None if pod_affinity_preferred or pod_affinity_required: weighted_pod_affinity_terms = None if pod_affinity_preferred: weighted_pod_affinity_terms = [get_k8s_model(V1WeightedPodAffinityTerm, obj) for obj in pod_affinity_preferred] pod_affinity_terms = None if pod_affinity_required: pod_affinity_terms = [get_k8s_model(V1PodAffinityTerm, obj) for obj in pod_affinity_required] pod_affinity = V1PodAffinity( preferred_during_scheduling_ignored_during_execution=weighted_pod_affinity_terms, required_during_scheduling_ignored_during_execution=pod_affinity_terms, ) pod_anti_affinity = None if pod_anti_affinity_preferred or pod_anti_affinity_required: weighted_pod_affinity_terms = None if pod_anti_affinity_preferred: weighted_pod_affinity_terms = [get_k8s_model(V1WeightedPodAffinityTerm, obj) for obj in pod_anti_affinity_preferred] pod_affinity_terms = None if pod_anti_affinity_required: pod_affinity_terms = [get_k8s_model(V1PodAffinityTerm, obj) for obj in pod_anti_affinity_required] pod_anti_affinity = V1PodAffinity( preferred_during_scheduling_ignored_during_execution=weighted_pod_affinity_terms, required_during_scheduling_ignored_during_execution=pod_affinity_terms, ) affinity = None if (node_affinity or pod_affinity or pod_anti_affinity): affinity = 
V1Affinity( node_affinity=node_affinity, pod_affinity=pod_affinity, pod_anti_affinity=pod_anti_affinity, ) if affinity: pod.spec.affinity = affinity if priority_class_name: pod.spec.priority_class_name = priority_class_name if extra_pod_config: pod.spec = update_k8s_model( target=pod.spec, changes=extra_pod_config, logger=logger, target_name="pod.spec", changes_name="extra_pod_config", ) return pod
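For orientation, a minimal invocation of make_pod might look like the sketch below. The import path, argument values and the printed fields are illustrative assumptions, not taken from this document; only the arguments that the signature above requires positionally are strictly needed.

from kubespawner.objects import make_pod   # import path assumed

pod = make_pod(
    name="jupyter-alice",                  # hypothetical pod name
    cmd=["jupyterhub-singleuser"],
    port=8888,
    image="jupyter/base-notebook:latest",
    image_pull_policy="IfNotPresent",
    env={"EXAMPLE_VAR": "1"},
    mem_limit="1G",
    cpu_limit=1.0,
)
print(pod.metadata.name, pod.spec.containers[0].image)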
Make a k8s pvc specification for running a user notebook.

Parameters
----------
name:
    Name of persistent volume claim. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label.
storage_class:
    String of the name of the k8s Storage Class to use.
access_modes:
    A list specifying which access modes the pod should have towards the pvc.
storage:
    The amount of storage needed for the pvc.
def make_pvc(
    name,
    storage_class,
    access_modes,
    storage,
    labels=None,
    annotations=None,
):
    """
    Make a k8s pvc specification for running a user notebook.

    Parameters
    ----------
    name:
        Name of persistent volume claim. Must be unique within the namespace the
        object is going to be created in. Must be a valid DNS label.
    storage_class:
        String of the name of the k8s Storage Class to use.
    access_modes:
        A list specifying which access modes the pod should have towards the pvc.
    storage:
        The amount of storage needed for the pvc.
    """
    pvc = V1PersistentVolumeClaim()
    pvc.kind = "PersistentVolumeClaim"
    pvc.api_version = "v1"
    pvc.metadata = V1ObjectMeta()
    pvc.metadata.name = name
    pvc.metadata.annotations = (annotations or {}).copy()
    pvc.metadata.labels = (labels or {}).copy()
    pvc.spec = V1PersistentVolumeClaimSpec()
    pvc.spec.access_modes = access_modes
    pvc.spec.resources = V1ResourceRequirements()
    pvc.spec.resources.requests = {"storage": storage}

    if storage_class:
        pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class})
        pvc.spec.storage_class_name = storage_class

    return pvc
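A short usage sketch of make_pvc follows; the names and sizes are hypothetical and the import path is assumed.

from kubespawner.objects import make_pvc   # import path assumed

pvc = make_pvc(
    name="claim-alice",
    storage_class="standard",
    access_modes=["ReadWriteOnce"],
    storage="10Gi",
    labels={"app": "jupyterhub"},
)
print(pvc.spec.resources.requests)   # {'storage': '10Gi'}
print(pvc.metadata.annotations)      # includes volume.beta.kubernetes.io/storage-class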
Return a single shared kubernetes client instance A weak reference to the instance is cached, so that concurrent calls to shared_client will all return the same instance until all references to the client are cleared.
def shared_client(ClientType, *args, **kwargs):
    """Return a single shared kubernetes client instance

    A weak reference to the instance is cached,
    so that concurrent calls to shared_client
    will all return the same instance until
    all references to the client are cleared.
    """
    kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs))
    cache_key = (ClientType, args, kwarg_key)
    client = None
    if cache_key in _client_cache:
        # resolve cached weakref
        # client can still be None after this!
        client = _client_cache[cache_key]()

    if client is None:
        Client = getattr(kubernetes.client, ClientType)
        client = Client(*args, **kwargs)
        # cache weakref so that clients can be garbage collected
        _client_cache[cache_key] = weakref.ref(client)

    return client
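Because the cache key is built from ClientType plus the call arguments, two calls with the same arguments return the same live client. A sketch, assuming the helper is importable (path assumed) and that a kubeconfig or in-cluster config has been loaded:

import kubernetes.config
from kubespawner.clients import shared_client   # import path assumed

kubernetes.config.load_kube_config()            # or load_incluster_config()

v1_a = shared_client('CoreV1Api')
v1_b = shared_client('CoreV1Api')
assert v1_a is v1_b   # same cache key -> same shared instance while references exist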
Generate a unique name that's within a certain length limit Most k8s objects have a 63 char name limit. We want to be able to compress larger names down to that if required, while still maintaining some amount of legibility about what the objects really are. If the length of the slug is shorter than the limit - hash_length, we just return slug directly. If not, we truncate the slug to (limit - hash_length) characters, hash the slug and append hash_length characters from the hash to the end of the truncated slug. This ensures that these names are always unique no matter what.
def generate_hashed_slug(slug, limit=63, hash_length=6):
    """
    Generate a unique name that's within a certain length limit

    Most k8s objects have a 63 char name limit. We want to be able to compress
    larger names down to that if required, while still maintaining some amount
    of legibility about what the objects really are.

    If the length of the slug is shorter than the limit - hash_length, we just
    return slug directly. If not, we truncate the slug to (limit - hash_length)
    characters, hash the slug and append hash_length characters from the hash
    to the end of the truncated slug. This ensures that these names are always
    unique no matter what.
    """
    if len(slug) < (limit - hash_length):
        return slug

    slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest()

    return '{prefix}-{hash}'.format(
        prefix=slug[:limit - hash_length - 1],
        hash=slug_hash[:hash_length],
    ).lower()
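A quick illustration of the two branches, assuming the function above is in scope:

short = generate_hashed_slug('claim-alice')
long_name = generate_hashed_slug('claim-' + 'x' * 100)

print(short)            # 'claim-alice' -- already under the limit, returned unchanged
print(long_name)        # truncated prefix + '-' + 6 hex characters of the sha256 hash
print(len(long_name))   # 63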
Takes a model instance such as V1PodSpec() and updates it with another model, which is allowed to be a dict or another model instance of the same type. The logger is used to warn if any truthy value in the target is overridden. The target_name parameter can for example be "pod.spec", and the changes_name parameter could be "extra_pod_config". These parameters allow the logger to write out something more meaningful to the user whenever something is about to become overridden.
def update_k8s_model(target, changes, logger=None, target_name=None, changes_name=None):
    """
    Takes a model instance such as V1PodSpec() and updates it with another
    model, which is allowed to be a dict or another model instance of the same
    type. The logger is used to warn if any truthy value in the target is
    overridden. The target_name parameter can for example be "pod.spec", and
    the changes_name parameter could be "extra_pod_config". These parameters
    allow the logger to write out something more meaningful to the user
    whenever something is about to become overridden.
    """
    model_type = type(target)
    if not hasattr(target, 'attribute_map'):
        raise AttributeError("Attribute 'target' ({}) must be an object (such as 'V1PodSpec') with an attribute 'attribute_map'.".format(model_type.__name__))
    if not isinstance(changes, model_type) and not isinstance(changes, dict):
        raise AttributeError("Attribute 'changes' ({}) must be an object of the same type as 'target' ({}) or a 'dict'.".format(type(changes).__name__, model_type.__name__))

    changes_dict = _get_k8s_model_dict(model_type, changes)
    for key, value in changes_dict.items():
        if key not in target.attribute_map:
            raise ValueError("The attribute 'changes' ({}) contained '{}' not modeled by '{}'.".format(type(changes).__name__, key, model_type.__name__))

        # If changes are passed as a dict, they will only have a few keys/value
        # pairs representing the specific changes. If the changes parameter is a
        # model instance on the other hand, the changes parameter will have a
        # lot of default values as well. These default values, which are also
        # falsy, should not be used to override the target's values.
        if isinstance(changes, dict) or value:
            if getattr(target, key):
                if logger and changes_name:
                    warning = "'{}.{}' current value: '{}' is overridden with '{}', which is the value of '{}.{}'.".format(
                        target_name, key, getattr(target, key), value, changes_name, key
                    )
                    logger.warning(warning)
            setattr(target, key, value)

    return target
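To make the override behaviour concrete, here is a sketch that feeds a camelCase dict (as a user might supply via extra_pod_config) into an existing V1PodSpec; it assumes the kubernetes client package and the helpers above are importable.

from kubernetes.client import V1Container, V1PodSpec

spec = V1PodSpec(containers=[V1Container(name='notebook')])
spec.scheduler_name = 'default-scheduler'

changes = {'schedulerName': 'my-scheduler', 'priorityClassName': 'high'}

spec = update_k8s_model(spec, changes,
                        target_name='pod.spec',
                        changes_name='extra_pod_config')
print(spec.scheduler_name)        # 'my-scheduler' -- the truthy value was overridden
print(spec.priority_class_name)   # 'high'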
Returns an instance of the specified model_type from a model instance or a representative dictionary.
def get_k8s_model(model_type, model_dict):
    """
    Returns an instance of the specified model_type from a model instance or
    a representative dictionary.
    """
    model_dict = copy.deepcopy(model_dict)

    if isinstance(model_dict, model_type):
        return model_dict
    elif isinstance(model_dict, dict):
        # convert the dictionary's camelCase keys to snake_case keys
        model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
        # use the dictionary keys to initialize a model of given type
        return model_type(**model_dict)
    else:
        raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__))
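For example, a toleration passed as a plain dict with camelCase keys can be turned into a typed model; a sketch, assuming the kubernetes client package is installed and the helpers above are in scope (the taint key is hypothetical):

from kubernetes.client import V1Toleration

toleration = get_k8s_model(V1Toleration, {
    'key': 'hub.jupyter.org/dedicated',   # hypothetical taint key
    'operator': 'Equal',
    'value': 'user',
    'effect': 'NoSchedule',
    'tolerationSeconds': 300,             # camelCase key is mapped to toleration_seconds
})
print(toleration.toleration_seconds)      # 300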
Returns a dictionary representation of a provided model type
def _get_k8s_model_dict(model_type, model):
    """
    Returns a dictionary representation of a provided model type
    """
    model = copy.deepcopy(model)

    if isinstance(model, model_type):
        return model.to_dict()
    elif isinstance(model, dict):
        return _map_dict_keys_to_model_attributes(model_type, model)
    else:
        raise AttributeError("Expected object of type '{}' (or 'dict') but got '{}'.".format(model_type.__name__, type(model).__name__))
Maps a dict's keys to the provided model's attributes using its attribute_map attribute. This is (always?) the same as converting camelCase to snake_case. Note that the function will not influence nested objects' keys.
def _map_dict_keys_to_model_attributes(model_type, model_dict):
    """
    Maps a dict's keys to the provided model's attributes using its
    attribute_map attribute. This is (always?) the same as converting
    camelCase to snake_case. Note that the function will not influence
    nested objects' keys.
    """
    new_dict = {}
    for key, value in model_dict.items():
        new_dict[_get_k8s_model_attribute(model_type, key)] = value

    return new_dict
Takes a model type and a Kubernetes API resource field name (such as "serviceAccount") and returns a related attribute name (such as "service_account") to be used with kubernetes.client.models objects. It is impossible to prove a negative but it seems like it is always a question of making camelCase to snake_case but by using the provided 'attribute_map' we also ensure that the fields actually exist. Example of V1PodSpec's attribute_map: { 'active_deadline_seconds': 'activeDeadlineSeconds', 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'containers': 'containers', 'dns_policy': 'dnsPolicy', 'host_aliases': 'hostAliases', 'host_ipc': 'hostIPC', 'host_network': 'hostNetwork', 'host_pid': 'hostPID', 'hostname': 'hostname', 'image_pull_secrets': 'imagePullSecrets', 'init_containers': 'initContainers', 'node_name': 'nodeName', 'node_selector': 'nodeSelector', 'priority': 'priority', 'priority_class_name': 'priorityClassName', 'restart_policy': 'restartPolicy', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'service_account': 'serviceAccount', 'service_account_name': 'serviceAccountName', 'subdomain': 'subdomain', 'termination_grace_period_seconds': 'terminationGracePeriodSeconds', 'tolerations': 'tolerations', 'volumes': 'volumes' }
def _get_k8s_model_attribute(model_type, field_name): """ Takes a model type and a Kubernetes API resource field name (such as "serviceAccount") and returns a related attribute name (such as "service_account") to be used with kubernetes.client.models objects. It is impossible to prove a negative but it seems like it is always a question of making camelCase to snake_case but by using the provided 'attribute_map' we also ensure that the fields actually exist. Example of V1PodSpec's attribute_map: { 'active_deadline_seconds': 'activeDeadlineSeconds', 'affinity': 'affinity', 'automount_service_account_token': 'automountServiceAccountToken', 'containers': 'containers', 'dns_policy': 'dnsPolicy', 'host_aliases': 'hostAliases', 'host_ipc': 'hostIPC', 'host_network': 'hostNetwork', 'host_pid': 'hostPID', 'hostname': 'hostname', 'image_pull_secrets': 'imagePullSecrets', 'init_containers': 'initContainers', 'node_name': 'nodeName', 'node_selector': 'nodeSelector', 'priority': 'priority', 'priority_class_name': 'priorityClassName', 'restart_policy': 'restartPolicy', 'scheduler_name': 'schedulerName', 'security_context': 'securityContext', 'service_account': 'serviceAccount', 'service_account_name': 'serviceAccountName', 'subdomain': 'subdomain', 'termination_grace_period_seconds': 'terminationGracePeriodSeconds', 'tolerations': 'tolerations', 'volumes': 'volumes' } """ # if we get "service_account", return if field_name in model_type.attribute_map: return field_name # if we get "serviceAccount", then return "service_account" for key, value in model_type.attribute_map.items(): if value == field_name: return key else: raise ValueError("'{}' did not have an attribute matching '{}'".format(model_type.__name__, field_name))
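Both spellings resolve to the snake_case attribute name, as this sketch shows (assuming the kubernetes client package is installed and the helper above is in scope):

from kubernetes.client import V1PodSpec

print(_get_k8s_model_attribute(V1PodSpec, 'serviceAccount'))    # 'service_account'
print(_get_k8s_model_attribute(V1PodSpec, 'service_account'))   # 'service_account'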
Update current list of resources by doing a full fetch. Overwrites all current resource info.
def _list_and_update(self):
    """
    Update current list of resources by doing a full fetch.

    Overwrites all current resource info.
    """
    initial_resources = getattr(self.api, self.list_method_name)(
        self.namespace,
        label_selector=self.label_selector,
        field_selector=self.field_selector,
        _request_timeout=self.request_timeout,
    )
    # This is an atomic operation on the dictionary!
    self.resources = {p.metadata.name: p for p in initial_resources.items}
    # return the resource version so we can hook up a watch
    return initial_resources.metadata.resource_version
Keeps the current list of resources up-to-date This method is to be run not on the main thread! We first fetch the list of current resources, and store that. Then we register to be notified of changes to those resources, and keep our local store up-to-date based on these notifications. We also perform exponential backoff, giving up after we hit 32s wait time. This should protect against network connections dropping and intermittent unavailability of the api-server. Every time we recover from an exception we also do a full fetch, to pick up changes that might've been missed in the time we were not doing a watch. Note that we're playing a bit with fire here, by updating a dictionary in this thread while it is probably being read in another thread without using locks! However, dictionary access itself is atomic, and as long as we don't try to mutate them (do a 'fetch / modify / update' cycle on them), we should be ok!
def _watch_and_update(self): """ Keeps the current list of resources up-to-date This method is to be run not on the main thread! We first fetch the list of current resources, and store that. Then we register to be notified of changes to those resources, and keep our local store up-to-date based on these notifications. We also perform exponential backoff, giving up after we hit 32s wait time. This should protect against network connections dropping and intermittent unavailability of the api-server. Every time we recover from an exception we also do a full fetch, to pick up changes that might've been missed in the time we were not doing a watch. Note that we're playing a bit with fire here, by updating a dictionary in this thread while it is probably being read in another thread without using locks! However, dictionary access itself is atomic, and as long as we don't try to mutate them (do a 'fetch / modify / update' cycle on them), we should be ok! """ selectors = [] log_name = "" if self.label_selector: selectors.append("label selector=%r" % self.label_selector) if self.field_selector: selectors.append("field selector=%r" % self.field_selector) log_selector = ', '.join(selectors) cur_delay = 0.1 self.log.info( "watching for %s with %s in namespace %s", self.kind, log_selector, self.namespace, ) while True: self.log.debug("Connecting %s watcher", self.kind) start = time.monotonic() w = watch.Watch() try: resource_version = self._list_and_update() if not self.first_load_future.done(): # signal that we've loaded our initial data self.first_load_future.set_result(None) watch_args = { 'namespace': self.namespace, 'label_selector': self.label_selector, 'field_selector': self.field_selector, 'resource_version': resource_version, } if self.request_timeout: # set network receive timeout watch_args['_request_timeout'] = self.request_timeout if self.timeout_seconds: # set watch timeout watch_args['timeout_seconds'] = self.timeout_seconds # in case of timeout_seconds, the w.stream just exits (no exception thrown) # -> we stop the watcher and start a new one for ev in w.stream( getattr(self.api, self.list_method_name), **watch_args ): cur_delay = 0.1 resource = ev['object'] if ev['type'] == 'DELETED': # This is an atomic delete operation on the dictionary! self.resources.pop(resource.metadata.name, None) else: # This is an atomic operation on the dictionary! self.resources[resource.metadata.name] = resource if self._stop_event.is_set(): self.log.info("%s watcher stopped", self.kind) break watch_duration = time.monotonic() - start if watch_duration >= self.restart_seconds: self.log.debug( "Restarting %s watcher after %i seconds", self.kind, watch_duration, ) break except ReadTimeoutError: # network read time out, just continue and restart the watch # this could be due to a network problem or just low activity self.log.warning("Read timeout watching %s, reconnecting", self.kind) continue except Exception: cur_delay = cur_delay * 2 if cur_delay > 30: self.log.exception("Watching resources never recovered, giving up") if self.on_failure: self.on_failure() return self.log.exception("Error when watching resources, retrying in %ss", cur_delay) time.sleep(cur_delay) continue else: # no events on watch, reconnect self.log.debug("%s watcher timeout", self.kind) finally: w.stop() if self._stop_event.is_set(): self.log.info("%s watcher stopped", self.kind) break self.log.warning("%s watcher finished", self.kind)
Start the reflection process! We'll do a blocking read of all resources first, so that we don't race with any operations that are checking the state of the pod store - such as polls. This should be called only once at the start of program initialization (when the singleton is being created), and not afterwards!
def start(self): """ Start the reflection process! We'll do a blocking read of all resources first, so that we don't race with any operations that are checking the state of the pod store - such as polls. This should be called only once at the start of program initialization (when the singleton is being created), and not afterwards! """ if hasattr(self, 'watch_thread'): raise ValueError('Thread watching for resources is already running') self._list_and_update() self.watch_thread = threading.Thread(target=self._watch_and_update) # If the watch_thread is only thread left alive, exit app self.watch_thread.daemon = True self.watch_thread.start()
Make a pod manifest that will spawn current user's notebook pod.
def get_pod_manifest(self): """ Make a pod manifest that will spawn current user's notebook pod. """ if callable(self.uid): uid = yield gen.maybe_future(self.uid(self)) else: uid = self.uid if callable(self.gid): gid = yield gen.maybe_future(self.gid(self)) else: gid = self.gid if callable(self.fs_gid): fs_gid = yield gen.maybe_future(self.fs_gid(self)) else: fs_gid = self.fs_gid if callable(self.supplemental_gids): supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self)) else: supplemental_gids = self.supplemental_gids if self.cmd: real_cmd = self.cmd + self.get_args() else: real_cmd = None labels = self._build_pod_labels(self._expand_all(self.extra_labels)) annotations = self._build_common_annotations(self._expand_all(self.extra_annotations)) return make_pod( name=self.pod_name, cmd=real_cmd, port=self.port, image=self.image, image_pull_policy=self.image_pull_policy, image_pull_secret=self.image_pull_secrets, node_selector=self.node_selector, run_as_uid=uid, run_as_gid=gid, fs_gid=fs_gid, supplemental_gids=supplemental_gids, run_privileged=self.privileged, env=self.get_env(), volumes=self._expand_all(self.volumes), volume_mounts=self._expand_all(self.volume_mounts), working_dir=self.working_dir, labels=labels, annotations=annotations, cpu_limit=self.cpu_limit, cpu_guarantee=self.cpu_guarantee, mem_limit=self.mem_limit, mem_guarantee=self.mem_guarantee, extra_resource_limits=self.extra_resource_limits, extra_resource_guarantees=self.extra_resource_guarantees, lifecycle_hooks=self.lifecycle_hooks, init_containers=self._expand_all(self.init_containers), service_account=self.service_account, extra_container_config=self.extra_container_config, extra_pod_config=self.extra_pod_config, extra_containers=self._expand_all(self.extra_containers), scheduler_name=self.scheduler_name, tolerations=self.tolerations, node_affinity_preferred=self.node_affinity_preferred, node_affinity_required=self.node_affinity_required, pod_affinity_preferred=self.pod_affinity_preferred, pod_affinity_required=self.pod_affinity_required, pod_anti_affinity_preferred=self.pod_anti_affinity_preferred, pod_anti_affinity_required=self.pod_anti_affinity_required, priority_class_name=self.priority_class_name, logger=self.log, )
Make a pvc manifest that will spawn current user's pvc.
def get_pvc_manifest(self): """ Make a pvc manifest that will spawn current user's pvc. """ labels = self._build_common_labels(self._expand_all(self.storage_extra_labels)) labels.update({ 'component': 'singleuser-storage' }) annotations = self._build_common_annotations({}) return make_pvc( name=self.pvc_name, storage_class=self.storage_class, access_modes=self.storage_access_modes, storage=self.storage_capacity, labels=labels, annotations=annotations )
Check if the given pod is running

pod must be a kubernetes API object (V1Pod) representing the pod.
def is_pod_running(self, pod):
    """
    Check if the given pod is running

    pod must be a kubernetes API object (V1Pod) representing the pod.
    """
    # FIXME: Validate if this is really the best way
    is_running = (
        pod is not None and
        pod.status.phase == 'Running' and
        pod.status.pod_ip is not None and
        pod.metadata.deletion_timestamp is None and
        all([cs.ready for cs in pod.status.container_statuses])
    )
    return is_running
Return the environment dict to use for the Spawner. See also: jupyterhub.Spawner.get_env
def get_env(self): """Return the environment dict to use for the Spawner. See also: jupyterhub.Spawner.get_env """ env = super(KubeSpawner, self).get_env() # deprecate image env['JUPYTER_IMAGE_SPEC'] = self.image env['JUPYTER_IMAGE'] = self.image return env
Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running.
def poll(self): """ Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running. """ # have to wait for first load of data before we have a valid answer if not self.pod_reflector.first_load_future.done(): yield self.pod_reflector.first_load_future data = self.pod_reflector.pods.get(self.pod_name, None) if data is not None: if data.status.phase == 'Pending': return None ctr_stat = data.status.container_statuses if ctr_stat is None: # No status, no container (we hope) # This seems to happen when a pod is idle-culled. return 1 for c in ctr_stat: # return exit code if notebook container has terminated if c.name == 'notebook': if c.state.terminated: # call self.stop to delete the pod if self.delete_stopped_pods: yield self.stop(now=True) return c.state.terminated.exit_code break # None means pod is running or starting up return None # pod doesn't exist or has been deleted return 1
Filter event-reflector to just our events Returns list of all events that match our pod_name since our ._last_event (if defined). ._last_event is set at the beginning of .start().
def events(self): """Filter event-reflector to just our events Returns list of all events that match our pod_name since our ._last_event (if defined). ._last_event is set at the beginning of .start(). """ if not self.event_reflector: return [] events = [] for event in self.event_reflector.events: if event.involved_object.name != self.pod_name: # only consider events for my pod name continue if self._last_event and event.metadata.uid == self._last_event: # saw last_event marker, ignore any previous events # and only consider future events # only include events *after* our _last_event marker events = [] else: events.append(event) return events
Start a shared reflector on the KubeSpawner class

key: key for the reflector (e.g. 'pod' or 'events')
ReflectorClass: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass

If replace=False and the reflector is already running, do nothing.

If replace=True, a running reflector will be stopped and a new one started (for recovering from possible errors).
def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs): """Start a shared reflector on the KubeSpawner class key: key for the reflector (e.g. 'pod' or 'events') Reflector: Reflector class to be instantiated kwargs: extra keyword-args to be relayed to ReflectorClass If replace=False and the pod reflector is already running, do nothing. If replace=True, a running pod reflector will be stopped and a new one started (for recovering from possible errors). """ main_loop = IOLoop.current() def on_reflector_failure(): self.log.critical( "%s reflector failed, halting Hub.", key.title(), ) sys.exit(1) previous_reflector = self.__class__.reflectors.get(key) if replace or not previous_reflector: self.__class__.reflectors[key] = ReflectorClass( parent=self, namespace=self.namespace, on_failure=on_reflector_failure, **kwargs, ) if replace and previous_reflector: # we replaced the reflector, stop the old one previous_reflector.stop() # return the current reflector return self.__class__.reflectors[key]
Start the events reflector

If replace=False and the event reflector is already running, do nothing.

If replace=True, a running event reflector will be stopped and a new one started (for recovering from possible errors).
def _start_watching_events(self, replace=False):
    """Start the events reflector

    If replace=False and the event reflector is already running,
    do nothing.

    If replace=True, a running event reflector will be stopped
    and a new one started (for recovering from possible errors).
    """
    return self._start_reflector(
        "events",
        EventReflector,
        fields={"involvedObject.kind": "Pod"},
        replace=replace,
    )
Start the user's pod
def _start(self): """Start the user's pod""" # load user options (including profile) yield self.load_user_options() # record latest event so we don't include old # events from previous pods in self.events # track by order and name instead of uid # so we get events like deletion of a previously stale # pod if it's part of this spawn process events = self.events if events: self._last_event = events[-1].metadata.uid if self.storage_pvc_ensure: # Try and create the pvc. If it succeeds we are good. If # returns a 409 indicating it already exists we are good. If # it returns a 403, indicating potential quota issue we need # to see if pvc already exists before we decide to raise the # error for quota being exceeded. This is because quota is # checked before determining if the PVC needed to be # created. pvc = self.get_pvc_manifest() try: yield self.asynchronize( self.api.create_namespaced_persistent_volume_claim, namespace=self.namespace, body=pvc ) except ApiException as e: if e.status == 409: self.log.info("PVC " + self.pvc_name + " already exists, so did not create new pvc.") elif e.status == 403: t, v, tb = sys.exc_info() try: yield self.asynchronize( self.api.read_namespaced_persistent_volume_claim, name=self.pvc_name, namespace=self.namespace) except ApiException as e: raise v.with_traceback(tb) self.log.info("PVC " + self.pvc_name + " already exists, possibly have reached quota though.") else: raise # If we run into a 409 Conflict error, it means a pod with the # same name already exists. We stop it, wait for it to stop, and # try again. We try 4 times, and if it still fails we give up. # FIXME: Have better / cleaner retry logic! retry_times = 4 pod = yield self.get_pod_manifest() if self.modify_pod_hook: pod = yield gen.maybe_future(self.modify_pod_hook(self, pod)) for i in range(retry_times): try: yield self.asynchronize( self.api.create_namespaced_pod, self.namespace, pod, ) break except ApiException as e: if e.status != 409: # We only want to handle 409 conflict errors self.log.exception("Failed for %s", pod.to_str()) raise self.log.info('Found existing pod %s, attempting to kill', self.pod_name) # TODO: this should show up in events yield self.stop(True) self.log.info('Killed pod %s, will try starting singleuser pod again', self.pod_name) else: raise Exception( 'Can not create user pod %s already exists & could not be deleted' % self.pod_name) # we need a timeout here even though start itself has a timeout # in order for this coroutine to finish at some point. # using the same start_timeout here # essentially ensures that this timeout should never propagate up # because the handler will have stopped waiting after # start_timeout, starting from a slightly earlier point. try: yield exponential_backoff( lambda: self.is_pod_running(self.pod_reflector.pods.get(self.pod_name, None)), 'pod/%s did not start in %s seconds!' % (self.pod_name, self.start_timeout), timeout=self.start_timeout, ) except TimeoutError: if self.pod_name not in self.pod_reflector.pods: # if pod never showed up at all, # restart the pod reflector which may have become disconnected. 
self.log.error( "Pod %s never showed up in reflector, restarting pod reflector", self.pod_name, ) self._start_watching_pods(replace=True) raise pod = self.pod_reflector.pods[self.pod_name] self.pod_id = pod.metadata.uid if self.event_reflector: self.log.debug( 'pod %s events before launch: %s', self.pod_name, "\n".join( [ "%s [%s] %s" % (event.last_timestamp, event.type, event.message) for event in self.events ] ), ) return (pod.status.pod_ip, self.port)
Build the form template according to the `profile_list` setting. Returns: '' when no `profile_list` has been defined The rendered template (using jinja2) when `profile_list` is defined.
def _options_form_default(self): ''' Build the form template according to the `profile_list` setting. Returns: '' when no `profile_list` has been defined The rendered template (using jinja2) when `profile_list` is defined. ''' if not self.profile_list: return '' if callable(self.profile_list): return self._render_options_form_dynamically else: return self._render_options_form(self.profile_list)
get the option selected by the user on the form

This only constructs the user_options dict, it should not actually load any options. That is done later in `.load_user_options()`

Args:
    formdata: user selection returned by the form

        To access the value, you can use the `get` accessor and the name of the html element, for example::

            formdata.get('profile', [0])

        to get the value of the form named "profile", as defined in `form_template`::

            <select class="form-control" name="profile"...>
            </select>

Returns:
    user_options (dict): the selected profile in the user_options form, e.g. ``{"profile": "8 CPUs"}``
def options_from_form(self, formdata): """get the option selected by the user on the form This only constructs the user_options dict, it should not actually load any options. That is done later in `.load_user_options()` Args: formdata: user selection returned by the form To access to the value, you can use the `get` accessor and the name of the html element, for example:: formdata.get('profile',[0]) to get the value of the form named "profile", as defined in `form_template`:: <select class="form-control" name="profile"...> </select> Returns: user_options (dict): the selected profile in the user_options form, e.g. ``{"profile": "8 CPUs"}`` """ if not self.profile_list or self._profile_list is None: return formdata # Default to first profile if somehow none is provided try: selected_profile = int(formdata.get('profile', [0])[0]) options = self._profile_list[selected_profile] except (TypeError, IndexError, ValueError): raise web.HTTPError(400, "No such profile: %i", formdata.get('profile', None)) return { 'profile': options['display_name'] }
Load a profile by name Called by load_user_options
def _load_profile(self, profile_name): """Load a profile by name Called by load_user_options """ # find the profile default_profile = self._profile_list[0] for profile in self._profile_list: if profile.get('default', False): # explicit default, not the first default_profile = profile if profile['display_name'] == profile_name: break else: if profile_name: # name specified, but not found raise ValueError("No such profile: %s. Options include: %s" % ( profile_name, ', '.join(p['display_name'] for p in self._profile_list) )) else: # no name specified, use the default profile = default_profile self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name']) kubespawner_override = profile.get('kubespawner_override', {}) for k, v in kubespawner_override.items(): if callable(v): v = v(self) self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v) else: self.log.debug(".. overriding KubeSpawner value %s=%s", k, v) setattr(self, k, v)
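Inferred from the keys this method reads (display_name, default, kubespawner_override), a profile_list entry in jupyterhub_config.py could look roughly like the sketch below; all names and values are hypothetical.

# Illustrative only: structure inferred from the keys read by _load_profile().
c.KubeSpawner.profile_list = [
    {
        'display_name': 'Small (1 CPU, 2G RAM)',
        'default': True,
        'kubespawner_override': {
            'cpu_limit': 1,
            'mem_limit': '2G',
        },
    },
    {
        'display_name': 'GPU node',
        'kubespawner_override': {
            'image': 'example/gpu-notebook:latest',            # hypothetical image
            'extra_resource_limits': {'nvidia.com/gpu': '1'},
            # per the code above, callables receive the spawner and are resolved at load time
            'environment': lambda spawner: {'USER_ID': spawner.user.name},
        },
    },
]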
Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options.
def load_user_options(self): """Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options. """ if self._profile_list is None: if callable(self.profile_list): self._profile_list = yield gen.maybe_future(self.profile_list(self)) else: self._profile_list = self.profile_list if self._profile_list: yield self._load_profile(self.user_options.get('profile', None))
Add traits with .tag(config=True) to members list
def get_object_members(self, want_all): """Add traits with .tag(config=True) to members list""" check, members = super().get_object_members(want_all) get_traits = self.object.class_own_traits if self.options.inherited_members \ else self.object.class_traits trait_members = [] for name, trait in sorted(get_traits(config=True).items()): # put help in __doc__ where autodoc will look for it trait.__doc__ = trait.help trait_members.append((name, trait)) return check, trait_members + members
This is a generator function that enumerates all tacho motors that match the provided arguments. Parameters: name_pattern: pattern that device name should match. For example, 'motor*'. Default value: '*'. keyword arguments: used for matching the corresponding device attributes. For example, driver_name='lego-ev3-l-motor', or address=['outB', 'outC']. When argument value is a list, then a match against any entry of the list is enough.
def list_motors(name_pattern=Motor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
    """
    This is a generator function that enumerates all tacho motors that match
    the provided arguments.

    Parameters:
        name_pattern: pattern that device name should match.
            For example, 'motor*'. Default value: '*'.
        keyword arguments: used for matching the corresponding device
            attributes. For example, driver_name='lego-ev3-l-motor', or
            address=['outB', 'outC']. When argument value
            is a list, then a match against any entry of the list is
            enough.
    """
    class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Motor.SYSTEM_CLASS_NAME)

    return (Motor(name_pattern=name, name_exact=True)
            for name in list_device_names(class_path, name_pattern, **kwargs))
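A hedged usage sketch (it requires an EV3 brick, or compatible hardware, with motors attached; the attribute names printed are the ones documented further below):

for motor in list_motors(driver_name='lego-ev3-l-motor'):
    print(motor.address, motor.driver_name, motor.max_speed)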
Return the native speed measurement required to achieve desired rotations-per-second
def to_native_units(self, motor):
    """
    Return the native speed measurement required to achieve desired rotations-per-second
    """
    assert abs(self.rotations_per_second) <= motor.max_rps,\
        "invalid rotations-per-second: {} max RPS is {}, {} was requested".format(
            motor, motor.max_rps, self.rotations_per_second)

    return self.rotations_per_second/motor.max_rps * motor.max_speed
Return the native speed measurement required to achieve desired rotations-per-minute
def to_native_units(self, motor):
    """
    Return the native speed measurement required to achieve desired rotations-per-minute
    """
    assert abs(self.rotations_per_minute) <= motor.max_rpm,\
        "invalid rotations-per-minute: {} max RPM is {}, {} was requested".format(
            motor, motor.max_rpm, self.rotations_per_minute)

    return self.rotations_per_minute/motor.max_rpm * motor.max_speed
Return the native speed measurement required to achieve desired degrees-per-second
def to_native_units(self, motor):
    """
    Return the native speed measurement required to achieve desired degrees-per-second
    """
    assert abs(self.degrees_per_second) <= motor.max_dps,\
        "invalid degrees-per-second: {} max DPS is {}, {} was requested".format(
            motor, motor.max_dps, self.degrees_per_second)

    return self.degrees_per_second/motor.max_dps * motor.max_speed
Return the native speed measurement required to achieve desired degrees-per-minute
def to_native_units(self, motor):
    """
    Return the native speed measurement required to achieve desired degrees-per-minute
    """
    assert abs(self.degrees_per_minute) <= motor.max_dpm,\
        "invalid degrees-per-minute: {} max DPM is {}, {} was requested".format(
            motor, motor.max_dpm, self.degrees_per_minute)

    return self.degrees_per_minute/motor.max_dpm * motor.max_speed
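These four converters appear to be methods of speed-value classes; the sketch below assumes the class names and import path used by the ev3dev2 Python library (SpeedRPS, SpeedDPS in ev3dev2.motor) and requires attached motor hardware, so treat it as an illustration rather than a guaranteed API.

from ev3dev2.motor import LargeMotor, SpeedDPS, SpeedRPS   # names and path assumed

motor = LargeMotor()                            # requires an attached large motor
native = SpeedRPS(1.5).to_native_units(motor)   # 1.5 rotations/second -> tacho counts/second
motor.on(SpeedDPS(90))                          # many ev3dev2 motor APIs accept these speed values directly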
Returns the name of the port that this motor is connected to.
def address(self): """ Returns the name of the port that this motor is connected to. """ self._address, value = self.get_attr_string(self._address, 'address') return value
Returns a list of commands that are supported by the motor controller. Possible values are `run-forever`, `run-to-abs-pos`, `run-to-rel-pos`, `run-timed`, `run-direct`, `stop` and `reset`. Not all commands may be supported. - `run-forever` will cause the motor to run until another command is sent. - `run-to-abs-pos` will run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. - `run-to-rel-pos` will run to a position relative to the current `position` value. The new position will be current `position` + `position_sp`. When the new position is reached, the motor will stop using the action specified by `stop_action`. - `run-timed` will run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`. - `run-direct` will run the motor at the duty cycle specified by `duty_cycle_sp`. Unlike other run commands, changing `duty_cycle_sp` while running *will* take effect immediately. - `stop` will stop any of the run commands before they are complete using the action specified by `stop_action`. - `reset` will reset all of the motor parameter attributes to their default value. This will also have the effect of stopping the motor.
def commands(self): """ Returns a list of commands that are supported by the motor controller. Possible values are `run-forever`, `run-to-abs-pos`, `run-to-rel-pos`, `run-timed`, `run-direct`, `stop` and `reset`. Not all commands may be supported. - `run-forever` will cause the motor to run until another command is sent. - `run-to-abs-pos` will run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. - `run-to-rel-pos` will run to a position relative to the current `position` value. The new position will be current `position` + `position_sp`. When the new position is reached, the motor will stop using the action specified by `stop_action`. - `run-timed` will run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`. - `run-direct` will run the motor at the duty cycle specified by `duty_cycle_sp`. Unlike other run commands, changing `duty_cycle_sp` while running *will* take effect immediately. - `stop` will stop any of the run commands before they are complete using the action specified by `stop_action`. - `reset` will reset all of the motor parameter attributes to their default value. This will also have the effect of stopping the motor. """ (self._commands, value) = self.get_cached_attr_set(self._commands, 'commands') return value
Returns the number of tacho counts in one rotation of the motor. Tacho counts are used by the position and speed attributes, so you can use this value to convert rotations or degrees to tacho counts. (rotation motors only)
def count_per_rot(self): """ Returns the number of tacho counts in one rotation of the motor. Tacho counts are used by the position and speed attributes, so you can use this value to convert rotations or degrees to tacho counts. (rotation motors only) """ (self._count_per_rot, value) = self.get_cached_attr_int(self._count_per_rot, 'count_per_rot') return value
Returns the number of tacho counts in one meter of travel of the motor. Tacho counts are used by the position and speed attributes, so you can use this value to convert from distance to tacho counts. (linear motors only)
def count_per_m(self): """ Returns the number of tacho counts in one meter of travel of the motor. Tacho counts are used by the position and speed attributes, so you can use this value to convert from distance to tacho counts. (linear motors only) """ (self._count_per_m, value) = self.get_cached_attr_int(self._count_per_m, 'count_per_m') return value
Returns the name of the driver that provides this tacho motor device.
def driver_name(self): """ Returns the name of the driver that provides this tacho motor device. """ (self._driver_name, value) = self.get_cached_attr_string(self._driver_name, 'driver_name') return value
Returns the current duty cycle of the motor. Units are percent. Values are -100 to 100.
def duty_cycle(self): """ Returns the current duty cycle of the motor. Units are percent. Values are -100 to 100. """ self._duty_cycle, value = self.get_attr_int(self._duty_cycle, 'duty_cycle') return value
Writing sets the duty cycle setpoint. Reading returns the current value. Units are in percent. Valid values are -100 to 100. A negative value causes the motor to rotate in reverse.
def duty_cycle_sp(self): """ Writing sets the duty cycle setpoint. Reading returns the current value. Units are in percent. Valid values are -100 to 100. A negative value causes the motor to rotate in reverse. """ self._duty_cycle_sp, value = self.get_attr_int(self._duty_cycle_sp, 'duty_cycle_sp') return value
Returns the number of tacho counts in the full travel of the motor. When combined with the `count_per_m` attribute, you can use this value to calculate the maximum travel distance of the motor. (linear motors only)
def full_travel_count(self):
    """
    Returns the number of tacho counts in the full travel of the motor. When
    combined with the `count_per_m` attribute, you can use this value to
    calculate the maximum travel distance of the motor. (linear motors only)
    """
    (self._full_travel_count, value) = self.get_cached_attr_int(self._full_travel_count, 'full_travel_count')
    return value
Sets the polarity of the motor. With `normal` polarity, a positive duty cycle will cause the motor to rotate clockwise. With `inversed` polarity, a positive duty cycle will cause the motor to rotate counter-clockwise. Valid values are `normal` and `inversed`.
def polarity(self): """ Sets the polarity of the motor. With `normal` polarity, a positive duty cycle will cause the motor to rotate clockwise. With `inversed` polarity, a positive duty cycle will cause the motor to rotate counter-clockwise. Valid values are `normal` and `inversed`. """ self._polarity, value = self.get_attr_string(self._polarity, 'polarity') return value
Returns the current position of the motor in pulses of the rotary encoder. When the motor rotates clockwise, the position will increase. Likewise, rotating counter-clockwise causes the position to decrease. Writing will set the position to that value.
def position(self): """ Returns the current position of the motor in pulses of the rotary encoder. When the motor rotates clockwise, the position will increase. Likewise, rotating counter-clockwise causes the position to decrease. Writing will set the position to that value. """ self._position, value = self.get_attr_int(self._position, 'position') return value
The proportional constant for the position PID.
def position_p(self):
    """
    The proportional constant for the position PID.
    """
    self._position_p, value = self.get_attr_int(self._position_p, 'hold_pid/Kp')
    return value
The integral constant for the position PID.
def position_i(self):
    """
    The integral constant for the position PID.
    """
    self._position_i, value = self.get_attr_int(self._position_i, 'hold_pid/Ki')
    return value
The derivative constant for the position PID.
def position_d(self):
    """
    The derivative constant for the position PID.
    """
    self._position_d, value = self.get_attr_int(self._position_d, 'hold_pid/Kd')
    return value
Writing specifies the target position for the `run-to-abs-pos` and `run-to-rel-pos` commands. Reading returns the current value. Units are in tacho counts. You can use the value returned by `count_per_rot` to convert tacho counts to/from rotations or degrees.
def position_sp(self):
    """
    Writing specifies the target position for the `run-to-abs-pos` and
    `run-to-rel-pos` commands. Reading returns the current value. Units are in
    tacho counts. You can use the value returned by `count_per_rot` to convert
    tacho counts to/from rotations or degrees.
    """
    self._position_sp, value = self.get_attr_int(self._position_sp, 'position_sp')
    return value
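Because `position_sp` is expressed in tacho counts, a target given in rotations has to be converted first. A sketch of that arithmetic, using an assumed counts-per-rotation value:

# Assumed example value; on real hardware this would come from count_per_rot.
count_per_rot = 360

# Two and a half rotations forward, expressed as a position setpoint.
target_rotations = 2.5
position_sp = int(target_rotations * count_per_rot)   # -> 900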
Returns the maximum value that is accepted by the `speed_sp` attribute. This may be slightly different than the maximum speed that a particular motor can reach - it's the maximum theoretical speed.
def max_speed(self):
    """
    Returns the maximum value that is accepted by the `speed_sp` attribute. This
    may be slightly different than the maximum speed that a particular motor can
    reach - it's the maximum theoretical speed.
    """
    (self._max_speed, value) = self.get_cached_attr_int(self._max_speed, 'max_speed')
    return value
Returns the current motor speed in tacho counts per second. Note, this is not necessarily degrees (although it is for LEGO motors). Use the `count_per_rot` attribute to convert this value to RPM or deg/sec.
def speed(self):
    """
    Returns the current motor speed in tacho counts per second. Note, this is
    not necessarily degrees (although it is for LEGO motors). Use the
    `count_per_rot` attribute to convert this value to RPM or deg/sec.
    """
    self._speed, value = self.get_attr_int(self._speed, 'speed')
    return value
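To make the unit note above concrete, converting a raw speed reading (tacho counts per second) to deg/sec and RPM might look like this sketch; both numbers below are assumed example values, not readings from a device.

# Assumed example values.
count_per_rot = 360     # from count_per_rot
speed_counts = 540      # from speed, in tacho counts per second

deg_per_sec = speed_counts * 360 / count_per_rot   # -> 540.0
rpm = speed_counts * 60 / count_per_rot             # -> 90.0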
Writing sets the target speed in tacho counts per second used for all `run-*` commands except `run-direct`. Reading returns the current value. A negative value causes the motor to rotate in reverse with the exception of `run-to-*-pos` commands where the sign is ignored. Use the `count_per_rot` attribute to convert RPM or deg/sec to tacho counts per second. Use the `count_per_m` attribute to convert m/s to tacho counts per second.
def speed_sp(self):
    """
    Writing sets the target speed in tacho counts per second used for all `run-*`
    commands except `run-direct`. Reading returns the current value. A negative
    value causes the motor to rotate in reverse with the exception of
    `run-to-*-pos` commands where the sign is ignored. Use the `count_per_rot`
    attribute to convert RPM or deg/sec to tacho counts per second. Use the
    `count_per_m` attribute to convert m/s to tacho counts per second.
    """
    self._speed_sp, value = self.get_attr_int(self._speed_sp, 'speed_sp')
    return value
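Going the other way for the setpoint, a desired RPM (rotation motors) or m/s (linear motors) can be turned into tacho counts per second before being written to `speed_sp`. A sketch with assumed conversion factors:

# Assumed example values.
count_per_rot = 360     # from count_per_rot, rotation motors
count_per_m = 2000      # from count_per_m, linear motors

# Desired 120 RPM -> tacho counts per second.
speed_sp_rotation = int(120 / 60 * count_per_rot)   # -> 720

# Desired 0.05 m/s -> tacho counts per second.
speed_sp_linear = int(0.05 * count_per_m)           # -> 100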
Writing sets the ramp up setpoint. Reading returns the current value. Units are in milliseconds and must be positive. When set to a non-zero value, the motor speed will increase from 0 to 100% of `max_speed` over the span of this setpoint. The actual ramp time is the ratio of the difference between `speed_sp` and the current `speed` to `max_speed`, multiplied by `ramp_up_sp`.
def ramp_up_sp(self):
    """
    Writing sets the ramp up setpoint. Reading returns the current value. Units
    are in milliseconds and must be positive. When set to a non-zero value, the
    motor speed will increase from 0 to 100% of `max_speed` over the span of this
    setpoint. The actual ramp time is the ratio of the difference between
    `speed_sp` and the current `speed` to `max_speed`, multiplied by `ramp_up_sp`.
    """
    self._ramp_up_sp, value = self.get_attr_int(self._ramp_up_sp, 'ramp_up_sp')
    return value
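The actual ramp time described above can be computed from the current speed, the setpoint, `max_speed`, and `ramp_up_sp`. A worked arithmetic sketch with assumed values, not readings from a device:

# Assumed example values.
max_speed = 1050    # from max_speed, tacho counts per second
speed = 0           # current speed
speed_sp = 525      # target speed
ramp_up_sp = 1000   # milliseconds to ramp from 0 to max_speed

# Ramp time scales with the fraction of max_speed that has to be covered.
actual_ramp_ms = abs(speed_sp - speed) / max_speed * ramp_up_sp   # -> 500.0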