repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.username
|
python
|
def username(self, value):
self._username = value
self._connectionXML.set('username', value)
|
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L91-L103
| null |
class Connection(object):
    """A class representing connections inside Data Sources."""

    def __init__(self, connxml):
        """Connection is usually instantiated by passing in connection elements
        in a Data Source. If creating a connection from scratch you can call
        `from_attributes` passing in the connection attributes.
        """
        self._connectionXML = connxml
        self._dbname = connxml.get('dbname')
        self._server = connxml.get('server')
        self._username = connxml.get('username')
        self._authentication = connxml.get('authentication')
        self._class = connxml.get('class')
        self._port = connxml.get('port', None)
        self._query_band = connxml.get('query-band-spec', None)
        self._initial_sql = connxml.get('one-time-sql', None)

    def __repr__(self):
        return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self)))

    @classmethod
    def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
                        initial_sql=None, authentication=''):
        """Creates a new connection that can be added into a Data Source.
        defaults to `''` which will be treated as 'prompt' by Tableau."""
        root = ET.Element('connection', authentication=authentication)
        xml = cls(root)
        xml.server = server
        xml.dbname = dbname
        xml.username = username
        xml.dbclass = dbclass
        xml.port = port
        xml.query_band = query_band
        xml.initial_sql = initial_sql
        return xml

    @property
    def dbname(self):
        """Database name for the connection. Not the table name."""
        return self._dbname

    @dbname.setter
    def dbname(self, value):
        """
        Set the connection's database name property.

        Args:
            value: New name of the database. String.

        Returns:
            Nothing.
        """
        self._dbname = value
        self._connectionXML.set('dbname', value)

    @property
    def server(self):
        """Hostname or IP address of the database server. May also be a URL in some connection types."""
        return self._server

    @server.setter
    def server(self, value):
        """
        Set the connection's server property.

        Args:
            value: New server. String.

        Returns:
            Nothing.
        """
        self._server = value
        self._connectionXML.set('server', value)

    @property
    def username(self):
        """Username used to authenticate to the database."""
        return self._username

    # BUGFIX: the username setter body was missing here, leaving the
    # `@username.setter` decorator stacked on the next property and breaking
    # both `username` assignment and the `authentication` property. Restored.
    @username.setter
    def username(self, value):
        """
        Set the connection's username property.

        Args:
            value: New username value. String.

        Returns:
            Nothing.
        """
        self._username = value
        self._connectionXML.set('username', value)

    @property
    def authentication(self):
        # Read-only: authentication mode captured from the connection XML.
        return self._authentication

    @property
    def dbclass(self):
        """The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list
        can be found in dbclass.py"""
        return self._class

    @dbclass.setter
    def dbclass(self, value):
        """Set the connection's dbclass property.

        Args:
            value: New dbclass value. String.

        Returns:
            Nothing.

        Raises:
            AttributeError: if `value` is not a known database class.
        """
        if not is_valid_dbclass(value):
            raise AttributeError("'{}' is not a valid database type".format(value))
        self._class = value
        self._connectionXML.set('class', value)

    @property
    def port(self):
        """Port used to connect to the database."""
        return self._port

    @port.setter
    def port(self, value):
        """Set the connection's port property.

        Args:
            value: New port value. String.

        Returns:
            Nothing.
        """
        self._port = value
        # If port is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['port']
            except KeyError:
                pass
        else:
            self._connectionXML.set('port', value)

    @property
    def query_band(self):
        """Query band passed on connection to database."""
        return self._query_band

    @query_band.setter
    def query_band(self, value):
        """Set the connection's query_band property.

        Args:
            value: New query_band value. String.

        Returns:
            Nothing.
        """
        self._query_band = value
        # If query band is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['query-band-spec']
            except KeyError:
                pass
        else:
            self._connectionXML.set('query-band-spec', value)

    @property
    def initial_sql(self):
        """Initial SQL to be run."""
        return self._initial_sql

    @initial_sql.setter
    def initial_sql(self, value):
        """Set the connection's initial_sql property.

        Args:
            value: New initial_sql value. String.

        Returns:
            Nothing.
        """
        self._initial_sql = value
        # If initial_sql is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['one-time-sql']
            except KeyError:
                pass
        else:
            self._connectionXML.set('one-time-sql', value)
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.dbclass
|
python
|
def dbclass(self, value):
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value)
|
Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L116-L130
|
[
"def is_valid_dbclass(dbclass):\n return dbclass in KNOWN_DB_CLASSES\n"
] |
class Connection(object):
    """A class representing connections inside Data Sources."""

    def __init__(self, connxml):
        """Connection is usually instantiated by passing in connection elements
        in a Data Source. If creating a connection from scratch you can call
        `from_attributes` passing in the connection attributes.
        """
        self._connectionXML = connxml
        self._dbname = connxml.get('dbname')
        self._server = connxml.get('server')
        self._username = connxml.get('username')
        self._authentication = connxml.get('authentication')
        self._class = connxml.get('class')
        self._port = connxml.get('port', None)
        self._query_band = connxml.get('query-band-spec', None)
        self._initial_sql = connxml.get('one-time-sql', None)

    def __repr__(self):
        return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self)))

    @classmethod
    def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
                        initial_sql=None, authentication=''):
        """Creates a new connection that can be added into a Data Source.
        defaults to `''` which will be treated as 'prompt' by Tableau."""
        root = ET.Element('connection', authentication=authentication)
        xml = cls(root)
        xml.server = server
        xml.dbname = dbname
        xml.username = username
        xml.dbclass = dbclass
        xml.port = port
        xml.query_band = query_band
        xml.initial_sql = initial_sql
        return xml

    @property
    def dbname(self):
        """Database name for the connection. Not the table name."""
        return self._dbname

    @dbname.setter
    def dbname(self, value):
        """
        Set the connection's database name property.

        Args:
            value: New name of the database. String.

        Returns:
            Nothing.
        """
        self._dbname = value
        self._connectionXML.set('dbname', value)

    @property
    def server(self):
        """Hostname or IP address of the database server. May also be a URL in some connection types."""
        return self._server

    @server.setter
    def server(self, value):
        """
        Set the connection's server property.

        Args:
            value: New server. String.

        Returns:
            Nothing.
        """
        self._server = value
        self._connectionXML.set('server', value)

    @property
    def username(self):
        """Username used to authenticate to the database."""
        return self._username

    @username.setter
    def username(self, value):
        """
        Set the connection's username property.

        Args:
            value: New username value. String.

        Returns:
            Nothing.
        """
        self._username = value
        self._connectionXML.set('username', value)

    @property
    def authentication(self):
        # Read-only: authentication mode captured from the connection XML.
        return self._authentication

    @property
    def dbclass(self):
        """The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list
        can be found in dbclass.py"""
        return self._class

    # BUGFIX: the dbclass setter body was missing here, leaving the
    # `@dbclass.setter` decorator stacked on the next property and breaking
    # both `dbclass` assignment and the `port` property. Restored.
    @dbclass.setter
    def dbclass(self, value):
        """Set the connection's dbclass property.

        Args:
            value: New dbclass value. String.

        Returns:
            Nothing.

        Raises:
            AttributeError: if `value` is not a known database class.
        """
        if not is_valid_dbclass(value):
            raise AttributeError("'{}' is not a valid database type".format(value))
        self._class = value
        self._connectionXML.set('class', value)

    @property
    def port(self):
        """Port used to connect to the database."""
        return self._port

    @port.setter
    def port(self, value):
        """Set the connection's port property.

        Args:
            value: New port value. String.

        Returns:
            Nothing.
        """
        self._port = value
        # If port is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['port']
            except KeyError:
                pass
        else:
            self._connectionXML.set('port', value)

    @property
    def query_band(self):
        """Query band passed on connection to database."""
        return self._query_band

    @query_band.setter
    def query_band(self, value):
        """Set the connection's query_band property.

        Args:
            value: New query_band value. String.

        Returns:
            Nothing.
        """
        self._query_band = value
        # If query band is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['query-band-spec']
            except KeyError:
                pass
        else:
            self._connectionXML.set('query-band-spec', value)

    @property
    def initial_sql(self):
        """Initial SQL to be run."""
        return self._initial_sql

    @initial_sql.setter
    def initial_sql(self, value):
        """Set the connection's initial_sql property.

        Args:
            value: New initial_sql value. String.

        Returns:
            Nothing.
        """
        self._initial_sql = value
        # If initial_sql is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['one-time-sql']
            except KeyError:
                pass
        else:
            self._connectionXML.set('one-time-sql', value)
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.port
|
python
|
def port(self, value):
self._port = value
# If port is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['port']
except KeyError:
pass
else:
self._connectionXML.set('port', value)
|
Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L138-L156
| null |
class Connection(object):
    """A class representing connections inside Data Sources."""

    def __init__(self, connxml):
        """Connection is usually instantiated by passing in connection elements
        in a Data Source. If creating a connection from scratch you can call
        `from_attributes` passing in the connection attributes.
        """
        self._connectionXML = connxml
        self._dbname = connxml.get('dbname')
        self._server = connxml.get('server')
        self._username = connxml.get('username')
        self._authentication = connxml.get('authentication')
        self._class = connxml.get('class')
        self._port = connxml.get('port', None)
        self._query_band = connxml.get('query-band-spec', None)
        self._initial_sql = connxml.get('one-time-sql', None)

    def __repr__(self):
        return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self)))

    @classmethod
    def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
                        initial_sql=None, authentication=''):
        """Creates a new connection that can be added into a Data Source.
        defaults to `''` which will be treated as 'prompt' by Tableau."""
        root = ET.Element('connection', authentication=authentication)
        xml = cls(root)
        xml.server = server
        xml.dbname = dbname
        xml.username = username
        xml.dbclass = dbclass
        xml.port = port
        xml.query_band = query_band
        xml.initial_sql = initial_sql
        return xml

    @property
    def dbname(self):
        """Database name for the connection. Not the table name."""
        return self._dbname

    @dbname.setter
    def dbname(self, value):
        """
        Set the connection's database name property.

        Args:
            value: New name of the database. String.

        Returns:
            Nothing.
        """
        self._dbname = value
        self._connectionXML.set('dbname', value)

    @property
    def server(self):
        """Hostname or IP address of the database server. May also be a URL in some connection types."""
        return self._server

    @server.setter
    def server(self, value):
        """
        Set the connection's server property.

        Args:
            value: New server. String.

        Returns:
            Nothing.
        """
        self._server = value
        self._connectionXML.set('server', value)

    @property
    def username(self):
        """Username used to authenticate to the database."""
        return self._username

    @username.setter
    def username(self, value):
        """
        Set the connection's username property.

        Args:
            value: New username value. String.

        Returns:
            Nothing.
        """
        self._username = value
        self._connectionXML.set('username', value)

    @property
    def authentication(self):
        # Read-only: authentication mode captured from the connection XML.
        return self._authentication

    @property
    def dbclass(self):
        """The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list
        can be found in dbclass.py"""
        return self._class

    @dbclass.setter
    def dbclass(self, value):
        """Set the connection's dbclass property.

        Args:
            value: New dbclass value. String.

        Returns:
            Nothing.

        Raises:
            AttributeError: if `value` is not a known database class.
        """
        if not is_valid_dbclass(value):
            raise AttributeError("'{}' is not a valid database type".format(value))
        self._class = value
        self._connectionXML.set('class', value)

    @property
    def port(self):
        """Port used to connect to the database."""
        return self._port

    # BUGFIX: the port setter body was missing here, leaving the
    # `@port.setter` decorator stacked on the next property and breaking
    # both `port` assignment and the `query_band` property. Restored.
    @port.setter
    def port(self, value):
        """Set the connection's port property.

        Args:
            value: New port value. String.

        Returns:
            Nothing.
        """
        self._port = value
        # If port is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['port']
            except KeyError:
                pass
        else:
            self._connectionXML.set('port', value)

    @property
    def query_band(self):
        """Query band passed on connection to database."""
        return self._query_band

    @query_band.setter
    def query_band(self, value):
        """Set the connection's query_band property.

        Args:
            value: New query_band value. String.

        Returns:
            Nothing.
        """
        self._query_band = value
        # If query band is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['query-band-spec']
            except KeyError:
                pass
        else:
            self._connectionXML.set('query-band-spec', value)

    @property
    def initial_sql(self):
        """Initial SQL to be run."""
        return self._initial_sql

    @initial_sql.setter
    def initial_sql(self, value):
        """Set the connection's initial_sql property.

        Args:
            value: New initial_sql value. String.

        Returns:
            Nothing.
        """
        self._initial_sql = value
        # If initial_sql is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['one-time-sql']
            except KeyError:
                pass
        else:
            self._connectionXML.set('one-time-sql', value)
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.query_band
|
python
|
def query_band(self, value):
self._query_band = value
# If query band is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['query-band-spec']
except KeyError:
pass
else:
self._connectionXML.set('query-band-spec', value)
|
Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L164-L182
| null |
class Connection(object):
    """A class representing connections inside Data Sources."""

    def __init__(self, connxml):
        """Connection is usually instantiated by passing in connection elements
        in a Data Source. If creating a connection from scratch you can call
        `from_attributes` passing in the connection attributes.
        """
        self._connectionXML = connxml
        self._dbname = connxml.get('dbname')
        self._server = connxml.get('server')
        self._username = connxml.get('username')
        self._authentication = connxml.get('authentication')
        self._class = connxml.get('class')
        self._port = connxml.get('port', None)
        self._query_band = connxml.get('query-band-spec', None)
        self._initial_sql = connxml.get('one-time-sql', None)

    def __repr__(self):
        return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self)))

    @classmethod
    def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
                        initial_sql=None, authentication=''):
        """Creates a new connection that can be added into a Data Source.
        defaults to `''` which will be treated as 'prompt' by Tableau."""
        root = ET.Element('connection', authentication=authentication)
        xml = cls(root)
        xml.server = server
        xml.dbname = dbname
        xml.username = username
        xml.dbclass = dbclass
        xml.port = port
        xml.query_band = query_band
        xml.initial_sql = initial_sql
        return xml

    @property
    def dbname(self):
        """Database name for the connection. Not the table name."""
        return self._dbname

    @dbname.setter
    def dbname(self, value):
        """
        Set the connection's database name property.

        Args:
            value: New name of the database. String.

        Returns:
            Nothing.
        """
        self._dbname = value
        self._connectionXML.set('dbname', value)

    @property
    def server(self):
        """Hostname or IP address of the database server. May also be a URL in some connection types."""
        return self._server

    @server.setter
    def server(self, value):
        """
        Set the connection's server property.

        Args:
            value: New server. String.

        Returns:
            Nothing.
        """
        self._server = value
        self._connectionXML.set('server', value)

    @property
    def username(self):
        """Username used to authenticate to the database."""
        return self._username

    @username.setter
    def username(self, value):
        """
        Set the connection's username property.

        Args:
            value: New username value. String.

        Returns:
            Nothing.
        """
        self._username = value
        self._connectionXML.set('username', value)

    @property
    def authentication(self):
        # Read-only: authentication mode captured from the connection XML.
        return self._authentication

    @property
    def dbclass(self):
        """The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list
        can be found in dbclass.py"""
        return self._class

    @dbclass.setter
    def dbclass(self, value):
        """Set the connection's dbclass property.

        Args:
            value: New dbclass value. String.

        Returns:
            Nothing.

        Raises:
            AttributeError: if `value` is not a known database class.
        """
        if not is_valid_dbclass(value):
            raise AttributeError("'{}' is not a valid database type".format(value))
        self._class = value
        self._connectionXML.set('class', value)

    @property
    def port(self):
        """Port used to connect to the database."""
        return self._port

    @port.setter
    def port(self, value):
        """Set the connection's port property.

        Args:
            value: New port value. String.

        Returns:
            Nothing.
        """
        self._port = value
        # If port is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['port']
            except KeyError:
                pass
        else:
            self._connectionXML.set('port', value)

    @property
    def query_band(self):
        """Query band passed on connection to database."""
        return self._query_band

    # BUGFIX: the query_band setter body was missing here, leaving the
    # `@query_band.setter` decorator stacked on the next property and breaking
    # both `query_band` assignment and the `initial_sql` property. Restored.
    @query_band.setter
    def query_band(self, value):
        """Set the connection's query_band property.

        Args:
            value: New query_band value. String.

        Returns:
            Nothing.
        """
        self._query_band = value
        # If query band is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['query-band-spec']
            except KeyError:
                pass
        else:
            self._connectionXML.set('query-band-spec', value)

    @property
    def initial_sql(self):
        """Initial SQL to be run."""
        return self._initial_sql

    @initial_sql.setter
    def initial_sql(self, value):
        """Set the connection's initial_sql property.

        Args:
            value: New initial_sql value. String.

        Returns:
            Nothing.
        """
        self._initial_sql = value
        # If initial_sql is None we remove the attribute and don't write it to XML
        if value is None:
            try:
                del self._connectionXML.attrib['one-time-sql']
            except KeyError:
                pass
        else:
            self._connectionXML.set('one-time-sql', value)
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.initial_sql
|
python
|
def initial_sql(self, value):
self._initial_sql = value
# If initial_sql is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value)
|
Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L190-L208
| null |
class Connection(object):
"""A class representing connections inside Data Sources."""
def __init__(self, connxml):
"""Connection is usually instantiated by passing in connection elements
in a Data Source. If creating a connection from scratch you can call
`from_attributes` passing in the connection attributes.
"""
self._connectionXML = connxml
self._dbname = connxml.get('dbname')
self._server = connxml.get('server')
self._username = connxml.get('username')
self._authentication = connxml.get('authentication')
self._class = connxml.get('class')
self._port = connxml.get('port', None)
self._query_band = connxml.get('query-band-spec', None)
self._initial_sql = connxml.get('one-time-sql', None)
def __repr__(self):
return "'<Connection server='{}' dbname='{}' @ {}>'".format(self._server, self._dbname, hex(id(self)))
@classmethod
def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
initial_sql=None, authentication=''):
"""Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau."""
root = ET.Element('connection', authentication=authentication)
xml = cls(root)
xml.server = server
xml.dbname = dbname
xml.username = username
xml.dbclass = dbclass
xml.port = port
xml.query_band = query_band
xml.initial_sql = initial_sql
return xml
@property
def dbname(self):
"""Database name for the connection. Not the table name."""
return self._dbname
@dbname.setter
def dbname(self, value):
"""
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
"""
self._dbname = value
self._connectionXML.set('dbname', value)
@property
def server(self):
"""Hostname or IP address of the database server. May also be a URL in some connection types."""
return self._server
@server.setter
def server(self, value):
"""
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
"""
self._server = value
self._connectionXML.set('server', value)
@property
def username(self):
"""Username used to authenticate to the database."""
return self._username
@username.setter
def username(self, value):
"""
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
"""
self._username = value
self._connectionXML.set('username', value)
@property
def authentication(self):
return self._authentication
@property
def dbclass(self):
"""The type of connection (e.g. 'MySQL', 'Postgresql'). A complete list
can be found in dbclass.py"""
return self._class
@dbclass.setter
def dbclass(self, value):
"""Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
"""
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value)
@property
def port(self):
"""Port used to connect to the database."""
return self._port
@port.setter
def port(self, value):
"""Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
"""
self._port = value
# If port is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['port']
except KeyError:
pass
else:
self._connectionXML.set('port', value)
@property
def query_band(self):
"""Query band passed on connection to database."""
return self._query_band
@query_band.setter
def query_band(self, value):
"""Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
"""
self._query_band = value
# If query band is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['query-band-spec']
except KeyError:
pass
else:
self._connectionXML.set('query-band-spec', value)
@property
def initial_sql(self):
"""Initial SQL to be run."""
return self._initial_sql
@initial_sql.setter
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
base36encode
|
python
|
def base36encode(number):
    """Converts an integer into a base36 string."""
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    prefix = '-' if number < 0 else ''
    value = abs(number)
    # Single-digit values (including 0) map straight into the alphabet.
    if value < len(digits):
        return prefix + digits[value]
    encoded = []
    while value:
        value, remainder = divmod(value, len(digits))
        encoded.append(digits[remainder])
    return prefix + ''.join(reversed(encoded))
|
Converts an integer into a base36 string.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L63-L82
| null |
import collections
import itertools
import xml.etree.ElementTree as ET
import xml.sax.saxutils as sax
from uuid import uuid4
from tableaudocumentapi import Connection, xfile
from tableaudocumentapi import Field
from tableaudocumentapi.multilookup_dict import MultiLookupDict
from tableaudocumentapi.xfile import xml_open
########
# This is needed in order to determine if something is a string or not. It is necessary because
# of differences between python2 (basestring) and python3 (str). If python2 support is ever
# dropped, remove this and change the basestring references below to str
try:
basestring
except NameError: # pragma: no cover
basestring = str
########
_ColumnObjectReturnTuple = collections.namedtuple('_ColumnObjectReturnTupleType', ['id', 'object'])
def _get_metadata_xml_for_field(root_xml, field_name):
if "'" in field_name:
field_name = sax.escape(field_name, {"'": "'"})
xpath = u".//metadata-record[@class='column'][local-name='{}']".format(field_name)
return root_xml.find(xpath)
def _is_used_by_worksheet(names, field):
return any(y for y in names if y in field.worksheets)
class FieldDictionary(MultiLookupDict):
    """Field lookup dictionary with worksheet-usage queries."""

    def used_by_sheet(self, name):
        """Return the fields used by sheet *name* (a string) or by any
        sheet named in *name* (a list of strings)."""
        if isinstance(name, basestring):
            # Single sheet name: direct membership check.
            matches = lambda field: name in field.worksheets
        else:
            # List of names: match if any of them appears in the field's sheets.
            matches = lambda field: _is_used_by_worksheet(name, field)
        return [field for field in self.values() if matches(field)]
def _column_object_from_column_xml(root_xml, column_xml):
    """Build a Field from a `column` element, enriching it with its
    matching metadata-record (when one exists), and return (id, field)."""
    field = Field.from_column_xml(column_xml)
    metadata = _get_metadata_xml_for_field(root_xml, field.id)
    if metadata is not None:
        field.apply_metadata(metadata)
    return _ColumnObjectReturnTuple(field.id, field)
def _column_object_from_metadata_xml(metadata_xml):
    """Build a Field from a `metadata-record` element and return (id, field)."""
    field = Field.from_metadata_xml(metadata_xml)
    return _ColumnObjectReturnTuple(field.id, field)
def _make_unique_name(dbclass):
    """Return '<dbclass>.<random base36 suffix>' for a named-connection name."""
    return '{}.{}'.format(dbclass, base36encode(uuid4().int))
class ConnectionParser(object):
    """Parser for detecting and extracting connections from differing Tableau file formats."""

    def __init__(self, datasource_xml, version):
        self._dsxml = datasource_xml
        self._dsversion = version

    def _extract_federated_connections(self):
        wrapped = self._dsxml.findall('.//named-connections/named-connection/*')
        connections = [Connection(element) for element in wrapped]
        # 'sqlproxy' connections (Tableau Server Connections) are not embedded into
        # named-connection elements; extract them manually for now
        proxies = self._dsxml.findall("./connection[@class='sqlproxy']")
        connections.extend(Connection(element) for element in proxies)
        return connections

    def _extract_legacy_connection(self):
        return [Connection(element) for element in self._dsxml.findall('connection')]

    def get_connections(self):
        """Find and return all connections based on file format version."""
        # Pre-10.0 files keep top-level connection elements; 10.0+ files
        # nest them inside a federated named-connections wrapper.
        if float(self._dsversion) < 10:
            return self._extract_legacy_connection()
        return self._extract_federated_connections()
class Datasource(object):
    """A class representing Tableau Data Sources, embedded in workbook files or
    in TDS files.
    """

    def __init__(self, dsxml, filename=None):
        """
        Constructor. Default is to create datasource from xml.

        Args:
            dsxml: A `datasource` XML element.
            filename: Optional path the datasource was loaded from; needed by save().
        """
        self._filename = filename
        self._datasourceXML = dsxml
        self._datasourceTree = ET.ElementTree(self._datasourceXML)
        self._name = self._datasourceXML.get('name') or self._datasourceXML.get(
            'formatted-name')  # TDS files don't have a name attribute
        self._version = self._datasourceXML.get('version')
        self._caption = self._datasourceXML.get('caption', '')
        self._connection_parser = ConnectionParser(
            self._datasourceXML, version=self._version)
        self._connections = self._connection_parser.get_connections()
        # Fields are parsed lazily on first access to `fields`.
        self._fields = None

    @classmethod
    def from_file(cls, filename):
        """Initialize datasource from file (.tds or .tdsx)"""
        dsxml = xml_open(filename, 'datasource').getroot()
        return cls(dsxml, filename)

    @classmethod
    def from_connections(cls, caption, connections):
        """Create a new Data Source given a list of Connections."""
        # 10.x 'federated' layout: each connection is wrapped in a
        # named-connection element under a single federated connection.
        root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
        outer_connection = ET.SubElement(root, 'connection')
        outer_connection.set('class', 'federated')
        named_conns = ET.SubElement(outer_connection, 'named-connections')
        for conn in connections:
            nc = ET.SubElement(named_conns,
                               'named-connection',
                               name=_make_unique_name(conn.dbclass),
                               caption=conn.server)
            nc.append(conn._connectionXML)
        return cls(root)

    def save(self):
        """
        Call finalization code and save file.

        Args:
            None.

        Returns:
            Nothing.
        """
        # save the file
        xfile._save_file(self._filename, self._datasourceTree)

    def save_as(self, new_filename):
        """
        Save our file with the name provided.

        Args:
            new_filename: New name for the workbook file. String.

        Returns:
            Nothing.
        """
        xfile._save_file(self._filename, self._datasourceTree, new_filename)

    @property
    def name(self):
        """Name (or formatted-name) attribute of the datasource element."""
        return self._name

    @property
    def version(self):
        """File-format version string of the datasource."""
        return self._version

    @property
    def caption(self):
        """User-facing caption of the datasource ('' when absent)."""
        return self._caption

    @caption.setter
    def caption(self, value):
        # Keep the XML attribute and the cached value in sync.
        self._datasourceXML.set('caption', value)
        self._caption = value

    @caption.deleter
    def caption(self):
        # Removing the attribute resets the cached caption to the default ''.
        del self._datasourceXML.attrib['caption']
        self._caption = ''

    @property
    def connections(self):
        """Connections extracted from this datasource at construction time."""
        return self._connections

    def clear_repository_location(self):
        # Drop the repository-location element (server linkage), if present.
        tag = self._datasourceXML.find('./repository-location')
        if tag is not None:
            self._datasourceXML.remove(tag)

    @property
    def fields(self):
        """Dictionary of fields, parsed lazily on first access and cached."""
        if not self._fields:
            self._fields = self._get_all_fields()
        return self._fields

    def _get_all_fields(self):
        # Some columns are represented by `column` tags and others as `metadata-record` tags
        # Find them all and chain them into one dictionary
        column_field_objects = self._get_column_objects()
        existing_column_fields = [x.id for x in column_field_objects]
        metadata_only_field_objects = (x for x in self._get_metadata_objects() if x.id not in existing_column_fields)
        field_objects = itertools.chain(column_field_objects, metadata_only_field_objects)
        return FieldDictionary({k: v for k, v in field_objects})

    def _get_metadata_objects(self):
        # Lazily yields (id, Field) pairs for every column metadata-record.
        return (_column_object_from_metadata_xml(x)
                for x in self._datasourceTree.findall(".//metadata-record[@class='column']"))

    def _get_column_objects(self):
        # (id, Field) pairs for every explicit column element.
        return [_column_object_from_column_xml(self._datasourceTree, xml)
                for xml in self._datasourceTree.findall('.//column')]
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
ConnectionParser.get_connections
|
python
|
def get_connections(self):
if float(self._dsversion) < 10:
connections = self._extract_legacy_connection()
else:
connections = self._extract_federated_connections()
return connections
|
Find and return all connections based on file format version.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L108-L115
| null |
class ConnectionParser(object):
    """Parser for detecting and extracting connections from differing Tableau file formats."""

    def __init__(self, datasource_xml, version):
        self._dsxml = datasource_xml
        self._dsversion = version

    def _extract_federated_connections(self):
        connections = list(map(Connection, self._dsxml.findall('.//named-connections/named-connection/*')))
        # 'sqlproxy' connections (Tableau Server Connections) are not embedded into named-connection elements
        # extract them manually for now
        connections.extend(map(Connection, self._dsxml.findall("./connection[@class='sqlproxy']")))
        return connections

    def _extract_legacy_connection(self):
        return list(map(Connection, self._dsxml.findall('connection')))

    # BUGFIX: the public entry point was missing from this copy of the class;
    # callers (e.g. Datasource.__init__) invoke get_connections(). Restored to
    # match the complete definition of this class elsewhere in the file.
    def get_connections(self):
        """Find and return all connections based on file format version."""
        if float(self._dsversion) < 10:
            connections = self._extract_legacy_connection()
        else:
            connections = self._extract_federated_connections()
        return connections
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
Datasource.from_file
|
python
|
def from_file(cls, filename):
dsxml = xml_open(filename, 'datasource').getroot()
return cls(dsxml, filename)
|
Initialize datasource from file (.tds ot .tdsx)
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L142-L146
|
[
"def xml_open(filename, expected_root=None):\n \"\"\"Opens the provided 'filename'. Handles detecting if the file is an archive,\n detecting the document version, and validating the root tag.\"\"\"\n\n # Is the file a zip (.twbx or .tdsx)\n if zipfile.is_zipfile(filename):\n tree = get_xml_from_archive(filename)\n else:\n tree = ET.parse(filename)\n\n # Is the file a supported version\n tree_root = tree.getroot()\n file_version = Version(tree_root.attrib.get('version', '0.0'))\n\n if file_version < MIN_SUPPORTED_VERSION:\n raise TableauVersionNotSupportedException(file_version)\n\n # Does the root tag match the object type (workbook or data source)\n if expected_root and (expected_root != tree_root.tag):\n raise TableauInvalidFileException(\n \"'{}'' is not a valid '{}' file\".format(filename, expected_root))\n\n return tree\n"
] |
class Datasource(object):
"""A class representing Tableau Data Sources, embedded in workbook files or
in TDS files.
"""
def __init__(self, dsxml, filename=None):
"""
Constructor. Default is to create datasource from xml.
"""
self._filename = filename
self._datasourceXML = dsxml
self._datasourceTree = ET.ElementTree(self._datasourceXML)
self._name = self._datasourceXML.get('name') or self._datasourceXML.get(
'formatted-name') # TDS files don't have a name attribute
self._version = self._datasourceXML.get('version')
self._caption = self._datasourceXML.get('caption', '')
self._connection_parser = ConnectionParser(
self._datasourceXML, version=self._version)
self._connections = self._connection_parser.get_connections()
self._fields = None
@classmethod
@classmethod
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
def save(self):
"""
Call finalization code and save file.
Args:
None.
Returns:
Nothing.
"""
# save the file
xfile._save_file(self._filename, self._datasourceTree)
def save_as(self, new_filename):
"""
Save our file with the name provided.
Args:
new_filename: New name for the workbook file. String.
Returns:
Nothing.
"""
xfile._save_file(self._filename, self._datasourceTree, new_filename)
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def caption(self):
return self._caption
@caption.setter
def caption(self, value):
self._datasourceXML.set('caption', value)
self._caption = value
@caption.deleter
def caption(self):
del self._datasourceXML.attrib['caption']
self._caption = ''
@property
def connections(self):
return self._connections
def clear_repository_location(self):
tag = self._datasourceXML.find('./repository-location')
if tag is not None:
self._datasourceXML.remove(tag)
@property
def fields(self):
if not self._fields:
self._fields = self._get_all_fields()
return self._fields
def _get_all_fields(self):
# Some columns are represented by `column` tags and others as `metadata-record` tags
# Find them all and chain them into one dictionary
column_field_objects = self._get_column_objects()
existing_column_fields = [x.id for x in column_field_objects]
metadata_only_field_objects = (x for x in self._get_metadata_objects() if x.id not in existing_column_fields)
field_objects = itertools.chain(column_field_objects, metadata_only_field_objects)
return FieldDictionary({k: v for k, v in field_objects})
def _get_metadata_objects(self):
return (_column_object_from_metadata_xml(x)
for x in self._datasourceTree.findall(".//metadata-record[@class='column']"))
def _get_column_objects(self):
return [_column_object_from_column_xml(self._datasourceTree, xml)
for xml in self._datasourceTree.findall('.//column')]
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
Datasource.from_connections
|
python
|
def from_connections(cls, caption, connections):
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
|
Create a new Data Source give a list of Connections.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L149-L162
|
[
"def _make_unique_name(dbclass):\n rand_part = base36encode(uuid4().int)\n name = dbclass + '.' + rand_part\n return name\n"
] |
class Datasource(object):
"""A class representing Tableau Data Sources, embedded in workbook files or
in TDS files.
"""
def __init__(self, dsxml, filename=None):
"""
Constructor. Default is to create datasource from xml.
"""
self._filename = filename
self._datasourceXML = dsxml
self._datasourceTree = ET.ElementTree(self._datasourceXML)
self._name = self._datasourceXML.get('name') or self._datasourceXML.get(
'formatted-name') # TDS files don't have a name attribute
self._version = self._datasourceXML.get('version')
self._caption = self._datasourceXML.get('caption', '')
self._connection_parser = ConnectionParser(
self._datasourceXML, version=self._version)
self._connections = self._connection_parser.get_connections()
self._fields = None
@classmethod
def from_file(cls, filename):
"""Initialize datasource from file (.tds ot .tdsx)"""
dsxml = xml_open(filename, 'datasource').getroot()
return cls(dsxml, filename)
@classmethod
def save(self):
"""
Call finalization code and save file.
Args:
None.
Returns:
Nothing.
"""
# save the file
xfile._save_file(self._filename, self._datasourceTree)
def save_as(self, new_filename):
"""
Save our file with the name provided.
Args:
new_filename: New name for the workbook file. String.
Returns:
Nothing.
"""
xfile._save_file(self._filename, self._datasourceTree, new_filename)
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def caption(self):
return self._caption
@caption.setter
def caption(self, value):
self._datasourceXML.set('caption', value)
self._caption = value
@caption.deleter
def caption(self):
del self._datasourceXML.attrib['caption']
self._caption = ''
@property
def connections(self):
return self._connections
def clear_repository_location(self):
tag = self._datasourceXML.find('./repository-location')
if tag is not None:
self._datasourceXML.remove(tag)
@property
def fields(self):
if not self._fields:
self._fields = self._get_all_fields()
return self._fields
def _get_all_fields(self):
# Some columns are represented by `column` tags and others as `metadata-record` tags
# Find them all and chain them into one dictionary
column_field_objects = self._get_column_objects()
existing_column_fields = [x.id for x in column_field_objects]
metadata_only_field_objects = (x for x in self._get_metadata_objects() if x.id not in existing_column_fields)
field_objects = itertools.chain(column_field_objects, metadata_only_field_objects)
return FieldDictionary({k: v for k, v in field_objects})
def _get_metadata_objects(self):
return (_column_object_from_metadata_xml(x)
for x in self._datasourceTree.findall(".//metadata-record[@class='column']"))
def _get_column_objects(self):
return [_column_object_from_column_xml(self._datasourceTree, xml)
for xml in self._datasourceTree.findall('.//column')]
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
Datasource.save_as
|
python
|
def save_as(self, new_filename):
xfile._save_file(self._filename, self._datasourceTree, new_filename)
|
Save our file with the name provided.
Args:
new_filename: New name for the workbook file. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L180-L192
|
[
"def _save_file(container_file, xml_tree, new_filename=None):\n\n if new_filename is None:\n new_filename = container_file\n\n if zipfile.is_zipfile(container_file):\n save_into_archive(xml_tree, container_file, new_filename)\n else:\n xml_tree.write(new_filename, encoding=\"utf-8\", xml_declaration=True)\n"
] |
class Datasource(object):
"""A class representing Tableau Data Sources, embedded in workbook files or
in TDS files.
"""
def __init__(self, dsxml, filename=None):
"""
Constructor. Default is to create datasource from xml.
"""
self._filename = filename
self._datasourceXML = dsxml
self._datasourceTree = ET.ElementTree(self._datasourceXML)
self._name = self._datasourceXML.get('name') or self._datasourceXML.get(
'formatted-name') # TDS files don't have a name attribute
self._version = self._datasourceXML.get('version')
self._caption = self._datasourceXML.get('caption', '')
self._connection_parser = ConnectionParser(
self._datasourceXML, version=self._version)
self._connections = self._connection_parser.get_connections()
self._fields = None
@classmethod
def from_file(cls, filename):
"""Initialize datasource from file (.tds ot .tdsx)"""
dsxml = xml_open(filename, 'datasource').getroot()
return cls(dsxml, filename)
@classmethod
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
def save(self):
"""
Call finalization code and save file.
Args:
None.
Returns:
Nothing.
"""
# save the file
xfile._save_file(self._filename, self._datasourceTree)
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def caption(self):
return self._caption
@caption.setter
def caption(self, value):
self._datasourceXML.set('caption', value)
self._caption = value
@caption.deleter
def caption(self):
del self._datasourceXML.attrib['caption']
self._caption = ''
@property
def connections(self):
return self._connections
def clear_repository_location(self):
tag = self._datasourceXML.find('./repository-location')
if tag is not None:
self._datasourceXML.remove(tag)
@property
def fields(self):
if not self._fields:
self._fields = self._get_all_fields()
return self._fields
def _get_all_fields(self):
# Some columns are represented by `column` tags and others as `metadata-record` tags
# Find them all and chain them into one dictionary
column_field_objects = self._get_column_objects()
existing_column_fields = [x.id for x in column_field_objects]
metadata_only_field_objects = (x for x in self._get_metadata_objects() if x.id not in existing_column_fields)
field_objects = itertools.chain(column_field_objects, metadata_only_field_objects)
return FieldDictionary({k: v for k, v in field_objects})
def _get_metadata_objects(self):
return (_column_object_from_metadata_xml(x)
for x in self._datasourceTree.findall(".//metadata-record[@class='column']"))
def _get_column_objects(self):
return [_column_object_from_column_xml(self._datasourceTree, xml)
for xml in self._datasourceTree.findall('.//column')]
|
tableau/document-api-python
|
tableaudocumentapi/workbook.py
|
Workbook.save_as
|
python
|
def save_as(self, new_filename):
xfile._save_file(
self._filename, self._workbookTree, new_filename)
|
Save our file with the name provided.
Args:
new_filename: New name for the workbook file. String.
Returns:
Nothing.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/workbook.py#L60-L72
|
[
"def _save_file(container_file, xml_tree, new_filename=None):\n\n if new_filename is None:\n new_filename = container_file\n\n if zipfile.is_zipfile(container_file):\n save_into_archive(xml_tree, container_file, new_filename)\n else:\n xml_tree.write(new_filename, encoding=\"utf-8\", xml_declaration=True)\n"
] |
class Workbook(object):
"""A class for writing Tableau workbook files."""
def __init__(self, filename):
"""Open the workbook at `filename`. This will handle packaged and unpacked
workbook files automatically. This will also parse Data Sources and Worksheets
for access.
"""
self._filename = filename
self._workbookTree = xml_open(self._filename, 'workbook')
self._workbookRoot = self._workbookTree.getroot()
# prepare our datasource objects
self._datasources = self._prepare_datasources(
self._workbookRoot) # self.workbookRoot.find('datasources')
self._datasource_index = self._prepare_datasource_index(self._datasources)
self._worksheets = self._prepare_worksheets(
self._workbookRoot, self._datasource_index
)
@property
def datasources(self):
return self._datasources
@property
def worksheets(self):
return self._worksheets
@property
def filename(self):
return self._filename
def save(self):
"""
Call finalization code and save file.
Args:
None.
Returns:
Nothing.
"""
# save the file
xfile._save_file(self._filename, self._workbookTree)
@staticmethod
def _prepare_datasource_index(datasources):
retval = weakref.WeakValueDictionary()
for datasource in datasources:
retval[datasource.name] = datasource
return retval
@staticmethod
def _prepare_datasources(xml_root):
datasources = []
# loop through our datasources and append
datasource_elements = xml_root.find('datasources')
if datasource_elements is None:
return []
for datasource in datasource_elements:
ds = Datasource(datasource)
datasources.append(ds)
return datasources
@staticmethod
def _prepare_worksheets(xml_root, ds_index):
worksheets = []
worksheets_element = xml_root.find('.//worksheets')
if worksheets_element is None:
return worksheets
for worksheet_element in worksheets_element:
worksheet_name = worksheet_element.attrib['name']
worksheets.append(worksheet_name) # TODO: A real worksheet object, for now, only name
dependencies = worksheet_element.findall('.//datasource-dependencies')
for dependency in dependencies:
datasource_name = dependency.attrib['datasource']
datasource = ds_index[datasource_name]
for column in dependency.findall('.//column'):
column_name = column.attrib['name']
if column_name in datasource.fields:
datasource.fields[column_name].add_used_in(worksheet_name)
return worksheets
|
tableau/document-api-python
|
tableaudocumentapi/field.py
|
Field.name
|
python
|
def name(self):
alias = getattr(self, 'alias', None)
if alias:
return alias
caption = getattr(self, 'caption', None)
if caption:
return caption
return self.id
|
Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist.
|
train
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/field.py#L99-L112
| null |
class Field(object):
""" Represents a field in a datasource """
def __init__(self, column_xml=None, metadata_xml=None):
# Initialize all the possible attributes
for attrib in _ATTRIBUTES:
setattr(self, '_{}'.format(attrib), None)
for attrib in _METADATA_ATTRIBUTES:
setattr(self, '_{}'.format(attrib), None)
self._worksheets = set()
if column_xml is not None:
self._initialize_from_column_xml(column_xml)
# This isn't currently never called because of the way we get the data from the xml,
# but during the refactor, we might need it. This is commented out as a reminder
# if metadata_xml is not None:
# self.apply_metadata(metadata_xml)
elif metadata_xml is not None:
self._initialize_from_metadata_xml(metadata_xml)
else:
raise AttributeError('column_xml or metadata_xml needed to initialize field')
def _initialize_from_column_xml(self, xmldata):
for attrib in _ATTRIBUTES:
self._apply_attribute(xmldata, attrib, lambda x: xmldata.attrib.get(x, None))
def _initialize_from_metadata_xml(self, xmldata):
for metadata_name, field_name in _METADATA_TO_FIELD_MAP:
self._apply_attribute(xmldata, field_name, lambda x: xmldata.find('.//{}'.format(metadata_name)).text,
read_name=metadata_name)
self.apply_metadata(xmldata)
########################################
# Special Case methods for construction fields from various sources
# not intended for client use
########################################
def apply_metadata(self, metadata_record):
for attrib in _METADATA_ATTRIBUTES:
self._apply_attribute(metadata_record, attrib, functools.partial(_find_metadata_record, metadata_record))
def add_used_in(self, name):
self._worksheets.add(name)
@classmethod
def from_column_xml(cls, xmldata):
return cls(column_xml=xmldata)
@classmethod
def from_metadata_xml(cls, xmldata):
return cls(metadata_xml=xmldata)
def _apply_attribute(self, xmldata, attrib, default_func, read_name=None):
if read_name is None:
read_name = attrib
if hasattr(self, '_read_{}'.format(read_name)):
value = getattr(self, '_read_{}'.format(read_name))(xmldata)
else:
value = default_func(attrib)
setattr(self, '_{}'.format(attrib), value)
@property
@property
def id(self):
""" Name of the field as specified in the file, usually surrounded by [ ] """
return self._id
@property
def caption(self):
""" Name of the field as displayed in Tableau unless an aliases is defined """
return self._caption
@property
def alias(self):
""" Name of the field as displayed in Tableau if the default name isn't wanted """
return self._alias
@property
def datatype(self):
""" Type of the field within Tableau (string, integer, etc) """
return self._datatype
@property
def role(self):
""" Dimension or Measure """
return self._role
@property
def is_quantitative(self):
""" A dependent value, usually a measure of something
e.g. Profit, Gross Sales """
return self._type == 'quantitative'
@property
def is_ordinal(self):
""" Is this field a categorical field that has a specific order
e.g. How do you feel? 1 - awful, 2 - ok, 3 - fantastic """
return self._type == 'ordinal'
@property
def is_nominal(self):
""" Is this field a categorical field that does not have a specific order
e.g. What color is your hair? """
return self._type == 'nominal'
@property
def calculation(self):
""" If this field is a calculated field, this will be the formula """
return self._calculation
@property
def default_aggregation(self):
""" The default type of aggregation on the field (e.g Sum, Avg)"""
return self._aggregation
@property
def description(self):
""" The contents of the <desc> tag on a field """
return self._description
@property
def worksheets(self):
return list(self._worksheets)
######################################
# Special Case handling methods for reading the values from the XML
######################################
@staticmethod
def _read_id(xmldata):
# ID is actually the name of the field, but to provide a nice name, we call this ID
return xmldata.attrib.get('name', None)
@staticmethod
def _read_calculation(xmldata):
# The formula for a calculation is stored in a child element, so we need to pull it out separately.
calc = xmldata.find('.//calculation')
if calc is None:
return None
return calc.attrib.get('formula', None)
@staticmethod
def _read_description(xmldata):
description = xmldata.find('.//desc')
if description is None:
return None
description_string = ET.tostring(description, encoding='utf-8')
# Format expects a unicode string so in Python 2 we have to do the explicit conversion
if isinstance(description_string, bytes):
description_string = description_string.decode('utf-8')
return description_string
|
metacloud/gilt
|
gilt/config.py
|
config
|
python
|
def config(filename):
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
|
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L41-L59
|
[
"def _get_config_generator(filename):\n \"\"\"\n A generator which populates and return a dict.\n\n :parse filename: A string containing the path to YAML file.\n :return: dict\n \"\"\"\n for d in _get_config(filename):\n repo = d['git']\n parsedrepo = giturlparse.parse(repo)\n name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)\n src_dir = os.path.join(_get_clone_dir(), name)\n files = d.get('files')\n post_commands = d.get('post_commands', [])\n dst_dir = None\n if not files:\n dst_dir = _get_dst_dir(d['dst'])\n yield {\n 'git': repo,\n 'lock_file': _get_lock_file(name),\n 'version': d['version'],\n 'name': name,\n 'src': src_dir,\n 'dst': dst_dir,\n 'files': _get_files_config(src_dir, files),\n 'post_commands': post_commands,\n }\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def _get_files_config(src_dir, files_list):
"""
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
"""
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
def _get_config_generator(filename):
"""
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
def _get_files_generator(src_dir, files_list):
"""
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
"""
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
def _get_config(filename):
"""
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
def _get_dst_dir(dst_dir):
"""
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
"""
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
metacloud/gilt
|
gilt/config.py
|
_get_files_config
|
python
|
def _get_files_config(src_dir, files_list):
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
|
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L62-L76
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def config(filename):
"""
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
def _get_config_generator(filename):
"""
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
def _get_files_generator(src_dir, files_list):
"""
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
"""
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
def _get_config(filename):
"""
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
def _get_dst_dir(dst_dir):
"""
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
"""
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
metacloud/gilt
|
gilt/config.py
|
_get_config_generator
|
python
|
def _get_config_generator(filename):
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
|
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L79-L105
|
[
"def _get_files_config(src_dir, files_list):\n \"\"\"\n Construct `FileConfig` object and return a list.\n\n :param src_dir: A string containing the source directory.\n :param files_list: A list of dicts containing the src/dst mapping of files\n to overlay.\n :return: list\n \"\"\"\n FilesConfig = collections.namedtuple('FilesConfig',\n ['src', 'dst', 'post_commands'])\n\n return [\n FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)\n ]\n",
"def _get_config(filename):\n \"\"\"\n Parse the provided YAML file and return a dict.\n\n :parse filename: A string containing the path to YAML file.\n :return: dict\n \"\"\"\n i = interpolation.Interpolator(interpolation.TemplateWithDefaults,\n os.environ)\n\n with open(filename, 'r') as stream:\n try:\n interpolated_config = i.interpolate(stream.read())\n return yaml.safe_load(interpolated_config)\n except yaml.parser.ParserError as e:\n msg = 'Error parsing gilt config: {0}'.format(e)\n raise ParseError(msg)\n",
"def _get_clone_dir():\n \"\"\"\n Construct gilt's clone directory and return a str.\n\n :return: str\n \"\"\"\n return os.path.join(\n _get_base_dir(),\n 'clone', )\n",
"def _get_dst_dir(dst_dir):\n \"\"\"\n Prefix the provided string with working directory and return a\n str.\n\n :param dst_dir: A string to be prefixed with the working dir.\n :return: str\n \"\"\"\n wd = os.getcwd()\n _makedirs(dst_dir)\n\n return os.path.join(wd, dst_dir)\n",
"def _get_lock_file(name):\n \"\"\" Return the lock file for the given name. \"\"\"\n return os.path.join(\n _get_lock_dir(),\n name, )\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def config(filename):
"""
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
def _get_files_config(src_dir, files_list):
"""
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
"""
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
def _get_files_generator(src_dir, files_list):
"""
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
"""
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
def _get_config(filename):
"""
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
def _get_dst_dir(dst_dir):
"""
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
"""
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
metacloud/gilt
|
gilt/config.py
|
_get_files_generator
|
python
|
def _get_files_generator(src_dir, files_list):
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
|
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L108-L123
|
[
"def _get_dst_dir(dst_dir):\n \"\"\"\n Prefix the provided string with working directory and return a\n str.\n\n :param dst_dir: A string to be prefixed with the working dir.\n :return: str\n \"\"\"\n wd = os.getcwd()\n _makedirs(dst_dir)\n\n return os.path.join(wd, dst_dir)\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def config(filename):
"""
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
def _get_files_config(src_dir, files_list):
"""
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
"""
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
def _get_config_generator(filename):
"""
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
def _get_config(filename):
"""
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
def _get_dst_dir(dst_dir):
"""
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
"""
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
metacloud/gilt
|
gilt/config.py
|
_get_config
|
python
|
def _get_config(filename):
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
|
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L126-L142
|
[
"def interpolate(self, string):\n try:\n return self.templater(string).substitute(self.mapping)\n except ValueError:\n raise InvalidInterpolation(string)\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def config(filename):
"""
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
def _get_files_config(src_dir, files_list):
"""
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
"""
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
def _get_config_generator(filename):
"""
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
def _get_files_generator(src_dir, files_list):
"""
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
"""
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
def _get_dst_dir(dst_dir):
"""
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
"""
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
metacloud/gilt
|
gilt/config.py
|
_get_dst_dir
|
python
|
def _get_dst_dir(dst_dir):
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
|
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L145-L156
|
[
"def _makedirs(path):\n \"\"\"\n Create a base directory of the provided path and return None.\n\n :param path: A string containing a path to be deconstructed and basedir\n created.\n :return: None\n \"\"\"\n dirname, _ = os.path.split(path)\n try:\n os.makedirs(dirname)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n pass\n else:\n raise\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def config(filename):
"""
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
def _get_files_config(src_dir, files_list):
"""
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
"""
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
def _get_config_generator(filename):
"""
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
def _get_files_generator(src_dir, files_list):
"""
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
"""
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
def _get_config(filename):
"""
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
def _makedirs(path):
"""
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
"""
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
metacloud/gilt
|
gilt/config.py
|
_makedirs
|
python
|
def _makedirs(path):
dirname, _ = os.path.split(path)
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
|
Create a base directory of the provided path and return None.
:param path: A string containing a path to be deconstructed and basedir
created.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/config.py#L193-L208
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import errno
import os
import giturlparse
import yaml
from gilt import interpolation
class ParseError(Exception):
""" Error raised when a config can't be loaded properly. """
pass
BASE_WORKING_DIR = os.environ.get('GILT_CACHE_DIRECTORY', '~/.gilt')
def config(filename):
"""
Construct `Config` object and return a list.
:parse filename: A string containing the path to YAML file.
:return: list
"""
Config = collections.namedtuple('Config', [
'git',
'lock_file',
'version',
'name',
'src',
'dst',
'files',
'post_commands',
])
return [Config(**d) for d in _get_config_generator(filename)]
def _get_files_config(src_dir, files_list):
"""
Construct `FileConfig` object and return a list.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: list
"""
FilesConfig = collections.namedtuple('FilesConfig',
['src', 'dst', 'post_commands'])
return [
FilesConfig(**d) for d in _get_files_generator(src_dir, files_list)
]
def _get_config_generator(filename):
"""
A generator which populates and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
for d in _get_config(filename):
repo = d['git']
parsedrepo = giturlparse.parse(repo)
name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
src_dir = os.path.join(_get_clone_dir(), name)
files = d.get('files')
post_commands = d.get('post_commands', [])
dst_dir = None
if not files:
dst_dir = _get_dst_dir(d['dst'])
yield {
'git': repo,
'lock_file': _get_lock_file(name),
'version': d['version'],
'name': name,
'src': src_dir,
'dst': dst_dir,
'files': _get_files_config(src_dir, files),
'post_commands': post_commands,
}
def _get_files_generator(src_dir, files_list):
"""
A generator which populates and return a dict.
:param src_dir: A string containing the source directory.
:param files_list: A list of dicts containing the src/dst mapping of files
to overlay.
:return: dict
"""
if files_list:
for d in files_list:
yield {
'src': os.path.join(src_dir, d['src']),
'dst': _get_dst_dir(d['dst']),
'post_commands': d.get('post_commands', []),
}
def _get_config(filename):
"""
Parse the provided YAML file and return a dict.
:parse filename: A string containing the path to YAML file.
:return: dict
"""
i = interpolation.Interpolator(interpolation.TemplateWithDefaults,
os.environ)
with open(filename, 'r') as stream:
try:
interpolated_config = i.interpolate(stream.read())
return yaml.safe_load(interpolated_config)
except yaml.parser.ParserError as e:
msg = 'Error parsing gilt config: {0}'.format(e)
raise ParseError(msg)
def _get_dst_dir(dst_dir):
"""
Prefix the provided string with working directory and return a
str.
:param dst_dir: A string to be prefixed with the working dir.
:return: str
"""
wd = os.getcwd()
_makedirs(dst_dir)
return os.path.join(wd, dst_dir)
def _get_lock_file(name):
""" Return the lock file for the given name. """
return os.path.join(
_get_lock_dir(),
name, )
def _get_base_dir():
""" Return gilt's base working directory. """
return os.path.expanduser(BASE_WORKING_DIR)
def _get_lock_dir():
"""
Construct gilt's lock directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'lock', )
def _get_clone_dir():
"""
Construct gilt's clone directory and return a str.
:return: str
"""
return os.path.join(
_get_base_dir(),
'clone', )
|
metacloud/gilt
|
gilt/shell.py
|
main
|
python
|
def main(ctx, config, debug): # pragma: no cover
ctx.obj = {}
ctx.obj['args'] = {}
ctx.obj['args']['debug'] = debug
ctx.obj['args']['config'] = config
|
gilt - A GIT layering tool.
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/shell.py#L50-L55
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import click
import fasteners
import gilt
from gilt import config
from gilt import git
from gilt import util
class NotFoundError(Exception):
""" Error raised when a config can not be found. """
pass
@click.group()
@click.option(
'--config',
default='gilt.yml',
help='Path to config file. Default gilt.yml')
@click.option(
'--debug/--no-debug',
default=False,
help='Enable or disable debug mode. Default is disabled.')
@click.version_option(version=gilt.__version__)
@click.pass_context
@click.command()
@click.pass_context
def overlay(ctx): # pragma: no cover
""" Install gilt dependencies """
args = ctx.obj.get('args')
filename = args.get('config')
debug = args.get('debug')
_setup(filename)
for c in config.config(filename):
with fasteners.InterProcessLock(c.lock_file):
util.print_info('{}:'.format(c.name))
if not os.path.exists(c.src):
git.clone(c.name, c.git, c.src, debug=debug)
if c.dst:
git.extract(c.src, c.dst, c.version, debug=debug)
post_commands = {c.dst: c.post_commands}
else:
git.overlay(c.src, c.files, c.version, debug=debug)
post_commands = {
conf.dst: conf.post_commands
for conf in c.files
}
# Run post commands if any.
for dst, commands in post_commands.items():
for command in commands:
msg = ' - running `{}` in {}'.format(command, dst)
util.print_info(msg)
cmd = util.build_sh_cmd(command, cwd=dst)
util.run_command(cmd, debug=debug)
def _setup(filename):
if not os.path.exists(filename):
msg = 'Unable to find {}. Exiting.'.format(filename)
raise NotFoundError(msg)
working_dirs = [config._get_lock_dir(), config._get_clone_dir()]
for working_dir in working_dirs:
if not os.path.exists(working_dir):
os.makedirs(working_dir)
main.add_command(overlay)
|
metacloud/gilt
|
gilt/shell.py
|
overlay
|
python
|
def overlay(ctx): # pragma: no cover
args = ctx.obj.get('args')
filename = args.get('config')
debug = args.get('debug')
_setup(filename)
for c in config.config(filename):
with fasteners.InterProcessLock(c.lock_file):
util.print_info('{}:'.format(c.name))
if not os.path.exists(c.src):
git.clone(c.name, c.git, c.src, debug=debug)
if c.dst:
git.extract(c.src, c.dst, c.version, debug=debug)
post_commands = {c.dst: c.post_commands}
else:
git.overlay(c.src, c.files, c.version, debug=debug)
post_commands = {
conf.dst: conf.post_commands
for conf in c.files
}
# Run post commands if any.
for dst, commands in post_commands.items():
for command in commands:
msg = ' - running `{}` in {}'.format(command, dst)
util.print_info(msg)
cmd = util.build_sh_cmd(command, cwd=dst)
util.run_command(cmd, debug=debug)
|
Install gilt dependencies
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/shell.py#L60-L87
|
[
"def config(filename):\n \"\"\"\n Construct `Config` object and return a list.\n\n :parse filename: A string containing the path to YAML file.\n :return: list\n \"\"\"\n Config = collections.namedtuple('Config', [\n 'git',\n 'lock_file',\n 'version',\n 'name',\n 'src',\n 'dst',\n 'files',\n 'post_commands',\n ])\n\n return [Config(**d) for d in _get_config_generator(filename)]\n",
"def _setup(filename):\n if not os.path.exists(filename):\n msg = 'Unable to find {}. Exiting.'.format(filename)\n raise NotFoundError(msg)\n\n working_dirs = [config._get_lock_dir(), config._get_clone_dir()]\n for working_dir in working_dirs:\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)\n",
"def clone(name, repository, destination, debug=False):\n \"\"\"\n Clone the specified repository into a temporary directory and return None.\n\n :param name: A string containing the name of the repository being cloned.\n :param repository: A string containing the repository to clone.\n :param destination: A string containing the directory to clone the\n repository into.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n msg = ' - cloning {} to {}'.format(name, destination)\n util.print_info(msg)\n cmd = sh.git.bake('clone', repository, destination)\n util.run_command(cmd, debug=debug)\n",
"def extract(repository, destination, version, debug=False):\n \"\"\"\n Extract the specified repository/version into the given directory and\n return None.\n\n :param repository: A string containing the path to the repository to be\n extracted.\n :param destination: A string containing the directory to clone the\n repository into. Relative to the directory ``gilt`` is running\n in. Must end with a '/'.\n :param version: A string containing the branch/tag/sha to be exported.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n with util.saved_cwd():\n if os.path.isdir(destination):\n shutil.rmtree(destination)\n\n os.chdir(repository)\n _get_version(version, debug)\n cmd = sh.git.bake(\n 'checkout-index', force=True, all=True, prefix=destination)\n util.run_command(cmd, debug=debug)\n msg = ' - extracting ({}) {} to {}'.format(version, repository,\n destination)\n util.print_info(msg)\n",
"def print_info(msg):\n \"\"\" Print the given message to STDOUT. \"\"\"\n print(msg)\n",
"def run_command(cmd, debug=False):\n \"\"\"\n Execute the given command and return None.\n\n :param cmd: A `sh.Command` object to execute.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n if debug:\n msg = ' PWD: {}'.format(os.getcwd())\n print_warn(msg)\n msg = ' COMMAND: {}'.format(cmd)\n print_warn(msg)\n cmd()\n",
"def overlay(repository, files, version, debug=False):\n \"\"\"\n Overlay files from the specified repository/version into the given\n directory and return None.\n\n :param repository: A string containing the path to the repository to be\n extracted.\n :param files: A list of `FileConfig` objects.\n :param version: A string containing the branch/tag/sha to be exported.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n with util.saved_cwd():\n os.chdir(repository)\n _get_version(version, debug)\n\n for fc in files:\n if '*' in fc.src:\n for filename in glob.glob(fc.src):\n util.copy(filename, fc.dst)\n msg = ' - copied ({}) {} to {}'.format(\n version, filename, fc.dst)\n util.print_info(msg)\n else:\n if os.path.isdir(fc.dst) and os.path.isdir(fc.src):\n shutil.rmtree(fc.dst)\n util.copy(fc.src, fc.dst)\n msg = ' - copied ({}) {} to {}'.format(\n version, fc.src, fc.dst)\n util.print_info(msg)\n",
"def build_sh_cmd(cmd, cwd=None):\n \"\"\"Build a `sh.Command` from a string.\n\n :param cmd: String with the command to convert.\n :param cwd: Optional path to use as working directory.\n :return: `sh.Command`\n \"\"\"\n args = cmd.split()\n return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import click
import fasteners
import gilt
from gilt import config
from gilt import git
from gilt import util
class NotFoundError(Exception):
""" Error raised when a config can not be found. """
pass
@click.group()
@click.option(
'--config',
default='gilt.yml',
help='Path to config file. Default gilt.yml')
@click.option(
'--debug/--no-debug',
default=False,
help='Enable or disable debug mode. Default is disabled.')
@click.version_option(version=gilt.__version__)
@click.pass_context
def main(ctx, config, debug): # pragma: no cover
""" gilt - A GIT layering tool. """
ctx.obj = {}
ctx.obj['args'] = {}
ctx.obj['args']['debug'] = debug
ctx.obj['args']['config'] = config
@click.command()
@click.pass_context
def _setup(filename):
if not os.path.exists(filename):
msg = 'Unable to find {}. Exiting.'.format(filename)
raise NotFoundError(msg)
working_dirs = [config._get_lock_dir(), config._get_clone_dir()]
for working_dir in working_dirs:
if not os.path.exists(working_dir):
os.makedirs(working_dir)
main.add_command(overlay)
|
metacloud/gilt
|
gilt/git.py
|
clone
|
python
|
def clone(name, repository, destination, debug=False):
msg = ' - cloning {} to {}'.format(name, destination)
util.print_info(msg)
cmd = sh.git.bake('clone', repository, destination)
util.run_command(cmd, debug=debug)
|
Clone the specified repository into a temporary directory and return None.
:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the
repository into.
:param debug: An optional bool to toggle debug output.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L32-L46
|
[
"def print_info(msg):\n \"\"\" Print the given message to STDOUT. \"\"\"\n print(msg)\n",
"def run_command(cmd, debug=False):\n \"\"\"\n Execute the given command and return None.\n\n :param cmd: A `sh.Command` object to execute.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n if debug:\n msg = ' PWD: {}'.format(os.getcwd())\n print_warn(msg)\n msg = ' COMMAND: {}'.format(cmd)\n print_warn(msg)\n cmd()\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import os
import shutil
import sh
from gilt import util
def extract(repository, destination, version, debug=False):
"""
Extract the specified repository/version into the given directory and
return None.
:param repository: A string containing the path to the repository to be
extracted.
:param destination: A string containing the directory to clone the
repository into. Relative to the directory ``gilt`` is running
in. Must end with a '/'.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
if os.path.isdir(destination):
shutil.rmtree(destination)
os.chdir(repository)
_get_version(version, debug)
cmd = sh.git.bake(
'checkout-index', force=True, all=True, prefix=destination)
util.run_command(cmd, debug=debug)
msg = ' - extracting ({}) {} to {}'.format(version, repository,
destination)
util.print_info(msg)
def overlay(repository, files, version, debug=False):
"""
Overlay files from the specified repository/version into the given
directory and return None.
:param repository: A string containing the path to the repository to be
extracted.
:param files: A list of `FileConfig` objects.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
os.chdir(repository)
_get_version(version, debug)
for fc in files:
if '*' in fc.src:
for filename in glob.glob(fc.src):
util.copy(filename, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, filename, fc.dst)
util.print_info(msg)
else:
if os.path.isdir(fc.dst) and os.path.isdir(fc.src):
shutil.rmtree(fc.dst)
util.copy(fc.src, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, fc.src, fc.dst)
util.print_info(msg)
def _get_version(version, debug=False):
"""
Handle switching to the specified version and return None.
1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if not any(
(_has_branch(version, debug), _has_tag(version, debug), _has_commit(
version, debug))):
cmd = sh.git.bake('fetch')
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('checkout', version)
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('clean', '-d', '-x', '-f')
util.run_command(cmd, debug=debug)
if _has_branch(version, debug):
cmd = sh.git.bake('pull', rebase=True, ff_only=True)
util.run_command(cmd, debug=debug)
def _has_commit(version, debug=False):
"""
Determine a version is a local git commit sha or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
if _has_tag(version, debug) or _has_branch(version, debug):
return False
cmd = sh.git.bake('cat-file', '-e', version)
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_tag(version, debug=False):
"""
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_branch(version, debug=False):
"""
Determine a version is a local git branch name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/heads/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
metacloud/gilt
|
gilt/git.py
|
extract
|
python
|
def extract(repository, destination, version, debug=False):
with util.saved_cwd():
if os.path.isdir(destination):
shutil.rmtree(destination)
os.chdir(repository)
_get_version(version, debug)
cmd = sh.git.bake(
'checkout-index', force=True, all=True, prefix=destination)
util.run_command(cmd, debug=debug)
msg = ' - extracting ({}) {} to {}'.format(version, repository,
destination)
util.print_info(msg)
|
Extract the specified repository/version into the given directory and
return None.
:param repository: A string containing the path to the repository to be
extracted.
:param destination: A string containing the directory to clone the
repository into. Relative to the directory ``gilt`` is running
in. Must end with a '/'.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L49-L74
|
[
"def print_info(msg):\n \"\"\" Print the given message to STDOUT. \"\"\"\n print(msg)\n",
"def run_command(cmd, debug=False):\n \"\"\"\n Execute the given command and return None.\n\n :param cmd: A `sh.Command` object to execute.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n if debug:\n msg = ' PWD: {}'.format(os.getcwd())\n print_warn(msg)\n msg = ' COMMAND: {}'.format(cmd)\n print_warn(msg)\n cmd()\n",
"def _get_version(version, debug=False):\n \"\"\"\n Handle switching to the specified version and return None.\n\n 1. Fetch the origin.\n 2. Checkout the specified version.\n 3. Clean the repository before we begin.\n 4. Pull the origin when a branch; _not_ a commit id.\n\n :param version: A string containing the branch/tag/sha to be exported.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n if not any(\n (_has_branch(version, debug), _has_tag(version, debug), _has_commit(\n version, debug))):\n cmd = sh.git.bake('fetch')\n util.run_command(cmd, debug=debug)\n cmd = sh.git.bake('checkout', version)\n util.run_command(cmd, debug=debug)\n cmd = sh.git.bake('clean', '-d', '-x', '-f')\n util.run_command(cmd, debug=debug)\n if _has_branch(version, debug):\n cmd = sh.git.bake('pull', rebase=True, ff_only=True)\n util.run_command(cmd, debug=debug)\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import os
import shutil
import sh
from gilt import util
def clone(name, repository, destination, debug=False):
"""
Clone the specified repository into a temporary directory and return None.
:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the
repository into.
:param debug: An optional bool to toggle debug output.
:return: None
"""
msg = ' - cloning {} to {}'.format(name, destination)
util.print_info(msg)
cmd = sh.git.bake('clone', repository, destination)
util.run_command(cmd, debug=debug)
def overlay(repository, files, version, debug=False):
"""
Overlay files from the specified repository/version into the given
directory and return None.
:param repository: A string containing the path to the repository to be
extracted.
:param files: A list of `FileConfig` objects.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
os.chdir(repository)
_get_version(version, debug)
for fc in files:
if '*' in fc.src:
for filename in glob.glob(fc.src):
util.copy(filename, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, filename, fc.dst)
util.print_info(msg)
else:
if os.path.isdir(fc.dst) and os.path.isdir(fc.src):
shutil.rmtree(fc.dst)
util.copy(fc.src, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, fc.src, fc.dst)
util.print_info(msg)
def _get_version(version, debug=False):
"""
Handle switching to the specified version and return None.
1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if not any(
(_has_branch(version, debug), _has_tag(version, debug), _has_commit(
version, debug))):
cmd = sh.git.bake('fetch')
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('checkout', version)
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('clean', '-d', '-x', '-f')
util.run_command(cmd, debug=debug)
if _has_branch(version, debug):
cmd = sh.git.bake('pull', rebase=True, ff_only=True)
util.run_command(cmd, debug=debug)
def _has_commit(version, debug=False):
"""
Determine a version is a local git commit sha or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
if _has_tag(version, debug) or _has_branch(version, debug):
return False
cmd = sh.git.bake('cat-file', '-e', version)
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_tag(version, debug=False):
"""
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_branch(version, debug=False):
"""
Determine a version is a local git branch name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/heads/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
metacloud/gilt
|
gilt/git.py
|
overlay
|
python
|
def overlay(repository, files, version, debug=False):
with util.saved_cwd():
os.chdir(repository)
_get_version(version, debug)
for fc in files:
if '*' in fc.src:
for filename in glob.glob(fc.src):
util.copy(filename, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, filename, fc.dst)
util.print_info(msg)
else:
if os.path.isdir(fc.dst) and os.path.isdir(fc.src):
shutil.rmtree(fc.dst)
util.copy(fc.src, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, fc.src, fc.dst)
util.print_info(msg)
|
Overlay files from the specified repository/version into the given
directory and return None.
:param repository: A string containing the path to the repository to be
extracted.
:param files: A list of `FileConfig` objects.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L77-L106
|
[
"def copy(src, dst):\n \"\"\"\n Handle the copying of a file or directory.\n\n The destination basedir _must_ exist.\n\n :param src: A string containing the path of the source to copy. If the\n source ends with a '/', will become a recursive directory copy of source.\n :param dst: A string containing the path to the destination. If the\n destination ends with a '/', will copy into the target directory.\n :return: None\n \"\"\"\n try:\n shutil.copytree(src, dst)\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise\n",
"def print_info(msg):\n \"\"\" Print the given message to STDOUT. \"\"\"\n print(msg)\n",
"def _get_version(version, debug=False):\n \"\"\"\n Handle switching to the specified version and return None.\n\n 1. Fetch the origin.\n 2. Checkout the specified version.\n 3. Clean the repository before we begin.\n 4. Pull the origin when a branch; _not_ a commit id.\n\n :param version: A string containing the branch/tag/sha to be exported.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n if not any(\n (_has_branch(version, debug), _has_tag(version, debug), _has_commit(\n version, debug))):\n cmd = sh.git.bake('fetch')\n util.run_command(cmd, debug=debug)\n cmd = sh.git.bake('checkout', version)\n util.run_command(cmd, debug=debug)\n cmd = sh.git.bake('clean', '-d', '-x', '-f')\n util.run_command(cmd, debug=debug)\n if _has_branch(version, debug):\n cmd = sh.git.bake('pull', rebase=True, ff_only=True)\n util.run_command(cmd, debug=debug)\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import os
import shutil
import sh
from gilt import util
def clone(name, repository, destination, debug=False):
"""
Clone the specified repository into a temporary directory and return None.
:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the
repository into.
:param debug: An optional bool to toggle debug output.
:return: None
"""
msg = ' - cloning {} to {}'.format(name, destination)
util.print_info(msg)
cmd = sh.git.bake('clone', repository, destination)
util.run_command(cmd, debug=debug)
def extract(repository, destination, version, debug=False):
"""
Extract the specified repository/version into the given directory and
return None.
:param repository: A string containing the path to the repository to be
extracted.
:param destination: A string containing the directory to clone the
repository into. Relative to the directory ``gilt`` is running
in. Must end with a '/'.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
if os.path.isdir(destination):
shutil.rmtree(destination)
os.chdir(repository)
_get_version(version, debug)
cmd = sh.git.bake(
'checkout-index', force=True, all=True, prefix=destination)
util.run_command(cmd, debug=debug)
msg = ' - extracting ({}) {} to {}'.format(version, repository,
destination)
util.print_info(msg)
def _get_version(version, debug=False):
"""
Handle switching to the specified version and return None.
1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if not any(
(_has_branch(version, debug), _has_tag(version, debug), _has_commit(
version, debug))):
cmd = sh.git.bake('fetch')
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('checkout', version)
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('clean', '-d', '-x', '-f')
util.run_command(cmd, debug=debug)
if _has_branch(version, debug):
cmd = sh.git.bake('pull', rebase=True, ff_only=True)
util.run_command(cmd, debug=debug)
def _has_commit(version, debug=False):
"""
Determine a version is a local git commit sha or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
if _has_tag(version, debug) or _has_branch(version, debug):
return False
cmd = sh.git.bake('cat-file', '-e', version)
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_tag(version, debug=False):
"""
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_branch(version, debug=False):
"""
Determine a version is a local git branch name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/heads/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
metacloud/gilt
|
gilt/git.py
|
_get_version
|
python
|
def _get_version(version, debug=False):
if not any(
(_has_branch(version, debug), _has_tag(version, debug), _has_commit(
version, debug))):
cmd = sh.git.bake('fetch')
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('checkout', version)
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('clean', '-d', '-x', '-f')
util.run_command(cmd, debug=debug)
if _has_branch(version, debug):
cmd = sh.git.bake('pull', rebase=True, ff_only=True)
util.run_command(cmd, debug=debug)
|
Handle switching to the specified version and return None.
1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L109-L133
|
[
"def run_command(cmd, debug=False):\n \"\"\"\n Execute the given command and return None.\n\n :param cmd: A `sh.Command` object to execute.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n if debug:\n msg = ' PWD: {}'.format(os.getcwd())\n print_warn(msg)\n msg = ' COMMAND: {}'.format(cmd)\n print_warn(msg)\n cmd()\n",
"def _has_branch(version, debug=False):\n \"\"\"\n Determine a version is a local git branch name or not.\n\n :param version: A string containing the branch/tag/sha to be determined.\n :param debug: An optional bool to toggle debug output.\n :return: bool\n \"\"\"\n cmd = sh.git.bake('show-ref', '--verify', '--quiet',\n \"refs/heads/{}\".format(version))\n try:\n util.run_command(cmd, debug=debug)\n return True\n except sh.ErrorReturnCode:\n return False\n",
"def _has_tag(version, debug=False):\n \"\"\"\n Determine a version is a local git tag name or not.\n\n :param version: A string containing the branch/tag/sha to be determined.\n :param debug: An optional bool to toggle debug output.\n :return: bool\n \"\"\"\n cmd = sh.git.bake('show-ref', '--verify', '--quiet',\n \"refs/tags/{}\".format(version))\n try:\n util.run_command(cmd, debug=debug)\n return True\n except sh.ErrorReturnCode:\n return False\n",
"def _has_commit(version, debug=False):\n \"\"\"\n Determine a version is a local git commit sha or not.\n\n :param version: A string containing the branch/tag/sha to be determined.\n :param debug: An optional bool to toggle debug output.\n :return: bool\n \"\"\"\n if _has_tag(version, debug) or _has_branch(version, debug):\n return False\n cmd = sh.git.bake('cat-file', '-e', version)\n try:\n util.run_command(cmd, debug=debug)\n return True\n except sh.ErrorReturnCode:\n return False\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import os
import shutil
import sh
from gilt import util
def clone(name, repository, destination, debug=False):
"""
Clone the specified repository into a temporary directory and return None.
:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the
repository into.
:param debug: An optional bool to toggle debug output.
:return: None
"""
msg = ' - cloning {} to {}'.format(name, destination)
util.print_info(msg)
cmd = sh.git.bake('clone', repository, destination)
util.run_command(cmd, debug=debug)
def extract(repository, destination, version, debug=False):
"""
Extract the specified repository/version into the given directory and
return None.
:param repository: A string containing the path to the repository to be
extracted.
:param destination: A string containing the directory to clone the
repository into. Relative to the directory ``gilt`` is running
in. Must end with a '/'.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
if os.path.isdir(destination):
shutil.rmtree(destination)
os.chdir(repository)
_get_version(version, debug)
cmd = sh.git.bake(
'checkout-index', force=True, all=True, prefix=destination)
util.run_command(cmd, debug=debug)
msg = ' - extracting ({}) {} to {}'.format(version, repository,
destination)
util.print_info(msg)
def overlay(repository, files, version, debug=False):
"""
Overlay files from the specified repository/version into the given
directory and return None.
:param repository: A string containing the path to the repository to be
extracted.
:param files: A list of `FileConfig` objects.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
os.chdir(repository)
_get_version(version, debug)
for fc in files:
if '*' in fc.src:
for filename in glob.glob(fc.src):
util.copy(filename, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, filename, fc.dst)
util.print_info(msg)
else:
if os.path.isdir(fc.dst) and os.path.isdir(fc.src):
shutil.rmtree(fc.dst)
util.copy(fc.src, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, fc.src, fc.dst)
util.print_info(msg)
def _has_commit(version, debug=False):
"""
Determine a version is a local git commit sha or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
if _has_tag(version, debug) or _has_branch(version, debug):
return False
cmd = sh.git.bake('cat-file', '-e', version)
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_tag(version, debug=False):
"""
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_branch(version, debug=False):
"""
Determine a version is a local git branch name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/heads/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
metacloud/gilt
|
gilt/git.py
|
_has_commit
|
python
|
def _has_commit(version, debug=False):
if _has_tag(version, debug) or _has_branch(version, debug):
return False
cmd = sh.git.bake('cat-file', '-e', version)
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
Determine a version is a local git commit sha or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L136-L151
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import os
import shutil
import sh
from gilt import util
def clone(name, repository, destination, debug=False):
"""
Clone the specified repository into a temporary directory and return None.
:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the
repository into.
:param debug: An optional bool to toggle debug output.
:return: None
"""
msg = ' - cloning {} to {}'.format(name, destination)
util.print_info(msg)
cmd = sh.git.bake('clone', repository, destination)
util.run_command(cmd, debug=debug)
def extract(repository, destination, version, debug=False):
"""
Extract the specified repository/version into the given directory and
return None.
:param repository: A string containing the path to the repository to be
extracted.
:param destination: A string containing the directory to clone the
repository into. Relative to the directory ``gilt`` is running
in. Must end with a '/'.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
if os.path.isdir(destination):
shutil.rmtree(destination)
os.chdir(repository)
_get_version(version, debug)
cmd = sh.git.bake(
'checkout-index', force=True, all=True, prefix=destination)
util.run_command(cmd, debug=debug)
msg = ' - extracting ({}) {} to {}'.format(version, repository,
destination)
util.print_info(msg)
def overlay(repository, files, version, debug=False):
"""
Overlay files from the specified repository/version into the given
directory and return None.
:param repository: A string containing the path to the repository to be
extracted.
:param files: A list of `FileConfig` objects.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
os.chdir(repository)
_get_version(version, debug)
for fc in files:
if '*' in fc.src:
for filename in glob.glob(fc.src):
util.copy(filename, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, filename, fc.dst)
util.print_info(msg)
else:
if os.path.isdir(fc.dst) and os.path.isdir(fc.src):
shutil.rmtree(fc.dst)
util.copy(fc.src, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, fc.src, fc.dst)
util.print_info(msg)
def _get_version(version, debug=False):
"""
Handle switching to the specified version and return None.
1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if not any(
(_has_branch(version, debug), _has_tag(version, debug), _has_commit(
version, debug))):
cmd = sh.git.bake('fetch')
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('checkout', version)
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('clean', '-d', '-x', '-f')
util.run_command(cmd, debug=debug)
if _has_branch(version, debug):
cmd = sh.git.bake('pull', rebase=True, ff_only=True)
util.run_command(cmd, debug=debug)
def _has_tag(version, debug=False):
"""
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_branch(version, debug=False):
"""
Determine a version is a local git branch name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/heads/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
metacloud/gilt
|
gilt/git.py
|
_has_tag
|
python
|
def _has_tag(version, debug=False):
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/tags/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
Determine a version is a local git tag name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/git.py#L154-L168
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import glob
import os
import shutil
import sh
from gilt import util
def clone(name, repository, destination, debug=False):
"""
Clone the specified repository into a temporary directory and return None.
:param name: A string containing the name of the repository being cloned.
:param repository: A string containing the repository to clone.
:param destination: A string containing the directory to clone the
repository into.
:param debug: An optional bool to toggle debug output.
:return: None
"""
msg = ' - cloning {} to {}'.format(name, destination)
util.print_info(msg)
cmd = sh.git.bake('clone', repository, destination)
util.run_command(cmd, debug=debug)
def extract(repository, destination, version, debug=False):
"""
Extract the specified repository/version into the given directory and
return None.
:param repository: A string containing the path to the repository to be
extracted.
:param destination: A string containing the directory to clone the
repository into. Relative to the directory ``gilt`` is running
in. Must end with a '/'.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
if os.path.isdir(destination):
shutil.rmtree(destination)
os.chdir(repository)
_get_version(version, debug)
cmd = sh.git.bake(
'checkout-index', force=True, all=True, prefix=destination)
util.run_command(cmd, debug=debug)
msg = ' - extracting ({}) {} to {}'.format(version, repository,
destination)
util.print_info(msg)
def overlay(repository, files, version, debug=False):
"""
Overlay files from the specified repository/version into the given
directory and return None.
:param repository: A string containing the path to the repository to be
extracted.
:param files: A list of `FileConfig` objects.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
with util.saved_cwd():
os.chdir(repository)
_get_version(version, debug)
for fc in files:
if '*' in fc.src:
for filename in glob.glob(fc.src):
util.copy(filename, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, filename, fc.dst)
util.print_info(msg)
else:
if os.path.isdir(fc.dst) and os.path.isdir(fc.src):
shutil.rmtree(fc.dst)
util.copy(fc.src, fc.dst)
msg = ' - copied ({}) {} to {}'.format(
version, fc.src, fc.dst)
util.print_info(msg)
def _get_version(version, debug=False):
"""
Handle switching to the specified version and return None.
1. Fetch the origin.
2. Checkout the specified version.
3. Clean the repository before we begin.
4. Pull the origin when a branch; _not_ a commit id.
:param version: A string containing the branch/tag/sha to be exported.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if not any(
(_has_branch(version, debug), _has_tag(version, debug), _has_commit(
version, debug))):
cmd = sh.git.bake('fetch')
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('checkout', version)
util.run_command(cmd, debug=debug)
cmd = sh.git.bake('clean', '-d', '-x', '-f')
util.run_command(cmd, debug=debug)
if _has_branch(version, debug):
cmd = sh.git.bake('pull', rebase=True, ff_only=True)
util.run_command(cmd, debug=debug)
def _has_commit(version, debug=False):
"""
Determine a version is a local git commit sha or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
if _has_tag(version, debug) or _has_branch(version, debug):
return False
cmd = sh.git.bake('cat-file', '-e', version)
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
def _has_branch(version, debug=False):
"""
Determine a version is a local git branch name or not.
:param version: A string containing the branch/tag/sha to be determined.
:param debug: An optional bool to toggle debug output.
:return: bool
"""
cmd = sh.git.bake('show-ref', '--verify', '--quiet',
"refs/heads/{}".format(version))
try:
util.run_command(cmd, debug=debug)
return True
except sh.ErrorReturnCode:
return False
|
metacloud/gilt
|
gilt/util.py
|
run_command
|
python
|
def run_command(cmd, debug=False):
if debug:
msg = ' PWD: {}'.format(os.getcwd())
print_warn(msg)
msg = ' COMMAND: {}'.format(cmd)
print_warn(msg)
cmd()
|
Execute the given command and return None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L46-L59
|
[
"def print_warn(msg):\n \"\"\" Print the given message to STDOUT in YELLOW. \"\"\"\n print('{}{}'.format(colorama.Fore.YELLOW, msg))\n"
] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import contextlib
import errno
import os
import sh
import shutil
import colorama
colorama.init(autoreset=True)
def print_info(msg):
""" Print the given message to STDOUT. """
print(msg)
def print_warn(msg):
""" Print the given message to STDOUT in YELLOW. """
print('{}{}'.format(colorama.Fore.YELLOW, msg))
def build_sh_cmd(cmd, cwd=None):
"""Build a `sh.Command` from a string.
:param cmd: String with the command to convert.
:param cwd: Optional path to use as working directory.
:return: `sh.Command`
"""
args = cmd.split()
return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
@contextlib.contextmanager
def saved_cwd():
""" Context manager to restore previous working directory. """
saved = os.getcwd()
try:
yield
finally:
os.chdir(saved)
def copy(src, dst):
"""
Handle the copying of a file or directory.
The destination basedir _must_ exist.
:param src: A string containing the path of the source to copy. If the
source ends with a '/', will become a recursive directory copy of source.
:param dst: A string containing the path to the destination. If the
destination ends with a '/', will copy into the target directory.
:return: None
"""
try:
shutil.copytree(src, dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else:
raise
|
metacloud/gilt
|
gilt/util.py
|
build_sh_cmd
|
python
|
def build_sh_cmd(cmd, cwd=None):
args = cmd.split()
return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
|
Build a `sh.Command` from a string.
:param cmd: String with the command to convert.
:param cwd: Optional path to use as working directory.
:return: `sh.Command`
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L62-L70
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import contextlib
import errno
import os
import sh
import shutil
import colorama
colorama.init(autoreset=True)
def print_info(msg):
""" Print the given message to STDOUT. """
print(msg)
def print_warn(msg):
""" Print the given message to STDOUT in YELLOW. """
print('{}{}'.format(colorama.Fore.YELLOW, msg))
def run_command(cmd, debug=False):
"""
Execute the given command and return None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if debug:
msg = ' PWD: {}'.format(os.getcwd())
print_warn(msg)
msg = ' COMMAND: {}'.format(cmd)
print_warn(msg)
cmd()
@contextlib.contextmanager
def saved_cwd():
""" Context manager to restore previous working directory. """
saved = os.getcwd()
try:
yield
finally:
os.chdir(saved)
def copy(src, dst):
"""
Handle the copying of a file or directory.
The destination basedir _must_ exist.
:param src: A string containing the path of the source to copy. If the
source ends with a '/', will become a recursive directory copy of source.
:param dst: A string containing the path to the destination. If the
destination ends with a '/', will copy into the target directory.
:return: None
"""
try:
shutil.copytree(src, dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else:
raise
|
metacloud/gilt
|
gilt/util.py
|
copy
|
python
|
def copy(src, dst):
try:
shutil.copytree(src, dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else:
raise
|
Handle the copying of a file or directory.
The destination basedir _must_ exist.
:param src: A string containing the path of the source to copy. If the
source ends with a '/', will become a recursive directory copy of source.
:param dst: A string containing the path to the destination. If the
destination ends with a '/', will copy into the target directory.
:return: None
|
train
|
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L83-L101
| null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import contextlib
import errno
import os
import sh
import shutil
import colorama
colorama.init(autoreset=True)
def print_info(msg):
""" Print the given message to STDOUT. """
print(msg)
def print_warn(msg):
""" Print the given message to STDOUT in YELLOW. """
print('{}{}'.format(colorama.Fore.YELLOW, msg))
def run_command(cmd, debug=False):
"""
Execute the given command and return None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if debug:
msg = ' PWD: {}'.format(os.getcwd())
print_warn(msg)
msg = ' COMMAND: {}'.format(cmd)
print_warn(msg)
cmd()
def build_sh_cmd(cmd, cwd=None):
"""Build a `sh.Command` from a string.
:param cmd: String with the command to convert.
:param cwd: Optional path to use as working directory.
:return: `sh.Command`
"""
args = cmd.split()
return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
@contextlib.contextmanager
def saved_cwd():
""" Context manager to restore previous working directory. """
saved = os.getcwd()
try:
yield
finally:
os.chdir(saved)
|
cyface/django-termsandconditions
|
termsandconditions/middleware.py
|
is_path_protected
|
python
|
def is_path_protected(path):
protected = True
for exclude_path in TERMS_EXCLUDE_URL_PREFIX_LIST:
if path.startswith(exclude_path):
protected = False
for contains_path in TERMS_EXCLUDE_URL_CONTAINS_LIST:
if contains_path in path:
protected = False
if path in TERMS_EXCLUDE_URL_LIST:
protected = False
if path.startswith(ACCEPT_TERMS_PATH):
protected = False
return protected
|
returns True if given path is to be protected, otherwise False
The path is not to be protected when it appears on:
TERMS_EXCLUDE_URL_PREFIX_LIST, TERMS_EXCLUDE_URL_LIST, TERMS_EXCLUDE_URL_CONTAINS_LIST or as
ACCEPT_TERMS_PATH
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/middleware.py#L49-L74
| null |
"""Terms and Conditions Middleware"""
from .models import TermsAndConditions
from django.conf import settings
import logging
from .pipeline import redirect_to_terms_accept
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION >= (1, 10, 0):
from django.utils.deprecation import MiddlewareMixin
else:
MiddlewareMixin = object
LOGGER = logging.getLogger(name='termsandconditions')
ACCEPT_TERMS_PATH = getattr(settings, 'ACCEPT_TERMS_PATH', '/terms/accept/')
TERMS_EXCLUDE_URL_PREFIX_LIST = getattr(settings, 'TERMS_EXCLUDE_URL_PREFIX_LIST', {'/admin', '/terms'})
TERMS_EXCLUDE_URL_CONTAINS_LIST = getattr(settings, 'TERMS_EXCLUDE_URL_CONTAINS_LIST', {})
TERMS_EXCLUDE_URL_LIST = getattr(settings, 'TERMS_EXCLUDE_URL_LIST', {'/', '/termsrequired/', '/logout/', '/securetoo/'})
class TermsAndConditionsRedirectMiddleware(MiddlewareMixin):
"""
This middleware checks to see if the user is logged in, and if so,
if they have accepted all the active terms.
"""
def process_request(self, request):
"""Process each request to app to ensure terms have been accepted"""
LOGGER.debug('termsandconditions.middleware')
current_path = request.META['PATH_INFO']
if DJANGO_VERSION <= (2, 0, 0):
user_authenticated = request.user.is_authenticated()
else:
user_authenticated = request.user.is_authenticated
if user_authenticated and is_path_protected(current_path):
for term in TermsAndConditions.get_active_terms_not_agreed_to(request.user):
# Check for querystring and include it if there is one
qs = request.META['QUERY_STRING']
current_path += '?' + qs if qs else ''
return redirect_to_terms_accept(current_path, term.slug)
return None
|
cyface/django-termsandconditions
|
termsandconditions/middleware.py
|
TermsAndConditionsRedirectMiddleware.process_request
|
python
|
def process_request(self, request):
LOGGER.debug('termsandconditions.middleware')
current_path = request.META['PATH_INFO']
if DJANGO_VERSION <= (2, 0, 0):
user_authenticated = request.user.is_authenticated()
else:
user_authenticated = request.user.is_authenticated
if user_authenticated and is_path_protected(current_path):
for term in TermsAndConditions.get_active_terms_not_agreed_to(request.user):
# Check for querystring and include it if there is one
qs = request.META['QUERY_STRING']
current_path += '?' + qs if qs else ''
return redirect_to_terms_accept(current_path, term.slug)
return None
|
Process each request to app to ensure terms have been accepted
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/middleware.py#L27-L46
|
[
"def redirect_to_terms_accept(current_path='/', slug='default'):\n \"\"\"Redirect the user to the terms and conditions accept page.\"\"\"\n redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))\n if slug != 'default':\n redirect_url_parts[2] += slug\n querystring = QueryDict(redirect_url_parts[4], mutable=True)\n querystring[TERMS_RETURNTO_PARAM] = current_path\n redirect_url_parts[4] = querystring.urlencode(safe='/')\n return HttpResponseRedirect(urlunparse(redirect_url_parts))\n",
"def is_path_protected(path):\n \"\"\"\n returns True if given path is to be protected, otherwise False\n\n The path is not to be protected when it appears on:\n TERMS_EXCLUDE_URL_PREFIX_LIST, TERMS_EXCLUDE_URL_LIST, TERMS_EXCLUDE_URL_CONTAINS_LIST or as\n ACCEPT_TERMS_PATH\n \"\"\"\n protected = True\n\n for exclude_path in TERMS_EXCLUDE_URL_PREFIX_LIST:\n if path.startswith(exclude_path):\n protected = False\n\n for contains_path in TERMS_EXCLUDE_URL_CONTAINS_LIST:\n if contains_path in path:\n protected = False\n\n if path in TERMS_EXCLUDE_URL_LIST:\n protected = False\n\n if path.startswith(ACCEPT_TERMS_PATH):\n protected = False\n\n\n return protected\n",
"def get_active_terms_not_agreed_to(user):\n \"\"\"Checks to see if a specified user has agreed to all the latest terms and conditions\"\"\"\n\n if TERMS_EXCLUDE_USERS_WITH_PERM is not None:\n if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:\n # Django's has_perm() returns True if is_superuser, we don't want that\n return []\n\n not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())\n if not_agreed_terms is None:\n try:\n LOGGER.debug(\"Not Agreed Terms\")\n not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(\n userterms__in=UserTermsAndConditions.objects.filter(user=user)\n ).order_by('slug')\n\n cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)\n except (TypeError, UserTermsAndConditions.DoesNotExist):\n return []\n\n return not_agreed_terms\n"
] |
class TermsAndConditionsRedirectMiddleware(MiddlewareMixin):
"""
This middleware checks to see if the user is logged in, and if so,
if they have accepted all the active terms.
"""
|
cyface/django-termsandconditions
|
termsandconditions/views.py
|
GetTermsViewMixin.get_terms
|
python
|
def get_terms(self, kwargs):
slug = kwargs.get("slug")
version = kwargs.get("version")
if slug and version:
terms = [TermsAndConditions.objects.filter(slug=slug, version_number=version).latest('date_active')]
elif slug:
terms = [TermsAndConditions.get_active(slug)]
else:
# Return a list of not agreed to terms for the current user for the list view
terms = TermsAndConditions.get_active_terms_not_agreed_to(self.request.user)
return terms
|
Checks URL parameters for slug and/or version to pull the right TermsAndConditions object
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L28-L41
|
[
"def get_active(slug=DEFAULT_TERMS_SLUG):\n \"\"\"Finds the latest of a particular terms and conditions\"\"\"\n\n active_terms = cache.get('tandc.active_terms_' + slug)\n if active_terms is None:\n try:\n active_terms = TermsAndConditions.objects.filter(\n date_active__isnull=False,\n date_active__lte=timezone.now(),\n slug=slug).latest('date_active')\n cache.set('tandc.active_terms_' + slug, active_terms, TERMS_CACHE_SECONDS)\n except TermsAndConditions.DoesNotExist: # pragma: nocover\n LOGGER.error(\"Requested Terms and Conditions that Have Not Been Created.\")\n return None\n\n return active_terms\n",
"def get_active_terms_not_agreed_to(user):\n \"\"\"Checks to see if a specified user has agreed to all the latest terms and conditions\"\"\"\n\n if TERMS_EXCLUDE_USERS_WITH_PERM is not None:\n if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:\n # Django's has_perm() returns True if is_superuser, we don't want that\n return []\n\n not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())\n if not_agreed_terms is None:\n try:\n LOGGER.debug(\"Not Agreed Terms\")\n not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(\n userterms__in=UserTermsAndConditions.objects.filter(user=user)\n ).order_by('slug')\n\n cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)\n except (TypeError, UserTermsAndConditions.DoesNotExist):\n return []\n\n return not_agreed_terms\n"
] |
class GetTermsViewMixin(object):
"""Checks URL parameters for slug and/or version to pull the right TermsAndConditions object"""
|
cyface/django-termsandconditions
|
termsandconditions/views.py
|
TermsView.get_context_data
|
python
|
def get_context_data(self, **kwargs):
context = super(TermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
|
Pass additional context data
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L53-L57
| null |
class TermsView(DetailView, GetTermsViewMixin):
"""
View Terms and Conditions View
url: /terms/view
"""
template_name = "termsandconditions/tc_view_terms.html"
context_object_name = 'terms_list'
def get_object(self, queryset=None):
"""Override of DetailView method, queries for which T&C to return"""
LOGGER.debug('termsandconditions.views.TermsView.get_object')
return self.get_terms(self.kwargs)
|
cyface/django-termsandconditions
|
termsandconditions/views.py
|
AcceptTermsView.get_initial
|
python
|
def get_initial(self):
LOGGER.debug('termsandconditions.views.AcceptTermsView.get_initial')
terms = self.get_terms(self.kwargs)
return_to = self.request.GET.get('returnTo', '/')
return {'terms': terms, 'returnTo': return_to}
|
Override of CreateView method, queries for which T&C to accept and catches returnTo from URL
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L82-L89
| null |
class AcceptTermsView(CreateView, GetTermsViewMixin):
"""
Terms and Conditions Acceptance view
url: /terms/accept
"""
model = UserTermsAndConditions
form_class = UserTermsAndConditionsModelForm
template_name = "termsandconditions/tc_accept_terms.html"
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(AcceptTermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def post(self, request, *args, **kwargs):
"""
Handles POST request.
"""
return_url = request.POST.get('returnTo', '/')
terms_ids = request.POST.getlist('terms')
if not terms_ids: # pragma: nocover
return HttpResponseRedirect(return_url)
if DJANGO_VERSION <= (2, 0, 0):
user_authenticated = request.user.is_authenticated()
else:
user_authenticated = request.user.is_authenticated
if user_authenticated:
user = request.user
else:
# Get user out of saved pipeline from django-socialauth
if 'partial_pipeline' in request.session:
user_pk = request.session['partial_pipeline']['kwargs']['user']['pk']
user = User.objects.get(id=user_pk)
else:
return HttpResponseRedirect('/')
store_ip_address = getattr(settings, 'TERMS_STORE_IP_ADDRESS', True)
if store_ip_address:
ip_address = request.META.get(getattr(settings, 'TERMS_IP_HEADER_NAME', DEFAULT_TERMS_IP_HEADER_NAME))
else:
ip_address = ""
for terms_id in terms_ids:
try:
new_user_terms = UserTermsAndConditions(
user=user,
terms=TermsAndConditions.objects.get(pk=int(terms_id)),
ip_address=ip_address
)
new_user_terms.save()
except IntegrityError: # pragma: nocover
pass
return HttpResponseRedirect(return_url)
|
cyface/django-termsandconditions
|
termsandconditions/views.py
|
AcceptTermsView.post
|
python
|
def post(self, request, *args, **kwargs):
return_url = request.POST.get('returnTo', '/')
terms_ids = request.POST.getlist('terms')
if not terms_ids: # pragma: nocover
return HttpResponseRedirect(return_url)
if DJANGO_VERSION <= (2, 0, 0):
user_authenticated = request.user.is_authenticated()
else:
user_authenticated = request.user.is_authenticated
if user_authenticated:
user = request.user
else:
# Get user out of saved pipeline from django-socialauth
if 'partial_pipeline' in request.session:
user_pk = request.session['partial_pipeline']['kwargs']['user']['pk']
user = User.objects.get(id=user_pk)
else:
return HttpResponseRedirect('/')
store_ip_address = getattr(settings, 'TERMS_STORE_IP_ADDRESS', True)
if store_ip_address:
ip_address = request.META.get(getattr(settings, 'TERMS_IP_HEADER_NAME', DEFAULT_TERMS_IP_HEADER_NAME))
else:
ip_address = ""
for terms_id in terms_ids:
try:
new_user_terms = UserTermsAndConditions(
user=user,
terms=TermsAndConditions.objects.get(pk=int(terms_id)),
ip_address=ip_address
)
new_user_terms.save()
except IntegrityError: # pragma: nocover
pass
return HttpResponseRedirect(return_url)
|
Handles POST request.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L91-L133
| null |
class AcceptTermsView(CreateView, GetTermsViewMixin):
"""
Terms and Conditions Acceptance view
url: /terms/accept
"""
model = UserTermsAndConditions
form_class = UserTermsAndConditionsModelForm
template_name = "termsandconditions/tc_accept_terms.html"
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(AcceptTermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def get_initial(self):
"""Override of CreateView method, queries for which T&C to accept and catches returnTo from URL"""
LOGGER.debug('termsandconditions.views.AcceptTermsView.get_initial')
terms = self.get_terms(self.kwargs)
return_to = self.request.GET.get('returnTo', '/')
return {'terms': terms, 'returnTo': return_to}
|
cyface/django-termsandconditions
|
termsandconditions/views.py
|
EmailTermsView.form_valid
|
python
|
def form_valid(self, form):
LOGGER.debug('termsandconditions.views.EmailTermsView.form_valid')
template = get_template("termsandconditions/tc_email_terms.html")
template_rendered = template.render({"terms": form.cleaned_data.get('terms')})
LOGGER.debug("Email Terms Body:")
LOGGER.debug(template_rendered)
try:
send_mail(form.cleaned_data.get('email_subject', _('Terms')),
template_rendered,
settings.DEFAULT_FROM_EMAIL,
[form.cleaned_data.get('email_address')],
fail_silently=False)
messages.add_message(self.request, messages.INFO, _("Terms and Conditions Sent."))
except SMTPException: # pragma: no cover
messages.add_message(self.request, messages.ERROR, _("An Error Occurred Sending Your Message."))
self.success_url = form.cleaned_data.get('returnTo', '/') or '/'
return super(EmailTermsView, self).form_valid(form)
|
Override of CreateView method, sends the email.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L162-L184
| null |
class EmailTermsView(FormView, GetTermsViewMixin):
"""
Email Terms and Conditions View
url: /terms/email
"""
template_name = "termsandconditions/tc_email_terms_form.html"
form_class = EmailTermsForm
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(EmailTermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def get_initial(self):
"""Override of CreateView method, queries for which T&C send, catches returnTo from URL"""
LOGGER.debug('termsandconditions.views.EmailTermsView.get_initial')
terms = self.get_terms(self.kwargs)
return_to = self.request.GET.get('returnTo', '/')
return {'terms': terms, 'returnTo': return_to}
def form_invalid(self, form):
"""Override of CreateView method, logs invalid email form submissions."""
LOGGER.debug("Invalid Email Form Submitted")
messages.add_message(self.request, messages.ERROR, _("Invalid Email Address."))
return super(EmailTermsView, self).form_invalid(form)
|
cyface/django-termsandconditions
|
termsandconditions/views.py
|
EmailTermsView.form_invalid
|
python
|
def form_invalid(self, form):
LOGGER.debug("Invalid Email Form Submitted")
messages.add_message(self.request, messages.ERROR, _("Invalid Email Address."))
return super(EmailTermsView, self).form_invalid(form)
|
Override of CreateView method, logs invalid email form submissions.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L186-L190
| null |
class EmailTermsView(FormView, GetTermsViewMixin):
"""
Email Terms and Conditions View
url: /terms/email
"""
template_name = "termsandconditions/tc_email_terms_form.html"
form_class = EmailTermsForm
def get_context_data(self, **kwargs):
"""Pass additional context data"""
context = super(EmailTermsView, self).get_context_data(**kwargs)
context['terms_base_template'] = getattr(settings, 'TERMS_BASE_TEMPLATE', DEFAULT_TERMS_BASE_TEMPLATE)
return context
def get_initial(self):
"""Override of CreateView method, queries for which T&C send, catches returnTo from URL"""
LOGGER.debug('termsandconditions.views.EmailTermsView.get_initial')
terms = self.get_terms(self.kwargs)
return_to = self.request.GET.get('returnTo', '/')
return {'terms': terms, 'returnTo': return_to}
def form_valid(self, form):
"""Override of CreateView method, sends the email."""
LOGGER.debug('termsandconditions.views.EmailTermsView.form_valid')
template = get_template("termsandconditions/tc_email_terms.html")
template_rendered = template.render({"terms": form.cleaned_data.get('terms')})
LOGGER.debug("Email Terms Body:")
LOGGER.debug(template_rendered)
try:
send_mail(form.cleaned_data.get('email_subject', _('Terms')),
template_rendered,
settings.DEFAULT_FROM_EMAIL,
[form.cleaned_data.get('email_address')],
fail_silently=False)
messages.add_message(self.request, messages.INFO, _("Terms and Conditions Sent."))
except SMTPException: # pragma: no cover
messages.add_message(self.request, messages.ERROR, _("An Error Occurred Sending Your Message."))
self.success_url = form.cleaned_data.get('returnTo', '/') or '/'
return super(EmailTermsView, self).form_valid(form)
|
cyface/django-termsandconditions
|
termsandconditions/decorators.py
|
terms_required
|
python
|
def terms_required(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
"""Method to wrap the view passed in"""
# If user has not logged in, or if they have logged in and already agreed to the terms, let the view through
if DJANGO_VERSION <= (2, 0, 0):
user_authenticated = request.user.is_authenticated()
else:
user_authenticated = request.user.is_authenticated
if not user_authenticated or not TermsAndConditions.get_active_terms_not_agreed_to(request.user):
return view_func(request, *args, **kwargs)
# Otherwise, redirect to terms accept
current_path = request.path
login_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring['returnTo'] = current_path
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
return _wrapped_view
|
This decorator checks to see if the user is logged in, and if so, if they have accepted the site terms.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/decorators.py#L11-L36
| null |
"""View Decorators for termsandconditions module"""
from django import VERSION as DJANGO_VERSION
from future.moves.urllib.parse import urlparse, urlunparse
from functools import wraps
from django.http import HttpResponseRedirect, QueryDict
from django.utils.decorators import available_attrs
from .models import TermsAndConditions
from .middleware import ACCEPT_TERMS_PATH
|
cyface/django-termsandconditions
|
termsandconditions/models.py
|
TermsAndConditions.get_active
|
python
|
def get_active(slug=DEFAULT_TERMS_SLUG):
active_terms = cache.get('tandc.active_terms_' + slug)
if active_terms is None:
try:
active_terms = TermsAndConditions.objects.filter(
date_active__isnull=False,
date_active__lte=timezone.now(),
slug=slug).latest('date_active')
cache.set('tandc.active_terms_' + slug, active_terms, TERMS_CACHE_SECONDS)
except TermsAndConditions.DoesNotExist: # pragma: nocover
LOGGER.error("Requested Terms and Conditions that Have Not Been Created.")
return None
return active_terms
|
Finds the latest of a particular terms and conditions
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/models.py#L75-L90
| null |
class TermsAndConditions(models.Model):
"""Holds Versions of TermsAndConditions
Active one for a given slug is: date_active is not Null and is latest not in future"""
slug = models.SlugField(default=DEFAULT_TERMS_SLUG)
name = models.TextField(max_length=255)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through=UserTermsAndConditions, blank=True)
version_number = models.DecimalField(default=1.0, decimal_places=2, max_digits=6)
text = models.TextField(null=True, blank=True)
info = models.TextField(
null=True, blank=True, help_text=_("Provide users with some info about what's changed and why")
)
date_active = models.DateTimeField(blank=True, null=True, help_text=_("Leave Null To Never Make Active"))
date_created = models.DateTimeField(blank=True, auto_now_add=True)
class Meta:
"""Model Meta Information"""
ordering = ['-date_active', ]
get_latest_by = 'date_active'
verbose_name = 'Terms and Conditions'
verbose_name_plural = 'Terms and Conditions'
def __str__(self): # pragma: nocover
return "{0}-{1:.2f}".format(self.slug, self.version_number)
def get_absolute_url(self):
return reverse(
'tc_view_specific_version_page',
args=[self.slug, self.version_number]) # pylint: disable=E1101
@staticmethod
@staticmethod
def get_active_terms_ids():
"""Returns a list of the IDs of of all terms and conditions"""
active_terms_ids = cache.get('tandc.active_terms_ids')
if active_terms_ids is None:
active_terms_dict = {}
active_terms_ids = []
active_terms_set = TermsAndConditions.objects.filter(date_active__isnull=False, date_active__lte=timezone.now()).order_by('date_active')
for active_terms in active_terms_set:
active_terms_dict[active_terms.slug] = active_terms.id
active_terms_dict = OrderedDict(sorted(active_terms_dict.items(), key=lambda t: t[0]))
for terms in active_terms_dict:
active_terms_ids.append(active_terms_dict[terms])
cache.set('tandc.active_terms_ids', active_terms_ids, TERMS_CACHE_SECONDS)
return active_terms_ids
@staticmethod
def get_active_terms_list():
"""Returns all the latest active terms and conditions"""
active_terms_list = cache.get('tandc.active_terms_list')
if active_terms_list is None:
active_terms_list = TermsAndConditions.objects.filter(id__in=TermsAndConditions.get_active_terms_ids()).order_by('slug')
cache.set('tandc.active_terms_list', active_terms_list, TERMS_CACHE_SECONDS)
return active_terms_list
@staticmethod
def get_active_terms_not_agreed_to(user):
"""Checks to see if a specified user has agreed to all the latest terms and conditions"""
if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:
# Django's has_perm() returns True if is_superuser, we don't want that
return []
not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())
if not_agreed_terms is None:
try:
LOGGER.debug("Not Agreed Terms")
not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(
userterms__in=UserTermsAndConditions.objects.filter(user=user)
).order_by('slug')
cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)
except (TypeError, UserTermsAndConditions.DoesNotExist):
return []
return not_agreed_terms
|
cyface/django-termsandconditions
|
termsandconditions/models.py
|
TermsAndConditions.get_active_terms_ids
|
python
|
def get_active_terms_ids():
active_terms_ids = cache.get('tandc.active_terms_ids')
if active_terms_ids is None:
active_terms_dict = {}
active_terms_ids = []
active_terms_set = TermsAndConditions.objects.filter(date_active__isnull=False, date_active__lte=timezone.now()).order_by('date_active')
for active_terms in active_terms_set:
active_terms_dict[active_terms.slug] = active_terms.id
active_terms_dict = OrderedDict(sorted(active_terms_dict.items(), key=lambda t: t[0]))
for terms in active_terms_dict:
active_terms_ids.append(active_terms_dict[terms])
cache.set('tandc.active_terms_ids', active_terms_ids, TERMS_CACHE_SECONDS)
return active_terms_ids
|
Returns a list of the IDs of of all terms and conditions
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/models.py#L93-L112
| null |
class TermsAndConditions(models.Model):
"""Holds Versions of TermsAndConditions
Active one for a given slug is: date_active is not Null and is latest not in future"""
slug = models.SlugField(default=DEFAULT_TERMS_SLUG)
name = models.TextField(max_length=255)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through=UserTermsAndConditions, blank=True)
version_number = models.DecimalField(default=1.0, decimal_places=2, max_digits=6)
text = models.TextField(null=True, blank=True)
info = models.TextField(
null=True, blank=True, help_text=_("Provide users with some info about what's changed and why")
)
date_active = models.DateTimeField(blank=True, null=True, help_text=_("Leave Null To Never Make Active"))
date_created = models.DateTimeField(blank=True, auto_now_add=True)
class Meta:
"""Model Meta Information"""
ordering = ['-date_active', ]
get_latest_by = 'date_active'
verbose_name = 'Terms and Conditions'
verbose_name_plural = 'Terms and Conditions'
def __str__(self): # pragma: nocover
return "{0}-{1:.2f}".format(self.slug, self.version_number)
def get_absolute_url(self):
return reverse(
'tc_view_specific_version_page',
args=[self.slug, self.version_number]) # pylint: disable=E1101
@staticmethod
def get_active(slug=DEFAULT_TERMS_SLUG):
"""Finds the latest of a particular terms and conditions"""
active_terms = cache.get('tandc.active_terms_' + slug)
if active_terms is None:
try:
active_terms = TermsAndConditions.objects.filter(
date_active__isnull=False,
date_active__lte=timezone.now(),
slug=slug).latest('date_active')
cache.set('tandc.active_terms_' + slug, active_terms, TERMS_CACHE_SECONDS)
except TermsAndConditions.DoesNotExist: # pragma: nocover
LOGGER.error("Requested Terms and Conditions that Have Not Been Created.")
return None
return active_terms
@staticmethod
@staticmethod
def get_active_terms_list():
"""Returns all the latest active terms and conditions"""
active_terms_list = cache.get('tandc.active_terms_list')
if active_terms_list is None:
active_terms_list = TermsAndConditions.objects.filter(id__in=TermsAndConditions.get_active_terms_ids()).order_by('slug')
cache.set('tandc.active_terms_list', active_terms_list, TERMS_CACHE_SECONDS)
return active_terms_list
@staticmethod
def get_active_terms_not_agreed_to(user):
"""Checks to see if a specified user has agreed to all the latest terms and conditions"""
if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:
# Django's has_perm() returns True if is_superuser, we don't want that
return []
not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())
if not_agreed_terms is None:
try:
LOGGER.debug("Not Agreed Terms")
not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(
userterms__in=UserTermsAndConditions.objects.filter(user=user)
).order_by('slug')
cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)
except (TypeError, UserTermsAndConditions.DoesNotExist):
return []
return not_agreed_terms
|
cyface/django-termsandconditions
|
termsandconditions/models.py
|
TermsAndConditions.get_active_terms_list
|
python
|
def get_active_terms_list():
active_terms_list = cache.get('tandc.active_terms_list')
if active_terms_list is None:
active_terms_list = TermsAndConditions.objects.filter(id__in=TermsAndConditions.get_active_terms_ids()).order_by('slug')
cache.set('tandc.active_terms_list', active_terms_list, TERMS_CACHE_SECONDS)
return active_terms_list
|
Returns all the latest active terms and conditions
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/models.py#L115-L123
|
[
"def get_active_terms_ids():\n \"\"\"Returns a list of the IDs of of all terms and conditions\"\"\"\n\n active_terms_ids = cache.get('tandc.active_terms_ids')\n if active_terms_ids is None:\n active_terms_dict = {}\n active_terms_ids = []\n\n active_terms_set = TermsAndConditions.objects.filter(date_active__isnull=False, date_active__lte=timezone.now()).order_by('date_active')\n for active_terms in active_terms_set:\n active_terms_dict[active_terms.slug] = active_terms.id\n\n active_terms_dict = OrderedDict(sorted(active_terms_dict.items(), key=lambda t: t[0]))\n\n for terms in active_terms_dict:\n active_terms_ids.append(active_terms_dict[terms])\n\n cache.set('tandc.active_terms_ids', active_terms_ids, TERMS_CACHE_SECONDS)\n\n return active_terms_ids\n"
] |
class TermsAndConditions(models.Model):
"""Holds Versions of TermsAndConditions
Active one for a given slug is: date_active is not Null and is latest not in future"""
slug = models.SlugField(default=DEFAULT_TERMS_SLUG)
name = models.TextField(max_length=255)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through=UserTermsAndConditions, blank=True)
version_number = models.DecimalField(default=1.0, decimal_places=2, max_digits=6)
text = models.TextField(null=True, blank=True)
info = models.TextField(
null=True, blank=True, help_text=_("Provide users with some info about what's changed and why")
)
date_active = models.DateTimeField(blank=True, null=True, help_text=_("Leave Null To Never Make Active"))
date_created = models.DateTimeField(blank=True, auto_now_add=True)
class Meta:
"""Model Meta Information"""
ordering = ['-date_active', ]
get_latest_by = 'date_active'
verbose_name = 'Terms and Conditions'
verbose_name_plural = 'Terms and Conditions'
def __str__(self): # pragma: nocover
return "{0}-{1:.2f}".format(self.slug, self.version_number)
def get_absolute_url(self):
return reverse(
'tc_view_specific_version_page',
args=[self.slug, self.version_number]) # pylint: disable=E1101
@staticmethod
def get_active(slug=DEFAULT_TERMS_SLUG):
"""Finds the latest of a particular terms and conditions"""
active_terms = cache.get('tandc.active_terms_' + slug)
if active_terms is None:
try:
active_terms = TermsAndConditions.objects.filter(
date_active__isnull=False,
date_active__lte=timezone.now(),
slug=slug).latest('date_active')
cache.set('tandc.active_terms_' + slug, active_terms, TERMS_CACHE_SECONDS)
except TermsAndConditions.DoesNotExist: # pragma: nocover
LOGGER.error("Requested Terms and Conditions that Have Not Been Created.")
return None
return active_terms
@staticmethod
def get_active_terms_ids():
"""Returns a list of the IDs of of all terms and conditions"""
active_terms_ids = cache.get('tandc.active_terms_ids')
if active_terms_ids is None:
active_terms_dict = {}
active_terms_ids = []
active_terms_set = TermsAndConditions.objects.filter(date_active__isnull=False, date_active__lte=timezone.now()).order_by('date_active')
for active_terms in active_terms_set:
active_terms_dict[active_terms.slug] = active_terms.id
active_terms_dict = OrderedDict(sorted(active_terms_dict.items(), key=lambda t: t[0]))
for terms in active_terms_dict:
active_terms_ids.append(active_terms_dict[terms])
cache.set('tandc.active_terms_ids', active_terms_ids, TERMS_CACHE_SECONDS)
return active_terms_ids
@staticmethod
@staticmethod
def get_active_terms_not_agreed_to(user):
"""Checks to see if a specified user has agreed to all the latest terms and conditions"""
if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:
# Django's has_perm() returns True if is_superuser, we don't want that
return []
not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())
if not_agreed_terms is None:
try:
LOGGER.debug("Not Agreed Terms")
not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(
userterms__in=UserTermsAndConditions.objects.filter(user=user)
).order_by('slug')
cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)
except (TypeError, UserTermsAndConditions.DoesNotExist):
return []
return not_agreed_terms
|
cyface/django-termsandconditions
|
termsandconditions/models.py
|
TermsAndConditions.get_active_terms_not_agreed_to
|
python
|
def get_active_terms_not_agreed_to(user):
if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:
# Django's has_perm() returns True if is_superuser, we don't want that
return []
not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())
if not_agreed_terms is None:
try:
LOGGER.debug("Not Agreed Terms")
not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(
userterms__in=UserTermsAndConditions.objects.filter(user=user)
).order_by('slug')
cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)
except (TypeError, UserTermsAndConditions.DoesNotExist):
return []
return not_agreed_terms
|
Checks to see if a specified user has agreed to all the latest terms and conditions
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/models.py#L126-L146
|
[
"def get_active_terms_list():\n \"\"\"Returns all the latest active terms and conditions\"\"\"\n\n active_terms_list = cache.get('tandc.active_terms_list')\n if active_terms_list is None:\n active_terms_list = TermsAndConditions.objects.filter(id__in=TermsAndConditions.get_active_terms_ids()).order_by('slug')\n cache.set('tandc.active_terms_list', active_terms_list, TERMS_CACHE_SECONDS)\n\n return active_terms_list\n"
] |
class TermsAndConditions(models.Model):
"""Holds Versions of TermsAndConditions
Active one for a given slug is: date_active is not Null and is latest not in future"""
slug = models.SlugField(default=DEFAULT_TERMS_SLUG)
name = models.TextField(max_length=255)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through=UserTermsAndConditions, blank=True)
version_number = models.DecimalField(default=1.0, decimal_places=2, max_digits=6)
text = models.TextField(null=True, blank=True)
info = models.TextField(
null=True, blank=True, help_text=_("Provide users with some info about what's changed and why")
)
date_active = models.DateTimeField(blank=True, null=True, help_text=_("Leave Null To Never Make Active"))
date_created = models.DateTimeField(blank=True, auto_now_add=True)
class Meta:
"""Model Meta Information"""
ordering = ['-date_active', ]
get_latest_by = 'date_active'
verbose_name = 'Terms and Conditions'
verbose_name_plural = 'Terms and Conditions'
def __str__(self): # pragma: nocover
return "{0}-{1:.2f}".format(self.slug, self.version_number)
def get_absolute_url(self):
return reverse(
'tc_view_specific_version_page',
args=[self.slug, self.version_number]) # pylint: disable=E1101
@staticmethod
def get_active(slug=DEFAULT_TERMS_SLUG):
"""Finds the latest of a particular terms and conditions"""
active_terms = cache.get('tandc.active_terms_' + slug)
if active_terms is None:
try:
active_terms = TermsAndConditions.objects.filter(
date_active__isnull=False,
date_active__lte=timezone.now(),
slug=slug).latest('date_active')
cache.set('tandc.active_terms_' + slug, active_terms, TERMS_CACHE_SECONDS)
except TermsAndConditions.DoesNotExist: # pragma: nocover
LOGGER.error("Requested Terms and Conditions that Have Not Been Created.")
return None
return active_terms
@staticmethod
def get_active_terms_ids():
"""Returns a list of the IDs of of all terms and conditions"""
active_terms_ids = cache.get('tandc.active_terms_ids')
if active_terms_ids is None:
active_terms_dict = {}
active_terms_ids = []
active_terms_set = TermsAndConditions.objects.filter(date_active__isnull=False, date_active__lte=timezone.now()).order_by('date_active')
for active_terms in active_terms_set:
active_terms_dict[active_terms.slug] = active_terms.id
active_terms_dict = OrderedDict(sorted(active_terms_dict.items(), key=lambda t: t[0]))
for terms in active_terms_dict:
active_terms_ids.append(active_terms_dict[terms])
cache.set('tandc.active_terms_ids', active_terms_ids, TERMS_CACHE_SECONDS)
return active_terms_ids
@staticmethod
def get_active_terms_list():
"""Returns all the latest active terms and conditions"""
active_terms_list = cache.get('tandc.active_terms_list')
if active_terms_list is None:
active_terms_list = TermsAndConditions.objects.filter(id__in=TermsAndConditions.get_active_terms_ids()).order_by('slug')
cache.set('tandc.active_terms_list', active_terms_list, TERMS_CACHE_SECONDS)
return active_terms_list
@staticmethod
|
cyface/django-termsandconditions
|
termsandconditions/templatetags/terms_tags.py
|
show_terms_if_not_agreed
|
python
|
def show_terms_if_not_agreed(context, field=TERMS_HTTP_PATH_FIELD):
request = context['request']
url = urlparse(request.META[field])
not_agreed_terms = TermsAndConditions.get_active_terms_not_agreed_to(request.user)
if not_agreed_terms and is_path_protected(url.path):
return {'not_agreed_terms': not_agreed_terms, 'returnTo': url.path}
else:
return {}
|
Displays a modal on a current page if a user has not yet agreed to the
given terms. If terms are not specified, the default slug is used.
A small snippet is included into your template if a user
who requested the view has not yet agreed the terms. The snippet takes
care of displaying a respective modal.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/templatetags/terms_tags.py#L15-L30
|
[
"def is_path_protected(path):\n \"\"\"\n returns True if given path is to be protected, otherwise False\n\n The path is not to be protected when it appears on:\n TERMS_EXCLUDE_URL_PREFIX_LIST, TERMS_EXCLUDE_URL_LIST, TERMS_EXCLUDE_URL_CONTAINS_LIST or as\n ACCEPT_TERMS_PATH\n \"\"\"\n protected = True\n\n for exclude_path in TERMS_EXCLUDE_URL_PREFIX_LIST:\n if path.startswith(exclude_path):\n protected = False\n\n for contains_path in TERMS_EXCLUDE_URL_CONTAINS_LIST:\n if contains_path in path:\n protected = False\n\n if path in TERMS_EXCLUDE_URL_LIST:\n protected = False\n\n if path.startswith(ACCEPT_TERMS_PATH):\n protected = False\n\n\n return protected\n",
"def get_active_terms_not_agreed_to(user):\n \"\"\"Checks to see if a specified user has agreed to all the latest terms and conditions\"\"\"\n\n if TERMS_EXCLUDE_USERS_WITH_PERM is not None:\n if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:\n # Django's has_perm() returns True if is_superuser, we don't want that\n return []\n\n not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())\n if not_agreed_terms is None:\n try:\n LOGGER.debug(\"Not Agreed Terms\")\n not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(\n userterms__in=UserTermsAndConditions.objects.filter(user=user)\n ).order_by('slug')\n\n cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)\n except (TypeError, UserTermsAndConditions.DoesNotExist):\n return []\n\n return not_agreed_terms\n"
] |
"""Django Tags"""
from django import template
from ..models import TermsAndConditions
from ..middleware import is_path_protected
from django.conf import settings
from future.moves.urllib.parse import urlparse
register = template.Library()
DEFAULT_HTTP_PATH_FIELD = 'PATH_INFO'
TERMS_HTTP_PATH_FIELD = getattr(settings, 'TERMS_HTTP_PATH_FIELD', DEFAULT_HTTP_PATH_FIELD)
@register.inclusion_tag('termsandconditions/snippets/termsandconditions.html',
takes_context=True)
@register.filter
def as_template(obj):
"""Converts objects to a Template instance
This is useful in cases when a template variable contains html-like text,
which includes also django template language tags and should be rendered.
For instance, in case of termsandconditions object, its text field may
include a string such as `<a href="{% url 'my-url' %}">My url</a>`,
which should be properly rendered.
To achieve this goal, one can use template `include` with `as_template`
filter:
...
{% include your_variable|as_template %}
...
"""
return template.Template(obj)
|
cyface/django-termsandconditions
|
termsandconditions/pipeline.py
|
user_accept_terms
|
python
|
def user_accept_terms(backend, user, uid, social_user=None, *args, **kwargs):
LOGGER.debug('user_accept_terms')
if TermsAndConditions.get_active_terms_not_agreed_to(user):
return redirect_to_terms_accept('/')
else:
return {'social_user': social_user, 'user': user}
|
Check if the user has accepted the terms and conditions after creation.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/pipeline.py#L17-L25
|
[
"def redirect_to_terms_accept(current_path='/', slug='default'):\n \"\"\"Redirect the user to the terms and conditions accept page.\"\"\"\n redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))\n if slug != 'default':\n redirect_url_parts[2] += slug\n querystring = QueryDict(redirect_url_parts[4], mutable=True)\n querystring[TERMS_RETURNTO_PARAM] = current_path\n redirect_url_parts[4] = querystring.urlencode(safe='/')\n return HttpResponseRedirect(urlunparse(redirect_url_parts))\n",
"def get_active_terms_not_agreed_to(user):\n \"\"\"Checks to see if a specified user has agreed to all the latest terms and conditions\"\"\"\n\n if TERMS_EXCLUDE_USERS_WITH_PERM is not None:\n if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:\n # Django's has_perm() returns True if is_superuser, we don't want that\n return []\n\n not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())\n if not_agreed_terms is None:\n try:\n LOGGER.debug(\"Not Agreed Terms\")\n not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(\n userterms__in=UserTermsAndConditions.objects.filter(user=user)\n ).order_by('slug')\n\n cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)\n except (TypeError, UserTermsAndConditions.DoesNotExist):\n return []\n\n return not_agreed_terms\n"
] |
"""This file contains functions used as part of a user creation pipeline, such as django-social-auth."""
# pylint: disable=W0613
from future.moves.urllib.parse import urlparse, urlunparse
from .models import TermsAndConditions
from django.http import HttpResponseRedirect, QueryDict
from django.conf import settings
import logging
ACCEPT_TERMS_PATH = getattr(settings, 'ACCEPT_TERMS_PATH', '/terms/accept/')
TERMS_RETURNTO_PARAM = getattr(settings, 'TERMS_RETURNTO_PARAM', 'returnTo')
LOGGER = logging.getLogger(name='termsandconditions')
def redirect_to_terms_accept(current_path='/', slug='default'):
"""Redirect the user to the terms and conditions accept page."""
redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
if slug != 'default':
redirect_url_parts[2] += slug
querystring = QueryDict(redirect_url_parts[4], mutable=True)
querystring[TERMS_RETURNTO_PARAM] = current_path
redirect_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(redirect_url_parts))
|
cyface/django-termsandconditions
|
termsandconditions/pipeline.py
|
redirect_to_terms_accept
|
python
|
def redirect_to_terms_accept(current_path='/', slug='default'):
redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
if slug != 'default':
redirect_url_parts[2] += slug
querystring = QueryDict(redirect_url_parts[4], mutable=True)
querystring[TERMS_RETURNTO_PARAM] = current_path
redirect_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(redirect_url_parts))
|
Redirect the user to the terms and conditions accept page.
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/pipeline.py#L28-L36
| null |
"""This file contains functions used as part of a user creation pipeline, such as django-social-auth."""
# pylint: disable=W0613
from future.moves.urllib.parse import urlparse, urlunparse
from .models import TermsAndConditions
from django.http import HttpResponseRedirect, QueryDict
from django.conf import settings
import logging
ACCEPT_TERMS_PATH = getattr(settings, 'ACCEPT_TERMS_PATH', '/terms/accept/')
TERMS_RETURNTO_PARAM = getattr(settings, 'TERMS_RETURNTO_PARAM', 'returnTo')
LOGGER = logging.getLogger(name='termsandconditions')
def user_accept_terms(backend, user, uid, social_user=None, *args, **kwargs):
"""Check if the user has accepted the terms and conditions after creation."""
LOGGER.debug('user_accept_terms')
if TermsAndConditions.get_active_terms_not_agreed_to(user):
return redirect_to_terms_accept('/')
else:
return {'social_user': social_user, 'user': user}
|
cyface/django-termsandconditions
|
termsandconditions/signals.py
|
user_terms_updated
|
python
|
def user_terms_updated(sender, **kwargs):
LOGGER.debug("User T&C Updated Signal Handler")
if kwargs.get('instance').user:
cache.delete('tandc.not_agreed_terms_' + kwargs.get('instance').user.get_username())
|
Called when user terms and conditions is changed - to force cache clearing
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/signals.py#L15-L19
| null |
""" Signals for Django """
# pylint: disable=C1001,E0202,W0613
import logging
from django.core.cache import cache
from django.dispatch import receiver
from .models import TermsAndConditions, UserTermsAndConditions
from django.db.models.signals import post_delete, post_save
LOGGER = logging.getLogger(name='termsandconditions')
@receiver([post_delete, post_save], sender=UserTermsAndConditions)
@receiver([post_delete, post_save], sender=TermsAndConditions)
def terms_updated(sender, **kwargs):
"""Called when terms and conditions is changed - to force cache clearing"""
LOGGER.debug("T&C Updated Signal Handler")
cache.delete('tandc.active_terms_ids')
cache.delete('tandc.active_terms_list')
if kwargs.get('instance').slug:
cache.delete('tandc.active_terms_' + kwargs.get('instance').slug)
for utandc in UserTermsAndConditions.objects.all():
cache.delete('tandc.not_agreed_terms_' + utandc.user.get_username())
|
cyface/django-termsandconditions
|
termsandconditions/signals.py
|
terms_updated
|
python
|
def terms_updated(sender, **kwargs):
LOGGER.debug("T&C Updated Signal Handler")
cache.delete('tandc.active_terms_ids')
cache.delete('tandc.active_terms_list')
if kwargs.get('instance').slug:
cache.delete('tandc.active_terms_' + kwargs.get('instance').slug)
for utandc in UserTermsAndConditions.objects.all():
cache.delete('tandc.not_agreed_terms_' + utandc.user.get_username())
|
Called when terms and conditions is changed - to force cache clearing
|
train
|
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/signals.py#L23-L31
| null |
""" Signals for Django """
# pylint: disable=C1001,E0202,W0613
import logging
from django.core.cache import cache
from django.dispatch import receiver
from .models import TermsAndConditions, UserTermsAndConditions
from django.db.models.signals import post_delete, post_save
LOGGER = logging.getLogger(name='termsandconditions')
@receiver([post_delete, post_save], sender=UserTermsAndConditions)
def user_terms_updated(sender, **kwargs):
"""Called when user terms and conditions is changed - to force cache clearing"""
LOGGER.debug("User T&C Updated Signal Handler")
if kwargs.get('instance').user:
cache.delete('tandc.not_agreed_terms_' + kwargs.get('instance').user.get_username())
@receiver([post_delete, post_save], sender=TermsAndConditions)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/kdbx4.py
|
compute_transformed
|
python
|
def compute_transformed(context):
key_composite = compute_key_composite(
password=context._._.password,
keyfile=context._._.keyfile
)
kdf_parameters = context._.header.value.dynamic_header.kdf_parameters.data.dict
if context._._.transformed_key is not None:
transformed_key = context._._.transformed_key
elif kdf_parameters['$UUID'].value == kdf_uuids['argon2']:
transformed_key = argon2.low_level.hash_secret_raw(
secret=key_composite,
salt=kdf_parameters['S'].value,
hash_len=32,
type=argon2.low_level.Type.D,
time_cost=kdf_parameters['I'].value,
memory_cost=kdf_parameters['M'].value // 1024,
parallelism=kdf_parameters['P'].value,
version=kdf_parameters['V'].value
)
elif kdf_parameters['$UUID'].value == kdf_uuids['aeskdf']:
transformed_key = aes_kdf(
kdf_parameters['S'].value,
kdf_parameters['R'].value,
key_composite
)
else:
raise Exception('Unsupported key derivation method')
return transformed_key
|
Compute transformed key for opening database
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L31-L62
|
[
"def aes_kdf(key, rounds, password=None, keyfile=None):\n \"\"\"Set up a context for AES128-ECB encryption to find transformed_key\"\"\"\n\n cipher = AES.new(key, AES.MODE_ECB)\n key_composite = compute_key_composite(\n password=password,\n keyfile=keyfile\n )\n\n # get the number of rounds from the header and transform the key_composite\n transformed_key = key_composite\n for _ in range(0, rounds):\n transformed_key = cipher.encrypt(transformed_key)\n\n return hashlib.sha256(transformed_key).digest()\n",
"def compute_key_composite(password=None, keyfile=None):\n \"\"\"Compute composite key.\n Used in header verification and payload decryption.\"\"\"\n\n # hash the password\n if password:\n password_composite = hashlib.sha256(password.encode('utf-8')).digest()\n else:\n password_composite = b''\n # hash the keyfile\n if keyfile:\n # try to read XML keyfile\n try:\n with open(keyfile, 'r') as f:\n tree = etree.parse(f).getroot()\n keyfile_composite = base64.b64decode(tree.find('Key/Data').text)\n # otherwise, try to read plain keyfile\n except (etree.XMLSyntaxError, UnicodeDecodeError):\n try:\n with open(keyfile, 'rb') as f:\n key = f.read()\n\n try:\n int(key, 16)\n is_hex = True\n except ValueError:\n is_hex = False\n # if the length is 32 bytes we assume it is the key\n if len(key) == 32:\n keyfile_composite = key\n # if the length is 64 bytes we assume the key is hex encoded\n elif len(key) == 64 and is_hex:\n keyfile_composite = codecs.decode(key, 'hex')\n # anything else may be a file to hash for the key\n else:\n keyfile_composite = hashlib.sha256(key).digest()\n except:\n raise IOError('Could not read keyfile')\n\n else:\n keyfile_composite = b''\n\n # create composite key from password and keyfile composites\n return hashlib.sha256(password_composite + keyfile_composite).digest()\n"
] |
#!/bin/env python3
# Evan Widloski - 2018-04-11
# keepass decrypt experimentation
import struct
import hashlib
import argon2
import hmac
from construct import (
Byte, Bytes, Int32ul, RepeatUntil, GreedyBytes, Struct, this, Mapping,
Switch, Flag, Prefixed, Int64ul, Int32sl, Int64sl, GreedyString, Padding,
Peek, Checksum, Computed, IfThenElse, Pointer, Tell
)
from .common import (
aes_kdf, Concatenated, AES256Payload, ChaCha20Payload, TwoFishPayload,
DynamicDict, compute_key_composite, Reparsed, Decompressed,
compute_master, CompressionFlags, HeaderChecksumError, CredentialsError,
PayloadChecksumError, XML, CipherId, ProtectedStreamId,
ARCFourVariantStream, Salsa20Stream, ChaCha20Stream, Unprotect
)
# -------------------- Key Derivation --------------------
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
kdf_uuids = {
'argon2': b'\xefcm\xdf\x8c)DK\x91\xf7\xa9\xa4\x03\xe3\n\x0c',
'aeskdf': b'\xc9\xd9\xf3\x9ab\x8aD`\xbft\r\x08\xc1\x8aO\xea',
}
def compute_header_hmac_hash(context):
"""Compute HMAC-SHA256 hash of header.
Used to prevent header tampering."""
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest()
#--------------- KDF Params / Plugin Data ----------------
VariantDictionaryItem = Struct(
"type" / Byte,
"key" / Prefixed(Int32ul, GreedyString('utf-8')),
"value" / Prefixed(
Int32ul,
Switch(
this.type,
{0x04: Int32ul,
0x05: Int64ul,
0x08: Flag,
0x0C: Int32sl,
0x0D: Int64sl,
0x42: GreedyBytes,
0x18: GreedyString('utf-8')
}
)
),
"next_byte" / Peek(Byte)
)
# new dynamic dictionary structure added in KDBX4
VariantDictionary = Struct(
"version" / Bytes(2),
"dict" / DynamicDict(
'key',
RepeatUntil(
lambda item,a,b: item.next_byte == 0x00,
VariantDictionaryItem
)
),
Padding(1) * "null padding"
)
# -------------------- Dynamic Header --------------------
# https://github.com/dlech/KeePass2.x/blob/dbb9d60095ef39e6abc95d708fb7d03ce5ae865e/KeePassLib/Serialization/KdbxFile.cs#L234-L246
DynamicHeaderItem = Struct(
"id" / Mapping(
Byte,
{'end': 0,
'comment': 1,
'cipher_id': 2,
'compression_flags': 3,
'master_seed': 4,
'encryption_iv': 7,
'kdf_parameters': 11,
'public_custom_data': 12
}
),
"data" / Prefixed(
Int32ul,
Switch(
this.id,
{'compression_flags': CompressionFlags,
'kdf_parameters': VariantDictionary,
'cipher_id': CipherId
},
default=GreedyBytes
)
)
)
DynamicHeader = DynamicDict(
'id',
RepeatUntil(
lambda item, a, b: item.id == 'end',
DynamicHeaderItem
)
)
# -------------------- Payload Verification --------------------
def compute_payload_block_hash(this):
"""Compute hash of each payload block.
Used to prevent payload corruption and tampering."""
return hmac.new(
hashlib.sha512(
struct.pack('<Q', this._index) +
hashlib.sha512(
this._._.header.value.dynamic_header.master_seed.data +
this._.transformed_key + b'\x01'
).digest()
).digest(),
struct.pack('<Q', this._index) +
struct.pack('<I', len(this.block_data)) +
this.block_data, hashlib.sha256
).digest()
# -------------------- Payload Decryption/Decompression --------------------
# encrypted payload is split into multiple data blocks with hashes
EncryptedPayloadBlock = Struct(
"hmac_hash_offset" / Tell,
Padding(32),
"block_data" / Prefixed(Int32ul, GreedyBytes),
# hmac_hash has to be at the end with a pointer because it needs to
# come after other fields
"hmac_hash" / Pointer(
this.hmac_hash_offset,
Checksum(
Bytes(32),
compute_payload_block_hash,
this,
# exception=PayloadChecksumError
)
)
)
EncryptedPayload = Concatenated(RepeatUntil(
lambda item, a, b: len(item.block_data) == 0,
EncryptedPayloadBlock
))
DecryptedPayload = Switch(
this._.header.value.dynamic_header.cipher_id.data,
{'aes256': AES256Payload(EncryptedPayload),
'chacha20': ChaCha20Payload(EncryptedPayload),
'twofish': TwoFishPayload(EncryptedPayload)
}
)
InnerHeaderItem = Struct(
"type" / Mapping(
Byte,
{'end': 0x00,
'protected_stream_id': 0x01,
'protected_stream_key': 0x02,
'binary': 0x03
}
),
"data" / Prefixed(
Int32ul,
Switch(
this.type,
{'protected_stream_id': ProtectedStreamId},
default=GreedyBytes
)
)
)
# another binary header inside decrypted and decompressed Payload
InnerHeader = DynamicDict(
'type',
RepeatUntil(lambda item,a,b: item.type == 'end', InnerHeaderItem),
#FIXME - this is a hack because inner header is not truly a dict,
# it has multiple binary elements.
lump=['binary']
)
UnpackedPayload = Reparsed(
Struct(
"inner_header" / InnerHeader,
"xml" / Unprotect(
this.inner_header.protected_stream_id.data,
this.inner_header.protected_stream_key.data,
XML(GreedyBytes)
)
)
)
# -------------------- Main KDBX Structure --------------------
Body = Struct(
"transformed_key" / Computed(compute_transformed),
"master_key" / Computed(compute_master),
"sha256" / Checksum(
Bytes(32),
lambda data: hashlib.sha256(data).digest(),
this._.header.data,
# exception=HeaderChecksumError,
),
"hmac" / Checksum(
Bytes(32),
compute_header_hmac_hash,
this,
# exception=CredentialsError,
),
"payload" / UnpackedPayload(
IfThenElse(
this._.header.value.dynamic_header.compression_flags.data.compression,
Decompressed(DecryptedPayload),
DecryptedPayload
)
)
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/kdbx4.py
|
compute_header_hmac_hash
|
python
|
def compute_header_hmac_hash(context):
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest()
|
Compute HMAC-SHA256 hash of header.
Used to prevent header tampering.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L64-L79
| null |
#!/bin/env python3
# Evan Widloski - 2018-04-11
# keepass decrypt experimentation
import struct
import hashlib
import argon2
import hmac
from construct import (
Byte, Bytes, Int32ul, RepeatUntil, GreedyBytes, Struct, this, Mapping,
Switch, Flag, Prefixed, Int64ul, Int32sl, Int64sl, GreedyString, Padding,
Peek, Checksum, Computed, IfThenElse, Pointer, Tell
)
from .common import (
aes_kdf, Concatenated, AES256Payload, ChaCha20Payload, TwoFishPayload,
DynamicDict, compute_key_composite, Reparsed, Decompressed,
compute_master, CompressionFlags, HeaderChecksumError, CredentialsError,
PayloadChecksumError, XML, CipherId, ProtectedStreamId,
ARCFourVariantStream, Salsa20Stream, ChaCha20Stream, Unprotect
)
# -------------------- Key Derivation --------------------
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
kdf_uuids = {
'argon2': b'\xefcm\xdf\x8c)DK\x91\xf7\xa9\xa4\x03\xe3\n\x0c',
'aeskdf': b'\xc9\xd9\xf3\x9ab\x8aD`\xbft\r\x08\xc1\x8aO\xea',
}
def compute_transformed(context):
"""Compute transformed key for opening database"""
key_composite = compute_key_composite(
password=context._._.password,
keyfile=context._._.keyfile
)
kdf_parameters = context._.header.value.dynamic_header.kdf_parameters.data.dict
if context._._.transformed_key is not None:
transformed_key = context._._.transformed_key
elif kdf_parameters['$UUID'].value == kdf_uuids['argon2']:
transformed_key = argon2.low_level.hash_secret_raw(
secret=key_composite,
salt=kdf_parameters['S'].value,
hash_len=32,
type=argon2.low_level.Type.D,
time_cost=kdf_parameters['I'].value,
memory_cost=kdf_parameters['M'].value // 1024,
parallelism=kdf_parameters['P'].value,
version=kdf_parameters['V'].value
)
elif kdf_parameters['$UUID'].value == kdf_uuids['aeskdf']:
transformed_key = aes_kdf(
kdf_parameters['S'].value,
kdf_parameters['R'].value,
key_composite
)
else:
raise Exception('Unsupported key derivation method')
return transformed_key
#--------------- KDF Params / Plugin Data ----------------
VariantDictionaryItem = Struct(
"type" / Byte,
"key" / Prefixed(Int32ul, GreedyString('utf-8')),
"value" / Prefixed(
Int32ul,
Switch(
this.type,
{0x04: Int32ul,
0x05: Int64ul,
0x08: Flag,
0x0C: Int32sl,
0x0D: Int64sl,
0x42: GreedyBytes,
0x18: GreedyString('utf-8')
}
)
),
"next_byte" / Peek(Byte)
)
# new dynamic dictionary structure added in KDBX4
VariantDictionary = Struct(
"version" / Bytes(2),
"dict" / DynamicDict(
'key',
RepeatUntil(
lambda item,a,b: item.next_byte == 0x00,
VariantDictionaryItem
)
),
Padding(1) * "null padding"
)
# -------------------- Dynamic Header --------------------
# https://github.com/dlech/KeePass2.x/blob/dbb9d60095ef39e6abc95d708fb7d03ce5ae865e/KeePassLib/Serialization/KdbxFile.cs#L234-L246
DynamicHeaderItem = Struct(
"id" / Mapping(
Byte,
{'end': 0,
'comment': 1,
'cipher_id': 2,
'compression_flags': 3,
'master_seed': 4,
'encryption_iv': 7,
'kdf_parameters': 11,
'public_custom_data': 12
}
),
"data" / Prefixed(
Int32ul,
Switch(
this.id,
{'compression_flags': CompressionFlags,
'kdf_parameters': VariantDictionary,
'cipher_id': CipherId
},
default=GreedyBytes
)
)
)
DynamicHeader = DynamicDict(
'id',
RepeatUntil(
lambda item, a, b: item.id == 'end',
DynamicHeaderItem
)
)
# -------------------- Payload Verification --------------------
def compute_payload_block_hash(this):
"""Compute hash of each payload block.
Used to prevent payload corruption and tampering."""
return hmac.new(
hashlib.sha512(
struct.pack('<Q', this._index) +
hashlib.sha512(
this._._.header.value.dynamic_header.master_seed.data +
this._.transformed_key + b'\x01'
).digest()
).digest(),
struct.pack('<Q', this._index) +
struct.pack('<I', len(this.block_data)) +
this.block_data, hashlib.sha256
).digest()
# -------------------- Payload Decryption/Decompression --------------------
# encrypted payload is split into multiple data blocks with hashes
EncryptedPayloadBlock = Struct(
"hmac_hash_offset" / Tell,
Padding(32),
"block_data" / Prefixed(Int32ul, GreedyBytes),
# hmac_hash has to be at the end with a pointer because it needs to
# come after other fields
"hmac_hash" / Pointer(
this.hmac_hash_offset,
Checksum(
Bytes(32),
compute_payload_block_hash,
this,
# exception=PayloadChecksumError
)
)
)
EncryptedPayload = Concatenated(RepeatUntil(
lambda item, a, b: len(item.block_data) == 0,
EncryptedPayloadBlock
))
DecryptedPayload = Switch(
this._.header.value.dynamic_header.cipher_id.data,
{'aes256': AES256Payload(EncryptedPayload),
'chacha20': ChaCha20Payload(EncryptedPayload),
'twofish': TwoFishPayload(EncryptedPayload)
}
)
InnerHeaderItem = Struct(
"type" / Mapping(
Byte,
{'end': 0x00,
'protected_stream_id': 0x01,
'protected_stream_key': 0x02,
'binary': 0x03
}
),
"data" / Prefixed(
Int32ul,
Switch(
this.type,
{'protected_stream_id': ProtectedStreamId},
default=GreedyBytes
)
)
)
# another binary header inside decrypted and decompressed Payload
InnerHeader = DynamicDict(
'type',
RepeatUntil(lambda item,a,b: item.type == 'end', InnerHeaderItem),
#FIXME - this is a hack because inner header is not truly a dict,
# it has multiple binary elements.
lump=['binary']
)
UnpackedPayload = Reparsed(
Struct(
"inner_header" / InnerHeader,
"xml" / Unprotect(
this.inner_header.protected_stream_id.data,
this.inner_header.protected_stream_key.data,
XML(GreedyBytes)
)
)
)
# -------------------- Main KDBX Structure --------------------
Body = Struct(
"transformed_key" / Computed(compute_transformed),
"master_key" / Computed(compute_master),
"sha256" / Checksum(
Bytes(32),
lambda data: hashlib.sha256(data).digest(),
this._.header.data,
# exception=HeaderChecksumError,
),
"hmac" / Checksum(
Bytes(32),
compute_header_hmac_hash,
this,
# exception=CredentialsError,
),
"payload" / UnpackedPayload(
IfThenElse(
this._.header.value.dynamic_header.compression_flags.data.compression,
Decompressed(DecryptedPayload),
DecryptedPayload
)
)
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/kdbx4.py
|
compute_payload_block_hash
|
python
|
def compute_payload_block_hash(this):
return hmac.new(
hashlib.sha512(
struct.pack('<Q', this._index) +
hashlib.sha512(
this._._.header.value.dynamic_header.master_seed.data +
this._.transformed_key + b'\x01'
).digest()
).digest(),
struct.pack('<Q', this._index) +
struct.pack('<I', len(this.block_data)) +
this.block_data, hashlib.sha256
).digest()
|
Compute hash of each payload block.
Used to prevent payload corruption and tampering.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L156-L171
| null |
#!/bin/env python3
# Evan Widloski - 2018-04-11
# keepass decrypt experimentation
import struct
import hashlib
import argon2
import hmac
from construct import (
Byte, Bytes, Int32ul, RepeatUntil, GreedyBytes, Struct, this, Mapping,
Switch, Flag, Prefixed, Int64ul, Int32sl, Int64sl, GreedyString, Padding,
Peek, Checksum, Computed, IfThenElse, Pointer, Tell
)
from .common import (
aes_kdf, Concatenated, AES256Payload, ChaCha20Payload, TwoFishPayload,
DynamicDict, compute_key_composite, Reparsed, Decompressed,
compute_master, CompressionFlags, HeaderChecksumError, CredentialsError,
PayloadChecksumError, XML, CipherId, ProtectedStreamId,
ARCFourVariantStream, Salsa20Stream, ChaCha20Stream, Unprotect
)
# -------------------- Key Derivation --------------------
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
kdf_uuids = {
'argon2': b'\xefcm\xdf\x8c)DK\x91\xf7\xa9\xa4\x03\xe3\n\x0c',
'aeskdf': b'\xc9\xd9\xf3\x9ab\x8aD`\xbft\r\x08\xc1\x8aO\xea',
}
def compute_transformed(context):
"""Compute transformed key for opening database"""
key_composite = compute_key_composite(
password=context._._.password,
keyfile=context._._.keyfile
)
kdf_parameters = context._.header.value.dynamic_header.kdf_parameters.data.dict
if context._._.transformed_key is not None:
transformed_key = context._._.transformed_key
elif kdf_parameters['$UUID'].value == kdf_uuids['argon2']:
transformed_key = argon2.low_level.hash_secret_raw(
secret=key_composite,
salt=kdf_parameters['S'].value,
hash_len=32,
type=argon2.low_level.Type.D,
time_cost=kdf_parameters['I'].value,
memory_cost=kdf_parameters['M'].value // 1024,
parallelism=kdf_parameters['P'].value,
version=kdf_parameters['V'].value
)
elif kdf_parameters['$UUID'].value == kdf_uuids['aeskdf']:
transformed_key = aes_kdf(
kdf_parameters['S'].value,
kdf_parameters['R'].value,
key_composite
)
else:
raise Exception('Unsupported key derivation method')
return transformed_key
def compute_header_hmac_hash(context):
"""Compute HMAC-SHA256 hash of header.
Used to prevent header tampering."""
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest()
#--------------- KDF Params / Plugin Data ----------------
VariantDictionaryItem = Struct(
"type" / Byte,
"key" / Prefixed(Int32ul, GreedyString('utf-8')),
"value" / Prefixed(
Int32ul,
Switch(
this.type,
{0x04: Int32ul,
0x05: Int64ul,
0x08: Flag,
0x0C: Int32sl,
0x0D: Int64sl,
0x42: GreedyBytes,
0x18: GreedyString('utf-8')
}
)
),
"next_byte" / Peek(Byte)
)
# new dynamic dictionary structure added in KDBX4
VariantDictionary = Struct(
"version" / Bytes(2),
"dict" / DynamicDict(
'key',
RepeatUntil(
lambda item,a,b: item.next_byte == 0x00,
VariantDictionaryItem
)
),
Padding(1) * "null padding"
)
# -------------------- Dynamic Header --------------------
# https://github.com/dlech/KeePass2.x/blob/dbb9d60095ef39e6abc95d708fb7d03ce5ae865e/KeePassLib/Serialization/KdbxFile.cs#L234-L246
DynamicHeaderItem = Struct(
"id" / Mapping(
Byte,
{'end': 0,
'comment': 1,
'cipher_id': 2,
'compression_flags': 3,
'master_seed': 4,
'encryption_iv': 7,
'kdf_parameters': 11,
'public_custom_data': 12
}
),
"data" / Prefixed(
Int32ul,
Switch(
this.id,
{'compression_flags': CompressionFlags,
'kdf_parameters': VariantDictionary,
'cipher_id': CipherId
},
default=GreedyBytes
)
)
)
DynamicHeader = DynamicDict(
'id',
RepeatUntil(
lambda item, a, b: item.id == 'end',
DynamicHeaderItem
)
)
# -------------------- Payload Verification --------------------
# -------------------- Payload Decryption/Decompression --------------------
# encrypted payload is split into multiple data blocks with hashes
EncryptedPayloadBlock = Struct(
"hmac_hash_offset" / Tell,
Padding(32),
"block_data" / Prefixed(Int32ul, GreedyBytes),
# hmac_hash has to be at the end with a pointer because it needs to
# come after other fields
"hmac_hash" / Pointer(
this.hmac_hash_offset,
Checksum(
Bytes(32),
compute_payload_block_hash,
this,
# exception=PayloadChecksumError
)
)
)
EncryptedPayload = Concatenated(RepeatUntil(
lambda item, a, b: len(item.block_data) == 0,
EncryptedPayloadBlock
))
DecryptedPayload = Switch(
this._.header.value.dynamic_header.cipher_id.data,
{'aes256': AES256Payload(EncryptedPayload),
'chacha20': ChaCha20Payload(EncryptedPayload),
'twofish': TwoFishPayload(EncryptedPayload)
}
)
InnerHeaderItem = Struct(
"type" / Mapping(
Byte,
{'end': 0x00,
'protected_stream_id': 0x01,
'protected_stream_key': 0x02,
'binary': 0x03
}
),
"data" / Prefixed(
Int32ul,
Switch(
this.type,
{'protected_stream_id': ProtectedStreamId},
default=GreedyBytes
)
)
)
# another binary header inside decrypted and decompressed Payload
InnerHeader = DynamicDict(
'type',
RepeatUntil(lambda item,a,b: item.type == 'end', InnerHeaderItem),
#FIXME - this is a hack because inner header is not truly a dict,
# it has multiple binary elements.
lump=['binary']
)
UnpackedPayload = Reparsed(
Struct(
"inner_header" / InnerHeader,
"xml" / Unprotect(
this.inner_header.protected_stream_id.data,
this.inner_header.protected_stream_key.data,
XML(GreedyBytes)
)
)
)
# -------------------- Main KDBX Structure --------------------
Body = Struct(
"transformed_key" / Computed(compute_transformed),
"master_key" / Computed(compute_master),
"sha256" / Checksum(
Bytes(32),
lambda data: hashlib.sha256(data).digest(),
this._.header.data,
# exception=HeaderChecksumError,
),
"hmac" / Checksum(
Bytes(32),
compute_header_hmac_hash,
this,
# exception=CredentialsError,
),
"payload" / UnpackedPayload(
IfThenElse(
this._.header.value.dynamic_header.compression_flags.data.compression,
Decompressed(DecryptedPayload),
DecryptedPayload
)
)
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/kdbx3.py
|
compute_transformed
|
python
|
def compute_transformed(context):
if context._._.transformed_key is not None:
transformed_key = context._._transformed_key
else:
transformed_key = aes_kdf(
context._.header.value.dynamic_header.transform_seed.data,
context._.header.value.dynamic_header.transform_rounds.data,
password=context._._.password,
keyfile=context._._.keyfile
)
return transformed_key
|
Compute transformed key for opening database
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx3.py#L26-L39
|
[
"def aes_kdf(key, rounds, password=None, keyfile=None):\n \"\"\"Set up a context for AES128-ECB encryption to find transformed_key\"\"\"\n\n cipher = AES.new(key, AES.MODE_ECB)\n key_composite = compute_key_composite(\n password=password,\n keyfile=keyfile\n )\n\n # get the number of rounds from the header and transform the key_composite\n transformed_key = key_composite\n for _ in range(0, rounds):\n transformed_key = cipher.encrypt(transformed_key)\n\n return hashlib.sha256(transformed_key).digest()\n"
] |
#!/bin/env python3
# Evan Widloski - 2018-04-11
# keepass decrypt experimentation
import hashlib
from construct import (
Byte, Bytes, Int16ul, Int32ul, RepeatUntil, GreedyBytes, Struct, this,
Mapping, Switch, Prefixed, Padding, Checksum, Computed, IfThenElse,
Pointer, Tell, len_
)
from .common import (
aes_kdf, AES256Payload, ChaCha20Payload, TwoFishPayload, Concatenated,
DynamicDict, compute_key_composite, Decompressed, Reparsed,
compute_master, CompressionFlags, CredentialsError, PayloadChecksumError,
XML, CipherId, ProtectedStreamId, Unprotect
)
# -------------------- Key Derivation --------------------
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
kdf_uuids = {
'aes': b'\xc9\xd9\xf3\x9ab\x8aD`\xbft\r\x08\xc1\x8aO\xea',
}
# -------------------- Dynamic Header --------------------
# https://github.com/dlech/KeePass2.x/blob/dbb9d60095ef39e6abc95d708fb7d03ce5ae865e/KeePassLib/Serialization/KdbxFile.cs#L234-L246
DynamicHeaderItem = Struct(
"id" / Mapping(
Byte,
{'end': 0,
'comment': 1,
'cipher_id': 2,
'compression_flags': 3,
'master_seed': 4,
'transform_seed': 5,
'transform_rounds': 6,
'encryption_iv': 7,
'protected_stream_key': 8,
'stream_start_bytes': 9,
'protected_stream_id': 10,
}
),
"data" / Prefixed(
Int16ul,
Switch(
this.id,
{'compression_flags': CompressionFlags,
'cipher_id': CipherId,
'transform_rounds': Int32ul,
'protected_stream_id': ProtectedStreamId
},
default=GreedyBytes
)
),
)
DynamicHeader = DynamicDict(
'id',
RepeatUntil(
lambda item, a, b: item.id == 'end',
DynamicHeaderItem
)
)
# -------------------- Payload Verification --------------------
# encrypted payload is split into multiple data blocks with hashes
PayloadBlock = Struct(
"block_index" / Checksum(
Int32ul,
lambda this: this._index,
this
),
"block_hash_offset" / Tell,
Padding(32),
"block_data" / Prefixed(Int32ul, GreedyBytes),
# block_hash has to be at the end with a pointer because it needs to
# come after other fields
"block_hash" / Pointer(
this.block_hash_offset,
IfThenElse(
len_(this.block_data) == 0,
Checksum(
Bytes(32),
lambda _: b'\x00' * 32,
this
),
Checksum(
Bytes(32),
lambda block_data: hashlib.sha256(block_data).digest(),
this.block_data,
# exception=PayloadChecksumError
)
)
),
)
PayloadBlocks = RepeatUntil(
lambda item, a, b: len(item.block_data) == 0, # and item.block_hash == b'\x00' * 32,
PayloadBlock
)
# -------------------- Payload Decryption/Decompression --------------------
# Compressed Bytes <---> Stream Start Bytes, Decompressed XML
UnpackedPayload = Reparsed(
Struct(
# validate payload decryption
Checksum(
Bytes(32),
lambda this: this._._.header.value.dynamic_header.stream_start_bytes.data,
this,
# exception=CredentialsError
),
"xml" / Unprotect(
this._._.header.value.dynamic_header.protected_stream_id.data,
this._._.header.value.dynamic_header.protected_stream_key.data,
XML(
IfThenElse(
this._._.header.value.dynamic_header.compression_flags.data.compression,
Decompressed(Concatenated(PayloadBlocks)),
Concatenated(PayloadBlocks)
)
)
)
)
)
# -------------------- Main KDBX Structure --------------------
Body = Struct(
"transformed_key" / Computed(compute_transformed),
"master_key" / Computed(compute_master),
"payload" / UnpackedPayload(
Switch(
this._.header.value.dynamic_header.cipher_id.data,
{'aes256': AES256Payload(GreedyBytes),
'chacha20': ChaCha20Payload(GreedyBytes),
'twofish': TwoFishPayload(GreedyBytes),
}
)
),
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/pytwofish.py
|
Twofish.set_key
|
python
|
def set_key(self, key):
key_len = len(key)
if key_len not in [16, 24, 32]:
# XXX: add padding?
raise KeyError("key must be 16, 24 or 32 bytes")
if key_len % 4:
# XXX: add padding?
raise KeyError("key not a multiple of 4")
if key_len > 32:
# XXX: prune?
raise KeyError("key_len > 32")
self.context = TWI()
key_word32 = [0] * 32
i = 0
while key:
key_word32[i] = struct.unpack("<L", key[0:4])[0]
key = key[4:]
i += 1
set_key(self.context, key_word32, key_len)
|
Init.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L55-L78
|
[
"def set_key(pkey, in_key, key_len):\n pkey.qt_gen = 0\n if not pkey.qt_gen:\n gen_qtab(pkey)\n pkey.qt_gen = 1\n pkey.mt_gen = 0\n if not pkey.mt_gen:\n gen_mtab(pkey)\n pkey.mt_gen = 1\n pkey.k_len = int((key_len * 8) / 64)\n\n a = 0\n b = 0\n me_key = [0,0,0,0]\n mo_key = [0,0,0,0]\n for i in range(pkey.k_len):\n if WORD_BIGENDIAN:\n a = byteswap32(in_key[i + 1])\n me_key[i] = a\n b = byteswap32(in_key[i + i + 1])\n else:\n a = in_key[i + i]\n me_key[i] = a\n b = in_key[i + i + 1]\n mo_key[i] = b\n pkey.s_key[pkey.k_len - i - 1] = mds_rem(a, b);\n for i in range(0, 40, 2):\n a = (0x01010101 * i) % 0x100000000;\n b = (a + 0x01010101) % 0x100000000;\n a = h_fun(pkey, a, me_key);\n b = rotl32(h_fun(pkey, b, mo_key), 8);\n pkey.l_key[i] = (a + b) % 0x100000000;\n pkey.l_key[i + 1] = rotl32((a + 2 * b) % 0x100000000, 9);\n gen_mk_tab(pkey, pkey.s_key)\n"
] |
class Twofish:
def __init__(self, key=None):
"""Twofish."""
if key:
self.set_key(key)
def decrypt(self, block):
"""Decrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
plaintext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[:16])
temp = [a, b, c, d]
decrypt(self.context, temp)
plaintext += struct.pack("<4L", *temp)
block = block[16:]
return plaintext
def encrypt(self, block):
"""Encrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
ciphertext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[0:16])
temp = [a, b, c, d]
encrypt(self.context, temp)
ciphertext += struct.pack("<4L", *temp)
block = block[16:]
return ciphertext
def get_name(self):
"""Return the name of the cipher."""
return "Twofish"
def get_block_size(self):
"""Get cipher block size in bytes."""
return block_size
def get_key_size(self):
"""Get cipher key size in bytes."""
return key_size
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/pytwofish.py
|
Twofish.decrypt
|
python
|
def decrypt(self, block):
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
plaintext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[:16])
temp = [a, b, c, d]
decrypt(self.context, temp)
plaintext += struct.pack("<4L", *temp)
block = block[16:]
return plaintext
|
Decrypt blocks.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L81-L96
|
[
"def decrypt(pkey, in_blk):\n blk = [0, 0, 0, 0]\n\n if WORD_BIGENDIAN:\n blk[0] = byteswap32(in_blk[0]) ^ pkey.l_key[4];\n blk[1] = byteswap32(in_blk[1]) ^ pkey.l_key[5];\n blk[2] = byteswap32(in_blk[2]) ^ pkey.l_key[6];\n blk[3] = byteswap32(in_blk[3]) ^ pkey.l_key[7];\n else:\n blk[0] = in_blk[0] ^ pkey.l_key[4];\n blk[1] = in_blk[1] ^ pkey.l_key[5];\n blk[2] = in_blk[2] ^ pkey.l_key[6];\n blk[3] = in_blk[3] ^ pkey.l_key[7];\n\n for i in range(7, -1, -1):\n t1 = ( pkey.mk_tab[0][byte(blk[1],3)] ^ pkey.mk_tab[1][byte(blk[1],0)] ^ pkey.mk_tab[2][byte(blk[1],1)] ^ pkey.mk_tab[3][byte(blk[1],2)] )\n t0 = ( pkey.mk_tab[0][byte(blk[0],0)] ^ pkey.mk_tab[1][byte(blk[0],1)] ^ pkey.mk_tab[2][byte(blk[0],2)] ^ pkey.mk_tab[3][byte(blk[0],3)] )\n\n blk[2] = rotl32(blk[2], 1) ^ ((t0 + t1 + pkey.l_key[4 * (i) + 10]) % 0x100000000)\n blk[3] = rotr32(blk[3] ^ ((t0 + 2 * t1 + pkey.l_key[4 * (i) + 11]) % 0x100000000), 1)\n\n t1 = ( pkey.mk_tab[0][byte(blk[3],3)] ^ pkey.mk_tab[1][byte(blk[3],0)] ^ pkey.mk_tab[2][byte(blk[3],1)] ^ pkey.mk_tab[3][byte(blk[3],2)] )\n t0 = ( pkey.mk_tab[0][byte(blk[2],0)] ^ pkey.mk_tab[1][byte(blk[2],1)] ^ pkey.mk_tab[2][byte(blk[2],2)] ^ pkey.mk_tab[3][byte(blk[2],3)] )\n\n blk[0] = rotl32(blk[0], 1) ^ ((t0 + t1 + pkey.l_key[4 * (i) + 8]) % 0x100000000)\n blk[1] = rotr32(blk[1] ^ ((t0 + 2 * t1 + pkey.l_key[4 * (i) + 9]) % 0x100000000), 1)\n\n if WORD_BIGENDIAN:\n in_blk[0] = byteswap32(blk[2] ^ pkey.l_key[0]);\n in_blk[1] = byteswap32(blk[3] ^ pkey.l_key[1]);\n in_blk[2] = byteswap32(blk[0] ^ pkey.l_key[2]);\n in_blk[3] = byteswap32(blk[1] ^ pkey.l_key[3]);\n else:\n in_blk[0] = blk[2] ^ pkey.l_key[0];\n in_blk[1] = blk[3] ^ pkey.l_key[1];\n in_blk[2] = blk[0] ^ pkey.l_key[2];\n in_blk[3] = blk[1] ^ pkey.l_key[3];\n return\n"
] |
class Twofish:
    """Object wrapper around the module-level Twofish primitives.

    Holds the key schedule context (``TWI``) and applies the module-level
    ``set_key``/``encrypt`` helpers (defined elsewhere in this module) to
    16-byte blocks, each block transformed independently (ECB style).
    """
    def __init__(self, key=None):
        """Optionally run key setup immediately; see :meth:`set_key`."""
        if key:
            self.set_key(key)
    def set_key(self, key):
        """Initialise the key schedule from a 16/24/32-byte key.

        Raises:
            KeyError: if the key length is not 16, 24 or 32 bytes.
        """
        key_len = len(key)
        if key_len not in [16, 24, 32]:
            # XXX: add padding?
            raise KeyError("key must be 16, 24 or 32 bytes")
        if key_len % 4:
            # XXX: add padding?
            raise KeyError("key not a multiple of 4")
        if key_len > 32:
            # XXX: prune?
            raise KeyError("key_len > 32")
        self.context = TWI()
        key_word32 = [0] * 32
        i = 0
        # split the key into little-endian 32-bit words
        while key:
            key_word32[i] = struct.unpack("<L", key[0:4])[0]
            key = key[4:]
            i += 1
        # module-level key-setup routine (defined elsewhere in this module)
        set_key(self.context, key_word32, key_len)
    def encrypt(self, block):
        """Encrypt one or more 16-byte blocks.

        Raises:
            ValueError: if ``len(block)`` is not a multiple of 16.
        """
        if len(block) % 16:
            raise ValueError("block size must be a multiple of 16")
        ciphertext = b''
        while block:
            a, b, c, d = struct.unpack("<4L", block[0:16])
            temp = [a, b, c, d]
            # module-level block transform; mutates ``temp`` in place
            encrypt(self.context, temp)
            ciphertext += struct.pack("<4L", *temp)
            block = block[16:]
        return ciphertext
    def get_name(self):
        """Return the name of the cipher."""
        return "Twofish"
    def get_block_size(self):
        """Get cipher block size in bytes (module-level constant)."""
        return block_size
    def get_key_size(self):
        """Get cipher key size in bytes (module-level constant)."""
        return key_size
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/pytwofish.py
|
Twofish.encrypt
|
python
|
def encrypt(self, block):
    """Encrypt one or more 16-byte blocks; returns the ciphertext bytes.

    Raises:
        ValueError: if ``len(block)`` is not a multiple of 16.
    """
    if len(block) % 16:
        raise ValueError("block size must be a multiple of 16")
    ciphertext = b''
    while block:
        # unpack a block as four little-endian 32-bit words
        a, b, c, d = struct.unpack("<4L", block[0:16])
        temp = [a, b, c, d]
        # module-level block transform (defined elsewhere); mutates temp
        encrypt(self.context, temp)
        ciphertext += struct.pack("<4L", *temp)
        block = block[16:]
    return ciphertext
|
Encrypt blocks.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L99-L114
|
[
"def encrypt(pkey, in_blk):\n blk = [0, 0, 0, 0]\n\n if WORD_BIGENDIAN:\n blk[0] = byteswap32(in_blk[0]) ^ pkey.l_key[0];\n blk[1] = byteswap32(in_blk[1]) ^ pkey.l_key[1];\n blk[2] = byteswap32(in_blk[2]) ^ pkey.l_key[2];\n blk[3] = byteswap32(in_blk[3]) ^ pkey.l_key[3];\n else:\n blk[0] = in_blk[0] ^ pkey.l_key[0];\n blk[1] = in_blk[1] ^ pkey.l_key[1];\n blk[2] = in_blk[2] ^ pkey.l_key[2];\n blk[3] = in_blk[3] ^ pkey.l_key[3];\n\n for i in range(8):\n t1 = ( pkey.mk_tab[0][byte(blk[1],3)] ^ pkey.mk_tab[1][byte(blk[1],0)] ^ pkey.mk_tab[2][byte(blk[1],1)] ^ pkey.mk_tab[3][byte(blk[1],2)] );\n t0 = ( pkey.mk_tab[0][byte(blk[0],0)] ^ pkey.mk_tab[1][byte(blk[0],1)] ^ pkey.mk_tab[2][byte(blk[0],2)] ^ pkey.mk_tab[3][byte(blk[0],3)] );\n\n blk[2] = rotr32(blk[2] ^ ((t0 + t1 + pkey.l_key[4 * (i) + 8]) % 0x100000000), 1);\n blk[3] = rotl32(blk[3], 1) ^ ((t0 + 2 * t1 + pkey.l_key[4 * (i) + 9]) % 0x100000000);\n\n t1 = ( pkey.mk_tab[0][byte(blk[3],3)] ^ pkey.mk_tab[1][byte(blk[3],0)] ^ pkey.mk_tab[2][byte(blk[3],1)] ^ pkey.mk_tab[3][byte(blk[3],2)] );\n t0 = ( pkey.mk_tab[0][byte(blk[2],0)] ^ pkey.mk_tab[1][byte(blk[2],1)] ^ pkey.mk_tab[2][byte(blk[2],2)] ^ pkey.mk_tab[3][byte(blk[2],3)] );\n\n blk[0] = rotr32(blk[0] ^ ((t0 + t1 + pkey.l_key[4 * (i) + 10]) % 0x100000000), 1);\n blk[1] = rotl32(blk[1], 1) ^ ((t0 + 2 * t1 + pkey.l_key[4 * (i) + 11]) % 0x100000000);\n\n if WORD_BIGENDIAN:\n in_blk[0] = byteswap32(blk[2] ^ pkey.l_key[4]);\n in_blk[1] = byteswap32(blk[3] ^ pkey.l_key[5]);\n in_blk[2] = byteswap32(blk[0] ^ pkey.l_key[6]);\n in_blk[3] = byteswap32(blk[1] ^ pkey.l_key[7]);\n else:\n in_blk[0] = blk[2] ^ pkey.l_key[4];\n in_blk[1] = blk[3] ^ pkey.l_key[5];\n in_blk[2] = blk[0] ^ pkey.l_key[6];\n in_blk[3] = blk[1] ^ pkey.l_key[7];\n\n return\n"
] |
class Twofish:
    """Object wrapper around the module-level Twofish primitives.

    Holds the key schedule context (``TWI``) and applies the module-level
    ``set_key``/``decrypt`` helpers (defined elsewhere in this module) to
    16-byte blocks, each block transformed independently (ECB style).
    """
    def __init__(self, key=None):
        """Optionally run key setup immediately; see :meth:`set_key`."""
        if key:
            self.set_key(key)
    def set_key(self, key):
        """Initialise the key schedule from a 16/24/32-byte key.

        Raises:
            KeyError: if the key length is not 16, 24 or 32 bytes.
        """
        key_len = len(key)
        if key_len not in [16, 24, 32]:
            # XXX: add padding?
            raise KeyError("key must be 16, 24 or 32 bytes")
        if key_len % 4:
            # XXX: add padding?
            raise KeyError("key not a multiple of 4")
        if key_len > 32:
            # XXX: prune?
            raise KeyError("key_len > 32")
        self.context = TWI()
        key_word32 = [0] * 32
        i = 0
        # split the key into little-endian 32-bit words
        while key:
            key_word32[i] = struct.unpack("<L", key[0:4])[0]
            key = key[4:]
            i += 1
        # module-level key-setup routine (defined elsewhere in this module)
        set_key(self.context, key_word32, key_len)
    def decrypt(self, block):
        """Decrypt one or more 16-byte blocks.

        Raises:
            ValueError: if ``len(block)`` is not a multiple of 16.
        """
        if len(block) % 16:
            raise ValueError("block size must be a multiple of 16")
        plaintext = b''
        while block:
            a, b, c, d = struct.unpack("<4L", block[:16])
            temp = [a, b, c, d]
            # module-level block transform; mutates ``temp`` in place
            decrypt(self.context, temp)
            plaintext += struct.pack("<4L", *temp)
            block = block[16:]
        return plaintext
    def get_name(self):
        """Return the name of the cipher."""
        return "Twofish"
    def get_block_size(self):
        """Get cipher block size in bytes (module-level constant)."""
        return block_size
    def get_key_size(self):
        """Get cipher key size in bytes (module-level constant)."""
        return key_size
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
aes_kdf
|
python
|
def aes_kdf(key, rounds, password=None, keyfile=None):
    """Derive the transformed key via repeated AES-128-ECB encryption.

    The composite key built from *password*/*keyfile* is encrypted
    ``rounds`` times under *key*, and the final result is hashed with
    SHA-256.

    Args:
        key: AES transform seed from the KDBX header.
        rounds: number of encryption rounds to apply.
        password: optional master password.
        keyfile: optional keyfile path.

    Returns:
        32-byte SHA-256 digest of the transformed key.
    """
    ecb = AES.new(key, AES.MODE_ECB)
    transformed = compute_key_composite(password=password, keyfile=keyfile)
    # apply the round count dictated by the database header
    for _ in range(rounds):
        transformed = ecb.encrypt(transformed)
    return hashlib.sha256(transformed).digest()
|
Set up a context for AES128-ECB encryption to find transformed_key
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L84-L98
|
[
"def compute_key_composite(password=None, keyfile=None):\n \"\"\"Compute composite key.\n Used in header verification and payload decryption.\"\"\"\n\n # hash the password\n if password:\n password_composite = hashlib.sha256(password.encode('utf-8')).digest()\n else:\n password_composite = b''\n # hash the keyfile\n if keyfile:\n # try to read XML keyfile\n try:\n with open(keyfile, 'r') as f:\n tree = etree.parse(f).getroot()\n keyfile_composite = base64.b64decode(tree.find('Key/Data').text)\n # otherwise, try to read plain keyfile\n except (etree.XMLSyntaxError, UnicodeDecodeError):\n try:\n with open(keyfile, 'rb') as f:\n key = f.read()\n\n try:\n int(key, 16)\n is_hex = True\n except ValueError:\n is_hex = False\n # if the length is 32 bytes we assume it is the key\n if len(key) == 32:\n keyfile_composite = key\n # if the length is 64 bytes we assume the key is hex encoded\n elif len(key) == 64 and is_hex:\n keyfile_composite = codecs.decode(key, 'hex')\n # anything else may be a file to hash for the key\n else:\n keyfile_composite = hashlib.sha256(key).digest()\n except:\n raise IOError('Could not read keyfile')\n\n else:\n keyfile_composite = b''\n\n # create composite key from password and keyfile composites\n return hashlib.sha256(password_composite + keyfile_composite).digest()\n"
] |
from Crypto.Cipher import AES, ChaCha20, Salsa20
from .twofish import Twofish
from Crypto.Util import Padding as CryptoPadding
import hashlib
from construct import (
Adapter, BitStruct, BitsSwapped, Container, Flag, Padding, RepeatUntil,
Subconstruct, Construct, ListContainer, Mapping, GreedyBytes, Int32ul,
Switch
)
from lxml import etree
import base64
import unicodedata
import zlib
import codecs
from io import BytesIO
from collections import OrderedDict
class HeaderChecksumError(Exception):
    """Error type for KDBX header-checksum failures (raised by callers elsewhere)."""
    pass
class CredentialsError(Exception):
    """Error type for credential-verification failures (raised by callers elsewhere)."""
    pass
class PayloadChecksumError(Exception):
    """Error type for payload-checksum failures (raised by callers elsewhere)."""
    pass
class DynamicDict(Adapter):
    """ListContainer <---> Container

    Convenience mapping so we don't have to iterate a ListContainer to
    find the right item: on decode, each item is keyed by ``item[key]``.

    FIXME: the ``lump`` kwarg was added to get around the fact that
    InnerHeader is not truly a dict.  Items whose key value is in
    ``lump`` (e.g. 'binary' InnerHeaderItems) are collected into a
    single list instead of overwriting each other.
    """
    def __init__(self, key, subcon, lump=None):
        """
        Args:
            key: field name used to index items.
            subcon: wrapped construct subconstruct.
            lump: iterable of key values whose items are lumped into
                lists (default: no lumping).
        """
        super(DynamicDict, self).__init__(subcon)
        self.key = key
        # was ``lump=[]``: avoid the shared mutable-default-argument pitfall
        self.lump = [] if lump is None else lump
    # map ListContainer to Container
    def _decode(self, obj, context, path):
        d = OrderedDict()
        # pre-create an empty list for every lumped key
        for l in self.lump:
            d[l] = ListContainer([])
        for item in obj:
            if item[self.key] in self.lump:
                d[item[self.key]].append(item)
            else:
                d[item[self.key]] = item
        return Container(d)
    # map Container to ListContainer
    def _encode(self, obj, context, path):
        l = []
        for key in obj:
            if key in self.lump:
                l += obj[key]
            else:
                l.append(obj[key])
        return ListContainer(l)
def Reparsed(subcon_out):
    """Class factory: build an Adapter that re-parses raw bytes.

    The returned Adapter subclass decodes wrapped bytes by running
    ``subcon_out.parse`` (forwarding the current context as keyword
    arguments) and encodes by running ``subcon_out.build``.
    """
    class Reparsed(Adapter):
        """Bytes <---> Parsed subcon result
        Takes in bytes and reparses it with subcon_out"""
        def _decode(self, data, con, path):
            return subcon_out.parse(data, **con)
        def _encode(self, obj, con, path):
            return subcon_out.build(obj, **con)
    return Reparsed
# is the payload compressed?
CompressionFlags = BitsSwapped(
BitStruct("compression" / Flag, Padding(8 * 4 - 1))
)
# -------------------- Key Computation --------------------
def compute_key_composite(password=None, keyfile=None):
    """Compute the composite credentials key.

    SHA-256 of the concatenated password hash and keyfile hash; used in
    header verification and payload decryption.

    Args:
        password: optional master password string.
        keyfile: optional path to an XML or plain keyfile.

    Returns:
        32-byte SHA-256 digest (bytes).

    Raises:
        IOError: if the keyfile cannot be read or interpreted.
    """
    # hash the password (empty contribution when no password is given)
    if password:
        password_composite = hashlib.sha256(password.encode('utf-8')).digest()
    else:
        password_composite = b''
    # hash the keyfile
    if keyfile:
        # try to read XML keyfile
        try:
            with open(keyfile, 'r') as f:
                tree = etree.parse(f).getroot()
                keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
        # otherwise, try to read plain keyfile
        except (etree.XMLSyntaxError, UnicodeDecodeError):
            try:
                with open(keyfile, 'rb') as f:
                    key = f.read()
                try:
                    int(key, 16)
                    is_hex = True
                except ValueError:
                    is_hex = False
                # if the length is 32 bytes we assume it is the key
                if len(key) == 32:
                    keyfile_composite = key
                # if the length is 64 bytes we assume the key is hex encoded
                elif len(key) == 64 and is_hex:
                    keyfile_composite = codecs.decode(key, 'hex')
                # anything else may be a file to hash for the key
                else:
                    keyfile_composite = hashlib.sha256(key).digest()
            # was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow it to ordinary errors
            except Exception:
                raise IOError('Could not read keyfile')
    else:
        keyfile_composite = b''
    # create composite key from password and keyfile composites
    return hashlib.sha256(password_composite + keyfile_composite).digest()
def compute_master(context):
    """Derive the payload master key.

    SHA-256 of the header's master seed concatenated with the
    transformed key; used in payload decryption.
    """
    seed = context._.header.value.dynamic_header.master_seed.data
    digest = hashlib.sha256(seed + context.transformed_key)
    return digest.digest()
# -------------------- XML Processing --------------------
class XML(Adapter):
    """Bytes <---> lxml etree

    Decode parses raw XML bytes into an lxml ElementTree; encode
    serializes the tree back to bytes with ``etree.tostring``.
    """
    def _decode(self, data, con, path):
        return etree.parse(BytesIO(data))
    def _encode(self, tree, con, path):
        return etree.tostring(tree)
class UnprotectedStream(Adapter):
    """lxml etree <---> unprotected lxml etree

    Iterate etree for ``Protected='True'`` Value elements and decrypt
    them using the cipher provided by ``get_cipher`` (implemented by
    subclasses); on encode, re-encrypt ``Protected='False'`` elements.
    """
    protected_xpath = '//Value[@Protected=\'True\']'
    unprotected_xpath = '//Value[@Protected=\'False\']'
    def __init__(self, protected_stream_key, subcon):
        # protected_stream_key is a callable evaluated against the
        # construct context at (de)code time — see the
        # ``self.protected_stream_key(con)`` calls below.
        super(UnprotectedStream, self).__init__(subcon)
        self.protected_stream_key = protected_stream_key
    def _decode(self, tree, con, path):
        cipher = self.get_cipher(self.protected_stream_key(con))
        for elem in tree.xpath(self.protected_xpath):
            if elem.text is not None:
                # base64-decode, decrypt, then drop control characters
                # (Unicode category "C*") from the recovered text
                elem.text = ''.join(c for c in cipher.decrypt(
                    base64.b64decode(
                        elem.text
                    )
                ).decode('utf-8') if unicodedata.category(c)[0] != "C")
            elem.attrib['Protected'] = 'False'
        return tree
    def _encode(self, tree, con, path):
        cipher = self.get_cipher(self.protected_stream_key(con))
        for elem in tree.xpath(self.unprotected_xpath):
            if elem.text is not None:
                # NOTE(review): b64encode returns bytes assigned to
                # elem.text — lxml appears to accept ASCII bytes; confirm.
                elem.text = base64.b64encode(
                    cipher.encrypt(
                        elem.text.encode('utf-8')
                    )
                )
            elem.attrib['Protected'] = 'True'
        return tree
class ARCFourVariantStream(UnprotectedStream):
    """Stream handler for the legacy ARCFour-variant cipher.

    This cipher is not supported; requesting it always fails.
    """
    def get_cipher(self, protected_stream_key):
        # NotImplementedError (still an Exception subclass, so existing
        # handlers keep working) signals the missing feature precisely
        raise NotImplementedError("ARCFourVariant not implemented")
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L115-L119
class Salsa20Stream(UnprotectedStream):
    """Protected-value stream cipher using Salsa20.

    The key is the SHA-256 of the protected stream key; the nonce is a
    fixed 8-byte constant (matching the KeePass2.x source linked above).
    """
    def get_cipher(self, protected_stream_key):
        key = hashlib.sha256(protected_stream_key).digest()
        return Salsa20.new(
            key=key,
            nonce=b'\xE8\x30\x09\x4B\x97\x20\x5D\x2A'
        )
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L103-L111
class ChaCha20Stream(UnprotectedStream):
    """Protected-value stream cipher using ChaCha20.

    SHA-512 of the protected stream key supplies both the 32-byte key
    (first 32 bytes of the hash) and the 12-byte nonce (next 12 bytes).
    """
    def get_cipher(self, protected_stream_key):
        key_hash = hashlib.sha512(protected_stream_key).digest()
        key = key_hash[:32]
        nonce = key_hash[32:44]
        return ChaCha20.new(
            key=key,
            nonce=nonce
        )
def Unprotect(protected_stream_id, protected_stream_key, subcon):
    """Select stream cipher based on protected_stream_id.

    Args:
        protected_stream_id: value naming the cipher ('arcfourvariant',
            'salsa20' or 'chacha20').
        protected_stream_key: callable yielding the raw stream key.
        subcon: subconstruct to wrap; returned unwrapped for unknown ids.
    """
    return Switch(
        protected_stream_id,
        {'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
         'salsa20': Salsa20Stream(protected_stream_key, subcon),
         'chacha20': ChaCha20Stream(protected_stream_key, subcon),
        },
        default=subcon
    )
# -------------------- Payload Encryption/Decompression --------------------
class Concatenated(Adapter):
    """Data Blocks <---> Bytes

    Decode joins the blocks' payloads into a single byte string; encode
    splits bytes into 1 MiB blocks terminated by an empty block.
    """
    def _decode(self, blocks, con, path):
        return b''.join([block.block_data for block in blocks])
    def _encode(self, payload_data, con, path):
        blocks = []
        # split payload_data into 1 MB blocks (spec default)
        i = 0
        while i < len(payload_data):
            blocks.append(Container(block_data=payload_data[i:i + 2**20]))
            i += 2**20
        # trailing empty block marks the end of the stream
        blocks.append(Container(block_data=b''))
        return blocks
class DecryptedPayload(Adapter):
    """Encrypted Bytes <---> Decrypted Bytes

    Subclasses provide ``get_cipher(master_key, iv)``; the IV comes from
    the parsed dynamic header held in the construct context.
    """
    def _decode(self, payload_data, con, path):
        cipher = self.get_cipher(
            con.master_key,
            con._.header.value.dynamic_header.encryption_iv.data
        )
        payload_data = cipher.decrypt(payload_data)
        # NOTE(review): padding is not stripped here — presumably removed
        # downstream; confirm.
        return payload_data
    def _encode(self, payload_data, con, path):
        # pad to the 16-byte block size before encrypting
        # (pycryptodome's default padding scheme)
        payload_data = CryptoPadding.pad(payload_data, 16)
        cipher = self.get_cipher(
            con.master_key,
            con._.header.value.dynamic_header.encryption_iv.data
        )
        payload_data = cipher.encrypt(payload_data)
        return payload_data
class AES256Payload(DecryptedPayload):
    # AES in CBC mode keyed with the master key and the header IV
    def get_cipher(self, master_key, encryption_iv):
        return AES.new(master_key, AES.MODE_CBC, encryption_iv)
class ChaCha20Payload(DecryptedPayload):
    # ChaCha20 stream cipher; the header "IV" field serves as the nonce
    def get_cipher(self, master_key, encryption_iv):
        return ChaCha20.new(key=master_key, nonce=encryption_iv)
class TwoFishPayload(DecryptedPayload):
    # Twofish in CBC mode, using the package's bundled implementation
    def get_cipher(self, master_key, encryption_iv):
        return Twofish.new(master_key, mode=Twofish.MODE_CBC, IV=encryption_iv)
class Decompressed(Adapter):
    """Compressed Bytes <---> Decompressed Bytes

    Uses wbits = 16 + 15, i.e. the gzip container with the maximum
    window size (see the zlib module documentation).
    """
    def _decode(self, data, con, path):
        return zlib.decompress(data, 16 + 15)
    def _encode(self, data, con, path):
        # level 6, gzip framing, default memory level, default strategy
        compressobj = zlib.compressobj(
            6,
            zlib.DEFLATED,
            16 + 15,
            zlib.DEF_MEM_LEVEL,
            0
        )
        data = compressobj.compress(data)
        data += compressobj.flush()
        return data
# -------------------- Cipher Enums --------------------
# payload encryption method
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
CipherId = Mapping(
GreedyBytes,
{'aes256': b'1\xc1\xf2\xe6\xbfqCP\xbeX\x05!j\xfcZ\xff',
'twofish': b'\xadh\xf2\x9fWoK\xb9\xa3j\xd4z\xf9e4l',
'chacha20': b'\xd6\x03\x8a+\x8boL\xb5\xa5$3\x9a1\xdb\xb5\x9a'
}
)
# protected entry encryption method
# https://github.com/dlech/KeePass2.x/blob/149ab342338ffade24b44aaa1fd89f14b64fda09/KeePassLib/Cryptography/CryptoRandomStream.cs#L35
ProtectedStreamId = Mapping(
Int32ul,
{'none': 0,
'arcfourvariant': 1,
'salsa20': 2,
'chacha20': 3,
}
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
compute_key_composite
|
python
|
def compute_key_composite(password=None, keyfile=None):
    """Compute the composite credentials key.

    SHA-256 of the concatenated password hash and keyfile hash; used in
    header verification and payload decryption.

    Args:
        password: optional master password string.
        keyfile: optional path to an XML or plain keyfile.

    Returns:
        32-byte SHA-256 digest (bytes).

    Raises:
        IOError: if the keyfile cannot be read or interpreted.
    """
    # hash the password (empty contribution when no password is given)
    if password:
        password_composite = hashlib.sha256(password.encode('utf-8')).digest()
    else:
        password_composite = b''
    # hash the keyfile
    if keyfile:
        # try to read XML keyfile
        try:
            with open(keyfile, 'r') as f:
                tree = etree.parse(f).getroot()
                keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
        # otherwise, try to read plain keyfile
        except (etree.XMLSyntaxError, UnicodeDecodeError):
            try:
                with open(keyfile, 'rb') as f:
                    key = f.read()
                try:
                    int(key, 16)
                    is_hex = True
                except ValueError:
                    is_hex = False
                # if the length is 32 bytes we assume it is the key
                if len(key) == 32:
                    keyfile_composite = key
                # if the length is 64 bytes we assume the key is hex encoded
                elif len(key) == 64 and is_hex:
                    keyfile_composite = codecs.decode(key, 'hex')
                # anything else may be a file to hash for the key
                else:
                    keyfile_composite = hashlib.sha256(key).digest()
            # was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow it to ordinary errors
            except Exception:
                raise IOError('Could not read keyfile')
    else:
        keyfile_composite = b''
    # create composite key from password and keyfile composites
    return hashlib.sha256(password_composite + keyfile_composite).digest()
|
Compute composite key.
Used in header verification and payload decryption.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L101-L144
| null |
from Crypto.Cipher import AES, ChaCha20, Salsa20
from .twofish import Twofish
from Crypto.Util import Padding as CryptoPadding
import hashlib
from construct import (
Adapter, BitStruct, BitsSwapped, Container, Flag, Padding, RepeatUntil,
Subconstruct, Construct, ListContainer, Mapping, GreedyBytes, Int32ul,
Switch
)
from lxml import etree
import base64
import unicodedata
import zlib
import codecs
from io import BytesIO
from collections import OrderedDict
class HeaderChecksumError(Exception):
pass
class CredentialsError(Exception):
pass
class PayloadChecksumError(Exception):
pass
class DynamicDict(Adapter):
"""ListContainer <---> Container
Convenience mapping so we dont have to iterate ListContainer to find
the right item
FIXME: lump kwarg was added to get around the fact that InnerHeader is
not truly a dict. We lump all 'binary' InnerHeaderItems into a single list
"""
def __init__(self, key, subcon, lump=[]):
super(DynamicDict, self).__init__(subcon)
self.key = key
self.lump = lump
# map ListContainer to Container
def _decode(self, obj, context, path):
d = OrderedDict()
for l in self.lump:
d[l] = ListContainer([])
for item in obj:
if item[self.key] in self.lump:
d[item[self.key]].append(item)
else:
d[item[self.key]] = item
return Container(d)
# map Container to ListContainer
def _encode(self, obj, context, path):
l = []
for key in obj:
if key in self.lump:
l += obj[key]
else:
l.append(obj[key])
return ListContainer(l)
def Reparsed(subcon_out):
class Reparsed(Adapter):
"""Bytes <---> Parsed subcon result
Takes in bytes and reparses it with subcon_out"""
def _decode(self, data, con, path):
return subcon_out.parse(data, **con)
def _encode(self, obj, con, path):
return subcon_out.build(obj, **con)
return Reparsed
# is the payload compressed?
CompressionFlags = BitsSwapped(
BitStruct("compression" / Flag, Padding(8 * 4 - 1))
)
# -------------------- Key Computation --------------------
def aes_kdf(key, rounds, password=None, keyfile=None):
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(
password=password,
keyfile=keyfile
)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key)
return hashlib.sha256(transformed_key).digest()
def compute_master(context):
"""Computes master key from transformed key and master seed.
Used in payload decryption."""
# combine the transformed key with the header master seed to find the master_key
master_key = hashlib.sha256(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key).digest()
return master_key
# -------------------- XML Processing --------------------
class XML(Adapter):
"""Bytes <---> lxml etree"""
def _decode(self, data, con, path):
return etree.parse(BytesIO(data))
def _encode(self, tree, con, path):
return etree.tostring(tree)
class UnprotectedStream(Adapter):
"""lxml etree <---> unprotected lxml etree
Iterate etree for Protected elements and decrypt using cipher
provided by get_cipher"""
protected_xpath = '//Value[@Protected=\'True\']'
unprotected_xpath = '//Value[@Protected=\'False\']'
def __init__(self, protected_stream_key, subcon):
super(UnprotectedStream, self).__init__(subcon)
self.protected_stream_key = protected_stream_key
def _decode(self, tree, con, path):
cipher = self.get_cipher(self.protected_stream_key(con))
for elem in tree.xpath(self.protected_xpath):
if elem.text is not None:
elem.text = ''.join(c for c in cipher.decrypt(
base64.b64decode(
elem.text
)
).decode('utf-8') if unicodedata.category(c)[0] != "C")
elem.attrib['Protected'] = 'False'
return tree
def _encode(self, tree, con, path):
cipher = self.get_cipher(self.protected_stream_key(con))
for elem in tree.xpath(self.unprotected_xpath):
if elem.text is not None:
elem.text = base64.b64encode(
cipher.encrypt(
elem.text.encode('utf-8')
)
)
elem.attrib['Protected'] = 'True'
return tree
class ARCFourVariantStream(UnprotectedStream):
def get_cipher(self, protected_stream_key):
raise Exception("ARCFourVariant not implemented")
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L115-L119
class Salsa20Stream(UnprotectedStream):
def get_cipher(self, protected_stream_key):
key = hashlib.sha256(protected_stream_key).digest()
return Salsa20.new(
key=key,
nonce=b'\xE8\x30\x09\x4B\x97\x20\x5D\x2A'
)
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L103-L111
class ChaCha20Stream(UnprotectedStream):
def get_cipher(self, protected_stream_key):
key_hash = hashlib.sha512(protected_stream_key).digest()
key = key_hash[:32]
nonce = key_hash[32:44]
return ChaCha20.new(
key=key,
nonce=nonce
)
def Unprotect(protected_stream_id, protected_stream_key, subcon):
"""Select stream cipher based on protected_stream_id"""
return Switch(
protected_stream_id,
{'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
'salsa20': Salsa20Stream(protected_stream_key, subcon),
'chacha20': ChaCha20Stream(protected_stream_key, subcon),
},
default=subcon
)
# -------------------- Payload Encryption/Decompression --------------------
class Concatenated(Adapter):
"""Data Blocks <---> Bytes"""
def _decode(self, blocks, con, path):
return b''.join([block.block_data for block in blocks])
def _encode(self, payload_data, con, path):
blocks = []
# split payload_data into 1 MB blocks (spec default)
i = 0
while i < len(payload_data):
blocks.append(Container(block_data=payload_data[i:i + 2**20]))
i += 2**20
blocks.append(Container(block_data=b''))
return blocks
class DecryptedPayload(Adapter):
"""Encrypted Bytes <---> Decrypted Bytes"""
def _decode(self, payload_data, con, path):
cipher = self.get_cipher(
con.master_key,
con._.header.value.dynamic_header.encryption_iv.data
)
payload_data = cipher.decrypt(payload_data)
return payload_data
def _encode(self, payload_data, con, path):
payload_data = CryptoPadding.pad(payload_data, 16)
cipher = self.get_cipher(
con.master_key,
con._.header.value.dynamic_header.encryption_iv.data
)
payload_data = cipher.encrypt(payload_data)
return payload_data
class AES256Payload(DecryptedPayload):
def get_cipher(self, master_key, encryption_iv):
return AES.new(master_key, AES.MODE_CBC, encryption_iv)
class ChaCha20Payload(DecryptedPayload):
def get_cipher(self, master_key, encryption_iv):
return ChaCha20.new(key=master_key, nonce=encryption_iv)
class TwoFishPayload(DecryptedPayload):
def get_cipher(self, master_key, encryption_iv):
return Twofish.new(master_key, mode=Twofish.MODE_CBC, IV=encryption_iv)
class Decompressed(Adapter):
"""Compressed Bytes <---> Decompressed Bytes"""
def _decode(self, data, con, path):
return zlib.decompress(data, 16 + 15)
def _encode(self, data, con, path):
compressobj = zlib.compressobj(
6,
zlib.DEFLATED,
16 + 15,
zlib.DEF_MEM_LEVEL,
0
)
data = compressobj.compress(data)
data += compressobj.flush()
return data
# -------------------- Cipher Enums --------------------
# payload encryption method
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
CipherId = Mapping(
GreedyBytes,
{'aes256': b'1\xc1\xf2\xe6\xbfqCP\xbeX\x05!j\xfcZ\xff',
'twofish': b'\xadh\xf2\x9fWoK\xb9\xa3j\xd4z\xf9e4l',
'chacha20': b'\xd6\x03\x8a+\x8boL\xb5\xa5$3\x9a1\xdb\xb5\x9a'
}
)
# protected entry encryption method
# https://github.com/dlech/KeePass2.x/blob/149ab342338ffade24b44aaa1fd89f14b64fda09/KeePassLib/Cryptography/CryptoRandomStream.cs#L35
ProtectedStreamId = Mapping(
Int32ul,
{'none': 0,
'arcfourvariant': 1,
'salsa20': 2,
'chacha20': 3,
}
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
compute_master
|
python
|
def compute_master(context):
    """Derive the payload master key from seed and transformed key.

    SHA-256 of the header's master seed concatenated with the
    transformed key.
    """
    master_seed = context._.header.value.dynamic_header.master_seed.data
    hasher = hashlib.sha256(master_seed + context.transformed_key)
    return hasher.digest()
|
Computes master key from transformed key and master seed.
Used in payload decryption.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L146-L154
| null |
from Crypto.Cipher import AES, ChaCha20, Salsa20
from .twofish import Twofish
from Crypto.Util import Padding as CryptoPadding
import hashlib
from construct import (
Adapter, BitStruct, BitsSwapped, Container, Flag, Padding, RepeatUntil,
Subconstruct, Construct, ListContainer, Mapping, GreedyBytes, Int32ul,
Switch
)
from lxml import etree
import base64
import unicodedata
import zlib
import codecs
from io import BytesIO
from collections import OrderedDict
class HeaderChecksumError(Exception):
pass
class CredentialsError(Exception):
pass
class PayloadChecksumError(Exception):
pass
class DynamicDict(Adapter):
"""ListContainer <---> Container
Convenience mapping so we dont have to iterate ListContainer to find
the right item
FIXME: lump kwarg was added to get around the fact that InnerHeader is
not truly a dict. We lump all 'binary' InnerHeaderItems into a single list
"""
def __init__(self, key, subcon, lump=[]):
super(DynamicDict, self).__init__(subcon)
self.key = key
self.lump = lump
# map ListContainer to Container
def _decode(self, obj, context, path):
d = OrderedDict()
for l in self.lump:
d[l] = ListContainer([])
for item in obj:
if item[self.key] in self.lump:
d[item[self.key]].append(item)
else:
d[item[self.key]] = item
return Container(d)
# map Container to ListContainer
def _encode(self, obj, context, path):
l = []
for key in obj:
if key in self.lump:
l += obj[key]
else:
l.append(obj[key])
return ListContainer(l)
def Reparsed(subcon_out):
class Reparsed(Adapter):
"""Bytes <---> Parsed subcon result
Takes in bytes and reparses it with subcon_out"""
def _decode(self, data, con, path):
return subcon_out.parse(data, **con)
def _encode(self, obj, con, path):
return subcon_out.build(obj, **con)
return Reparsed
# is the payload compressed?
CompressionFlags = BitsSwapped(
BitStruct("compression" / Flag, Padding(8 * 4 - 1))
)
# -------------------- Key Computation --------------------
def aes_kdf(key, rounds, password=None, keyfile=None):
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(
password=password,
keyfile=keyfile
)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key)
return hashlib.sha256(transformed_key).digest()
def compute_key_composite(password=None, keyfile=None):
    """Compute composite key.

    SHA-256 of the concatenated password hash and keyfile hash; used in
    header verification and payload decryption.

    Raises:
        IOError: if the keyfile cannot be read or interpreted.
    """
    # hash the password (empty contribution when no password is given)
    if password:
        password_composite = hashlib.sha256(password.encode('utf-8')).digest()
    else:
        password_composite = b''
    # hash the keyfile
    if keyfile:
        # try to read XML keyfile
        try:
            with open(keyfile, 'r') as f:
                tree = etree.parse(f).getroot()
                keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
        # otherwise, try to read plain keyfile
        except (etree.XMLSyntaxError, UnicodeDecodeError):
            try:
                with open(keyfile, 'rb') as f:
                    key = f.read()
                try:
                    int(key, 16)
                    is_hex = True
                except ValueError:
                    is_hex = False
                # if the length is 32 bytes we assume it is the key
                if len(key) == 32:
                    keyfile_composite = key
                # if the length is 64 bytes we assume the key is hex encoded
                elif len(key) == 64 and is_hex:
                    keyfile_composite = codecs.decode(key, 'hex')
                # anything else may be a file to hash for the key
                else:
                    keyfile_composite = hashlib.sha256(key).digest()
            # was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrow it to ordinary errors
            except Exception:
                raise IOError('Could not read keyfile')
    else:
        keyfile_composite = b''
    # create composite key from password and keyfile composites
    return hashlib.sha256(password_composite + keyfile_composite).digest()
# -------------------- XML Processing --------------------
class XML(Adapter):
"""Bytes <---> lxml etree"""
def _decode(self, data, con, path):
return etree.parse(BytesIO(data))
def _encode(self, tree, con, path):
return etree.tostring(tree)
class UnprotectedStream(Adapter):
"""lxml etree <---> unprotected lxml etree
Iterate etree for Protected elements and decrypt using cipher
provided by get_cipher"""
protected_xpath = '//Value[@Protected=\'True\']'
unprotected_xpath = '//Value[@Protected=\'False\']'
def __init__(self, protected_stream_key, subcon):
super(UnprotectedStream, self).__init__(subcon)
self.protected_stream_key = protected_stream_key
def _decode(self, tree, con, path):
cipher = self.get_cipher(self.protected_stream_key(con))
for elem in tree.xpath(self.protected_xpath):
if elem.text is not None:
elem.text = ''.join(c for c in cipher.decrypt(
base64.b64decode(
elem.text
)
).decode('utf-8') if unicodedata.category(c)[0] != "C")
elem.attrib['Protected'] = 'False'
return tree
def _encode(self, tree, con, path):
cipher = self.get_cipher(self.protected_stream_key(con))
for elem in tree.xpath(self.unprotected_xpath):
if elem.text is not None:
elem.text = base64.b64encode(
cipher.encrypt(
elem.text.encode('utf-8')
)
)
elem.attrib['Protected'] = 'True'
return tree
class ARCFourVariantStream(UnprotectedStream):
def get_cipher(self, protected_stream_key):
raise Exception("ARCFourVariant not implemented")
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L115-L119
class Salsa20Stream(UnprotectedStream):
def get_cipher(self, protected_stream_key):
key = hashlib.sha256(protected_stream_key).digest()
return Salsa20.new(
key=key,
nonce=b'\xE8\x30\x09\x4B\x97\x20\x5D\x2A'
)
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L103-L111
class ChaCha20Stream(UnprotectedStream):
def get_cipher(self, protected_stream_key):
key_hash = hashlib.sha512(protected_stream_key).digest()
key = key_hash[:32]
nonce = key_hash[32:44]
return ChaCha20.new(
key=key,
nonce=nonce
)
def Unprotect(protected_stream_id, protected_stream_key, subcon):
"""Select stream cipher based on protected_stream_id"""
return Switch(
protected_stream_id,
{'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
'salsa20': Salsa20Stream(protected_stream_key, subcon),
'chacha20': ChaCha20Stream(protected_stream_key, subcon),
},
default=subcon
)
# -------------------- Payload Encryption/Decompression --------------------
class Concatenated(Adapter):
"""Data Blocks <---> Bytes"""
def _decode(self, blocks, con, path):
return b''.join([block.block_data for block in blocks])
def _encode(self, payload_data, con, path):
blocks = []
# split payload_data into 1 MB blocks (spec default)
i = 0
while i < len(payload_data):
blocks.append(Container(block_data=payload_data[i:i + 2**20]))
i += 2**20
blocks.append(Container(block_data=b''))
return blocks
class DecryptedPayload(Adapter):
"""Encrypted Bytes <---> Decrypted Bytes"""
def _decode(self, payload_data, con, path):
cipher = self.get_cipher(
con.master_key,
con._.header.value.dynamic_header.encryption_iv.data
)
payload_data = cipher.decrypt(payload_data)
return payload_data
def _encode(self, payload_data, con, path):
payload_data = CryptoPadding.pad(payload_data, 16)
cipher = self.get_cipher(
con.master_key,
con._.header.value.dynamic_header.encryption_iv.data
)
payload_data = cipher.encrypt(payload_data)
return payload_data
class AES256Payload(DecryptedPayload):
    def get_cipher(self, master_key, encryption_iv):
        # AES in CBC mode with the header-supplied IV
        return AES.new(master_key, AES.MODE_CBC, encryption_iv)
class ChaCha20Payload(DecryptedPayload):
    def get_cipher(self, master_key, encryption_iv):
        # ChaCha20 stream cipher; the header IV serves as the nonce
        return ChaCha20.new(key=master_key, nonce=encryption_iv)
class TwoFishPayload(DecryptedPayload):
    def get_cipher(self, master_key, encryption_iv):
        # Twofish in CBC mode (local pure-python implementation, see .twofish)
        return Twofish.new(master_key, mode=Twofish.MODE_CBC, IV=encryption_iv)
class Decompressed(Adapter):
    """Adapter between gzip-compressed bytes and their decompressed form.

    wbits = 16 + 15 selects gzip framing with the maximum (32 KiB)
    window for both directions.
    """

    # zlib window-bits value: gzip wrapper (16) + max window size (15)
    _WBITS = 16 + 15

    def _decode(self, data, con, path):
        return zlib.decompress(data, self._WBITS)

    def _encode(self, data, con, path):
        compressor = zlib.compressobj(
            6,              # compression level
            zlib.DEFLATED,
            self._WBITS,
            zlib.DEF_MEM_LEVEL,
            0,              # default strategy
        )
        return compressor.compress(data) + compressor.flush()
# -------------------- Cipher Enums --------------------
# payload encryption method
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
CipherId = Mapping(
GreedyBytes,
{'aes256': b'1\xc1\xf2\xe6\xbfqCP\xbeX\x05!j\xfcZ\xff',
'twofish': b'\xadh\xf2\x9fWoK\xb9\xa3j\xd4z\xf9e4l',
'chacha20': b'\xd6\x03\x8a+\x8boL\xb5\xa5$3\x9a1\xdb\xb5\x9a'
}
)
# protected entry encryption method
# https://github.com/dlech/KeePass2.x/blob/149ab342338ffade24b44aaa1fd89f14b64fda09/KeePassLib/Cryptography/CryptoRandomStream.cs#L35
ProtectedStreamId = Mapping(
Int32ul,
{'none': 0,
'arcfourvariant': 1,
'salsa20': 2,
'chacha20': 3,
}
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
Unprotect
|
python
|
def Unprotect(protected_stream_id, protected_stream_key, subcon):
return Switch(
protected_stream_id,
{'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
'salsa20': Salsa20Stream(protected_stream_key, subcon),
'chacha20': ChaCha20Stream(protected_stream_key, subcon),
},
default=subcon
)
|
Select stream cipher based on protected_stream_id
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L231-L241
| null |
from Crypto.Cipher import AES, ChaCha20, Salsa20
from .twofish import Twofish
from Crypto.Util import Padding as CryptoPadding
import hashlib
from construct import (
Adapter, BitStruct, BitsSwapped, Container, Flag, Padding, RepeatUntil,
Subconstruct, Construct, ListContainer, Mapping, GreedyBytes, Int32ul,
Switch
)
from lxml import etree
import base64
import unicodedata
import zlib
import codecs
from io import BytesIO
from collections import OrderedDict
class HeaderChecksumError(Exception):
    """Raised when the KDBX header checksum/HMAC does not verify."""
    pass
class CredentialsError(Exception):
    """Raised for credential failures (wrong password/keyfile) by callers."""
    pass
class PayloadChecksumError(Exception):
    """Raised when the decrypted payload fails its integrity check."""
    pass
class DynamicDict(Adapter):
    """ListContainer <---> Container

    Convenience mapping so we dont have to iterate ListContainer to find
    the right item: parsed list items are re-exposed as a dict keyed on
    ``item[key]``.

    FIXME: the ``lump`` kwarg was added to get around the fact that
    InnerHeader is not truly a dict. All items whose key appears in
    ``lump`` (e.g. 'binary') are collected into one list instead of
    overwriting each other.
    """
    def __init__(self, key, subcon, lump=None):
        """
        Args:
            key: field name used to index each list item.
            subcon: wrapped subconstruct producing/consuming the list.
            lump: iterable of key values whose items are grouped into lists.
        """
        super(DynamicDict, self).__init__(subcon)
        self.key = key
        # `lump=None` instead of a mutable `lump=[]` default avoids the
        # shared-default-argument pitfall; behavior is unchanged.
        self.lump = [] if lump is None else lump
    # map ListContainer to Container
    def _decode(self, obj, context, path):
        d = OrderedDict()
        # pre-seed lumped keys so they exist even with zero items
        for l in self.lump:
            d[l] = ListContainer([])
        for item in obj:
            if item[self.key] in self.lump:
                d[item[self.key]].append(item)
            else:
                d[item[self.key]] = item
        return Container(d)
    # map Container to ListContainer
    def _encode(self, obj, context, path):
        l = []
        for key in obj:
            if key in self.lump:
                l += obj[key]
            else:
                l.append(obj[key])
        return ListContainer(l)
def Reparsed(subcon_out):
    """Factory returning an Adapter class that re-parses raw bytes.

    The returned adapter feeds the bytes produced by its wrapped
    subconstruct through ``subcon_out`` (forwarding the current context
    as keyword arguments) and builds back to bytes on encode.
    """
    class Reparsed(Adapter):
        """Bytes <---> Parsed subcon result
        Takes in bytes and reparses it with subcon_out"""
        def _decode(self, data, con, path):
            return subcon_out.parse(data, **con)
        def _encode(self, obj, con, path):
            return subcon_out.build(obj, **con)
    return Reparsed
# is the payload compressed?
CompressionFlags = BitsSwapped(
BitStruct("compression" / Flag, Padding(8 * 4 - 1))
)
# -------------------- Key Computation --------------------
def aes_kdf(key, rounds, password=None, keyfile=None):
    """Derive the transformed key via the KDBX AES key-derivation function.

    The composite key (from password and/or keyfile) is encrypted
    ``rounds`` times with AES-ECB under ``key``; the result is hashed
    with SHA-256.
    """
    transformed = compute_key_composite(password=password, keyfile=keyfile)
    ecb = AES.new(key, AES.MODE_ECB)
    for _round in range(rounds):
        transformed = ecb.encrypt(transformed)
    return hashlib.sha256(transformed).digest()
def compute_key_composite(password=None, keyfile=None):
    """Compute composite key.

    Used in header verification and payload decryption.

    Args:
        password: optional password string (hashed with SHA-256).
        keyfile: optional path to an XML keyfile, a raw 32-byte key,
            a 64-char hex key, or any file to hash as key material.

    Returns:
        32 bytes: SHA-256 of the concatenated password/keyfile digests.

    Raises:
        IOError: if the keyfile cannot be read or interpreted.
    """
    # hash the password
    if password:
        password_composite = hashlib.sha256(password.encode('utf-8')).digest()
    else:
        password_composite = b''
    # hash the keyfile
    if keyfile:
        # try to read XML keyfile
        try:
            with open(keyfile, 'r') as f:
                tree = etree.parse(f).getroot()
                keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
        # otherwise, try to read plain keyfile
        except (etree.XMLSyntaxError, UnicodeDecodeError):
            try:
                with open(keyfile, 'rb') as f:
                    key = f.read()
                    try:
                        int(key, 16)
                        is_hex = True
                    except ValueError:
                        is_hex = False
                    # if the length is 32 bytes we assume it is the key
                    if len(key) == 32:
                        keyfile_composite = key
                    # if the length is 64 bytes we assume the key is hex encoded
                    elif len(key) == 64 and is_hex:
                        keyfile_composite = codecs.decode(key, 'hex')
                    # anything else may be a file to hash for the key
                    else:
                        keyfile_composite = hashlib.sha256(key).digest()
            # narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); chain the cause for debugging
            except Exception as exc:
                raise IOError('Could not read keyfile') from exc
    else:
        keyfile_composite = b''
    # create composite key from password and keyfile composites
    return hashlib.sha256(password_composite + keyfile_composite).digest()
def compute_master(context):
    """Derive the master key used for payload decryption.

    master_key = SHA-256(header master seed || transformed key)
    """
    seed = context._.header.value.dynamic_header.master_seed.data
    return hashlib.sha256(seed + context.transformed_key).digest()
# -------------------- XML Processing --------------------
class XML(Adapter):
    """Bytes <---> lxml etree

    Parses raw XML bytes into an lxml ElementTree on decode and
    serializes the tree back to bytes on encode.
    """
    def _decode(self, data, con, path):
        return etree.parse(BytesIO(data))
    def _encode(self, tree, con, path):
        return etree.tostring(tree)
class UnprotectedStream(Adapter):
    """lxml etree <---> unprotected lxml etree

    Iterate etree for Protected elements and decrypt using cipher
    provided by get_cipher (implemented by subclasses). Encoding does
    the reverse for Protected='False' elements.
    """
    protected_xpath = '//Value[@Protected=\'True\']'
    unprotected_xpath = '//Value[@Protected=\'False\']'
    def __init__(self, protected_stream_key, subcon):
        # protected_stream_key is evaluated against the construct
        # context at (de)code time
        super(UnprotectedStream, self).__init__(subcon)
        self.protected_stream_key = protected_stream_key
    def _decode(self, tree, con, path):
        # one cipher instance processes all values in document order --
        # the stream cipher position is shared across elements
        cipher = self.get_cipher(self.protected_stream_key(con))
        for elem in tree.xpath(self.protected_xpath):
            if elem.text is not None:
                # base64-decode, decrypt, then strip Unicode control
                # characters (category 'C*') from the plaintext
                elem.text = ''.join(c for c in cipher.decrypt(
                    base64.b64decode(
                        elem.text
                    )
                ).decode('utf-8') if unicodedata.category(c)[0] != "C")
                # NOTE(review): elements whose text is None keep their
                # Protected='True' attribute -- confirm this is intended
                elem.attrib['Protected'] = 'False'
        return tree
    def _encode(self, tree, con, path):
        cipher = self.get_cipher(self.protected_stream_key(con))
        for elem in tree.xpath(self.unprotected_xpath):
            if elem.text is not None:
                # encrypt then base64-encode (result is bytes)
                elem.text = base64.b64encode(
                    cipher.encrypt(
                        elem.text.encode('utf-8')
                    )
                )
                elem.attrib['Protected'] = 'True'
        return tree
class ARCFourVariantStream(UnprotectedStream):
    """Placeholder for the legacy ARCFour-variant inner stream cipher."""
    def get_cipher(self, protected_stream_key):
        # NotImplementedError is the idiomatic signal for an unsupported
        # feature; as a RuntimeError subclass it is still caught by any
        # caller handling Exception, so this stays backward compatible.
        raise NotImplementedError("ARCFourVariant not implemented")
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L115-L119
class Salsa20Stream(UnprotectedStream):
    """Inner-stream cipher: Salsa20 keyed with SHA-256 of the stream key."""
    def get_cipher(self, protected_stream_key):
        # Key is SHA-256(protected_stream_key); the 8-byte nonce is a
        # fixed constant (KeePass convention -- see reference link below).
        key = hashlib.sha256(protected_stream_key).digest()
        return Salsa20.new(
            key=key,
            nonce=b'\xE8\x30\x09\x4B\x97\x20\x5D\x2A'
        )
# https://github.com/dlech/KeePass2.x/blob/97141c02733cd3abf8d4dce1187fa7959ded58a8/KeePassLib/Cryptography/CryptoRandomStream.cs#L103-L111
class ChaCha20Stream(UnprotectedStream):
    """Inner-stream cipher: ChaCha20 with key/nonce from SHA-512 of the stream key."""
    def get_cipher(self, protected_stream_key):
        # The SHA-512 digest is split: bytes [0:32] -> key,
        # bytes [32:44] -> 12-byte nonce; the remainder is discarded.
        key_hash = hashlib.sha512(protected_stream_key).digest()
        key = key_hash[:32]
        nonce = key_hash[32:44]
        return ChaCha20.new(
            key=key,
            nonce=nonce
        )
# -------------------- Payload Encryption/Decompression --------------------
class Concatenated(Adapter):
    """Adapter joining payload data blocks into one byte string and back.

    Decoding concatenates the ``block_data`` of every block; encoding
    splits the payload into 1 MiB chunks (spec default) followed by a
    terminating empty block.
    """

    _CHUNK = 2**20  # 1 MiB per block (spec default)

    def _decode(self, blocks, con, path):
        return b''.join(block.block_data for block in blocks)

    def _encode(self, payload_data, con, path):
        chunk = self._CHUNK
        pieces = [
            Container(block_data=payload_data[offset:offset + chunk])
            for offset in range(0, len(payload_data), chunk)
        ]
        # a zero-length block terminates the sequence
        pieces.append(Container(block_data=b''))
        return pieces
class DecryptedPayload(Adapter):
    """Encrypted Bytes <---> Decrypted Bytes

    Abstract adapter: subclasses provide get_cipher(master_key, iv)
    for the concrete algorithm (AES-256, ChaCha20 or Twofish below).
    """
    def _decode(self, payload_data, con, path):
        # build a fresh cipher per call so CBC/stream state never leaks
        # between parses; the IV comes from the already-parsed outer header
        cipher = self.get_cipher(
            con.master_key,
            con._.header.value.dynamic_header.encryption_iv.data
        )
        payload_data = cipher.decrypt(payload_data)
        return payload_data
    def _encode(self, payload_data, con, path):
        # pad to a 16-byte multiple before encrypting (CryptoPadding
        # default style); note _decode does NOT unpad -- presumably the
        # caller strips padding downstream, TODO confirm
        payload_data = CryptoPadding.pad(payload_data, 16)
        cipher = self.get_cipher(
            con.master_key,
            con._.header.value.dynamic_header.encryption_iv.data
        )
        payload_data = cipher.encrypt(payload_data)
        return payload_data
class AES256Payload(DecryptedPayload):
    def get_cipher(self, master_key, encryption_iv):
        # AES in CBC mode with the header-supplied IV
        return AES.new(master_key, AES.MODE_CBC, encryption_iv)
class ChaCha20Payload(DecryptedPayload):
    def get_cipher(self, master_key, encryption_iv):
        # ChaCha20 stream cipher; the header IV serves as the nonce
        return ChaCha20.new(key=master_key, nonce=encryption_iv)
class TwoFishPayload(DecryptedPayload):
    def get_cipher(self, master_key, encryption_iv):
        # Twofish in CBC mode (local pure-python implementation, see .twofish)
        return Twofish.new(master_key, mode=Twofish.MODE_CBC, IV=encryption_iv)
class Decompressed(Adapter):
    """Adapter between gzip-compressed bytes and their decompressed form.

    wbits = 16 + 15 selects gzip framing with the maximum (32 KiB)
    window for both directions.
    """

    # zlib window-bits value: gzip wrapper (16) + max window size (15)
    _WBITS = 16 + 15

    def _decode(self, data, con, path):
        return zlib.decompress(data, self._WBITS)

    def _encode(self, data, con, path):
        compressor = zlib.compressobj(
            6,              # compression level
            zlib.DEFLATED,
            self._WBITS,
            zlib.DEF_MEM_LEVEL,
            0,              # default strategy
        )
        return compressor.compress(data) + compressor.flush()
# -------------------- Cipher Enums --------------------
# payload encryption method
# https://github.com/keepassxreboot/keepassxc/blob/8324d03f0a015e62b6182843b4478226a5197090/src/format/KeePass2.cpp#L24-L26
CipherId = Mapping(
GreedyBytes,
{'aes256': b'1\xc1\xf2\xe6\xbfqCP\xbeX\x05!j\xfcZ\xff',
'twofish': b'\xadh\xf2\x9fWoK\xb9\xa3j\xd4z\xf9e4l',
'chacha20': b'\xd6\x03\x8a+\x8boL\xb5\xa5$3\x9a1\xdb\xb5\x9a'
}
)
# protected entry encryption method
# https://github.com/dlech/KeePass2.x/blob/149ab342338ffade24b44aaa1fd89f14b64fda09/KeePassLib/Cryptography/CryptoRandomStream.cs#L35
ProtectedStreamId = Mapping(
Int32ul,
{'none': 0,
'arcfourvariant': 1,
'salsa20': 2,
'chacha20': 3,
}
)
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
BlockCipher.encrypt
|
python
|
def encrypt(self,plaintext,n=''):
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('e',None)
# makes sure you don't encrypt with a cipher that has started decrypting
self.ed = 'e'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(plaintext,'e',n)
else:
return self.chain.update(plaintext,'e')
|
Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L114-L159
|
[
"def update(self, data, ed):\n \"\"\"Processes the given ciphertext/plaintext\n\n Inputs:\n data: raw string of any length\n ed: 'e' for encryption, 'd' for decryption\n Output:\n processed raw string block(s), if any\n\n When the supplied data is not a multiple of the blocksize\n of the cipher, then the remaining input data will be cached.\n The next time the update function is called with some data,\n the new data will be concatenated to the cache and then\n cache+data will be processed and full blocks will be outputted.\n \"\"\"\n if ed == 'e':\n encrypted_blocks = b''\n self.cache += data\n if len(self.cache) < self.blocksize:\n return b''\n for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):\n self.IV = self.codebook.encrypt(strxor(self.cache[i:i+self.blocksize],self.IV))\n encrypted_blocks += self.IV\n self.cache = self.cache[i+self.blocksize:]\n return encrypted_blocks\n else:\n decrypted_blocks = b''\n self.cache += data\n if len(self.cache) < self.blocksize:\n return b''\n for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):\n plaintext = strxor(self.IV,self.codebook.decrypt(self.cache[i:i + self.blocksize]))\n self.IV = self.cache[i:i + self.blocksize]\n decrypted_blocks+=plaintext\n self.cache = self.cache[i+self.blocksize:]\n return decrypted_blocks\n"
] |
class BlockCipher():
    """ Base class for all blockciphers

    Subclasses must set ``self.blocksize`` (and usually a key validator
    ``keylen_valid``) before calling this ``__init__``.
    """
    MODE_ECB = MODE_ECB
    MODE_CBC = MODE_CBC
    MODE_CFB = MODE_CFB
    MODE_OFB = MODE_OFB
    MODE_CTR = MODE_CTR
    MODE_XTS = MODE_XTS
    MODE_CMAC = MODE_CMAC
    key_error_message = "Wrong key size"  # should be overwritten in child classes
    def __init__(self, key, mode, IV, counter, cipher_module, segment_size, args=None):
        # Cipher classes inheriting from this one take care of:
        #   self.blocksize
        #   self.cipher
        self.key = key
        self.mode = mode
        self.cache = b''
        self.ed = None  # 'e' once encrypting, 'd' once decrypting
        if args is None:  # avoid a shared mutable default dict
            args = {}
        if 'keylen_valid' in dir(self):  # wrappers for pycrypto functions don't have this function
            if not self.keylen_valid(key) and type(key) is not tuple:
                raise ValueError(self.key_error_message)
        if IV is None:
            self.IV = b'\x00' * self.blocksize
        else:
            self.IV = IV
        if mode != MODE_XTS:
            self.cipher = cipher_module(self.key, **args)
        if mode == MODE_ECB:
            self.chain = ECB(self.cipher, self.blocksize)
        elif mode == MODE_CBC:
            if len(self.IV) != self.blocksize:
                raise Exception("the IV length should be %i bytes" % self.blocksize)
            self.chain = CBC(self.cipher, self.blocksize, self.IV)
        elif mode == MODE_CFB:
            if len(self.IV) != self.blocksize:
                raise Exception("the IV length should be %i bytes" % self.blocksize)
            if segment_size is None:
                raise ValueError("segment size must be defined explicitely for CFB mode")
            if segment_size > self.blocksize * 8 or segment_size % 8 != 0:
                # current CFB implementation doesn't support bit level access
                # => segment_size should be a multiple of bytes
                raise ValueError("segment size should be a multiple of 8 bits between 8 and %i" % (self.blocksize * 8))
            self.chain = CFB(self.cipher, self.blocksize, self.IV, segment_size)
        elif mode == MODE_OFB:
            if len(self.IV) != self.blocksize:
                raise ValueError("the IV length should be %i bytes" % self.blocksize)
            self.chain = OFB(self.cipher, self.blocksize, self.IV)
        elif mode == MODE_CTR:
            if (counter is None) or not callable(counter):
                raise Exception("Supply a valid counter object for the CTR mode")
            self.chain = CTR(self.cipher, self.blocksize, counter)
        elif mode == MODE_XTS:
            if self.blocksize != 16:
                raise Exception('XTS only works with blockcipher that have a 128-bit blocksize')
            if not(type(key) == tuple and len(key) == 2):
                raise Exception('Supply two keys as a tuple when using XTS')
            if 'keylen_valid' in dir(self):  # wrappers for pycrypto functions don't have this function
                if not self.keylen_valid(key[0]) or not self.keylen_valid(key[1]):
                    raise ValueError(self.key_error_message)
            self.cipher = cipher_module(self.key[0], **args)
            self.cipher2 = cipher_module(self.key[1], **args)
            self.chain = XTS(self.cipher, self.cipher2)
        elif mode == MODE_CMAC:
            if self.blocksize not in (8, 16):
                raise Exception('CMAC only works with blockcipher that have a 64 or 128-bit blocksize')
            self.chain = CMAC(self.cipher, self.blocksize, self.IV)
        else:
            raise Exception("Unknown chaining mode!")
    def decrypt(self, ciphertext, n=''):
        """Decrypt some ciphertext

        ciphertext = a string of binary data
        n          = the 'tweak' value when the chaining mode is XTS

        ECB/CBC cache any input that is not a multiple of the blocksize;
        CFB/OFB/CTR act as stream ciphers and decrypt everything at once;
        XTS needs whole blocks plus the tweak; CMAC does not support
        decryption.
        """
        # makes sure you don't decrypt with a cipher that has started encrypting
        self.ed = 'd'
        if self.mode == MODE_XTS:
            # data sequence number (or 'tweak') has to be provided when in XTS mode
            return self.chain.update(ciphertext, 'd', n)
        else:
            return self.chain.update(ciphertext, 'd')
    def final(self, style='pkcs7'):
        # TODO: after calling final, reset the IV? so the cipher is as good as new?
        """Finalizes the encryption by padding the cache

        For ECB, CBC: the remaining bytes in the cache will be padded and
                      encrypted.
        For OFB, CFB, CTR: an encrypted padding will be returned, making the
                      total output a multiple of the blocksize.
        If the cipher has been used for decryption, final won't do anything;
        you have to unpad manually.
        """
        assert self.mode not in (MODE_XTS, MODE_CMAC)  # finalizing (=padding) doesn't make sense when in XTS or CMAC mode
        # BUGFIX: self.ed holds the text string 'e' (set by encrypt()),
        # not b'e' -- the old bytes comparison never matched, making
        # final() a silent no-op so the last partial block was never
        # padded/encrypted. Likewise chain.update expects the text 'e'.
        if self.ed == 'e':
            # when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block
            if self.mode in (MODE_OFB, MODE_CFB, MODE_CTR):
                dummy = b'0' * (self.chain.totalbytes % self.blocksize)  # dummy string used to get a valid padding
            else:  # ECB, CBC
                dummy = self.chain.cache
            pdata = pad(dummy, self.blocksize, style=style)[len(dummy):]
            # supply the padding to the update function => chain cache will be "cache+padding"
            return self.chain.update(pdata, 'e')
        else:
            # final function doesn't make sense when decrypting => padding should be removed manually
            pass
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
BlockCipher.decrypt
|
python
|
def decrypt(self,ciphertext,n=''):
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('d',None)
# makes sure you don't decrypt with a cipher that has started encrypting
self.ed = 'd'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(ciphertext,'d',n)
else:
return self.chain.update(ciphertext,'d')
|
Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L161-L204
|
[
"def update(self, data, ed):\n \"\"\"Processes the given ciphertext/plaintext\n\n Inputs:\n data: raw string of any length\n ed: 'e' for encryption, 'd' for decryption\n Output:\n processed raw string block(s), if any\n\n When the supplied data is not a multiple of the blocksize\n of the cipher, then the remaining input data will be cached.\n The next time the update function is called with some data,\n the new data will be concatenated to the cache and then\n cache+data will be processed and full blocks will be outputted.\n \"\"\"\n if ed == 'e':\n encrypted_blocks = b''\n self.cache += data\n if len(self.cache) < self.blocksize:\n return b''\n for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):\n self.IV = self.codebook.encrypt(strxor(self.cache[i:i+self.blocksize],self.IV))\n encrypted_blocks += self.IV\n self.cache = self.cache[i+self.blocksize:]\n return encrypted_blocks\n else:\n decrypted_blocks = b''\n self.cache += data\n if len(self.cache) < self.blocksize:\n return b''\n for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):\n plaintext = strxor(self.IV,self.codebook.decrypt(self.cache[i:i + self.blocksize]))\n self.IV = self.cache[i:i + self.blocksize]\n decrypted_blocks+=plaintext\n self.cache = self.cache[i+self.blocksize:]\n return decrypted_blocks\n"
] |
class BlockCipher():
    """ Base class for all blockciphers

    Subclasses must set ``self.blocksize`` (and usually a key validator
    ``keylen_valid``) before calling this ``__init__``.
    """
    MODE_ECB = MODE_ECB
    MODE_CBC = MODE_CBC
    MODE_CFB = MODE_CFB
    MODE_OFB = MODE_OFB
    MODE_CTR = MODE_CTR
    MODE_XTS = MODE_XTS
    MODE_CMAC = MODE_CMAC
    key_error_message = "Wrong key size"  # should be overwritten in child classes
    def __init__(self, key, mode, IV, counter, cipher_module, segment_size, args=None):
        # Cipher classes inheriting from this one take care of:
        #   self.blocksize
        #   self.cipher
        self.key = key
        self.mode = mode
        self.cache = b''
        self.ed = None  # 'e' once encrypting, 'd' once decrypting
        if args is None:  # avoid a shared mutable default dict
            args = {}
        if 'keylen_valid' in dir(self):  # wrappers for pycrypto functions don't have this function
            if not self.keylen_valid(key) and type(key) is not tuple:
                raise ValueError(self.key_error_message)
        if IV is None:
            self.IV = b'\x00' * self.blocksize
        else:
            self.IV = IV
        if mode != MODE_XTS:
            self.cipher = cipher_module(self.key, **args)
        if mode == MODE_ECB:
            self.chain = ECB(self.cipher, self.blocksize)
        elif mode == MODE_CBC:
            if len(self.IV) != self.blocksize:
                raise Exception("the IV length should be %i bytes" % self.blocksize)
            self.chain = CBC(self.cipher, self.blocksize, self.IV)
        elif mode == MODE_CFB:
            if len(self.IV) != self.blocksize:
                raise Exception("the IV length should be %i bytes" % self.blocksize)
            if segment_size is None:
                raise ValueError("segment size must be defined explicitely for CFB mode")
            if segment_size > self.blocksize * 8 or segment_size % 8 != 0:
                # current CFB implementation doesn't support bit level access
                # => segment_size should be a multiple of bytes
                raise ValueError("segment size should be a multiple of 8 bits between 8 and %i" % (self.blocksize * 8))
            self.chain = CFB(self.cipher, self.blocksize, self.IV, segment_size)
        elif mode == MODE_OFB:
            if len(self.IV) != self.blocksize:
                raise ValueError("the IV length should be %i bytes" % self.blocksize)
            self.chain = OFB(self.cipher, self.blocksize, self.IV)
        elif mode == MODE_CTR:
            if (counter is None) or not callable(counter):
                raise Exception("Supply a valid counter object for the CTR mode")
            self.chain = CTR(self.cipher, self.blocksize, counter)
        elif mode == MODE_XTS:
            if self.blocksize != 16:
                raise Exception('XTS only works with blockcipher that have a 128-bit blocksize')
            if not(type(key) == tuple and len(key) == 2):
                raise Exception('Supply two keys as a tuple when using XTS')
            if 'keylen_valid' in dir(self):  # wrappers for pycrypto functions don't have this function
                if not self.keylen_valid(key[0]) or not self.keylen_valid(key[1]):
                    raise ValueError(self.key_error_message)
            self.cipher = cipher_module(self.key[0], **args)
            self.cipher2 = cipher_module(self.key[1], **args)
            self.chain = XTS(self.cipher, self.cipher2)
        elif mode == MODE_CMAC:
            if self.blocksize not in (8, 16):
                raise Exception('CMAC only works with blockcipher that have a 64 or 128-bit blocksize')
            self.chain = CMAC(self.cipher, self.blocksize, self.IV)
        else:
            raise Exception("Unknown chaining mode!")
    def encrypt(self, plaintext, n=''):
        """Encrypt some plaintext

        plaintext = a string of binary data
        n         = the 'tweak' value when the chaining mode is XTS

        ECB/CBC cache any input that is not a multiple of the blocksize;
        CFB/OFB/CTR act as stream ciphers and encrypt everything at once;
        XTS needs whole blocks plus the tweak; CMAC accumulates a hash
        whose length equals the cipher blocksize.
        """
        # makes sure you don't encrypt with a cipher that has started decrypting
        self.ed = 'e'
        if self.mode == MODE_XTS:
            # data sequence number (or 'tweak') has to be provided when in XTS mode
            return self.chain.update(plaintext, 'e', n)
        else:
            return self.chain.update(plaintext, 'e')
    def final(self, style='pkcs7'):
        # TODO: after calling final, reset the IV? so the cipher is as good as new?
        """Finalizes the encryption by padding the cache

        For ECB, CBC: the remaining bytes in the cache will be padded and
                      encrypted.
        For OFB, CFB, CTR: an encrypted padding will be returned, making the
                      total output a multiple of the blocksize.
        If the cipher has been used for decryption, final won't do anything;
        you have to unpad manually.
        """
        assert self.mode not in (MODE_XTS, MODE_CMAC)  # finalizing (=padding) doesn't make sense when in XTS or CMAC mode
        # BUGFIX: self.ed holds the text string 'e' (set by encrypt()),
        # not b'e' -- the old bytes comparison never matched, making
        # final() a silent no-op so the last partial block was never
        # padded/encrypted. Likewise chain.update expects the text 'e'.
        if self.ed == 'e':
            # when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block
            if self.mode in (MODE_OFB, MODE_CFB, MODE_CTR):
                dummy = b'0' * (self.chain.totalbytes % self.blocksize)  # dummy string used to get a valid padding
            else:  # ECB, CBC
                dummy = self.chain.cache
            pdata = pad(dummy, self.blocksize, style=style)[len(dummy):]
            # supply the padding to the update function => chain cache will be "cache+padding"
            return self.chain.update(pdata, 'e')
        else:
            # final function doesn't make sense when decrypting => padding should be removed manually
            pass
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
BlockCipher.final
|
python
|
def final(self,style='pkcs7'):
# TODO: after calling final, reset the IV? so the cipher is as good as new?
assert self.mode not in (MODE_XTS, MODE_CMAC) # finalizing (=padding) doesn't make sense when in XTS or CMAC mode
if self.ed == b'e':
# when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block
if self.mode in (MODE_OFB,MODE_CFB,MODE_CTR):
dummy = b'0'*(self.chain.totalbytes%self.blocksize) # a dummy string that will be used to get a valid padding
else: #ECB, CBC
dummy = self.chain.cache
pdata = pad(dummy,self.blocksize,style=style)[len(dummy):]
#~ pad = padfct(dummy,padding.PAD,self.blocksize)[len(dummy):] # construct the padding necessary
return self.chain.update(pdata,b'e') # supply the padding to the update function => chain cache will be "cache+padding"
else:
# final function doesn't make sense when decrypting => padding should be removed manually
pass
|
Finalizes the encryption by padding the cache
padfct = padding function
import from CryptoPlus.Util.padding
For ECB, CBC: the remaining bytes in the cache will be padded and
encrypted.
For OFB,CFB, CTR: an encrypted padding will be returned, making the
total outputed bytes since construction of the cipher
a multiple of the blocksize of that cipher.
If the cipher has been used for decryption, the final function won't do
anything. You have to manually unpad if necessary.
After finalization, the chain can still be used but the IV, counter etc
aren't reset but just continue as they were after the last step (finalization step).
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L206-L237
|
[
"def update(self, data, ed):\n \"\"\"Processes the given ciphertext/plaintext\n\n Inputs:\n data: raw string of any length\n ed: 'e' for encryption, 'd' for decryption\n Output:\n processed raw string block(s), if any\n\n When the supplied data is not a multiple of the blocksize\n of the cipher, then the remaining input data will be cached.\n The next time the update function is called with some data,\n the new data will be concatenated to the cache and then\n cache+data will be processed and full blocks will be outputted.\n \"\"\"\n if ed == 'e':\n encrypted_blocks = b''\n self.cache += data\n if len(self.cache) < self.blocksize:\n return b''\n for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):\n self.IV = self.codebook.encrypt(strxor(self.cache[i:i+self.blocksize],self.IV))\n encrypted_blocks += self.IV\n self.cache = self.cache[i+self.blocksize:]\n return encrypted_blocks\n else:\n decrypted_blocks = b''\n self.cache += data\n if len(self.cache) < self.blocksize:\n return b''\n for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):\n plaintext = strxor(self.IV,self.codebook.decrypt(self.cache[i:i + self.blocksize]))\n self.IV = self.cache[i:i + self.blocksize]\n decrypted_blocks+=plaintext\n self.cache = self.cache[i+self.blocksize:]\n return decrypted_blocks\n"
] |
class BlockCipher():
""" Base class for all blockciphers
"""
MODE_ECB = MODE_ECB
MODE_CBC = MODE_CBC
MODE_CFB = MODE_CFB
MODE_OFB = MODE_OFB
MODE_CTR = MODE_CTR
MODE_XTS = MODE_XTS
MODE_CMAC = MODE_CMAC
key_error_message = "Wrong key size" #should be overwritten in child classes
def __init__(self,key,mode,IV,counter,cipher_module,segment_size,args={}):
# Cipher classes inheriting from this one take care of:
# self.blocksize
# self.cipher
self.key = key
self.mode = mode
self.cache = b''
self.ed = None
if 'keylen_valid' in dir(self): #wrappers for pycrypto functions don't have this function
if not self.keylen_valid(key) and type(key) is not tuple:
raise ValueError(self.key_error_message)
if IV == None:
self.IV = b'\x00'*self.blocksize
else:
self.IV = IV
if mode != MODE_XTS:
self.cipher = cipher_module(self.key,**args)
if mode == MODE_ECB:
self.chain = ECB(self.cipher, self.blocksize)
elif mode == MODE_CBC:
if len(self.IV) != self.blocksize:
raise Exception("the IV length should be %i bytes"%self.blocksize)
self.chain = CBC(self.cipher, self.blocksize,self.IV)
elif mode == MODE_CFB:
if len(self.IV) != self.blocksize:
raise Exception("the IV length should be %i bytes"%self.blocksize)
if segment_size == None:
raise ValueError("segment size must be defined explicitely for CFB mode")
if segment_size > self.blocksize*8 or segment_size%8 != 0:
# current CFB implementation doesn't support bit level acces => segment_size should be multiple of bytes
raise ValueError("segment size should be a multiple of 8 bits between 8 and %i"%(self.blocksize*8))
self.chain = CFB(self.cipher, self.blocksize,self.IV,segment_size)
elif mode == MODE_OFB:
if len(self.IV) != self.blocksize:
raise ValueError("the IV length should be %i bytes"%self.blocksize)
self.chain = OFB(self.cipher, self.blocksize,self.IV)
elif mode == MODE_CTR:
if (counter == None) or not callable(counter):
raise Exception("Supply a valid counter object for the CTR mode")
self.chain = CTR(self.cipher,self.blocksize,counter)
elif mode == MODE_XTS:
if self.blocksize != 16:
raise Exception('XTS only works with blockcipher that have a 128-bit blocksize')
if not(type(key) == tuple and len(key) == 2):
raise Exception('Supply two keys as a tuple when using XTS')
if 'keylen_valid' in dir(self): #wrappers for pycrypto functions don't have this function
if not self.keylen_valid(key[0]) or not self.keylen_valid(key[1]):
raise ValueError(self.key_error_message)
self.cipher = cipher_module(self.key[0],**args)
self.cipher2 = cipher_module(self.key[1],**args)
self.chain = XTS(self.cipher, self.cipher2)
elif mode == MODE_CMAC:
if self.blocksize not in (8,16):
raise Exception('CMAC only works with blockcipher that have a 64 or 128-bit blocksize')
self.chain = CMAC(self.cipher,self.blocksize,self.IV)
else:
raise Exception("Unknown chaining mode!")
def encrypt(self,plaintext,n=''):
"""Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('e',None)
# makes sure you don't encrypt with a cipher that has started decrypting
self.ed = 'e'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(plaintext,'e',n)
else:
return self.chain.update(plaintext,'e')
def decrypt(self,ciphertext,n=''):
"""Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('d',None)
# makes sure you don't decrypt with a cipher that has started encrypting
self.ed = 'd'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(ciphertext,'d',n)
else:
return self.chain.update(ciphertext,'d')
|
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/twofish.py
|
CBC.update
|
python
|
def update(self, data, ed):
if ed == 'e':
encrypted_blocks = b''
self.cache += data
if len(self.cache) < self.blocksize:
return b''
for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):
self.IV = self.codebook.encrypt(strxor(self.cache[i:i+self.blocksize],self.IV))
encrypted_blocks += self.IV
self.cache = self.cache[i+self.blocksize:]
return encrypted_blocks
else:
decrypted_blocks = b''
self.cache += data
if len(self.cache) < self.blocksize:
return b''
for i in range(0, len(self.cache)-self.blocksize+1, self.blocksize):
plaintext = strxor(self.IV,self.codebook.decrypt(self.cache[i:i + self.blocksize]))
self.IV = self.cache[i:i + self.blocksize]
decrypted_blocks+=plaintext
self.cache = self.cache[i+self.blocksize:]
return decrypted_blocks
|
Processes the given ciphertext/plaintext
Inputs:
data: raw string of any length
ed: 'e' for encryption, 'd' for decryption
Output:
processed raw string block(s), if any
When the supplied data is not a multiple of the blocksize
of the cipher, then the remaining input data will be cached.
The next time the update function is called with some data,
the new data will be concatenated to the cache and then
cache+data will be processed and full blocks will be outputted.
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L249-L284
| null |
class CBC:
"""CBC chaining mode
"""
def __init__(self, codebook, blocksize, IV):
self.IV = IV
self.cache = b''
self.codebook = codebook
self.blocksize = blocksize
|
pschmitt/pykeepass
|
pykeepass/baseelement.py
|
BaseElement._datetime_to_utc
|
python
|
def _datetime_to_utc(self, dt):
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz.gettz())
return dt.astimezone(tz.gettz('UTC'))
|
Convert naive datetimes to UTC
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L92-L97
| null |
class BaseElement(object):
"""Entry and Group inherit from this class"""
def __init__(self, element=None, kp=None, icon=None, expires=False,
expiry_time=None):
self._element = element
self._element.append(
E.UUID(base64.b64encode(uuid.uuid1().bytes).decode('utf-8'))
)
if icon:
self._element.append(E.IconID(icon))
current_time_str = self._encode_time(datetime.utcnow())
if expiry_time:
expiry_time_str = self._encode_time(
self._datetime_to_utc(expiry_time)
)
else:
expiry_time_str = self._encode_time(datetime.utcnow())
self._element.append(
E.Times(
E.CreationTime(current_time_str),
E.LastModificationTime(current_time_str),
E.LastAccessTime(current_time_str),
E.ExpiryTime(expiry_time_str),
E.Expires(str(expires if expires is not None else False)),
E.UsageCount(str(0)),
E.LocationChanged(current_time_str)
)
)
def _xpath(self, xpath, **kwargs):
return self._kp._xpath(xpath, tree=self._element, **kwargs)
def _get_subelement_text(self, tag):
v = self._element.find(tag)
if v is not None:
return v.text
def _set_subelement_text(self, tag, value):
v = self._element.find(tag)
if v is not None:
self._element.remove(v)
self._element.append(getattr(E, tag)(value))
@property
def group(self):
return self._xpath(
'(ancestor::Group)[last()]',
first=True,
cast=True
)
parentgroup = group
def dump_xml(self, pretty_print=False):
return etree.tostring(self._element, pretty_print=pretty_print)
@property
def uuid(self):
return self._get_subelement_text('UUID')
@uuid.setter
def uuid(self, value):
return self._set_subelement_text('UUID', value)
@property
def icon(self):
return self._get_subelement_text('IconID')
@icon.setter
def icon(self, value):
return self._set_subelement_text('IconID', value)
@property
def _path(self):
return self._element.getroottree().getpath(self._element)
def _encode_time(self, value):
"""Convert datetime to base64 or plaintext string"""
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat()
def _decode_time(self, text):
"""Convert base64 time or plaintext time to datetime"""
if self._kp.version >= (4, 0):
# decode KDBX4 date from b64 format
try:
return (
datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
timedelta(
seconds = struct.unpack('<Q', base64.b64decode(text))[0]
)
)
except BinasciiError:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
else:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
def _get_times_property(self, prop):
times = self._element.find('Times')
if times is not None:
prop = times.find(prop)
if prop is not None:
return self._decode_time(prop.text)
def _set_times_property(self, prop, value):
times = self._element.find('Times')
if times is not None:
prop = times.find(prop)
if prop is not None:
prop.text = self._encode_time(value)
@property
def expires(self):
times = self._element.find('Times')
d = times.find('Expires').text
if d is not None:
return d == 'True'
@expires.setter
def expires(self, value):
d = self._element.find('Times').find('Expires')
d.text = 'True' if value else 'False'
@property
def expired(self):
if self.expires:
return self._datetime_to_utc(datetime.utcnow()) > self._datetime_to_utc(self.expiry_time)
return False
@property
def expiry_time(self):
return self._get_times_property('ExpiryTime')
@expiry_time.setter
def expiry_time(self, value):
self._set_times_property('ExpiryTime', value)
@property
def ctime(self):
return self._get_times_property('CreationTime')
@ctime.setter
def ctime(self, value):
self._set_times_property('CreationTime', value)
@property
def atime(self):
return self._get_times_property('LastAccessTime')
@atime.setter
def atime(self, value):
self._set_times_property('LastAccessTime', value)
@property
def mtime(self):
return self._get_times_property('LastModificationTime')
@mtime.setter
def mtime(self, value):
self._set_times_property('LastModificationTime', value)
def delete(self):
self._element.getparent().remove(self._element)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if hasattr(other, 'uuid'):
return self.uuid == other.uuid
else:
return False
|
pschmitt/pykeepass
|
pykeepass/baseelement.py
|
BaseElement._encode_time
|
python
|
def _encode_time(self, value):
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat()
|
Convert datetime to base64 or plaintext string
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L99-L118
|
[
"def _datetime_to_utc(self, dt):\n \"\"\"Convert naive datetimes to UTC\"\"\"\n\n if not dt.tzinfo:\n dt = dt.replace(tzinfo=tz.gettz())\n return dt.astimezone(tz.gettz('UTC'))\n"
] |
class BaseElement(object):
"""Entry and Group inherit from this class"""
def __init__(self, element=None, kp=None, icon=None, expires=False,
expiry_time=None):
self._element = element
self._element.append(
E.UUID(base64.b64encode(uuid.uuid1().bytes).decode('utf-8'))
)
if icon:
self._element.append(E.IconID(icon))
current_time_str = self._encode_time(datetime.utcnow())
if expiry_time:
expiry_time_str = self._encode_time(
self._datetime_to_utc(expiry_time)
)
else:
expiry_time_str = self._encode_time(datetime.utcnow())
self._element.append(
E.Times(
E.CreationTime(current_time_str),
E.LastModificationTime(current_time_str),
E.LastAccessTime(current_time_str),
E.ExpiryTime(expiry_time_str),
E.Expires(str(expires if expires is not None else False)),
E.UsageCount(str(0)),
E.LocationChanged(current_time_str)
)
)
def _xpath(self, xpath, **kwargs):
return self._kp._xpath(xpath, tree=self._element, **kwargs)
def _get_subelement_text(self, tag):
v = self._element.find(tag)
if v is not None:
return v.text
def _set_subelement_text(self, tag, value):
v = self._element.find(tag)
if v is not None:
self._element.remove(v)
self._element.append(getattr(E, tag)(value))
@property
def group(self):
return self._xpath(
'(ancestor::Group)[last()]',
first=True,
cast=True
)
parentgroup = group
def dump_xml(self, pretty_print=False):
return etree.tostring(self._element, pretty_print=pretty_print)
@property
def uuid(self):
return self._get_subelement_text('UUID')
@uuid.setter
def uuid(self, value):
return self._set_subelement_text('UUID', value)
@property
def icon(self):
return self._get_subelement_text('IconID')
@icon.setter
def icon(self, value):
return self._set_subelement_text('IconID', value)
@property
def _path(self):
return self._element.getroottree().getpath(self._element)
def _datetime_to_utc(self, dt):
"""Convert naive datetimes to UTC"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz.gettz())
return dt.astimezone(tz.gettz('UTC'))
def _decode_time(self, text):
"""Convert base64 time or plaintext time to datetime"""
if self._kp.version >= (4, 0):
# decode KDBX4 date from b64 format
try:
return (
datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
timedelta(
seconds = struct.unpack('<Q', base64.b64decode(text))[0]
)
)
except BinasciiError:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
else:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
def _get_times_property(self, prop):
times = self._element.find('Times')
if times is not None:
prop = times.find(prop)
if prop is not None:
return self._decode_time(prop.text)
def _set_times_property(self, prop, value):
times = self._element.find('Times')
if times is not None:
prop = times.find(prop)
if prop is not None:
prop.text = self._encode_time(value)
@property
def expires(self):
times = self._element.find('Times')
d = times.find('Expires').text
if d is not None:
return d == 'True'
@expires.setter
def expires(self, value):
d = self._element.find('Times').find('Expires')
d.text = 'True' if value else 'False'
@property
def expired(self):
if self.expires:
return self._datetime_to_utc(datetime.utcnow()) > self._datetime_to_utc(self.expiry_time)
return False
@property
def expiry_time(self):
return self._get_times_property('ExpiryTime')
@expiry_time.setter
def expiry_time(self, value):
self._set_times_property('ExpiryTime', value)
@property
def ctime(self):
return self._get_times_property('CreationTime')
@ctime.setter
def ctime(self, value):
self._set_times_property('CreationTime', value)
@property
def atime(self):
return self._get_times_property('LastAccessTime')
@atime.setter
def atime(self, value):
self._set_times_property('LastAccessTime', value)
@property
def mtime(self):
return self._get_times_property('LastModificationTime')
@mtime.setter
def mtime(self, value):
self._set_times_property('LastModificationTime', value)
def delete(self):
self._element.getparent().remove(self._element)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if hasattr(other, 'uuid'):
return self.uuid == other.uuid
else:
return False
|
pschmitt/pykeepass
|
pykeepass/baseelement.py
|
BaseElement._decode_time
|
python
|
def _decode_time(self, text):
if self._kp.version >= (4, 0):
# decode KDBX4 date from b64 format
try:
return (
datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
timedelta(
seconds = struct.unpack('<Q', base64.b64decode(text))[0]
)
)
except BinasciiError:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
else:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
|
Convert base64 time or plaintext time to datetime
|
train
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L120-L141
| null |
class BaseElement(object):
"""Entry and Group inherit from this class"""
def __init__(self, element=None, kp=None, icon=None, expires=False,
expiry_time=None):
self._element = element
self._element.append(
E.UUID(base64.b64encode(uuid.uuid1().bytes).decode('utf-8'))
)
if icon:
self._element.append(E.IconID(icon))
current_time_str = self._encode_time(datetime.utcnow())
if expiry_time:
expiry_time_str = self._encode_time(
self._datetime_to_utc(expiry_time)
)
else:
expiry_time_str = self._encode_time(datetime.utcnow())
self._element.append(
E.Times(
E.CreationTime(current_time_str),
E.LastModificationTime(current_time_str),
E.LastAccessTime(current_time_str),
E.ExpiryTime(expiry_time_str),
E.Expires(str(expires if expires is not None else False)),
E.UsageCount(str(0)),
E.LocationChanged(current_time_str)
)
)
def _xpath(self, xpath, **kwargs):
return self._kp._xpath(xpath, tree=self._element, **kwargs)
def _get_subelement_text(self, tag):
v = self._element.find(tag)
if v is not None:
return v.text
def _set_subelement_text(self, tag, value):
v = self._element.find(tag)
if v is not None:
self._element.remove(v)
self._element.append(getattr(E, tag)(value))
@property
def group(self):
return self._xpath(
'(ancestor::Group)[last()]',
first=True,
cast=True
)
parentgroup = group
def dump_xml(self, pretty_print=False):
return etree.tostring(self._element, pretty_print=pretty_print)
@property
def uuid(self):
return self._get_subelement_text('UUID')
@uuid.setter
def uuid(self, value):
return self._set_subelement_text('UUID', value)
@property
def icon(self):
return self._get_subelement_text('IconID')
@icon.setter
def icon(self, value):
return self._set_subelement_text('IconID', value)
@property
def _path(self):
return self._element.getroottree().getpath(self._element)
def _datetime_to_utc(self, dt):
"""Convert naive datetimes to UTC"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz.gettz())
return dt.astimezone(tz.gettz('UTC'))
def _encode_time(self, value):
"""Convert datetime to base64 or plaintext string"""
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat()
def _get_times_property(self, prop):
times = self._element.find('Times')
if times is not None:
prop = times.find(prop)
if prop is not None:
return self._decode_time(prop.text)
def _set_times_property(self, prop, value):
times = self._element.find('Times')
if times is not None:
prop = times.find(prop)
if prop is not None:
prop.text = self._encode_time(value)
@property
def expires(self):
times = self._element.find('Times')
d = times.find('Expires').text
if d is not None:
return d == 'True'
@expires.setter
def expires(self, value):
d = self._element.find('Times').find('Expires')
d.text = 'True' if value else 'False'
@property
def expired(self):
if self.expires:
return self._datetime_to_utc(datetime.utcnow()) > self._datetime_to_utc(self.expiry_time)
return False
@property
def expiry_time(self):
return self._get_times_property('ExpiryTime')
@expiry_time.setter
def expiry_time(self, value):
self._set_times_property('ExpiryTime', value)
@property
def ctime(self):
return self._get_times_property('CreationTime')
@ctime.setter
def ctime(self, value):
self._set_times_property('CreationTime', value)
@property
def atime(self):
return self._get_times_property('LastAccessTime')
@atime.setter
def atime(self, value):
self._set_times_property('LastAccessTime', value)
@property
def mtime(self):
return self._get_times_property('LastModificationTime')
@mtime.setter
def mtime(self, value):
self._set_times_property('LastModificationTime', value)
def delete(self):
self._element.getparent().remove(self._element)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if hasattr(other, 'uuid'):
return self.uuid == other.uuid
else:
return False
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image.cleanup
|
python
|
def cleanup(self):
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
|
Cleanup the temporary directory
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L82-L86
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._validate_number_of_layers
|
python
|
def _validate_number_of_layers(self, number_of_layers):
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
|
Makes sure that the specified number of layers to squash
is a valid number
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L125-L140
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
    """Run the full squash sequence: prepare, squash, post-process.

    Returns whatever the format-specific _squash() implementation
    produced.
    """
    self._before_squashing()
    result = self._squash()
    self._after_squashing()
    return result
def _squash(self):
pass
def cleanup(self):
    """Remove the temporary working directory, ignoring any errors."""
    workdir = self.tmp_dir
    self.log.debug("Cleaning up %s temporary directory" % workdir)
    shutil.rmtree(workdir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
    """Resolve *layer* (a name or id) to the image id to squash from.

    Returns None for the Docker '<missing>' placeholder so the caller
    can try to locate the layer later.

    Raises:
        SquashError: when the layer cannot be inspected or does not
            belong to the image being squashed.
    """
    if layer == "<missing>":
        self.log.warn(
            "You try to squash from layer that does not have it's own ID, we'll try to find it later")
        return None
    try:
        squash_id = self.docker.inspect_image(layer)['Id']
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit still propagate instead of being swallowed.
        raise SquashError(
            "Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
    if squash_id not in self.old_image_layers:
        raise SquashError(
            "Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
    self.log.debug("Layer ID to squash from: %s" % squash_id)
    return squash_id
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._files_in_layers
|
python
|
def _files_in_layers(self, layers, directory):
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
|
Prepare a list of files in all layers
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L260-L274
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._prepare_tmp_directory
|
python
|
def _prepare_tmp_directory(self, tmp_dir):
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
|
Creates temporary directory that is used to work on layers
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L276-L289
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
                    # object directly which could be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
        Docker marker files are files that have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
                # be added in either case because these can point to locations
                # that will become available after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._layers_to_squash
|
python
|
def _layers_to_squash(self, layers, from_layer):
    """Prepare the lists of layer IDs to squash and to leave untouched.

    Splits ``layers`` at the last occurrence of ``from_layer``: that
    layer and everything before it is left as-is, everything after it
    is marked for squashing. When ``from_layer`` is not present, all
    layers are marked for squashing.

    Returns a ``(to_squash, to_leave)`` tuple of new lists, both in
    the original order.
    """
    pivot = -1
    for index, layer_id in enumerate(layers):
        if layer_id == from_layer:
            # Keep scanning so duplicates resolve to the last occurrence,
            # matching a reverse scan that stops squashing at first match.
            pivot = index
    # pivot == -1 yields ([all layers], []) via the slices below.
    return list(layers[pivot + 1:]), list(layers[:pivot + 1])
|
Prepares a list of layer IDs that should be squashed
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L319-L337
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._save_image
|
python
|
def _save_image(self, image_id, directory):
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
|
Saves the image as a tar archive under specified name
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L343-L386
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._unpack
|
python
|
def _unpack(self, tar_file, directory):
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
|
Unpacks tar archive to selected directory
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L388-L397
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._read_layers
|
python
|
def _read_layers(self, layers, image_id):
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
|
Reads the JSON metadata for specified layer / image id
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L399-L403
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
    """
    Prepare a list of files in all layers.

    Returns a dict mapping each layer ID in *layers* to the list of
    member names found in that layer's layer.tar under *directory*,
    each normalized to a '/'-anchored path.
    """
    files = {}
    for layer in layers:
        self.log.debug("Generating list of files in layer '%s'..." % layer)
        tar_file = os.path.join(directory, layer, "layer.tar")
        with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
            # Anchor every name at '/' so later comparisons are uniform
            files[layer] = [self._normalize_path(x) for x in tar.getnames()]
        self.log.debug("Done, found %s files" % len(files[layer]))
    return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
    """Write the already-serialized JSON *metadata* string to *metadata_file*."""
    with open(metadata_file, 'w') as f:
        f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
    """
    This moves all the layers that should be copied as-is.

    In other words - all layers that are not meant to be squashed will be
    moved from the old image to the new image untouched.
    """
    for layer in layers:
        # On-disk layer directories do not carry the digest prefix
        layer_id = layer.replace('sha256:', '')
        self.log.debug("Moving unmodified layer '%s'..." % layer_id)
        shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
    """
    Searches for marker files in the specified archive.

    Docker marker files are files that have the .wh. prefix in the name.
    These files mark the corresponding file to be removed (hidden) when
    we start a container from the image.

    Returns a dict mapping each marker TarInfo in *members* to its
    extracted file object from *tar*.
    """
    marker_files = {}
    self.log.debug(
        "Searching for marker files in '%s' archive..." % tar.name)
    for member in members:
        if '.wh.' in member.name:
            self.log.debug("Found '%s' marker file" % member.name)
            marker_files[member] = tar.extractfile(member)
    self.log.debug("Done, found %s files" % len(marker_files))
    return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._parse_image_name
|
python
|
def _parse_image_name(self, image):
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
|
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L405-L418
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
    """Tar up *directory* and stream the archive into the Docker daemon."""
    tar_file = os.path.join(self.tmp_dir, "image.tar")
    self._tar_image(tar_file, directory)
    with open(tar_file, 'rb') as f:
        self.log.debug("Loading squashed image...")
        self.docker.load_image(f)
    self.log.debug("Image loaded!")
    # The intermediate archive was only needed for the upload.
    os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
    """Pack the contents of *directory* into *target_tar_file* (PAX format)."""
    with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
        self.log.debug("Generating tar archive for the squashed image...")
        # Chdir so entries are archived relative to *directory*, because
        # docker produces images like this:
        #   repositories
        #   <layer>/json
        # and not:
        #   ./
        #   ./repositories
        #   ./<layer>/json
        with Chdir(directory):
            for f in os.listdir("."):
                tar.add(f)
        self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
    """ Saves the image as a tar archive under specified name """
    # Downloading a large image from the daemon can fail transiently, so
    # retry up to three times before giving up.
    for x in [0, 1, 2]:
        self.log.info("Saving image %s to %s directory..." %
                      (image_id, directory))
        self.log.debug("Try #%s..." % (x + 1))
        try:
            image = self.docker.get_image(image_id)
            if docker.version_info[0] < 3:
                # Docker library prior to 3.0.0 returned the requests
                # object directly which could be used to read from
                self.log.debug("Extracting image using HTTPResponse object directly")
                self._extract_tar(image, directory)
            else:
                # Docker library >=3.0.0 returns iterator over raw data
                self.log.debug("Extracting image using iterator over raw data")
                # Bridge the chunk iterator to the file-object API tarfile
                # expects: write chunks into a pipe here, extract from the
                # read end in a worker thread.
                fd_r, fd_w = os.pipe()
                r = os.fdopen(fd_r, 'rb')
                w = os.fdopen(fd_w, 'wb')
                extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
                extracter.start()
                for chunk in image:
                    w.write(chunk)
                    w.flush()
                # Closing the write end signals EOF to the extractor thread.
                w.close()
                extracter.join()
                r.close()
            self.log.info("Image saved!")
            return True
        except Exception as e:
            self.log.exception(e)
            self.log.warn(
                "An error occured while saving the %s image, retrying..." % image_id)
    # All retries exhausted.
    raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
    """
    Searches for marker files in the specified archive.

    Docker marker files are files that have the .wh. prefix in the name.
    These files mark the corresponding file to be removed (hidden) when
    we start a container from the image.

    Returns:
        Dict mapping each marker's TarInfo to its extracted file object.
    """
    marker_files = {}
    self.log.debug(
        "Searching for marker files in '%s' archive..." % tar.name)
    for member in members:
        if '.wh.' in member.name:
            self.log.debug("Found '%s' marker file" % member.name)
            marker_files[member] = tar.extractfile(member)
    self.log.debug("Done, found %s files" % len(marker_files))
    return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
    """
    This method is responsible for adding back all markers that were not
    added to the squashed layer AND files they refer to can be found in layers
    we do not squash.
    """
    if markers:
        self.log.debug("Marker files to add: %s" %
                       [o.name for o in markers.keys()])
    else:
        # No marker files to add
        return
    # https://github.com/goldmann/docker-squash/issues/108
    # Some tar archives do have the filenames prefixed with './'
    # which does not have any effect when we unpack the tar achive,
    # but when processing tar content - we see this.
    tar_files = [self._normalize_path(x) for x in tar.getnames()]
    for marker, marker_file in six.iteritems(markers):
        # The marker's target is its own name without the '.wh.' prefix.
        actual_file = marker.name.replace('.wh.', '')
        normalized_file = self._normalize_path(actual_file)
        should_be_added_back = False
        if self._file_should_be_skipped(normalized_file, added_symlinks):
            self.log.debug(
                "Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
            continue
        if normalized_file in tar_files:
            self.log.debug(
                "Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
            continue
        if files_in_layers:
            # Only keep the marker if its target exists in a moved layer,
            # otherwise there is nothing left to hide.
            for files in files_in_layers.values():
                if normalized_file in files:
                    should_be_added_back = True
                    break
        else:
            # There are no previous layers, so we need to add it back
            # In fact this shouldn't happen since having a marker file
            # where there is no previous layer does not make sense.
            should_be_added_back = True
        if should_be_added_back:
            self.log.debug(
                "Adding '%s' marker file back..." % marker.name)
            # Marker files on AUFS are hardlinks, we need to create
            # regular files, therefore we need to recreate the tarinfo
            # object
            tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
            # Add the file name to the list too to avoid re-reading all files
            # in tar archive
            tar_files.append(normalized_file)
        else:
            self.log.debug(
                "Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
    """
    Add back hard links that were deferred during the per-layer pass,
    unless the link or its target was skipped or the target never made it
    into the squashed archive.
    """
    for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
        # We need to start from 1, that's why we bump it here
        current_layer = layer + 1
        for member in six.itervalues(hardlinks_in_layer):
            normalized_name = self._normalize_path(member.name)
            normalized_linkname = self._normalize_path(member.linkname)
            # Find out if the name is on the list of files to skip - if it is - get the layer number
            # where it was found
            layer_skip_name = self._file_should_be_skipped(
                normalized_name, to_skip)
            # Do the same for linkname
            layer_skip_linkname = self._file_should_be_skipped(
                normalized_linkname, to_skip)
            # We need to check if we should skip adding back the hard link
            # This can happen in the following situations:
            # 1. hard link is on the list of files to skip
            # 2. hard link target is on the list of files to skip
            # 3. hard link is already in squashed files
            # 4. hard link target is NOT in already squashed files
            if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
                self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                    normalized_name, normalized_linkname))
            else:
                if self.debug:
                    self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
                        normalized_name, normalized_linkname))
                squashed_files.append(normalized_name)
                squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
    """
    Add a single tar member (with optional extracted *content*) to the
    squashed archive, unless it was already squashed or is on the skip
    list.
    """
    normalized_name = self._normalize_path(member.name)
    if normalized_name in squashed_files:
        self.log.debug(
            "Skipping file '%s' because it is already squashed" % normalized_name)
        return
    if self._file_should_be_skipped(normalized_name, to_skip):
        self.log.debug(
            "Skipping '%s' file because it's on the list to skip files" % normalized_name)
        return
    if content:
        squashed_tar.addfile(member, content)
    else:
        # Special case: other(?) files, we skip the file
        # itself
        squashed_tar.addfile(member)
    # We added a file to the squashed tar, so let's note it
    squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
    """
    Add back symbolic links that were deferred during the per-layer pass.

    Returns:
        List of single-element lists with the normalized paths of the
        symlinks that were added (shape matches what
        `_file_should_be_skipped` expects).
    """
    added_symlinks = []
    for layer, symlinks_in_layer in enumerate(skipped_sym_links):
        # We need to start from 1, that's why we bump it here
        current_layer = layer + 1
        for member in six.itervalues(symlinks_in_layer):
            # Handling symlinks. This is similar to hard links with one
            # difference: sometimes we do want broken symlinks to be added
            # anyway, because they can point to locations that only become
            # available later (e.g. after volumes are mounted).
            normalized_name = self._normalize_path(member.name)
            normalized_linkname = self._normalize_path(member.linkname)
            # File is already in squashed files, skipping
            if normalized_name in squashed_files:
                self.log.debug(
                    "Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
                continue
            if self._file_should_be_skipped(normalized_name, added_symlinks):
                self.log.debug(
                    "Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
                continue
            # Find out if the name is on the list of files to skip - if it is - get the layer number
            # where it was found
            layer_skip_name = self._file_should_be_skipped(
                normalized_name, to_skip)
            # Do the same for linkname
            layer_skip_linkname = self._file_should_be_skipped(
                normalized_linkname, to_skip)
            # If name or linkname was found in the lists of files to be
            # skipped or it's not found in the squashed files
            if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
                self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                    normalized_name, normalized_linkname))
            else:
                if self.debug:
                    self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
                        normalized_name, normalized_linkname))
                added_symlinks.append([normalized_name])
                squashed_files.append(normalized_name)
                squashed_tar.addfile(member)
    return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
    """
    Merge the tar content of *layers_to_squash* into a single squashed
    layer tar, honoring marker (whiteout) files, symlinks and hard links.

    *layers_to_move* is used only to decide which marker files must be
    kept (they hide files that survive in the moved layers).
    """
    self.log.info("Starting squashing...")
    # Reverse the layers to squash - we begin with the newest one
    # to make the tar lighter
    layers_to_squash.reverse()
    # Find all files in layers that we don't squash
    files_in_layers_to_move = self._files_in_layers(
        layers_to_move, self.old_image_dir)
    with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
        to_skip = []
        skipped_markers = {}
        skipped_hard_links = []
        skipped_sym_links = []
        skipped_files = []
        # List of filenames in the squashed archive
        squashed_files = []
        for layer_id in layers_to_squash:
            layer_tar_file = os.path.join(
                self.old_image_dir, layer_id, "layer.tar")
            self.log.info("Squashing file '%s'..." % layer_tar_file)
            # Open the exiting layer to squash
            with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
                # Find all marker files for all layers
                # We need the list of marker files upfront, so we can
                # skip unnecessary files
                members = layer_tar.getmembers()
                markers = self._marker_files(layer_tar, members)
                skipped_sym_link_files = {}
                skipped_hard_link_files = {}
                files_to_skip = []
                # Iterate over the marker files found for this particular
                # layer and if in the squashed layers file corresponding
                # to the marker file is found, then skip both files
                for marker, marker_file in six.iteritems(markers):
                    actual_file = marker.name.replace('.wh.', '')
                    files_to_skip.append(self._normalize_path(actual_file))
                    skipped_markers[marker] = marker_file
                self.log.debug(
                    "Searching for symbolic links in '%s' archive..." % layer_tar_file)
                # Scan for all symlinks in the layer and save them
                # for later processing.
                for member in members:
                    if member.issym():
                        normalized_name = self._normalize_path(member.name)
                        skipped_sym_link_files[normalized_name] = member
                        continue
                to_skip.append(files_to_skip)
                skipped_sym_links.append(skipped_sym_link_files)
                self.log.debug("Done, found %s files" %
                               len(skipped_sym_link_files))
                skipped_files_in_layer = {}
                # Copy all the files to the new tar
                for member in members:
                    # Skip all symlinks, we'll investigate them later
                    if member.issym():
                        continue
                    normalized_name = self._normalize_path(member.name)
                    if member in six.iterkeys(skipped_markers):
                        self.log.debug(
                            "Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                        continue
                    if self._file_should_be_skipped(normalized_name, skipped_sym_links):
                        self.log.debug(
                            "Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                        # Keep the member (and its content, for regular
                        # files) around so it can be re-added later.
                        if member.isfile():
                            f = (member, layer_tar.extractfile(member))
                        else:
                            f = (member, None)
                        skipped_files_in_layer[normalized_name] = f
                        continue
                    # Skip files that are marked to be skipped
                    if self._file_should_be_skipped(normalized_name, to_skip):
                        self.log.debug(
                            "Skipping '%s' file because it's on the list to skip files" % normalized_name)
                        continue
                    # Check if file is already added to the archive
                    if normalized_name in squashed_files:
                        # File already exist in the squashed archive, skip it because
                        # file want to add is older than the one already in the archive.
                        # This is true because we do reverse squashing - from
                        # newer to older layer
                        self.log.debug(
                            "Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
                        continue
                    # Hard links are processed after everything else
                    if member.islnk():
                        skipped_hard_link_files[normalized_name] = member
                        continue
                    content = None
                    if member.isfile():
                        content = layer_tar.extractfile(member)
                    self._add_file(member, content,
                                   squashed_tar, squashed_files, to_skip)
                skipped_hard_links.append(skipped_hard_link_files)
                skipped_files.append(skipped_files_in_layer)
        self._add_hardlinks(squashed_tar, squashed_files,
                            to_skip, skipped_hard_links)
        added_symlinks = self._add_symlinks(
            squashed_tar, squashed_files, to_skip, skipped_sym_links)
        for layer in skipped_files:
            for member, content in six.itervalues(layer):
                self._add_file(member, content, squashed_tar,
                               squashed_files, added_symlinks)
        if files_in_layers_to_move:
            self._add_markers(skipped_markers, squashed_tar,
                              files_in_layers_to_move, added_symlinks)
    self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._dump_json
|
python
|
def _dump_json(self, data, new_line=False):
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
|
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L420-L435
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
    """
    Args:
        log: logger used for all progress/debug output.
        docker: Docker API client.
        image: ID or name of the image to squash.
        from_layer: layer reference, or number of layers, to squash
            (resolved later in `_before_squashing`).
        tmp_dir: optional path for the working directory; must not exist yet.
        tag: optional 'name[:tag]' to apply to the squashed image.
    """
    self.log = log
    self.debug = self.log.isEnabledFor(logging.DEBUG)
    self.docker = docker
    self.image = image
    self.from_layer = from_layer
    self.tag = tag
    self.image_name = None
    self.image_tag = None
    self.squash_id = None
    # Workaround for https://play.golang.org/p/sCsWMXYxqy
    #
    # Golang doesn't add padding to microseconds when marshaling
    # microseconds in date into JSON. Python does.
    # We need to produce same output as Docker's to not generate
    # different metadata. That's why we need to strip all zeros at the
    # end of the date string...
    self.date = re.sub(
        r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
    """ Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
    self.tmp_dir = tmp_dir
    """ Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
    """Run the full squashing workflow and return the result produced by `_squash`."""
    self._before_squashing()
    result = self._squash()
    self._after_squashing()
    return result
def _squash(self):
    # Abstract hook: format-specific subclasses implement the actual
    # squashing here; the base class deliberately does nothing.
    pass
def cleanup(self):
    """ Cleanup the temporary directory """
    self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
    # Best-effort removal: ignore_errors so a partially-removed tree
    # does not fail the run.
    shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
    """Create the temporary directory layout used throughout squashing."""
    # Prepare temporary directory where all the work will be executed
    try:
        self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
    except:
        # NOTE(review): bare except hides the original failure reason;
        # consider chaining the underlying exception.
        raise SquashError("Preparing temporary directory failed")
    # Temporary location on the disk of the old, unpacked *image*
    self.old_image_dir = os.path.join(self.tmp_dir, "old")
    # Temporary location on the disk of the new, unpacked, squashed *image*
    self.new_image_dir = os.path.join(self.tmp_dir, "new")
    # Temporary location on the disk of the squashed *layer*
    self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
    for d in self.old_image_dir, self.new_image_dir:
        os.makedirs(d)
def _squash_id(self, layer):
    """
    Resolve *layer* (an image/layer reference) to its full image ID and
    make sure it belongs to the image being squashed.

    Returns:
        The resolved ID, or None for the '<missing>' placeholder produced
        by `docker history`.

    Raises:
        SquashError: when the reference cannot be inspected or the
        resolved ID is not one of the old image's layers.
    """
    if layer == "<missing>":
        self.log.warn(
            "You try to squash from layer that does not have it's own ID, we'll try to find it later")
        return None
    try:
        squash_id = self.docker.inspect_image(layer)['Id']
    except:
        raise SquashError(
            "Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
    if squash_id not in self.old_image_layers:
        raise SquashError(
            "Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
    self.log.debug("Layer ID to squash from: %s" % squash_id)
    return squash_id
def _validate_number_of_layers(self, number_of_layers):
    """
    Makes sure that the specified number of layers to squash
    is a valid number

    Raises:
        SquashError: when the number is not positive or exceeds the
        number of layers in the old image.
    """
    # Only positive numbers are correct
    if number_of_layers <= 0:
        raise SquashError(
            "Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
    # Do not squash if provided number of layer to squash is bigger
    # than number of actual layers in the image
    if number_of_layers > len(self.old_image_layers):
        raise SquashError(
            "Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
    """
    Prepare everything needed before the actual squashing: the working
    directories, old-image metadata, the split into layers to squash vs.
    layers to move, and the unpacked old image on disk.
    """
    self._initialize_directories()
    # Location of the tar archive with squashed layers
    self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
    if self.tag:
        self.image_name, self.image_tag = self._parse_image_name(self.tag)
    # The image id or name of the image to be squashed
    try:
        self.old_image_id = self.docker.inspect_image(self.image)['Id']
    except SquashError:
        raise SquashError(
            "Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
    self.old_image_layers = []
    # Read all layers in the image
    self._read_layers(self.old_image_layers, self.old_image_id)
    self.old_image_layers.reverse()
    self.log.info("Old image has %s layers", len(self.old_image_layers))
    self.log.debug("Old layers: %s", self.old_image_layers)
    # By default - squash all layers.
    if self.from_layer == None:
        self.from_layer = len(self.old_image_layers)
    try:
        number_of_layers = int(self.from_layer)
        self.log.debug(
            "We detected number of layers as the argument to squash")
    except ValueError:
        # Not an integer - treat the argument as a layer/image reference.
        self.log.debug("We detected layer as the argument to squash")
        squash_id = self._squash_id(self.from_layer)
        if not squash_id:
            raise SquashError(
                "The %s layer could not be found in the %s image" % (self.from_layer, self.image))
        number_of_layers = len(self.old_image_layers) - \
            self.old_image_layers.index(squash_id) - 1
    self._validate_number_of_layers(number_of_layers)
    marker = len(self.old_image_layers) - number_of_layers
    self.layers_to_squash = self.old_image_layers[marker:]
    self.layers_to_move = self.old_image_layers[:marker]
    self.log.info("Checking if squashing is necessary...")
    if len(self.layers_to_squash) < 1:
        raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
    if len(self.layers_to_squash) == 1:
        raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
    self.log.info("Attempting to squash last %s layers...",
                  number_of_layers)
    self.log.debug("Layers to squash: %s", self.layers_to_squash)
    self.log.debug("Layers to move: %s", self.layers_to_move)
    # Fetch the image and unpack it on the fly to the old image directory
    self._save_image(self.old_image_id, self.old_image_dir)
    self.size_before = self._dir_size(self.old_image_dir)
    self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
    """Remove the unpacked old image from disk and report the size difference."""
    self.log.debug("Removing from disk already squashed layers...")
    shutil.rmtree(self.old_image_dir, ignore_errors=True)
    self.size_after = self._dir_size(self.new_image_dir)
    size_before_mb = float(self.size_before)/1024/1024
    size_after_mb = float(self.size_after)/1024/1024
    self.log.info("Original image size: %.2f MB" % size_before_mb)
    self.log.info("Squashed image size: %.2f MB" % size_after_mb)
    if (size_after_mb >= size_before_mb):
        self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
    else:
        self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
    """
    Returns name of directories to layers in the exported tar archive.

    Abstract hook: format-specific subclasses are expected to override
    this; the base implementation intentionally does nothing.
    """
    pass
def export_tar_archive(self, target_tar_file):
    """Write the squashed image (new image directory) as a tar archive to *target_tar_file*."""
    self._tar_image(target_tar_file, self.new_image_dir)
    self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
    """Load the squashed image from disk into the Docker daemon."""
    self._load_image(self.new_image_dir)
    if self.tag:
        # Only report a name:tag when the caller asked for the image to be tagged.
        self.log.info("Image registered in Docker daemon as %s:%s" %
                      (self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
    """
    Prepare a list of files in all layers.

    Args:
        layers: iterable of layer IDs (sub-directory names under *directory*).
        directory: path to the unpacked image, one sub-directory per layer.

    Returns:
        Dict mapping layer ID -> list of normalized (absolute) file paths
        found in that layer's ``layer.tar``.
    """
    files = {}
    for layer in layers:
        self.log.debug("Generating list of files in layer '%s'..." % layer)
        tar_file = os.path.join(directory, layer, "layer.tar")
        with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
            # Normalize so later lookups match _normalize_path() output.
            files[layer] = [self._normalize_path(
                x) for x in tar.getnames()]
        self.log.debug("Done, found %s files" % len(files[layer]))
    return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
    """Tar up *directory* and stream the archive into the Docker daemon."""
    tar_file = os.path.join(self.tmp_dir, "image.tar")
    self._tar_image(tar_file, directory)
    with open(tar_file, 'rb') as f:
        self.log.debug("Loading squashed image...")
        self.docker.load_image(f)
    self.log.debug("Image loaded!")
    # The intermediate archive was only needed for the upload.
    os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
    """Pack the contents of *directory* into *target_tar_file* (PAX format)."""
    with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
        self.log.debug("Generating tar archive for the squashed image...")
        # Chdir so entries are archived relative to *directory*, because
        # docker produces images like this:
        #   repositories
        #   <layer>/json
        # and not:
        #   ./
        #   ./repositories
        #   ./<layer>/json
        with Chdir(directory):
            for f in os.listdir("."):
                tar.add(f)
        self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
    """ Saves the image as a tar archive under specified name """
    # Downloading a large image from the daemon can fail transiently, so
    # retry up to three times before giving up.
    for x in [0, 1, 2]:
        self.log.info("Saving image %s to %s directory..." %
                      (image_id, directory))
        self.log.debug("Try #%s..." % (x + 1))
        try:
            image = self.docker.get_image(image_id)
            if docker.version_info[0] < 3:
                # Docker library prior to 3.0.0 returned the requests
                # object directly which could be used to read from
                self.log.debug("Extracting image using HTTPResponse object directly")
                self._extract_tar(image, directory)
            else:
                # Docker library >=3.0.0 returns iterator over raw data
                self.log.debug("Extracting image using iterator over raw data")
                # Bridge the chunk iterator to the file-object API tarfile
                # expects: write chunks into a pipe here, extract from the
                # read end in a worker thread.
                fd_r, fd_w = os.pipe()
                r = os.fdopen(fd_r, 'rb')
                w = os.fdopen(fd_w, 'wb')
                extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
                extracter.start()
                for chunk in image:
                    w.write(chunk)
                    w.flush()
                # Closing the write end signals EOF to the extractor thread.
                w.close()
                extracter.join()
                r.close()
            self.log.info("Image saved!")
            return True
        except Exception as e:
            self.log.exception(e)
            self.log.warn(
                "An error occured while saving the %s image, retrying..." % image_id)
    # All retries exhausted.
    raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
    """
    Searches for marker files in the specified archive.

    Docker marker files are files that have the .wh. prefix in the name.
    These files mark the corresponding file to be removed (hidden) when
    we start a container from the image.

    Returns:
        Dict mapping each marker's TarInfo to its extracted file object.
    """
    marker_files = {}
    self.log.debug(
        "Searching for marker files in '%s' archive..." % tar.name)
    for member in members:
        if '.wh.' in member.name:
            self.log.debug("Found '%s' marker file" % member.name)
            marker_files[member] = tar.extractfile(member)
    self.log.debug("Done, found %s files" % len(marker_files))
    return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
    """
    This method is responsible for adding back all markers that were not
    added to the squashed layer AND files they refer to can be found in layers
    we do not squash.
    """
    if markers:
        self.log.debug("Marker files to add: %s" %
                       [o.name for o in markers.keys()])
    else:
        # No marker files to add
        return
    # https://github.com/goldmann/docker-squash/issues/108
    # Some tar archives do have the filenames prefixed with './'
    # which does not have any effect when we unpack the tar achive,
    # but when processing tar content - we see this.
    tar_files = [self._normalize_path(x) for x in tar.getnames()]
    for marker, marker_file in six.iteritems(markers):
        # The marker's target is its own name without the '.wh.' prefix.
        actual_file = marker.name.replace('.wh.', '')
        normalized_file = self._normalize_path(actual_file)
        should_be_added_back = False
        if self._file_should_be_skipped(normalized_file, added_symlinks):
            self.log.debug(
                "Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
            continue
        if normalized_file in tar_files:
            self.log.debug(
                "Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
            continue
        if files_in_layers:
            # Only keep the marker if its target exists in a moved layer,
            # otherwise there is nothing left to hide.
            for files in files_in_layers.values():
                if normalized_file in files:
                    should_be_added_back = True
                    break
        else:
            # There are no previous layers, so we need to add it back
            # In fact this shouldn't happen since having a marker file
            # where there is no previous layer does not make sense.
            should_be_added_back = True
        if should_be_added_back:
            self.log.debug(
                "Adding '%s' marker file back..." % marker.name)
            # Marker files on AUFS are hardlinks, we need to create
            # regular files, therefore we need to recreate the tarinfo
            # object
            tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
            # Add the file name to the list too to avoid re-reading all files
            # in tar archive
            tar_files.append(normalized_file)
        else:
            self.log.debug(
                "Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
    """
    Add back the hard links that were deferred while squashing layers.

    Args:
        squashed_tar: open, writable squashed tar archive.
        squashed_files: list of normalized paths already in the archive;
            every link actually added is appended to it.
        to_skip: per-layer lists of normalized paths that must not be
            added (consumed via _file_should_be_skipped).
        skipped_hard_links: one dict per squashed layer (newest first)
            mapping normalized path -> hard-link TarInfo.
    """
    for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
        # We need to start from 1, that's why we bump it here
        current_layer = layer + 1
        for member in six.itervalues(hardlinks_in_layer):
            normalized_name = self._normalize_path(member.name)
            normalized_linkname = self._normalize_path(member.linkname)
            # Find out if the name is on the list of files to skip - if it is - get the layer number
            # where it was found
            layer_skip_name = self._file_should_be_skipped(
                normalized_name, to_skip)
            # Do the same for linkname
            layer_skip_linkname = self._file_should_be_skipped(
                normalized_linkname, to_skip)
            # We need to check if we should skip adding back the hard link
            # This can happen in the following situations:
            # 1. hard link is on the list of files to skip
            # 2. hard link target is on the list of files to skip
            # 3. hard link is already in squashed files
            # 4. hard link target is NOT in already squashed files
            # For 1. and 2. the skip only applies when the skip entry comes
            # from an older layer than the link itself (current_layer > ...).
            if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
                self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                    normalized_name, normalized_linkname))
            else:
                if self.debug:
                    self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
                        normalized_name, normalized_linkname))
                squashed_files.append(normalized_name)
                squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
    """Append a single tar member to the squashed archive.

    The member is silently dropped when it is already present in the
    squashed archive or when it matches the skip lists.
    """
    path = self._normalize_path(member.name)

    # Guard 1: a newer layer already contributed this path.
    if path in squashed_files:
        self.log.debug(
            "Skipping file '%s' because it is already squashed" % path)
        return

    # Guard 2: the path is covered by a marker/skip entry.
    if self._file_should_be_skipped(path, to_skip):
        self.log.debug(
            "Skipping '%s' file because it's on the list to skip files" % path)
        return

    if content:
        squashed_tar.addfile(member, content)
    else:
        # Directories, devices, etc. carry no payload - add header only.
        squashed_tar.addfile(member)

    # Record the addition so later (older) layers do not re-add it.
    squashed_files.append(path)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
    """
    Add back the symbolic links that were deferred while squashing layers.

    Args:
        squashed_tar: open, writable squashed tar archive.
        squashed_files: list of normalized paths already in the archive;
            every link actually added is appended to it.
        to_skip: per-layer lists of normalized paths that must not be added.
        skipped_sym_links: one dict per squashed layer (newest first)
            mapping normalized path -> symlink TarInfo.

    Returns:
        List of [normalized symlink path] entries that were added back.
    """
    added_symlinks = []
    for layer, symlinks_in_layer in enumerate(skipped_sym_links):
        # We need to start from 1, that's why we bump it here
        current_layer = layer + 1
        for member in six.itervalues(symlinks_in_layer):
            # Handling symlinks. This is similar to hard links with one
            # difference. Sometimes we do want to have broken symlinks
            # added either way because these can point to locations
            # that will become available after adding volumes for example.
            normalized_name = self._normalize_path(member.name)
            normalized_linkname = self._normalize_path(member.linkname)
            # File is already in squashed files, skipping
            if normalized_name in squashed_files:
                self.log.debug(
                    "Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
                continue
            if self._file_should_be_skipped(normalized_name, added_symlinks):
                self.log.debug(
                    "Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
                continue
            # Find out if the name is on the list of files to skip - if it is - get the layer number
            # where it was found
            layer_skip_name = self._file_should_be_skipped(
                normalized_name, to_skip)
            # Do the same for linkname
            layer_skip_linkname = self._file_should_be_skipped(
                normalized_linkname, to_skip)
            # If name or linkname was found in the lists of files to be
            # skipped or it's not found in the squashed files
            if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
                self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                    normalized_name, normalized_linkname))
            else:
                if self.debug:
                    self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
                        normalized_name, normalized_linkname))
                added_symlinks.append([normalized_name])
                squashed_files.append(normalized_name)
                squashed_tar.addfile(member)
    return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
    """
    Merge the given layers into a single squashed tar archive.

    Walks the layers newest-first so the most recent version of every file
    wins; marker (whiteout) files, hard links and symlinks are deferred and
    reconciled at the end.  NOTE: mutates *layers_to_squash* in place
    (reverses it).

    Args:
        layers_to_squash: list of layer ids to merge (oldest first on entry).
        layers_to_move: list of layer ids that are kept as-is; their file
            lists are used to decide which markers must be restored.
    """
    self.log.info("Starting squashing...")
    # Reverse the layers to squash - we begin with the newest one
    # to make the tar lighter
    layers_to_squash.reverse()
    # Find all files in layers that we don't squash
    files_in_layers_to_move = self._files_in_layers(
        layers_to_move, self.old_image_dir)
    with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
        to_skip = []
        skipped_markers = {}
        skipped_hard_links = []
        skipped_sym_links = []
        skipped_files = []
        # List of filenames in the squashed archive
        squashed_files = []
        for layer_id in layers_to_squash:
            layer_tar_file = os.path.join(
                self.old_image_dir, layer_id, "layer.tar")
            self.log.info("Squashing file '%s'..." % layer_tar_file)
            # Open the existing layer to squash
            with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
                # Find all marker files for all layers
                # We need the list of marker files upfront, so we can
                # skip unnecessary files
                members = layer_tar.getmembers()
                markers = self._marker_files(layer_tar, members)
                skipped_sym_link_files = {}
                skipped_hard_link_files = {}
                files_to_skip = []
                # Iterate over the marker files found for this particular
                # layer and if in the squashed layers file corresponding
                # to the marker file is found, then skip both files
                for marker, marker_file in six.iteritems(markers):
                    actual_file = marker.name.replace('.wh.', '')
                    files_to_skip.append(self._normalize_path(actual_file))
                    skipped_markers[marker] = marker_file
                self.log.debug(
                    "Searching for symbolic links in '%s' archive..." % layer_tar_file)
                # Scan for all symlinks in the layer and save them
                # for later processing.
                for member in members:
                    if member.issym():
                        normalized_name = self._normalize_path(member.name)
                        skipped_sym_link_files[normalized_name] = member
                        continue
                to_skip.append(files_to_skip)
                skipped_sym_links.append(skipped_sym_link_files)
                self.log.debug("Done, found %s files" %
                               len(skipped_sym_link_files))
                skipped_files_in_layer = {}
                # Copy all the files to the new tar
                for member in members:
                    # Skip all symlinks, we'll investigate them later
                    if member.issym():
                        continue
                    normalized_name = self._normalize_path(member.name)
                    if member in six.iterkeys(skipped_markers):
                        self.log.debug(
                            "Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                        continue
                    if self._file_should_be_skipped(normalized_name, skipped_sym_links):
                        self.log.debug(
                            "Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                        # Remember (member, payload) so the file can still
                        # be added back after symlinks are reconciled.
                        if member.isfile():
                            f = (member, layer_tar.extractfile(member))
                        else:
                            f = (member, None)
                        skipped_files_in_layer[normalized_name] = f
                        continue
                    # Skip files that are marked to be skipped
                    if self._file_should_be_skipped(normalized_name, to_skip):
                        self.log.debug(
                            "Skipping '%s' file because it's on the list to skip files" % normalized_name)
                        continue
                    # Check if file is already added to the archive
                    if normalized_name in squashed_files:
                        # File already exist in the squashed archive, skip it because
                        # file want to add is older than the one already in the archive.
                        # This is true because we do reverse squashing - from
                        # newer to older layer
                        self.log.debug(
                            "Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
                        continue
                    # Hard links are processed after everything else
                    if member.islnk():
                        skipped_hard_link_files[normalized_name] = member
                        continue
                    content = None
                    if member.isfile():
                        content = layer_tar.extractfile(member)
                    self._add_file(member, content,
                                   squashed_tar, squashed_files, to_skip)
                skipped_hard_links.append(skipped_hard_link_files)
                skipped_files.append(skipped_files_in_layer)
        # All layers scanned - now reconcile the deferred entries.
        self._add_hardlinks(squashed_tar, squashed_files,
                            to_skip, skipped_hard_links)
        added_symlinks = self._add_symlinks(
            squashed_tar, squashed_files, to_skip, skipped_sym_links)
        for layer in skipped_files:
            for member, content in six.itervalues(layer):
                self._add_file(member, content, squashed_tar,
                               squashed_files, added_symlinks)
        if files_in_layers_to_move:
            self._add_markers(skipped_markers, squashed_tar,
                              files_in_layers_to_move, added_symlinks)
    self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._move_layers
|
python
|
def _move_layers(self, layers, src, dest):
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
|
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L475-L485
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar achive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._marker_files
|
python
|
def _marker_files(self, tar, members):
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
|
Searches for marker files in the specified archive.
Docker marker files are files that have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L501-L521
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
self.log.debug("Removing from disk already squashed layers...")
shutil.rmtree(self.old_image_dir, ignore_errors=True)
self.size_after = self._dir_size(self.new_image_dir)
size_before_mb = float(self.size_before)/1024/1024
size_after_mb = float(self.size_after)/1024/1024
self.log.info("Original image size: %.2f MB" % size_before_mb)
self.log.info("Squashed image size: %.2f MB" % size_after_mb)
if (size_after_mb >= size_before_mb):
self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
else:
self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
"""
Returns name of directories to layers in the exported tar archive.
"""
pass
def export_tar_archive(self, target_tar_file):
self._tar_image(target_tar_file, self.new_image_dir)
self.log.info("Image available at '%s'" % target_tar_file)
def load_squashed_image(self):
self._load_image(self.new_image_dir)
if self.tag:
self.log.info("Image registered in Docker daemon as %s:%s" %
(self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
def _extract_tar(self, fileobj, directory):
with tarfile.open(fileobj=fileobj, mode='r|') as tar:
tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which could be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
def _unpack(self, tar_file, directory):
""" Unpacks tar archive to selected directory """
self.log.info("Unpacking %s tar file to %s directory" %
(tar_file, directory))
with tarfile.open(tar_file, 'r') as tar:
tar.extractall(path=directory)
self.log.info("Archive unpacked!")
def _read_layers(self, layers, image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
"""
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
"""
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar archive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be added either way because these can point to locations
# that will become available after adding volumes, for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/image.py
|
Image._add_markers
|
python
|
def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
if markers:
self.log.debug("Marker files to add: %s" %
[o.name for o in markers.keys()])
else:
# No marker files to add
return
# https://github.com/goldmann/docker-squash/issues/108
# Some tar archives do have the filenames prefixed with './'
# which does not have any effect when we unpack the tar archive,
# but when processing tar content - we see this.
tar_files = [self._normalize_path(x) for x in tar.getnames()]
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
normalized_file = self._normalize_path(actual_file)
should_be_added_back = False
if self._file_should_be_skipped(normalized_file, added_symlinks):
self.log.debug(
"Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
continue
if normalized_file in tar_files:
self.log.debug(
"Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
continue
if files_in_layers:
for files in files_in_layers.values():
if normalized_file in files:
should_be_added_back = True
break
else:
# There are no previous layers, so we need to add it back
# In fact this shouldn't happen since having a marker file
# where there is no previous layer does not make sense.
should_be_added_back = True
if should_be_added_back:
self.log.debug(
"Adding '%s' marker file back..." % marker.name)
# Marker files on AUFS are hardlinks, we need to create
# regular files, therefore we need to recreate the tarinfo
# object
tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
# Add the file name to the list too to avoid re-reading all files
# in tar archive
tar_files.append(normalized_file)
else:
self.log.debug(
"Skipping '%s' marker file..." % marker.name)
|
This method is responsible for adding back all markers that were not
added to the squashed layer AND files they refer to can be found in layers
we do not squash.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L523-L582
| null |
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
self.log = log
self.debug = self.log.isEnabledFor(logging.DEBUG)
self.docker = docker
self.image = image
self.from_layer = from_layer
self.tag = tag
self.image_name = None
self.image_tag = None
self.squash_id = None
# Workaround for https://play.golang.org/p/sCsWMXYxqy
#
# Golang doesn't add padding to microseconds when marshaling
# microseconds in date into JSON. Python does.
# We need to produce same output as Docker's to not generate
# different metadata. That's why we need to strip all zeros at the
# end of the date string...
self.date = re.sub(
r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
""" Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
self.tmp_dir = tmp_dir
""" Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
self._before_squashing()
ret = self._squash()
self._after_squashing()
return ret
def _squash(self):
pass
def cleanup(self):
""" Cleanup the temporary directory """
self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
# Prepare temporary directory where all the work will be executed
try:
self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
except:
raise SquashError("Preparing temporary directory failed")
# Temporary location on the disk of the old, unpacked *image*
self.old_image_dir = os.path.join(self.tmp_dir, "old")
# Temporary location on the disk of the new, unpacked, squashed *image*
self.new_image_dir = os.path.join(self.tmp_dir, "new")
# Temporary location on the disk of the squashed *layer*
self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
for d in self.old_image_dir, self.new_image_dir:
os.makedirs(d)
def _squash_id(self, layer):
if layer == "<missing>":
self.log.warn(
"You try to squash from layer that does not have it's own ID, we'll try to find it later")
return None
try:
squash_id = self.docker.inspect_image(layer)['Id']
except:
raise SquashError(
"Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
if squash_id not in self.old_image_layers:
raise SquashError(
"Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
self.log.debug("Layer ID to squash from: %s" % squash_id)
return squash_id
def _validate_number_of_layers(self, number_of_layers):
"""
Makes sure that the specified number of layers to squash
is a valid number
"""
# Only positive numbers are correct
if number_of_layers <= 0:
raise SquashError(
"Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
# Do not squash if provided number of layer to squash is bigger
# than number of actual layers in the image
if number_of_layers > len(self.old_image_layers):
raise SquashError(
"Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, len(self.old_image_layers)))
def _before_squashing(self):
self._initialize_directories()
# Location of the tar archive with squashed layers
self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
if self.tag:
self.image_name, self.image_tag = self._parse_image_name(self.tag)
# The image id or name of the image to be squashed
try:
self.old_image_id = self.docker.inspect_image(self.image)['Id']
except SquashError:
raise SquashError(
"Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
self.old_image_layers = []
# Read all layers in the image
self._read_layers(self.old_image_layers, self.old_image_id)
self.old_image_layers.reverse()
self.log.info("Old image has %s layers", len(self.old_image_layers))
self.log.debug("Old layers: %s", self.old_image_layers)
# By default - squash all layers.
if self.from_layer == None:
self.from_layer = len(self.old_image_layers)
try:
number_of_layers = int(self.from_layer)
self.log.debug(
"We detected number of layers as the argument to squash")
except ValueError:
self.log.debug("We detected layer as the argument to squash")
squash_id = self._squash_id(self.from_layer)
if not squash_id:
raise SquashError(
"The %s layer could not be found in the %s image" % (self.from_layer, self.image))
number_of_layers = len(self.old_image_layers) - \
self.old_image_layers.index(squash_id) - 1
self._validate_number_of_layers(number_of_layers)
marker = len(self.old_image_layers) - number_of_layers
self.layers_to_squash = self.old_image_layers[marker:]
self.layers_to_move = self.old_image_layers[:marker]
self.log.info("Checking if squashing is necessary...")
if len(self.layers_to_squash) < 1:
raise SquashError("Invalid number of layers to squash: %s" % len(self.layers_to_squash))
if len(self.layers_to_squash) == 1:
raise SquashUnnecessaryError("Single layer marked to squash, no squashing is required")
self.log.info("Attempting to squash last %s layers...",
number_of_layers)
self.log.debug("Layers to squash: %s", self.layers_to_squash)
self.log.debug("Layers to move: %s", self.layers_to_move)
# Fetch the image and unpack it on the fly to the old image directory
self._save_image(self.old_image_id, self.old_image_dir)
self.size_before = self._dir_size(self.old_image_dir)
self.log.info("Squashing image '%s'..." % self.image)
    def _after_squashing(self):
        """Clean up already-squashed layer data and report the size change.

        Removes the old image directory from disk, measures the new image
        directory, and logs before/after sizes in MB plus either the
        percentage saved or a hint that squashing only added metadata.
        """
        self.log.debug("Removing from disk already squashed layers...")
        shutil.rmtree(self.old_image_dir, ignore_errors=True)
        self.size_after = self._dir_size(self.new_image_dir)
        # Sizes are tracked in bytes; convert to MB for the log output.
        size_before_mb = float(self.size_before)/1024/1024
        size_after_mb = float(self.size_after)/1024/1024
        self.log.info("Original image size: %.2f MB" % size_before_mb)
        self.log.info("Squashed image size: %.2f MB" % size_after_mb)
        if (size_after_mb >= size_before_mb):
            self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
        else:
            self.log.info("Image size decreased by %.2f %%" % float(((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
    def layer_paths(self):
        """
        Returns name of directories to layers in the exported tar archive.

        Abstract placeholder: concrete image-format implementations are
        expected to override this; the base version does nothing.
        """
        pass
    def export_tar_archive(self, target_tar_file):
        """Pack the squashed image directory into *target_tar_file* and log its location."""
        self._tar_image(target_tar_file, self.new_image_dir)
        self.log.info("Image available at '%s'" % target_tar_file)
    def load_squashed_image(self):
        """Load the squashed image into the Docker daemon.

        Logs the registered name:tag only when a tag was requested.
        """
        self._load_image(self.new_image_dir)
        if self.tag:
            self.log.info("Image registered in Docker daemon as %s:%s" %
                          (self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
"""
Prepare a list of files in all layers
"""
files = {}
for layer in layers:
self.log.debug("Generating list of files in layer '%s'..." % layer)
tar_file = os.path.join(directory, layer, "layer.tar")
with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
files[layer] = [self._normalize_path(
x) for x in tar.getnames()]
self.log.debug("Done, found %s files" % len(files[layer]))
return files
    def _prepare_tmp_directory(self, tmp_dir):
        """ Creates temporary directory that is used to work on layers.

        When *tmp_dir* is provided it must not exist yet and is created
        (including parents); otherwise a fresh mkdtemp directory with a
        'docker-squash-' prefix is used. Returns the directory path.

        Raises:
            SquashError: if the requested directory already exists.
        """
        if tmp_dir:
            if os.path.exists(tmp_dir):
                raise SquashError(
                    "The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
            os.makedirs(tmp_dir)
        else:
            tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
        self.log.debug("Using %s as the temporary directory" % tmp_dir)
        return tmp_dir
def _load_image(self, directory):
tar_file = os.path.join(self.tmp_dir, "image.tar")
self._tar_image(tar_file, directory)
with open(tar_file, 'rb') as f:
self.log.debug("Loading squashed image...")
self.docker.load_image(f)
self.log.debug("Image loaded!")
os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
    def _extract_tar(self, fileobj, directory):
        """Stream-extract a tar archive read from *fileobj* into *directory*.

        Opened in 'r|' (non-seekable stream) mode so it can consume a pipe.
        NOTE(review): extractall performs no member-path sanitization here;
        presumably safe because the archive comes from the Docker daemon —
        confirm no untrusted tars reach this.
        """
        with tarfile.open(fileobj=fileobj, mode='r|') as tar:
            tar.extractall(path=directory)
def _save_image(self, image_id, directory):
""" Saves the image as a tar archive under specified name """
for x in [0, 1, 2]:
self.log.info("Saving image %s to %s directory..." %
(image_id, directory))
self.log.debug("Try #%s..." % (x + 1))
try:
image = self.docker.get_image(image_id)
if docker.version_info[0] < 3:
# Docker library prior to 3.0.0 returned the requests
# object directly which cold be used to read from
self.log.debug("Extracting image using HTTPResponse object directly")
self._extract_tar(image, directory)
else:
# Docker library >=3.0.0 returns iterator over raw data
self.log.debug("Extracting image using iterator over raw data")
fd_r, fd_w = os.pipe()
r = os.fdopen(fd_r, 'rb')
w = os.fdopen(fd_w, 'wb')
extracter = threading.Thread(target=self._extract_tar, args=(r,directory))
extracter.start()
for chunk in image:
w.write(chunk)
w.flush()
w.close()
extracter.join()
r.close()
self.log.info("Image saved!")
return True
except Exception as e:
self.log.exception(e)
self.log.warn(
"An error occured while saving the %s image, retrying..." % image_id)
raise SquashError("Couldn't save %s image!" % image_id)
    def _unpack(self, tar_file, directory):
        """ Unpacks tar archive to selected directory.

        NOTE(review): extractall performs no member-path sanitization;
        safe only for archives this tool produced itself — confirm.
        """
        self.log.info("Unpacking %s tar file to %s directory" %
                      (tar_file, directory))
        with tarfile.open(tar_file, 'r') as tar:
            tar.extractall(path=directory)
        self.log.info("Archive unpacked!")
    def _read_layers(self, layers, image_id):
        """ Reads the JSON metadata for specified layer / image id.

        Appends the 'Id' of every entry in the Docker history of
        *image_id* to the *layers* list (mutated in place, in the order
        the Docker API returns them; the caller reverses it afterwards).
        """
        for layer in self.docker.history(image_id):
            layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
    def _move_layers(self, layers, src, dest):
        """
        This moves all the layers that should be copied as-is.

        In other words - all layers that are not meant to be squashed will be
        moved from the old image to the new image untouched.
        """
        for layer in layers:
            # Layer ids may carry a 'sha256:' prefix; the on-disk
            # directory name is the bare digest.
            layer_id = layer.replace('sha256:', '')
            self.log.debug("Moving unmodified layer '%s'..." % layer_id)
            shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
    def _marker_files(self, tar, members):
        """
        Searches for marker files in the specified archive.

        Docker marker files are files that have the .wh. prefix in the name.
        These files mark the corresponding file to be removed (hidden) when
        we start a container from the image.

        Returns a dict mapping each marker TarInfo to its extracted file
        object.
        """
        marker_files = {}
        self.log.debug(
            "Searching for marker files in '%s' archive..." % tar.name)
        for member in members:
            # Matches '.wh.' anywhere in the path, so markers inside
            # nested directories are found too.
            if '.wh.' in member.name:
                self.log.debug("Found '%s' marker file" % member.name)
                marker_files[member] = tar.extractfile(member)
        self.log.debug("Done, found %s files" % len(marker_files))
        return marker_files
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(hardlinks_in_layer):
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# We need to check if we should skip adding back the hard link
# This can happen in the following situations:
# 1. hard link is on the list of files to skip
# 2. hard link target is on the list of files to skip
# 3. hard link is already in squashed files
# 4. hard link target is NOT in already squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
    def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
        """Append one tar member to the squashed archive unless it must be skipped.

        Skips the member when its normalized path was already squashed or
        is covered by *to_skip*; otherwise adds it (with *content* for
        regular files) and records the path in *squashed_files*.
        """
        normalized_name = self._normalize_path(member.name)
        if normalized_name in squashed_files:
            self.log.debug(
                "Skipping file '%s' because it is already squashed" % normalized_name)
            return
        if self._file_should_be_skipped(normalized_name, to_skip):
            self.log.debug(
                "Skipping '%s' file because it's on the list to skip files" % normalized_name)
            return
        if content:
            squashed_tar.addfile(member, content)
        else:
            # Special case: other(?) files, we skip the file
            # itself
            squashed_tar.addfile(member)
        # We added a file to the squashed tar, so let's note it
        squashed_files.append(normalized_name)
def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
added_symlinks = []
for layer, symlinks_in_layer in enumerate(skipped_sym_links):
# We need to start from 1, that's why we bump it here
current_layer = layer + 1
for member in six.itervalues(symlinks_in_layer):
# Handling symlinks. This is similar to hard links with one
# difference. Sometimes we do want to have broken symlinks
# be addedeither case because these can point to locations
# that will become avaialble after adding volumes for example.
normalized_name = self._normalize_path(member.name)
normalized_linkname = self._normalize_path(member.linkname)
# File is already in squashed files, skipping
if normalized_name in squashed_files:
self.log.debug(
"Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
continue
if self._file_should_be_skipped(normalized_name, added_symlinks):
self.log.debug(
"Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
continue
# Find out if the name is on the list of files to skip - if it is - get the layer number
# where it was found
layer_skip_name = self._file_should_be_skipped(
normalized_name, to_skip)
# Do the same for linkname
layer_skip_linkname = self._file_should_be_skipped(
normalized_linkname, to_skip)
# If name or linkname was found in the lists of files to be
# skipped or it's not found in the squashed files
if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
normalized_name, normalized_linkname))
else:
if self.debug:
self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
normalized_name, normalized_linkname))
added_symlinks.append([normalized_name])
squashed_files.append(normalized_name)
squashed_tar.addfile(member)
return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# Find all files in layers that we don't squash
files_in_layers_to_move = self._files_in_layers(
layers_to_move, self.old_image_dir)
with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
to_skip = []
skipped_markers = {}
skipped_hard_links = []
skipped_sym_links = []
skipped_files = []
# List of filenames in the squashed archive
squashed_files = []
for layer_id in layers_to_squash:
layer_tar_file = os.path.join(
self.old_image_dir, layer_id, "layer.tar")
self.log.info("Squashing file '%s'..." % layer_tar_file)
# Open the exiting layer to squash
with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
# Find all marker files for all layers
# We need the list of marker files upfront, so we can
# skip unnecessary files
members = layer_tar.getmembers()
markers = self._marker_files(layer_tar, members)
skipped_sym_link_files = {}
skipped_hard_link_files = {}
files_to_skip = []
# Iterate over the marker files found for this particular
# layer and if in the squashed layers file corresponding
# to the marker file is found, then skip both files
for marker, marker_file in six.iteritems(markers):
actual_file = marker.name.replace('.wh.', '')
files_to_skip.append(self._normalize_path(actual_file))
skipped_markers[marker] = marker_file
self.log.debug(
"Searching for symbolic links in '%s' archive..." % layer_tar_file)
# Scan for all symlinks in the layer and save them
# for later processing.
for member in members:
if member.issym():
normalized_name = self._normalize_path(member.name)
skipped_sym_link_files[normalized_name] = member
continue
to_skip.append(files_to_skip)
skipped_sym_links.append(skipped_sym_link_files)
self.log.debug("Done, found %s files" %
len(skipped_sym_link_files))
skipped_files_in_layer = {}
# Copy all the files to the new tar
for member in members:
# Skip all symlinks, we'll investigate them later
if member.issym():
continue
normalized_name = self._normalize_path(member.name)
if member in six.iterkeys(skipped_markers):
self.log.debug(
"Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
continue
if self._file_should_be_skipped(normalized_name, skipped_sym_links):
self.log.debug(
"Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
if member.isfile():
f = (member, layer_tar.extractfile(member))
else:
f = (member, None)
skipped_files_in_layer[normalized_name] = f
continue
# Skip files that are marked to be skipped
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
continue
# Check if file is already added to the archive
if normalized_name in squashed_files:
# File already exist in the squashed archive, skip it because
# file want to add is older than the one already in the archive.
# This is true because we do reverse squashing - from
# newer to older layer
self.log.debug(
"Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
continue
# Hard links are processed after everything else
if member.islnk():
skipped_hard_link_files[normalized_name] = member
continue
content = None
if member.isfile():
content = layer_tar.extractfile(member)
self._add_file(member, content,
squashed_tar, squashed_files, to_skip)
skipped_hard_links.append(skipped_hard_link_files)
skipped_files.append(skipped_files_in_layer)
self._add_hardlinks(squashed_tar, squashed_files,
to_skip, skipped_hard_links)
added_symlinks = self._add_symlinks(
squashed_tar, squashed_files, to_skip, skipped_sym_links)
for layer in skipped_files:
for member, content in six.itervalues(layer):
self._add_file(member, content, squashed_tar,
squashed_files, added_symlinks)
if files_in_layers_to_move:
self._add_markers(skipped_markers, squashed_tar,
files_in_layers_to_move, added_symlinks)
self.log.info("Squashing finished!")
|
goldmann/docker-squash
|
docker_squash/lib/xtarfile.py
|
_proc_pax
|
python
|
def _proc_pax(self, filetar):
# Read the header information.
buf = filetar.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == tarfile.XGLTYPE:
pax_headers = filetar.pax_headers
else:
pax_headers = filetar.pax_headers.copy()
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
try:
keyword = keyword.decode("utf8")
except Exception:
pass
try:
value = value.decode("utf8")
except Exception:
pass
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(filetar)
except tarfile.HeaderError:
raise tarfile.SubsequentHeaderError("missing or bad subsequent header")
if self.type in (tarfile.XHDTYPE, tarfile.SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, filetar.encoding, filetar.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in tarfile.SUPPORTED_TYPES:
offset += next._block(next.size)
filetar.offset = offset
return next
|
Process an extended or global header as described in POSIX.1-2001.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/lib/xtarfile.py#L20-L81
| null |
"""
This is a monkey patching for Python 2 that is required to handle PAX headers
in TAR files that are not decodable to UTF8. It leaves it undecoded and when
adding back to the tar archive the header is not encoded preserving the
original headers.
Reported in RH Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1194473
Original source code was taken from Python 2.7.9.
Credit goes to Vincent Batts:
https://github.com/docker/docker-registry/pull/381
"""
import re
import tarfile
def _create_pax_generic_header(cls, pax_headers, type=tarfile.XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
try:
keyword = keyword.encode("utf8")
except Exception:
pass
try:
value = value.encode("utf8")
except Exception:
pass
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = tarfile.POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, tarfile.USTAR_FORMAT) + \
cls._create_payload(records)
tarfile.TarInfo._proc_pax = _proc_pax
tarfile.TarInfo._create_pax_generic_header = _create_pax_generic_header
|
goldmann/docker-squash
|
docker_squash/lib/xtarfile.py
|
_create_pax_generic_header
|
python
|
def _create_pax_generic_header(cls, pax_headers, type=tarfile.XHDTYPE):
records = []
for keyword, value in pax_headers.iteritems():
try:
keyword = keyword.encode("utf8")
except Exception:
pass
try:
value = value.encode("utf8")
except Exception:
pass
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = tarfile.POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, tarfile.USTAR_FORMAT) + \
cls._create_payload(records)
|
Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/lib/xtarfile.py#L84-L122
| null |
"""
This is a monkey patching for Python 2 that is required to handle PAX headers
in TAR files that are not decodable to UTF8. It leaves it undecoded and when
adding back to the tar archive the header is not encoded preserving the
original headers.
Reported in RH Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1194473
Original source code was taken from Python 2.7.9.
Credit goes to Vincent Batts:
https://github.com/docker/docker-registry/pull/381
"""
import re
import tarfile
def _proc_pax(self, filetar):
"""Process an extended or global header as described in POSIX.1-2001."""
# Read the header information.
buf = filetar.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == tarfile.XGLTYPE:
pax_headers = filetar.pax_headers
else:
pax_headers = filetar.pax_headers.copy()
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
try:
keyword = keyword.decode("utf8")
except Exception:
pass
try:
value = value.decode("utf8")
except Exception:
pass
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(filetar)
except tarfile.HeaderError:
raise tarfile.SubsequentHeaderError("missing or bad subsequent header")
if self.type in (tarfile.XHDTYPE, tarfile.SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, filetar.encoding, filetar.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in tarfile.SUPPORTED_TYPES:
offset += next._block(next.size)
filetar.offset = offset
return next
tarfile.TarInfo._proc_pax = _proc_pax
tarfile.TarInfo._create_pax_generic_header = _create_pax_generic_header
|
goldmann/docker-squash
|
docker_squash/v2_image.py
|
V2Image._read_json_file
|
python
|
def _read_json_file(self, json_file):
self.log.debug("Reading '%s' JSON file..." % json_file)
with open(json_file, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
|
Helper function to read JSON file as OrderedDict
|
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/v2_image.py#L122-L128
| null |
class V2Image(Image):
FORMAT = 'v2'
def _before_squashing(self):
super(V2Image, self)._before_squashing()
# Read old image manifest file
self.old_image_manifest = self._read_json_file(
os.path.join(self.old_image_dir, "manifest.json"))[0]
# Read old image config file
self.old_image_config = self._read_json_file(os.path.join(
self.old_image_dir, self.old_image_manifest['Config']))
# Read layer paths inside of the tar archive
# We split it into layers that needs to be squashed
# and layers that needs to be moved as-is
self.layer_paths_to_squash, self.layer_paths_to_move = self._read_layer_paths(
self.old_image_config, self.old_image_manifest, self.layers_to_move)
if self.layer_paths_to_move:
self.squash_id = self.layer_paths_to_move[-1]
def _squash(self):
if self.layer_paths_to_squash:
# Prepare the directory
os.makedirs(self.squashed_dir)
# Merge data layers
self._squash_layers(self.layer_paths_to_squash,
self.layer_paths_to_move)
self.diff_ids = self._generate_diff_ids()
self.chain_ids = self._generate_chain_ids(self.diff_ids)
metadata = self._generate_image_metadata()
image_id = self._write_image_metadata(metadata)
layer_path_id = None
if self.layer_paths_to_squash:
# Compute layer id to use to name the directory where
# we store the layer data inside of the tar archive
layer_path_id = self._generate_squashed_layer_path_id()
metadata = self._generate_last_layer_metadata(
layer_path_id, self.layer_paths_to_squash[0])
self._write_squashed_layer_metadata(metadata)
# Write version file to the squashed layer
# Even Docker doesn't know why it's needed...
self._write_version_file(self.squashed_dir)
# Move the temporary squashed layer directory to the correct one
shutil.move(self.squashed_dir, os.path.join(
self.new_image_dir, layer_path_id))
manifest = self._generate_manifest_metadata(
image_id, self.image_name, self.image_tag, self.old_image_manifest, self.layer_paths_to_move, layer_path_id)
self._write_manifest_metadata(manifest)
repository_image_id = manifest[0]["Layers"][-1].split("/")[0]
# Move all the layers that should be untouched
self._move_layers(self.layer_paths_to_move,
self.old_image_dir, self.new_image_dir)
repositories_file = os.path.join(self.new_image_dir, "repositories")
self._generate_repositories_json(
repositories_file, repository_image_id, self.image_name, self.image_tag)
return image_id
def _write_image_metadata(self, metadata):
# Create JSON from the metadata
# Docker adds new line at the end
json_metadata, image_id = self._dump_json(metadata, True)
image_metadata_file = os.path.join(
self.new_image_dir, "%s.json" % image_id)
self._write_json_metadata(json_metadata, image_metadata_file)
return image_id
def _write_squashed_layer_metadata(self, metadata):
layer_metadata_file = os.path.join(self.squashed_dir, "json")
json_metadata = self._dump_json(metadata)[0]
self._write_json_metadata(json_metadata, layer_metadata_file)
def _write_manifest_metadata(self, manifest):
manifest_file = os.path.join(self.new_image_dir, "manifest.json")
json_manifest = self._dump_json(manifest, True)[0]
self._write_json_metadata(json_manifest, manifest_file)
def _generate_manifest_metadata(self, image_id, image_name, image_tag, old_image_manifest, layer_paths_to_move, layer_path_id=None):
manifest = OrderedDict()
manifest['Config'] = "%s.json" % image_id
if image_name and image_tag:
manifest['RepoTags'] = ["%s:%s" % (image_name, image_tag)]
manifest['Layers'] = old_image_manifest[
'Layers'][:len(layer_paths_to_move)]
if layer_path_id:
manifest['Layers'].append("%s/layer.tar" % layer_path_id)
return [manifest]
def _read_layer_paths(self, old_image_config, old_image_manifest, layers_to_move):
"""
In case of v2 format, layer id's are not the same as the id's
used in the exported tar archive to name directories for layers.
These id's can be found in the configuration files saved with
the image - we need to read them.
"""
# In manifest.json we do not have listed all layers
# but only layers that do contain some data.
current_manifest_layer = 0
layer_paths_to_move = []
layer_paths_to_squash = []
# Iterate over image history, from base image to top layer
for i, layer in enumerate(old_image_config['history']):
# If it's not an empty layer get the id
# (directory name) where the layer's data is
# stored
if not layer.get('empty_layer', False):
layer_id = old_image_manifest['Layers'][
current_manifest_layer].rsplit('/')[0]
# Check if this layer should be moved or squashed
if len(layers_to_move) > i:
layer_paths_to_move.append(layer_id)
else:
layer_paths_to_squash.append(layer_id)
current_manifest_layer += 1
return layer_paths_to_squash, layer_paths_to_move
def _generate_chain_id(self, chain_ids, diff_ids, parent_chain_id):
if parent_chain_id == None:
return self._generate_chain_id(chain_ids, diff_ids[1:], diff_ids[0])
chain_ids.append(parent_chain_id)
if len(diff_ids) == 0:
return parent_chain_id
# This probably should not be hardcoded
to_hash = "sha256:%s sha256:%s" % (parent_chain_id, diff_ids[0])
digest = hashlib.sha256(str(to_hash).encode('utf8')).hexdigest()
return self._generate_chain_id(chain_ids, diff_ids[1:], digest)
def _generate_chain_ids(self, diff_ids):
chain_ids = []
self._generate_chain_id(chain_ids, diff_ids, None)
return chain_ids
def _generate_diff_ids(self):
diff_ids = []
for path in self.layer_paths_to_move:
sha256 = self._compute_sha256(os.path.join(self.old_image_dir, path, "layer.tar"))
diff_ids.append(sha256)
if self.layer_paths_to_squash:
sha256 = self._compute_sha256(os.path.join(self.squashed_dir, "layer.tar"))
diff_ids.append(sha256)
return diff_ids
def _compute_sha256(self, layer_tar):
sha256 = hashlib.sha256()
with open(layer_tar, 'rb') as f:
while True:
# Read in 10MB chunks
data = f.read(10485760)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
def _generate_squashed_layer_path_id(self):
"""
This function generates the id used to name the directory to
store the squashed layer content in the archive.
This mimics what Docker does here: https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L42
To make it simpler we do reuse old image metadata and
modify it to what it should look which means to be exact
as https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L64
"""
# Using OrderedDict, because order of JSON elements is important
v1_metadata = OrderedDict(self.old_image_config)
# Update image creation date
v1_metadata['created'] = self.date
# Remove unnecessary elements
# Do not fail if key is not found
for key in 'history', 'rootfs', 'container':
v1_metadata.pop(key, None)
# Docker internally changes the order of keys between
# exported metadata (why oh why?!). We need to add 'os'
# element after 'layer_id'
operating_system = v1_metadata.pop('os', None)
# The 'layer_id' element is the chain_id of the
# squashed layer
v1_metadata['layer_id'] = "sha256:%s" % self.chain_ids[-1]
# Add back 'os' element
if operating_system:
v1_metadata['os'] = operating_system
# The 'parent' element is the name of the directory (inside the
# exported tar archive) of the last layer that we move
# (layer below squashed layer)
if self.layer_paths_to_move:
if self.layer_paths_to_squash:
parent = self.layer_paths_to_move[-1]
else:
parent = self.layer_paths_to_move[0]
v1_metadata['parent'] = "sha256:%s" % parent
# The 'Image' element is the id of the layer from which we squash
if self.squash_id:
# Update image id, should be one layer below squashed layer
v1_metadata['config']['Image'] = self.squash_id
else:
v1_metadata['config']['Image'] = ""
# Get the sha256sum of the JSON exported metadata,
# we do not care about the metadata anymore
sha = self._dump_json(v1_metadata)[1]
return sha
def _generate_last_layer_metadata(self, layer_path_id, old_layer_path=None):
    """Build the legacy "json" metadata for the squashed (last) layer.

    Reads the per-layer "json" file of *old_layer_path* (defaulting to
    *layer_path_id*) from the old image directory and rewrites the fields
    that must differ in the squashed image.
    """
    source_dir = old_layer_path or layer_path_id
    metadata_path = os.path.join(self.old_image_dir, source_dir, "json")
    # Preserve key order: the layer metadata is later serialized to JSON.
    with open(metadata_path, 'r') as metadata_file:
        layer_config = json.load(metadata_file, object_pairs_hook=OrderedDict)
    layer_config['created'] = self.date
    # 'Image' should point at the layer one below the squashed layer,
    # or be empty when there is no such layer.
    layer_config['config']['Image'] = self.squash_id if self.squash_id else ""
    # 'parent' is the path of the last layer that is moved as-is, if any.
    if self.layer_paths_to_move:
        layer_config['parent'] = self.layer_paths_to_move[-1]
    else:
        layer_config.pop("parent", None)
    # 'id' becomes the directory name used for the squashed layer.
    layer_config['id'] = layer_path_id
    layer_config.pop("container", None)
    return layer_config
def _generate_image_metadata(self):
# First - read old image config, we'll update it instead of
# generating one from scratch
metadata = OrderedDict(self.old_image_config)
# Update image creation date
metadata['created'] = self.date
# Remove unnecessary or old fields
metadata.pop("container", None)
# Remove squashed layers from history
metadata['history'] = metadata['history'][:len(self.layers_to_move)]
# Remove diff_ids for squashed layers
metadata['rootfs']['diff_ids'] = metadata['rootfs'][
'diff_ids'][:len(self.layer_paths_to_move)]
history = {'comment': '', 'created': self.date}
if self.layer_paths_to_squash:
# Add diff_ids for the squashed layer
metadata['rootfs']['diff_ids'].append(
"sha256:%s" % self.diff_ids[-1])
else:
history['empty_layer'] = True
# Add new entry for squashed layer to history
metadata['history'].append(history)
if self.squash_id:
# Update image id, should be one layer below squashed layer
metadata['config']['Image'] = self.squash_id
else:
metadata['config']['Image'] = ""
return metadata
|
goldmann/docker-squash
|
docker_squash/v2_image.py
|
V2Image._read_layer_paths
|
python
|
def _read_layer_paths(self, old_image_config, old_image_manifest, layers_to_move):
# In manifest.json we do not have listed all layers
# but only layers that do contain some data.
current_manifest_layer = 0
layer_paths_to_move = []
layer_paths_to_squash = []
# Iterate over image history, from base image to top layer
for i, layer in enumerate(old_image_config['history']):
# If it's not an empty layer get the id
# (directory name) where the layer's data is
# stored
if not layer.get('empty_layer', False):
layer_id = old_image_manifest['Layers'][
current_manifest_layer].rsplit('/')[0]
# Check if this layer should be moved or squashed
if len(layers_to_move) > i:
layer_paths_to_move.append(layer_id)
else:
layer_paths_to_squash.append(layer_id)
current_manifest_layer += 1
return layer_paths_to_squash, layer_paths_to_move
|
In the v2 format, layer IDs are not the same as the IDs
used in the exported tar archive to name layer directories.
These IDs can be found in the configuration files saved with
the image, so we need to read them.
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/v2_image.py#L130-L163
| null |
class V2Image(Image):
FORMAT = 'v2'
def _before_squashing(self):
super(V2Image, self)._before_squashing()
# Read old image manifest file
self.old_image_manifest = self._read_json_file(
os.path.join(self.old_image_dir, "manifest.json"))[0]
# Read old image config file
self.old_image_config = self._read_json_file(os.path.join(
self.old_image_dir, self.old_image_manifest['Config']))
# Read layer paths inside of the tar archive
# We split it into layers that needs to be squashed
# and layers that needs to be moved as-is
self.layer_paths_to_squash, self.layer_paths_to_move = self._read_layer_paths(
self.old_image_config, self.old_image_manifest, self.layers_to_move)
if self.layer_paths_to_move:
self.squash_id = self.layer_paths_to_move[-1]
def _squash(self):
if self.layer_paths_to_squash:
# Prepare the directory
os.makedirs(self.squashed_dir)
# Merge data layers
self._squash_layers(self.layer_paths_to_squash,
self.layer_paths_to_move)
self.diff_ids = self._generate_diff_ids()
self.chain_ids = self._generate_chain_ids(self.diff_ids)
metadata = self._generate_image_metadata()
image_id = self._write_image_metadata(metadata)
layer_path_id = None
if self.layer_paths_to_squash:
# Compute layer id to use to name the directory where
# we store the layer data inside of the tar archive
layer_path_id = self._generate_squashed_layer_path_id()
metadata = self._generate_last_layer_metadata(
layer_path_id, self.layer_paths_to_squash[0])
self._write_squashed_layer_metadata(metadata)
# Write version file to the squashed layer
# Even Docker doesn't know why it's needed...
self._write_version_file(self.squashed_dir)
# Move the temporary squashed layer directory to the correct one
shutil.move(self.squashed_dir, os.path.join(
self.new_image_dir, layer_path_id))
manifest = self._generate_manifest_metadata(
image_id, self.image_name, self.image_tag, self.old_image_manifest, self.layer_paths_to_move, layer_path_id)
self._write_manifest_metadata(manifest)
repository_image_id = manifest[0]["Layers"][-1].split("/")[0]
# Move all the layers that should be untouched
self._move_layers(self.layer_paths_to_move,
self.old_image_dir, self.new_image_dir)
repositories_file = os.path.join(self.new_image_dir, "repositories")
self._generate_repositories_json(
repositories_file, repository_image_id, self.image_name, self.image_tag)
return image_id
def _write_image_metadata(self, metadata):
# Create JSON from the metadata
# Docker adds new line at the end
json_metadata, image_id = self._dump_json(metadata, True)
image_metadata_file = os.path.join(
self.new_image_dir, "%s.json" % image_id)
self._write_json_metadata(json_metadata, image_metadata_file)
return image_id
def _write_squashed_layer_metadata(self, metadata):
layer_metadata_file = os.path.join(self.squashed_dir, "json")
json_metadata = self._dump_json(metadata)[0]
self._write_json_metadata(json_metadata, layer_metadata_file)
def _write_manifest_metadata(self, manifest):
manifest_file = os.path.join(self.new_image_dir, "manifest.json")
json_manifest = self._dump_json(manifest, True)[0]
self._write_json_metadata(json_manifest, manifest_file)
def _generate_manifest_metadata(self, image_id, image_name, image_tag, old_image_manifest, layer_paths_to_move, layer_path_id=None):
manifest = OrderedDict()
manifest['Config'] = "%s.json" % image_id
if image_name and image_tag:
manifest['RepoTags'] = ["%s:%s" % (image_name, image_tag)]
manifest['Layers'] = old_image_manifest[
'Layers'][:len(layer_paths_to_move)]
if layer_path_id:
manifest['Layers'].append("%s/layer.tar" % layer_path_id)
return [manifest]
def _read_json_file(self, json_file):
""" Helper function to read JSON file as OrderedDict """
self.log.debug("Reading '%s' JSON file..." % json_file)
with open(json_file, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
def _generate_chain_id(self, chain_ids, diff_ids, parent_chain_id):
if parent_chain_id == None:
return self._generate_chain_id(chain_ids, diff_ids[1:], diff_ids[0])
chain_ids.append(parent_chain_id)
if len(diff_ids) == 0:
return parent_chain_id
# This probably should not be hardcoded
to_hash = "sha256:%s sha256:%s" % (parent_chain_id, diff_ids[0])
digest = hashlib.sha256(str(to_hash).encode('utf8')).hexdigest()
return self._generate_chain_id(chain_ids, diff_ids[1:], digest)
def _generate_chain_ids(self, diff_ids):
chain_ids = []
self._generate_chain_id(chain_ids, diff_ids, None)
return chain_ids
def _generate_diff_ids(self):
diff_ids = []
for path in self.layer_paths_to_move:
sha256 = self._compute_sha256(os.path.join(self.old_image_dir, path, "layer.tar"))
diff_ids.append(sha256)
if self.layer_paths_to_squash:
sha256 = self._compute_sha256(os.path.join(self.squashed_dir, "layer.tar"))
diff_ids.append(sha256)
return diff_ids
def _compute_sha256(self, layer_tar):
sha256 = hashlib.sha256()
with open(layer_tar, 'rb') as f:
while True:
# Read in 10MB chunks
data = f.read(10485760)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
def _generate_squashed_layer_path_id(self):
"""
This function generates the id used to name the directory to
store the squashed layer content in the archive.
This mimics what Docker does here: https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L42
To make it simpler we do reuse old image metadata and
modify it to what it should look which means to be exact
as https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L64
"""
# Using OrderedDict, because order of JSON elements is important
v1_metadata = OrderedDict(self.old_image_config)
# Update image creation date
v1_metadata['created'] = self.date
# Remove unnecessary elements
# Do not fail if key is not found
for key in 'history', 'rootfs', 'container':
v1_metadata.pop(key, None)
# Docker internally changes the order of keys between
# exported metadata (why oh why?!). We need to add 'os'
# element after 'layer_id'
operating_system = v1_metadata.pop('os', None)
# The 'layer_id' element is the chain_id of the
# squashed layer
v1_metadata['layer_id'] = "sha256:%s" % self.chain_ids[-1]
# Add back 'os' element
if operating_system:
v1_metadata['os'] = operating_system
# The 'parent' element is the name of the directory (inside the
# exported tar archive) of the last layer that we move
# (layer below squashed layer)
if self.layer_paths_to_move:
if self.layer_paths_to_squash:
parent = self.layer_paths_to_move[-1]
else:
parent = self.layer_paths_to_move[0]
v1_metadata['parent'] = "sha256:%s" % parent
# The 'Image' element is the id of the layer from which we squash
if self.squash_id:
# Update image id, should be one layer below squashed layer
v1_metadata['config']['Image'] = self.squash_id
else:
v1_metadata['config']['Image'] = ""
# Get the sha256sum of the JSON exported metadata,
# we do not care about the metadata anymore
sha = self._dump_json(v1_metadata)[1]
return sha
def _generate_last_layer_metadata(self, layer_path_id, old_layer_path=None):
if not old_layer_path:
old_layer_path = layer_path_id
config_file = os.path.join(
self.old_image_dir, old_layer_path, "json")
with open(config_file, 'r') as f:
config = json.load(f, object_pairs_hook=OrderedDict)
config['created'] = self.date
if self.squash_id:
# Update image id, should be one layer below squashed layer
config['config']['Image'] = self.squash_id
else:
config['config']['Image'] = ""
# Update 'parent' - it should be path to the last layer to move
if self.layer_paths_to_move:
config['parent'] = self.layer_paths_to_move[-1]
else:
config.pop("parent", None)
# Update 'id' - it should be the path to the layer
config['id'] = layer_path_id
config.pop("container", None)
return config
def _generate_image_metadata(self):
# First - read old image config, we'll update it instead of
# generating one from scratch
metadata = OrderedDict(self.old_image_config)
# Update image creation date
metadata['created'] = self.date
# Remove unnecessary or old fields
metadata.pop("container", None)
# Remove squashed layers from history
metadata['history'] = metadata['history'][:len(self.layers_to_move)]
# Remove diff_ids for squashed layers
metadata['rootfs']['diff_ids'] = metadata['rootfs'][
'diff_ids'][:len(self.layer_paths_to_move)]
history = {'comment': '', 'created': self.date}
if self.layer_paths_to_squash:
# Add diff_ids for the squashed layer
metadata['rootfs']['diff_ids'].append(
"sha256:%s" % self.diff_ids[-1])
else:
history['empty_layer'] = True
# Add new entry for squashed layer to history
metadata['history'].append(history)
if self.squash_id:
# Update image id, should be one layer below squashed layer
metadata['config']['Image'] = self.squash_id
else:
metadata['config']['Image'] = ""
return metadata
|
goldmann/docker-squash
|
docker_squash/v2_image.py
|
V2Image._generate_squashed_layer_path_id
|
python
|
def _generate_squashed_layer_path_id(self):
    """Return the ID naming the squashed layer's directory in the archive.

    Builds a v1-style metadata dict from the old image config and returns
    the sha256 of its JSON serialization. NOTE: key insertion order below
    is deliberate — it determines the serialized JSON and thus the hash;
    do not reorder these assignments.
    """
    # Using OrderedDict, because order of JSON elements is important
    v1_metadata = OrderedDict(self.old_image_config)
    # Update image creation date
    v1_metadata['created'] = self.date
    # Remove unnecessary elements
    # Do not fail if key is not found
    for key in 'history', 'rootfs', 'container':
        v1_metadata.pop(key, None)
    # Docker internally changes the order of keys between
    # exported metadata (why oh why?!). We need to add 'os'
    # element after 'layer_id'
    operating_system = v1_metadata.pop('os', None)
    # The 'layer_id' element is the chain_id of the
    # squashed layer
    v1_metadata['layer_id'] = "sha256:%s" % self.chain_ids[-1]
    # Add back 'os' element
    if operating_system:
        v1_metadata['os'] = operating_system
    # The 'parent' element is the name of the directory (inside the
    # exported tar archive) of the last layer that we move
    # (layer below squashed layer)
    if self.layer_paths_to_move:
        if self.layer_paths_to_squash:
            parent = self.layer_paths_to_move[-1]
        else:
            parent = self.layer_paths_to_move[0]
        v1_metadata['parent'] = "sha256:%s" % parent
    # The 'Image' element is the id of the layer from which we squash
    if self.squash_id:
        # Update image id, should be one layer below squashed layer
        v1_metadata['config']['Image'] = self.squash_id
    else:
        v1_metadata['config']['Image'] = ""
    # Get the sha256sum of the JSON exported metadata,
    # we do not care about the metadata anymore
    sha = self._dump_json(v1_metadata)[1]
    return sha
|
This function generates the ID used to name the directory that
stores the squashed layer content in the archive.
It mimics what Docker does here: https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L42
To keep things simple we reuse the old image metadata and
modify it into the exact form produced by
https://github.com/docker/docker/blob/v1.10.0-rc1/image/v1/imagev1.go#L64
train
|
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/v2_image.py#L215-L273
| null |
class V2Image(Image):
FORMAT = 'v2'
def _before_squashing(self):
super(V2Image, self)._before_squashing()
# Read old image manifest file
self.old_image_manifest = self._read_json_file(
os.path.join(self.old_image_dir, "manifest.json"))[0]
# Read old image config file
self.old_image_config = self._read_json_file(os.path.join(
self.old_image_dir, self.old_image_manifest['Config']))
# Read layer paths inside of the tar archive
# We split it into layers that needs to be squashed
# and layers that needs to be moved as-is
self.layer_paths_to_squash, self.layer_paths_to_move = self._read_layer_paths(
self.old_image_config, self.old_image_manifest, self.layers_to_move)
if self.layer_paths_to_move:
self.squash_id = self.layer_paths_to_move[-1]
def _squash(self):
if self.layer_paths_to_squash:
# Prepare the directory
os.makedirs(self.squashed_dir)
# Merge data layers
self._squash_layers(self.layer_paths_to_squash,
self.layer_paths_to_move)
self.diff_ids = self._generate_diff_ids()
self.chain_ids = self._generate_chain_ids(self.diff_ids)
metadata = self._generate_image_metadata()
image_id = self._write_image_metadata(metadata)
layer_path_id = None
if self.layer_paths_to_squash:
# Compute layer id to use to name the directory where
# we store the layer data inside of the tar archive
layer_path_id = self._generate_squashed_layer_path_id()
metadata = self._generate_last_layer_metadata(
layer_path_id, self.layer_paths_to_squash[0])
self._write_squashed_layer_metadata(metadata)
# Write version file to the squashed layer
# Even Docker doesn't know why it's needed...
self._write_version_file(self.squashed_dir)
# Move the temporary squashed layer directory to the correct one
shutil.move(self.squashed_dir, os.path.join(
self.new_image_dir, layer_path_id))
manifest = self._generate_manifest_metadata(
image_id, self.image_name, self.image_tag, self.old_image_manifest, self.layer_paths_to_move, layer_path_id)
self._write_manifest_metadata(manifest)
repository_image_id = manifest[0]["Layers"][-1].split("/")[0]
# Move all the layers that should be untouched
self._move_layers(self.layer_paths_to_move,
self.old_image_dir, self.new_image_dir)
repositories_file = os.path.join(self.new_image_dir, "repositories")
self._generate_repositories_json(
repositories_file, repository_image_id, self.image_name, self.image_tag)
return image_id
def _write_image_metadata(self, metadata):
# Create JSON from the metadata
# Docker adds new line at the end
json_metadata, image_id = self._dump_json(metadata, True)
image_metadata_file = os.path.join(
self.new_image_dir, "%s.json" % image_id)
self._write_json_metadata(json_metadata, image_metadata_file)
return image_id
def _write_squashed_layer_metadata(self, metadata):
layer_metadata_file = os.path.join(self.squashed_dir, "json")
json_metadata = self._dump_json(metadata)[0]
self._write_json_metadata(json_metadata, layer_metadata_file)
def _write_manifest_metadata(self, manifest):
manifest_file = os.path.join(self.new_image_dir, "manifest.json")
json_manifest = self._dump_json(manifest, True)[0]
self._write_json_metadata(json_manifest, manifest_file)
def _generate_manifest_metadata(self, image_id, image_name, image_tag, old_image_manifest, layer_paths_to_move, layer_path_id=None):
manifest = OrderedDict()
manifest['Config'] = "%s.json" % image_id
if image_name and image_tag:
manifest['RepoTags'] = ["%s:%s" % (image_name, image_tag)]
manifest['Layers'] = old_image_manifest[
'Layers'][:len(layer_paths_to_move)]
if layer_path_id:
manifest['Layers'].append("%s/layer.tar" % layer_path_id)
return [manifest]
def _read_json_file(self, json_file):
""" Helper function to read JSON file as OrderedDict """
self.log.debug("Reading '%s' JSON file..." % json_file)
with open(json_file, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
def _read_layer_paths(self, old_image_config, old_image_manifest, layers_to_move):
"""
In case of v2 format, layer id's are not the same as the id's
used in the exported tar archive to name directories for layers.
These id's can be found in the configuration files saved with
the image - we need to read them.
"""
# In manifest.json we do not have listed all layers
# but only layers that do contain some data.
current_manifest_layer = 0
layer_paths_to_move = []
layer_paths_to_squash = []
# Iterate over image history, from base image to top layer
for i, layer in enumerate(old_image_config['history']):
# If it's not an empty layer get the id
# (directory name) where the layer's data is
# stored
if not layer.get('empty_layer', False):
layer_id = old_image_manifest['Layers'][
current_manifest_layer].rsplit('/')[0]
# Check if this layer should be moved or squashed
if len(layers_to_move) > i:
layer_paths_to_move.append(layer_id)
else:
layer_paths_to_squash.append(layer_id)
current_manifest_layer += 1
return layer_paths_to_squash, layer_paths_to_move
def _generate_chain_id(self, chain_ids, diff_ids, parent_chain_id):
if parent_chain_id == None:
return self._generate_chain_id(chain_ids, diff_ids[1:], diff_ids[0])
chain_ids.append(parent_chain_id)
if len(diff_ids) == 0:
return parent_chain_id
# This probably should not be hardcoded
to_hash = "sha256:%s sha256:%s" % (parent_chain_id, diff_ids[0])
digest = hashlib.sha256(str(to_hash).encode('utf8')).hexdigest()
return self._generate_chain_id(chain_ids, diff_ids[1:], digest)
def _generate_chain_ids(self, diff_ids):
chain_ids = []
self._generate_chain_id(chain_ids, diff_ids, None)
return chain_ids
def _generate_diff_ids(self):
diff_ids = []
for path in self.layer_paths_to_move:
sha256 = self._compute_sha256(os.path.join(self.old_image_dir, path, "layer.tar"))
diff_ids.append(sha256)
if self.layer_paths_to_squash:
sha256 = self._compute_sha256(os.path.join(self.squashed_dir, "layer.tar"))
diff_ids.append(sha256)
return diff_ids
def _compute_sha256(self, layer_tar):
sha256 = hashlib.sha256()
with open(layer_tar, 'rb') as f:
while True:
# Read in 10MB chunks
data = f.read(10485760)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
def _generate_last_layer_metadata(self, layer_path_id, old_layer_path=None):
if not old_layer_path:
old_layer_path = layer_path_id
config_file = os.path.join(
self.old_image_dir, old_layer_path, "json")
with open(config_file, 'r') as f:
config = json.load(f, object_pairs_hook=OrderedDict)
config['created'] = self.date
if self.squash_id:
# Update image id, should be one layer below squashed layer
config['config']['Image'] = self.squash_id
else:
config['config']['Image'] = ""
# Update 'parent' - it should be path to the last layer to move
if self.layer_paths_to_move:
config['parent'] = self.layer_paths_to_move[-1]
else:
config.pop("parent", None)
# Update 'id' - it should be the path to the layer
config['id'] = layer_path_id
config.pop("container", None)
return config
def _generate_image_metadata(self):
# First - read old image config, we'll update it instead of
# generating one from scratch
metadata = OrderedDict(self.old_image_config)
# Update image creation date
metadata['created'] = self.date
# Remove unnecessary or old fields
metadata.pop("container", None)
# Remove squashed layers from history
metadata['history'] = metadata['history'][:len(self.layers_to_move)]
# Remove diff_ids for squashed layers
metadata['rootfs']['diff_ids'] = metadata['rootfs'][
'diff_ids'][:len(self.layer_paths_to_move)]
history = {'comment': '', 'created': self.date}
if self.layer_paths_to_squash:
# Add diff_ids for the squashed layer
metadata['rootfs']['diff_ids'].append(
"sha256:%s" % self.diff_ids[-1])
else:
history['empty_layer'] = True
# Add new entry for squashed layer to history
metadata['history'].append(history)
if self.squash_id:
# Update image id, should be one layer below squashed layer
metadata['config']['Image'] = self.squash_id
else:
metadata['config']['Image'] = ""
return metadata
|
ahawker/ulid
|
ulid/ulid.py
|
Timestamp.datetime
|
python
|
def datetime(self) -> hints.Datetime:
    """Return the timestamp as a UTC-naive :class:`~datetime.datetime`
    with millisecond precision (interprets ``self.int`` as Unix
    milliseconds)."""
    epoch_millis = self.int
    seconds, millis = divmod(epoch_millis, 1000)
    base = datetime.datetime.utcfromtimestamp(seconds)
    return base.replace(microsecond=millis * 1000)
|
Creates a :class:`~datetime.datetime` instance (assumes UTC) from the Unix time value of the timestamp
with millisecond precision.
:return: Timestamp in datetime form.
:rtype: :class:`~datetime.datetime`
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/ulid.py#L182-L191
| null |
class Timestamp(MemoryView):
"""
Represents the timestamp portion of a ULID.
* Unix time (time since epoch) in milliseconds.
* First 48 bits of ULID when in binary format.
* First 10 characters of ULID when in string format.
"""
__slots__ = MemoryView.__slots__
@property
def str(self) -> hints.Str:
"""
Computes the string value of the timestamp from the underlying :class:`~memoryview` in Base32 encoding.
:return: Timestamp in Base32 string form.
:rtype: :class:`~str`
:raises ValueError: if underlying :class:`~memoryview` cannot be encoded
"""
return base32.encode_timestamp(self.memory)
@property
def timestamp(self) -> hints.Float:
"""
Computes the Unix time (seconds since epoch) from its :class:`~memoryview`.
:return: Timestamp in Unix time (seconds since epoch) form.
:rtype: :class:`~float`
"""
return self.int / 1000.0
@property
|
ahawker/ulid
|
ulid/api.py
|
new
|
python
|
def new() -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance.

    The timestamp is created from :func:`~time.time`; the randomness
    from :func:`~os.urandom`.

    :return: ULID from current timestamp
    :rtype: :class:`~ulid.ulid.ULID`
    """
    epoch_ms = int(time.time() * 1000)
    return ulid.ULID(epoch_ms.to_bytes(6, byteorder='big') + os.urandom(10))
|
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L35-L47
| null |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def parse(value: ULIDPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
"""
if isinstance(value, ulid.ULID):
return value
if isinstance(value, uuid.UUID):
return from_uuid(value)
if isinstance(value, str):
len_value = len(value)
if len_value == 36:
return from_uuid(uuid.UUID(value))
if len_value == 32:
return from_uuid(uuid.UUID(value))
if len_value == 26:
return from_str(value)
if len_value == 16:
return from_randomness(value)
if len_value == 10:
return from_timestamp(value)
raise ValueError('Cannot create ULID from string of length {}'.format(len_value))
if isinstance(value, (int, float)):
return from_int(int(value))
if isinstance(value, (bytes, bytearray)):
return from_bytes(value)
if isinstance(value, memoryview):
return from_bytes(value.tobytes())
raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_bytes(value: hints.Buffer) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
def from_int(value: int) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
"""
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_str(value: str) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
"""
return ulid.ULID(base32.decode_ulid(value))
def from_uuid(value: uuid.UUID) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
"""
return ulid.ULID(value.bytes)
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.

    The following types are supported for timestamp values:

    * :class:`~datetime.datetime`
    * :class:`~int`
    * :class:`~float`
    * :class:`~str`
    * :class:`~memoryview`
    * :class:`~ulid.ulid.Timestamp`
    * :class:`~ulid.ulid.ULID`
    * :class:`~bytes`
    * :class:`~bytearray`

    :param timestamp: Unix timestamp in seconds
    :type timestamp: See docstring for types
    :return: ULID using given timestamp and new randomness
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is an unsupported type
    :raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is or was converted to something other than 48 bits
    """
    # Normalize every supported input type down to a 6-byte big-endian value.
    if isinstance(timestamp, datetime.datetime):
        timestamp = timestamp.timestamp()
    if isinstance(timestamp, (int, float)):
        timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
    elif isinstance(timestamp, str):
        timestamp = base32.decode_timestamp(timestamp)
    elif isinstance(timestamp, memoryview):
        timestamp = timestamp.tobytes()
    elif isinstance(timestamp, ulid.Timestamp):
        timestamp = timestamp.bytes
    elif isinstance(timestamp, ulid.ULID):
        timestamp = timestamp.timestamp().bytes
    if not isinstance(timestamp, (bytes, bytearray)):
        raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
                         'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
    randomness = os.urandom(10)
    return ulid.ULID(timestamp + randomness)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.

    The following types are supported for randomness values:

    * :class:`~int`
    * :class:`~float`
    * :class:`~str`
    * :class:`~memoryview`
    * :class:`~ulid.ulid.Randomness`
    * :class:`~ulid.ulid.ULID`
    * :class:`~bytes`
    * :class:`~bytearray`

    :param randomness: Random bytes
    :type randomness: See docstring for types
    :return: ULID using new timestamp and given randomness
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is an unsupported type
    :raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is or was converted to something other than 80 bits
    """
    # Normalize every supported input type down to a 10-byte value.
    if isinstance(randomness, (int, float)):
        randomness = int(randomness).to_bytes(10, byteorder='big')
    elif isinstance(randomness, str):
        randomness = base32.decode_randomness(randomness)
    elif isinstance(randomness, memoryview):
        randomness = randomness.tobytes()
    elif isinstance(randomness, ulid.Randomness):
        randomness = randomness.bytes
    elif isinstance(randomness, ulid.ULID):
        randomness = randomness.randomness().bytes
    if not isinstance(randomness, (bytes, bytearray)):
        raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
                         'bytes, or bytearray; got {}'.format(type(randomness).__name__))
    length = len(randomness)
    if length != 10:
        raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
    timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
    return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
parse
|
python
|
def parse(value: ULIDPrimitive) -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance from the given value.

    .. note:: This method should only be used when the caller is trying to parse a ULID from
        a value when they're unsure what format/primitive type it will be given in.

    :param value: ULID value of any supported type
    :type value: :class:`~ulid.api.ULIDPrimitive`
    :return: ULID from value
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when unable to parse a ULID from the value
    """
    if isinstance(value, ulid.ULID):
        return value
    if isinstance(value, uuid.UUID):
        return from_uuid(value)
    if isinstance(value, str):
        # Dispatch on string length: 36/32 = UUID forms, 26 = Base32 ULID,
        # 16 = randomness portion, 10 = timestamp portion.
        if len(value) in (36, 32):
            return from_uuid(uuid.UUID(value))
        handler = {26: from_str, 16: from_randomness, 10: from_timestamp}.get(len(value))
        if handler is not None:
            return handler(value)
        raise ValueError('Cannot create ULID from string of length {}'.format(len(value)))
    if isinstance(value, (int, float)):
        return from_int(int(value))
    if isinstance(value, (bytes, bytearray)):
        return from_bytes(value)
    if isinstance(value, memoryview):
        return from_bytes(value.tobytes())
    raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
|
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L50-L86
|
[
"def from_bytes(value: hints.Buffer) -> ulid.ULID:\n \"\"\"\n Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,\n :class:`~bytearray`, or :class:`~memoryview` value.\n\n :param value: 16 bytes\n :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`\n :return: ULID from buffer value\n :rtype: :class:`~ulid.ulid.ULID`\n :raises ValueError: when the value is not 16 bytes\n \"\"\"\n length = len(value)\n if length != 16:\n raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))\n\n return ulid.ULID(value)\n",
"def from_int(value: int) -> ulid.ULID:\n \"\"\"\n Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.\n\n :param value: 128 bit integer\n :type value: :class:`~int`\n :return: ULID from integer value\n :rtype: :class:`~ulid.ulid.ULID`\n :raises ValueError: when the value is not a 128 bit integer\n \"\"\"\n if value < 0:\n raise ValueError('Expects positive integer')\n\n length = (value.bit_length() + 7) // 8\n if length > 16:\n raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))\n\n return ulid.ULID(value.to_bytes(16, byteorder='big'))\n",
"def from_str(value: str) -> ulid.ULID:\n \"\"\"\n Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.\n\n :param value: Base32 encoded string\n :type value: :class:`~str`\n :return: ULID from string value\n :rtype: :class:`~ulid.ulid.ULID`\n :raises ValueError: when the value is not 26 characters or malformed\n \"\"\"\n return ulid.ULID(base32.decode_ulid(value))\n",
"def from_uuid(value: uuid.UUID) -> ulid.ULID:\n \"\"\"\n Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.\n\n :param value: UUIDv4 value\n :type value: :class:`~uuid.UUID`\n :return: ULID from UUID value\n :rtype: :class:`~ulid.ulid.ULID`\n \"\"\"\n return ulid.ULID(value.bytes)\n",
"def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:\n \"\"\"\n Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.\n\n The following types are supported for timestamp values:\n\n * :class:`~datetime.datetime`\n * :class:`~int`\n * :class:`~float`\n * :class:`~str`\n * :class:`~memoryview`\n * :class:`~ulid.ulid.Timestamp`\n * :class:`~ulid.ulid.ULID`\n * :class:`~bytes`\n * :class:`~bytearray`\n\n :param timestamp: Unix timestamp in seconds\n :type timestamp: See docstring for types\n :return: ULID using given timestamp and new randomness\n :rtype: :class:`~ulid.ulid.ULID`\n :raises ValueError: when the value is an unsupported type\n :raises ValueError: when the value is a string and cannot be Base32 decoded\n :raises ValueError: when the value is or was converted to something 48 bits\n \"\"\"\n if isinstance(timestamp, datetime.datetime):\n timestamp = timestamp.timestamp()\n if isinstance(timestamp, (int, float)):\n timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')\n elif isinstance(timestamp, str):\n timestamp = base32.decode_timestamp(timestamp)\n elif isinstance(timestamp, memoryview):\n timestamp = timestamp.tobytes()\n elif isinstance(timestamp, ulid.Timestamp):\n timestamp = timestamp.bytes\n elif isinstance(timestamp, ulid.ULID):\n timestamp = timestamp.timestamp().bytes\n\n if not isinstance(timestamp, (bytes, bytearray)):\n raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '\n 'bytes, or bytearray; got {}'.format(type(timestamp).__name__))\n\n length = len(timestamp)\n if length != 6:\n raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))\n\n randomness = os.urandom(10)\n return ulid.ULID(timestamp + randomness)\n",
"def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:\n \"\"\"\n Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.\n\n The following types are supported for randomness values:\n\n * :class:`~int`\n * :class:`~float`\n * :class:`~str`\n * :class:`~memoryview`\n * :class:`~ulid.ulid.Randomness`\n * :class:`~ulid.ulid.ULID`\n * :class:`~bytes`\n * :class:`~bytearray`\n\n :param randomness: Random bytes\n :type randomness: See docstring for types\n :return: ULID using new timestamp and given randomness\n :rtype: :class:`~ulid.ulid.ULID`\n :raises ValueError: when the value is an unsupported type\n :raises ValueError: when the value is a string and cannot be Base32 decoded\n :raises ValueError: when the value is or was converted to something 80 bits\n \"\"\"\n if isinstance(randomness, (int, float)):\n randomness = int(randomness).to_bytes(10, byteorder='big')\n elif isinstance(randomness, str):\n randomness = base32.decode_randomness(randomness)\n elif isinstance(randomness, memoryview):\n randomness = randomness.tobytes()\n elif isinstance(randomness, ulid.Randomness):\n randomness = randomness.bytes\n elif isinstance(randomness, ulid.ULID):\n randomness = randomness.randomness().bytes\n\n if not isinstance(randomness, (bytes, bytearray)):\n raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '\n 'bytes, or bytearray; got {}'.format(type(randomness).__name__))\n\n length = len(randomness)\n if length != 10:\n raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))\n\n timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')\n return ulid.ULID(timestamp + randomness)\n"
] |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
    """
    Build a fresh :class:`~ulid.ulid.ULID` for the current moment.
    The 48-bit timestamp component comes from :func:`~time.time` (milliseconds)
    and the 80-bit randomness component from :func:`~os.urandom`.
    :return: ULID from current timestamp
    :rtype: :class:`~ulid.ulid.ULID`
    """
    millis = int(time.time() * 1000)
    time_bytes = millis.to_bytes(6, byteorder='big')
    rand_bytes = os.urandom(10)
    return ulid.ULID(time_bytes + rand_bytes)
def from_bytes(value: hints.Buffer) -> ulid.ULID:
    """
    Build a :class:`~ulid.ulid.ULID` from a raw 16-byte buffer.
    Accepts :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`.
    :param value: 16 bytes
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: ULID from buffer value
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is not 16 bytes
    """
    size = len(value)
    if size != 16:
        raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(size))
    return ulid.ULID(value)
def from_int(value: int) -> ulid.ULID:
    """
    Build a :class:`~ulid.ulid.ULID` from a non-negative 128-bit integer.
    :param value: 128 bit integer
    :type value: :class:`~int`
    :return: ULID from integer value
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is not a 128 bit integer
    """
    if value < 0:
        raise ValueError('Expects positive integer')
    # Number of bytes needed to represent the integer, rounded up.
    num_bytes = (value.bit_length() + 7) // 8
    if num_bytes > 16:
        raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(num_bytes))
    return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_str(value: str) -> ulid.ULID:
    """
    Build a :class:`~ulid.ulid.ULID` from its 26-character Base32 string form.
    :param value: Base32 encoded string
    :type value: :class:`~str`
    :return: ULID from string value
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is not 26 characters or malformed
    """
    decoded = base32.decode_ulid(value)
    return ulid.ULID(decoded)
def from_uuid(value: uuid.UUID) -> ulid.ULID:
    """
    Build a :class:`~ulid.ulid.ULID` sharing the same 128 bits as the given UUID.
    :param value: UUIDv4 value
    :type value: :class:`~uuid.UUID`
    :return: ULID from UUID value
    :rtype: :class:`~ulid.ulid.ULID`
    """
    raw = value.bytes
    return ulid.ULID(raw)
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
    The following types are supported for timestamp values:
    * :class:`~datetime.datetime`
    * :class:`~int`
    * :class:`~float`
    * :class:`~str`
    * :class:`~memoryview`
    * :class:`~ulid.ulid.Timestamp`
    * :class:`~ulid.ulid.ULID`
    * :class:`~bytes`
    * :class:`~bytearray`
    :param timestamp: Unix timestamp in seconds
    :type timestamp: See docstring for types
    :return: ULID using given timestamp and new randomness
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is an unsupported type
    :raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is not (or does not convert to) exactly 48 bits
    """
    # Normalize every supported input into a 6-byte (48-bit) big-endian
    # millisecond timestamp. A datetime is first reduced to float seconds so
    # the numeric branch below handles it like any other number.
    if isinstance(timestamp, datetime.datetime):
        timestamp = timestamp.timestamp()
    if isinstance(timestamp, (int, float)):
        # Seconds -> milliseconds before packing into 6 bytes.
        timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
    elif isinstance(timestamp, str):
        # Base32-encoded timestamp prefix of a ULID string.
        timestamp = base32.decode_timestamp(timestamp)
    elif isinstance(timestamp, memoryview):
        timestamp = timestamp.tobytes()
    elif isinstance(timestamp, ulid.Timestamp):
        timestamp = timestamp.bytes
    elif isinstance(timestamp, ulid.ULID):
        # Take only the timestamp portion of an existing ULID.
        timestamp = timestamp.timestamp().bytes
    if not isinstance(timestamp, (bytes, bytearray)):
        raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
                         'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
    # Pair the caller-supplied timestamp with fresh 80-bit randomness.
    randomness = os.urandom(10)
    return ulid.ULID(timestamp + randomness)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
    The following types are supported for randomness values:
    * :class:`~int`
    * :class:`~float`
    * :class:`~str`
    * :class:`~memoryview`
    * :class:`~ulid.ulid.Randomness`
    * :class:`~ulid.ulid.ULID`
    * :class:`~bytes`
    * :class:`~bytearray`
    :param randomness: Random bytes
    :type randomness: See docstring for types
    :return: ULID using new timestamp and given randomness
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is an unsupported type
    :raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is not (or does not convert to) exactly 80 bits
    """
    # Normalize every supported input into a 10-byte (80-bit) randomness
    # payload before validation.
    if isinstance(randomness, (int, float)):
        randomness = int(randomness).to_bytes(10, byteorder='big')
    elif isinstance(randomness, str):
        # Base32-encoded randomness suffix of a ULID string.
        randomness = base32.decode_randomness(randomness)
    elif isinstance(randomness, memoryview):
        randomness = randomness.tobytes()
    elif isinstance(randomness, ulid.Randomness):
        randomness = randomness.bytes
    elif isinstance(randomness, ulid.ULID):
        # Take only the randomness portion of an existing ULID.
        randomness = randomness.randomness().bytes
    if not isinstance(randomness, (bytes, bytearray)):
        raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
                         'bytes, or bytearray; got {}'.format(type(randomness).__name__))
    length = len(randomness)
    if length != 10:
        raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
    # Pair the caller-supplied randomness with a fresh current timestamp.
    timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
    return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
from_bytes
|
python
|
def from_bytes(value: hints.Buffer) -> ulid.ULID:
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
|
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L89-L104
| null |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
"""
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def parse(value: ULIDPrimitive) -> ulid.ULID:
    """
    Create a new :class:`~ulid.ulid.ULID` instance from the given value.
    .. note:: This method should only be used when the caller is trying to parse a ULID from
     a value when they're unsure what format/primitive type it will be given in.
    :param value: ULID value of any supported type
    :type value: :class:`~ulid.api.ULIDPrimitive`
    :return: ULID from value
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when unable to parse a ULID from the value
    """
    # Already a ULID: hand it straight back.
    if isinstance(value, ulid.ULID):
        return value
    if isinstance(value, uuid.UUID):
        return from_uuid(value)
    if isinstance(value, str):
        # Dispatch purely on string length: 36/32 chars are UUID forms,
        # 26 is a full ULID, 16 a randomness part, 10 a timestamp part.
        size = len(value)
        if size in (36, 32):
            return from_uuid(uuid.UUID(value))
        if size == 26:
            return from_str(value)
        if size == 16:
            return from_randomness(value)
        if size == 10:
            return from_timestamp(value)
        raise ValueError('Cannot create ULID from string of length {}'.format(size))
    if isinstance(value, (int, float)):
        return from_int(int(value))
    if isinstance(value, (bytes, bytearray)):
        return from_bytes(value)
    if isinstance(value, memoryview):
        return from_bytes(value.tobytes())
    raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_int(value: int) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
"""
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_str(value: str) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
"""
return ulid.ULID(base32.decode_ulid(value))
def from_uuid(value: uuid.UUID) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
"""
return ulid.ULID(value.bytes)
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
:return: ULID using new timestamp and given randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 80 bits
"""
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
from_int
|
python
|
def from_int(value: int) -> ulid.ULID:
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
|
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L107-L124
| null |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
"""
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def parse(value: ULIDPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
"""
if isinstance(value, ulid.ULID):
return value
if isinstance(value, uuid.UUID):
return from_uuid(value)
if isinstance(value, str):
len_value = len(value)
if len_value == 36:
return from_uuid(uuid.UUID(value))
if len_value == 32:
return from_uuid(uuid.UUID(value))
if len_value == 26:
return from_str(value)
if len_value == 16:
return from_randomness(value)
if len_value == 10:
return from_timestamp(value)
raise ValueError('Cannot create ULID from string of length {}'.format(len_value))
if isinstance(value, (int, float)):
return from_int(int(value))
if isinstance(value, (bytes, bytearray)):
return from_bytes(value)
if isinstance(value, memoryview):
return from_bytes(value.tobytes())
raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_bytes(value: hints.Buffer) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
def from_str(value: str) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
"""
return ulid.ULID(base32.decode_ulid(value))
def from_uuid(value: uuid.UUID) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
"""
return ulid.ULID(value.bytes)
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
:return: ULID using new timestamp and given randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 80 bits
"""
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
from_str
|
python
|
def from_str(value: str) -> ulid.ULID:
return ulid.ULID(base32.decode_ulid(value))
|
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L127-L137
|
[
"def decode_ulid(value: str) -> bytes:\n \"\"\"\n Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID\n strings specifically and is not meant for arbitrary decoding.\n\n :param value: String to decode\n :type value: :class:`~str`\n :return: Value decoded from Base32 string\n :rtype: :class:`~bytes`\n :raises ValueError: when value is not 26 characters\n :raises ValueError: when value cannot be encoded in ASCII\n \"\"\"\n encoded = str_to_bytes(value, 26)\n\n decoding = DECODING\n\n return bytes((\n ((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,\n ((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,\n ((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,\n ((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,\n ((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,\n ((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,\n ((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,\n ((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,\n ((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,\n ((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,\n ((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,\n ((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,\n ((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,\n ((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,\n ((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,\n ((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF\n ))\n"
] |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
"""
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def parse(value: ULIDPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
"""
if isinstance(value, ulid.ULID):
return value
if isinstance(value, uuid.UUID):
return from_uuid(value)
if isinstance(value, str):
len_value = len(value)
if len_value == 36:
return from_uuid(uuid.UUID(value))
if len_value == 32:
return from_uuid(uuid.UUID(value))
if len_value == 26:
return from_str(value)
if len_value == 16:
return from_randomness(value)
if len_value == 10:
return from_timestamp(value)
raise ValueError('Cannot create ULID from string of length {}'.format(len_value))
if isinstance(value, (int, float)):
return from_int(int(value))
if isinstance(value, (bytes, bytearray)):
return from_bytes(value)
if isinstance(value, memoryview):
return from_bytes(value.tobytes())
raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_bytes(value: hints.Buffer) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
def from_int(value: int) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
"""
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_uuid(value: uuid.UUID) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
"""
return ulid.ULID(value.bytes)
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
:return: ULID using new timestamp and given randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 80 bits
"""
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
from_uuid
|
python
|
def from_uuid(value: uuid.UUID) -> ulid.ULID:
return ulid.ULID(value.bytes)
|
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L140-L149
| null |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
"""
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def parse(value: ULIDPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
"""
if isinstance(value, ulid.ULID):
return value
if isinstance(value, uuid.UUID):
return from_uuid(value)
if isinstance(value, str):
len_value = len(value)
if len_value == 36:
return from_uuid(uuid.UUID(value))
if len_value == 32:
return from_uuid(uuid.UUID(value))
if len_value == 26:
return from_str(value)
if len_value == 16:
return from_randomness(value)
if len_value == 10:
return from_timestamp(value)
raise ValueError('Cannot create ULID from string of length {}'.format(len_value))
if isinstance(value, (int, float)):
return from_int(int(value))
if isinstance(value, (bytes, bytearray)):
return from_bytes(value)
if isinstance(value, memoryview):
return from_bytes(value.tobytes())
raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_bytes(value: hints.Buffer) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
def from_int(value: int) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
"""
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_str(value: str) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
"""
return ulid.ULID(base32.decode_ulid(value))
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
:return: ULID using new timestamp and given randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 80 bits
"""
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
from_timestamp
|
python
|
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
|
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 48 bits
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L152-L198
| null |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
"""
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def parse(value: ULIDPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
"""
if isinstance(value, ulid.ULID):
return value
if isinstance(value, uuid.UUID):
return from_uuid(value)
if isinstance(value, str):
len_value = len(value)
if len_value == 36:
return from_uuid(uuid.UUID(value))
if len_value == 32:
return from_uuid(uuid.UUID(value))
if len_value == 26:
return from_str(value)
if len_value == 16:
return from_randomness(value)
if len_value == 10:
return from_timestamp(value)
raise ValueError('Cannot create ULID from string of length {}'.format(len_value))
if isinstance(value, (int, float)):
return from_int(int(value))
if isinstance(value, (bytes, bytearray)):
return from_bytes(value)
if isinstance(value, memoryview):
return from_bytes(value.tobytes())
raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_bytes(value: hints.Buffer) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
def from_int(value: int) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
"""
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_str(value: str) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
"""
return ulid.ULID(base32.decode_ulid(value))
def from_uuid(value: uuid.UUID) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
"""
return ulid.ULID(value.bytes)
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
:return: ULID using new timestamp and given randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 80 bits
"""
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/api.py
|
from_randomness
|
python
|
def from_randomness(randomness: RandomnessPrimitive) -> ulid.ULID:
if isinstance(randomness, (int, float)):
randomness = int(randomness).to_bytes(10, byteorder='big')
elif isinstance(randomness, str):
randomness = base32.decode_randomness(randomness)
elif isinstance(randomness, memoryview):
randomness = randomness.tobytes()
elif isinstance(randomness, ulid.Randomness):
randomness = randomness.bytes
elif isinstance(randomness, ulid.ULID):
randomness = randomness.randomness().bytes
if not isinstance(randomness, (bytes, bytearray)):
raise ValueError('Expected int, float, str, memoryview, Randomness, ULID, '
'bytes, or bytearray; got {}'.format(type(randomness).__name__))
length = len(randomness)
if length != 10:
raise ValueError('Expects randomness to be 80 bits; got {} bytes'.format(length))
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
return ulid.ULID(timestamp + randomness)
|
Create a new :class:`~ulid.ulid.ULID` instance using the given randomness value of a supported type.
The following types are supported for randomness values:
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Randomness`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param randomness: Random bytes
:type randomness: See docstring for types
:return: ULID using new timestamp and given randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 80 bits
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/api.py#L201-L244
|
[
"def decode_randomness(randomness: str) -> bytes:\n \"\"\"\n Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.\n\n The given :class:`~str` are expected to represent the last 16 characters of a ULID, which\n are cryptographically secure random values.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID\n strings specifically and is not meant for arbitrary decoding.\n\n :param randomness: String to decode\n :type randomness: :class:`~str`\n :return: Value decoded from Base32 string\n :rtype: :class:`~bytes`\n :raises ValueError: when value is not 16 characters\n :raises ValueError: when value cannot be encoded in ASCII\n \"\"\"\n encoded = str_to_bytes(randomness, 16)\n\n decoding = DECODING\n\n return bytes((\n ((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,\n ((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,\n ((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,\n ((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,\n ((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,\n ((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,\n ((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,\n ((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,\n ((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,\n ((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF\n ))\n"
] |
"""
ulid/api
~~~~~~~~
Defines the public API of the `ulid` package.
"""
import datetime
import os
import time
import typing
import uuid
from . import base32, hints, ulid
__all__ = ['new', 'parse', 'from_bytes', 'from_int', 'from_str', 'from_uuid', 'from_timestamp', 'from_randomness']
#: Type hint that defines multiple primitive types that can represent
#: a Unix timestamp in seconds.
TimestampPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
datetime.datetime, ulid.Timestamp, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent
#: randomness.
RandomnessPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
ulid.Randomness, ulid.ULID]
#: Type hint that defines multiple primitive types that can represent a full ULID.
ULIDPrimitive = typing.Union[int, float, str, bytes, bytearray, memoryview, # pylint: disable=invalid-name
uuid.UUID, ulid.ULID]
def new() -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance.
The timestamp is created from :func:`~time.time`.
The randomness is created from :func:`~os.urandom`.
:return: ULID from current timestamp
:rtype: :class:`~ulid.ulid.ULID`
"""
timestamp = int(time.time() * 1000).to_bytes(6, byteorder='big')
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
def parse(value: ULIDPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given value.
.. note:: This method should only be used when the caller is trying to parse a ULID from
a value when they're unsure what format/primitive type it will be given in.
:param value: ULID value of any supported type
:type value: :class:`~ulid.api.ULIDPrimitive`
:return: ULID from value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when unable to parse a ULID from the value
"""
if isinstance(value, ulid.ULID):
return value
if isinstance(value, uuid.UUID):
return from_uuid(value)
if isinstance(value, str):
len_value = len(value)
if len_value == 36:
return from_uuid(uuid.UUID(value))
if len_value == 32:
return from_uuid(uuid.UUID(value))
if len_value == 26:
return from_str(value)
if len_value == 16:
return from_randomness(value)
if len_value == 10:
return from_timestamp(value)
raise ValueError('Cannot create ULID from string of length {}'.format(len_value))
if isinstance(value, (int, float)):
return from_int(int(value))
if isinstance(value, (bytes, bytearray)):
return from_bytes(value)
if isinstance(value, memoryview):
return from_bytes(value.tobytes())
raise ValueError('Cannot create ULID from type {}'.format(value.__class__.__name__))
def from_bytes(value: hints.Buffer) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~bytes`,
:class:`~bytearray`, or :class:`~memoryview` value.
:param value: 16 bytes
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: ULID from buffer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects bytes to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value)
def from_int(value: int) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~int` value.
:param value: 128 bit integer
:type value: :class:`~int`
:return: ULID from integer value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not a 128 bit integer
"""
if value < 0:
raise ValueError('Expects positive integer')
length = (value.bit_length() + 7) // 8
if length > 16:
raise ValueError('Expects integer to be 128 bits; got {} bytes'.format(length))
return ulid.ULID(value.to_bytes(16, byteorder='big'))
def from_str(value: str) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~str` value.
:param value: Base32 encoded string
:type value: :class:`~str`
:return: ULID from string value
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is not 26 characters or malformed
"""
return ulid.ULID(base32.decode_ulid(value))
def from_uuid(value: uuid.UUID) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance from the given :class:`~uuid.UUID` value.
:param value: UUIDv4 value
:type value: :class:`~uuid.UUID`
:return: ULID from UUID value
:rtype: :class:`~ulid.ulid.ULID`
"""
return ulid.ULID(value.bytes)
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
:raises ValueError: when the value is or was converted to something 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness)
|
ahawker/ulid
|
ulid/base32.py
|
encode
|
python
|
def encode(value: hints.Buffer) -> str:
length = len(value)
# Order here is based on assumed hot path.
if length == 16:
return encode_ulid(value)
if length == 6:
return encode_timestamp(value)
if length == 10:
return encode_randomness(value)
raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(length))
|
Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.
.. note:: You should only use this method if you've got a :class:`~bytes` instance
and you are unsure of what it represents. If you know the the _meaning_ of the
:class:`~bytes` instance, you should call the `encode_*` method explicitly for
better performance.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 6, 10, or 16 bytes long
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L55-L80
|
[
"def encode_ulid(value: hints.Buffer) -> str:\n \"\"\"\n Encode the given buffer to a :class:`~str` using Base32 encoding.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID\n bytes specifically and is not meant for arbitrary encoding.\n\n :param value: Bytes to encode\n :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`\n :return: Value encoded as a Base32 string\n :rtype: :class:`~str`\n :raises ValueError: when the value is not 16 bytes\n \"\"\"\n length = len(value)\n if length != 16:\n raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))\n\n encoding = ENCODING\n\n return \\\n encoding[(value[0] & 224) >> 5] + \\\n encoding[value[0] & 31] + \\\n encoding[(value[1] & 248) >> 3] + \\\n encoding[((value[1] & 7) << 2) | ((value[2] & 192) >> 6)] + \\\n encoding[((value[2] & 62) >> 1)] + \\\n encoding[((value[2] & 1) << 4) | ((value[3] & 240) >> 4)] + \\\n encoding[((value[3] & 15) << 1) | ((value[4] & 128) >> 7)] + \\\n encoding[(value[4] & 124) >> 2] + \\\n encoding[((value[4] & 3) << 3) | ((value[5] & 224) >> 5)] + \\\n encoding[value[5] & 31] + \\\n encoding[(value[6] & 248) >> 3] + \\\n encoding[((value[6] & 7) << 2) | ((value[7] & 192) >> 6)] + \\\n encoding[(value[7] & 62) >> 1] + \\\n encoding[((value[7] & 1) << 4) | ((value[8] & 240) >> 4)] + \\\n encoding[((value[8] & 15) << 1) | ((value[9] & 128) >> 7)] + \\\n encoding[(value[9] & 124) >> 2] + \\\n encoding[((value[9] & 3) << 3) | ((value[10] & 224) >> 5)] + \\\n encoding[value[10] & 31] + \\\n encoding[(value[11] & 248) >> 3] + \\\n encoding[((value[11] & 7) << 2) | ((value[12] & 192) >> 6)] + \\\n encoding[(value[12] & 62) >> 1] + \\\n encoding[((value[12] & 1) << 4) | ((value[13] & 240) >> 4)] + \\\n encoding[((value[13] & 15) << 1) | ((value[14] & 128) >> 7)] + \\\n encoding[(value[14] & 124) >> 2] + \\\n encoding[((value[14] & 3) << 3) | ((value[15] & 224) >> 5)] + \\\n encoding[value[15] & 31]\n",
"def encode_timestamp(timestamp: hints.Buffer) -> str:\n \"\"\"\n Encode the given buffer to a :class:`~str` using Base32 encoding.\n\n The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which\n are a timestamp in milliseconds.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID\n bytes specifically and is not meant for arbitrary encoding.\n\n :param timestamp: Bytes to encode\n :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`\n :return: Value encoded as a Base32 string\n :rtype: :class:`~str`\n :raises ValueError: when the timestamp is not 6 bytes\n \"\"\"\n length = len(timestamp)\n if length != 6:\n raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))\n\n encoding = ENCODING\n\n return \\\n encoding[(timestamp[0] & 224) >> 5] + \\\n encoding[timestamp[0] & 31] + \\\n encoding[(timestamp[1] & 248) >> 3] + \\\n encoding[((timestamp[1] & 7) << 2) | ((timestamp[2] & 192) >> 6)] + \\\n encoding[((timestamp[2] & 62) >> 1)] + \\\n encoding[((timestamp[2] & 1) << 4) | ((timestamp[3] & 240) >> 4)] + \\\n encoding[((timestamp[3] & 15) << 1) | ((timestamp[4] & 128) >> 7)] + \\\n encoding[(timestamp[4] & 124) >> 2] + \\\n encoding[((timestamp[4] & 3) << 3) | ((timestamp[5] & 224) >> 5)] + \\\n encoding[timestamp[5] & 31]\n",
"def encode_randomness(randomness: hints.Buffer) -> str:\n \"\"\"\n Encode the given buffer to a :class:`~str` using Base32 encoding.\n\n The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which\n are cryptographically secure random values.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID\n bytes specifically and is not meant for arbitrary encoding.\n\n :param randomness: Bytes to encode\n :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`\n :return: Value encoded as a Base32 string\n :rtype: :class:`~str`\n :raises ValueError: when the randomness is not 10 bytes\n \"\"\"\n length = len(randomness)\n if length != 10:\n raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))\n\n encoding = ENCODING\n\n return \\\n encoding[(randomness[0] & 248) >> 3] + \\\n encoding[((randomness[0] & 7) << 2) | ((randomness[1] & 192) >> 6)] + \\\n encoding[(randomness[1] & 62) >> 1] + \\\n encoding[((randomness[1] & 1) << 4) | ((randomness[2] & 240) >> 4)] + \\\n encoding[((randomness[2] & 15) << 1) | ((randomness[3] & 128) >> 7)] + \\\n encoding[(randomness[3] & 124) >> 2] + \\\n encoding[((randomness[3] & 3) << 3) | ((randomness[4] & 224) >> 5)] + \\\n encoding[randomness[4] & 31] + \\\n encoding[(randomness[5] & 248) >> 3] + \\\n encoding[((randomness[5] & 7) << 2) | ((randomness[6] & 192) >> 6)] + \\\n encoding[(randomness[6] & 62) >> 1] + \\\n encoding[((randomness[6] & 1) << 4) | ((randomness[7] & 240) >> 4)] + \\\n encoding[((randomness[7] & 15) << 1) | ((randomness[8] & 128) >> 7)] + \\\n encoding[(randomness[8] & 124) >> 2] + \\\n encoding[((randomness[8] & 3) << 3) | ((randomness[9] & 224) >> 5)] + \\\n encoding[randomness[9] & 31]\n"
] |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
#: A character's index in this string is the 5-bit value it encodes.
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
#: 0xFF marks an invalid character. Per the table below, decoding follows the
#: Crockford aliases: both upper- and lower-case letters are accepted, and
#: 'I'/'L' decode to 1 while 'O' decodes to 0 ('U'/'u' remain invalid).
DECODING = array.array(
    'B',
    (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
     0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
     0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
     0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
     0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
     0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
     0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
     0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    Treats the 16 bytes as one 128-bit big-endian integer and emits 26 Base32
    digits of 5 bits each; the leading digit carries only the top 3 bits.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    length = len(value)
    if length != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))

    number = int.from_bytes(value, byteorder='big')
    # 26 digits * 5 bits = 130 bits; shifts run 125, 120, ..., 0.
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(125, -1, -5))
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
    are a timestamp in milliseconds. The 48 bits are emitted as 10 Base32 digits;
    the leading digit carries only the top 3 bits.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))

    number = int.from_bytes(timestamp, byteorder='big')
    # 10 digits * 5 bits = 50 bits; shifts run 45, 40, ..., 0.
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(45, -1, -5))
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
    are cryptographically secure random values. The 80 bits map exactly onto
    16 Base32 digits of 5 bits each.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    length = len(randomness)
    if length != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))

    number = int.from_bytes(randomness, byteorder='big')
    # 16 digits * 5 bits = 80 bits; shifts run 75, 70, ..., 0.
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(75, -1, -5))
def decode(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: You should only use this method if you've got a :class:`~str` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~str` instance, you should call the `decode_*` method explicitly for
        better performance.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10, 16, or 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Dispatch on length: 26 = full ULID, 10 = timestamp, 16 = randomness.
    length = len(value)
    decoder = {26: decode_ulid, 10: decode_timestamp, 16: decode_randomness}.get(length)
    if decoder is None:
        raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(length))
    return decoder(value)
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    Folds the 26 Base32 digits into one big integer and re-serializes it as
    16 big-endian bytes.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(value, 26)

    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    # 26 digits carry 130 bits; the 2 excess high bits of the first digit are discarded.
    return (number & ((1 << 128) - 1)).to_bytes(16, byteorder='big')
def decode_timestamp(timestamp: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
    are the timestamp in milliseconds. The digits are folded into one integer
    and re-serialized as 6 big-endian bytes.

    :param timestamp: String to decode
    :type timestamp: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(timestamp, 10)

    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    # 10 digits carry 50 bits; the 2 excess high bits of the first digit are discarded.
    return (number & ((1 << 48) - 1)).to_bytes(6, byteorder='big')
def decode_randomness(randomness: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
    are cryptographically secure random values. The 16 digits map exactly onto
    80 bits, re-serialized as 10 big-endian bytes.

    :param randomness: String to decode
    :type randomness: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 16 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(randomness, 16)

    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    # 16 digits * 5 bits = 80 bits exactly; no excess bits to mask off.
    return number.to_bytes(10, byteorder='big')
def str_to_bytes(value: str, expected_length: int) -> bytes:
    """
    Convert the given string to bytes and validate it is within the Base32 character set.

    :param value: String to convert to bytes
    :type value: :class:`~str`
    :param expected_length: Expected length of the input string
    :type expected_length: :class:`~int`
    :return: Value converted to bytes.
    :rtype: :class:`~bytes`
    """
    if len(value) != expected_length:
        raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, len(value)))

    try:
        encoded = value.encode('ascii')
    except UnicodeEncodeError as ex:
        raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))

    # ASCII encoding guarantees byte values < 256, so an out-of-set character
    # can only show up as a DECODING entry above 31; report the first one.
    invalid = next((byte for byte in encoded if DECODING[byte] > 31), None)
    if invalid is not None:
        raise ValueError('Non-base32 character found: "{}"'.format(chr(invalid)))

    return encoded
|
ahawker/ulid
|
ulid/base32.py
|
encode_ulid
|
python
|
def encode_ulid(value: hints.Buffer) -> str:
length = len(value)
if length != 16:
raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))
encoding = ENCODING
return \
encoding[(value[0] & 224) >> 5] + \
encoding[value[0] & 31] + \
encoding[(value[1] & 248) >> 3] + \
encoding[((value[1] & 7) << 2) | ((value[2] & 192) >> 6)] + \
encoding[((value[2] & 62) >> 1)] + \
encoding[((value[2] & 1) << 4) | ((value[3] & 240) >> 4)] + \
encoding[((value[3] & 15) << 1) | ((value[4] & 128) >> 7)] + \
encoding[(value[4] & 124) >> 2] + \
encoding[((value[4] & 3) << 3) | ((value[5] & 224) >> 5)] + \
encoding[value[5] & 31] + \
encoding[(value[6] & 248) >> 3] + \
encoding[((value[6] & 7) << 2) | ((value[7] & 192) >> 6)] + \
encoding[(value[7] & 62) >> 1] + \
encoding[((value[7] & 1) << 4) | ((value[8] & 240) >> 4)] + \
encoding[((value[8] & 15) << 1) | ((value[9] & 128) >> 7)] + \
encoding[(value[9] & 124) >> 2] + \
encoding[((value[9] & 3) << 3) | ((value[10] & 224) >> 5)] + \
encoding[value[10] & 31] + \
encoding[(value[11] & 248) >> 3] + \
encoding[((value[11] & 7) << 2) | ((value[12] & 192) >> 6)] + \
encoding[(value[12] & 62) >> 1] + \
encoding[((value[12] & 1) << 4) | ((value[13] & 240) >> 4)] + \
encoding[((value[13] & 15) << 1) | ((value[14] & 128) >> 7)] + \
encoding[(value[14] & 124) >> 2] + \
encoding[((value[14] & 3) << 3) | ((value[15] & 224) >> 5)] + \
encoding[value[15] & 31]
|
Encode the given buffer to a :class:`~str` using Base32 encoding.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 16 bytes
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L83-L128
| null |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
#: A character's index in this string is the 5-bit value it encodes.
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
#: 0xFF marks an invalid character. Per the table below, decoding follows the
#: Crockford aliases: both upper- and lower-case letters are accepted, and
#: 'I'/'L' decode to 1 while 'O' decodes to 0 ('U'/'u' remain invalid).
DECODING = array.array(
    'B',
    (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
     0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
     0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
     0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
     0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
     0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
     0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
     0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
    """
    Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.

    .. note:: You should only use this method if you've got a :class:`~bytes` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~bytes` instance, you should call the `encode_*` method explicitly for
        better performance.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 6, 10, or 16 bytes long
    """
    # Dispatch on size: 16 = full ULID, 6 = timestamp, 10 = randomness.
    size = len(value)
    encoder = {16: encode_ulid, 6: encode_timestamp, 10: encode_randomness}.get(size)
    if encoder is None:
        raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(size))
    return encoder(value)
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
    are a timestamp in milliseconds. The 48 bits become 10 Base32 digits; the
    leading digit carries only the top 3 bits.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    size = len(timestamp)
    if size != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(size))

    bits = int.from_bytes(timestamp, byteorder='big')
    digits = [ENCODING[(bits >> shift) & 0x1F] for shift in range(45, -1, -5)]
    return ''.join(digits)
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
    are cryptographically secure random values. The 80 bits map exactly onto
    16 Base32 digits of 5 bits each.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    size = len(randomness)
    if size != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(size))

    bits = int.from_bytes(randomness, byteorder='big')
    digits = [ENCODING[(bits >> shift) & 0x1F] for shift in range(75, -1, -5)]
    return ''.join(digits)
def decode(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: You should only use this method if you've got a :class:`~str` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~str` instance, you should call the `decode_*` method explicitly for
        better performance.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10, 16, or 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Dispatch on length: 26 = full ULID, 10 = timestamp, 16 = randomness.
    size = len(value)
    handler = {26: decode_ulid, 10: decode_timestamp, 16: decode_randomness}.get(size)
    if handler is None:
        raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(size))
    return handler(value)
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    Folds the 26 Base32 digits into one big integer and re-serializes it as
    16 big-endian bytes.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(value, 26)

    acc = 0
    for byte in encoded:
        acc = (acc << 5) | DECODING[byte]
    # 26 digits carry 130 bits; only the low 128 bits are meaningful.
    return (acc & ((1 << 128) - 1)).to_bytes(16, byteorder='big')
def decode_timestamp(timestamp: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
    are the timestamp in milliseconds. The digits are folded into one integer
    and re-serialized as 6 big-endian bytes.

    :param timestamp: String to decode
    :type timestamp: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(timestamp, 10)

    acc = 0
    for byte in encoded:
        acc = (acc << 5) | DECODING[byte]
    # 10 digits carry 50 bits; only the low 48 bits are meaningful.
    return (acc & ((1 << 48) - 1)).to_bytes(6, byteorder='big')
def decode_randomness(randomness: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(randomness, 16)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
))
def str_to_bytes(value: str, expected_length: int) -> bytes:
"""
Convert the given string to bytes and validate it is within the Base32 character set.
:param value: String to convert to bytes
:type value: :class:`~str`
:param expected_length: Expected length of the input string
:type expected_length: :class:`~int`
:return: Value converted to bytes.
:rtype: :class:`~bytes`
"""
length = len(value)
if length != expected_length:
raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))
try:
encoded = value.encode('ascii')
except UnicodeEncodeError as ex:
raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))
decoding = DECODING
# Confirm all bytes are valid Base32 decode characters.
# Note: ASCII encoding handles the out of range checking for us.
for byte in encoded:
if decoding[byte] > 31:
raise ValueError('Non-base32 character found: "{}"'.format(chr(byte)))
return encoded
|
ahawker/ulid
|
ulid/base32.py
|
encode_timestamp
|
python
|
def encode_timestamp(timestamp: hints.Buffer) -> str:
length = len(timestamp)
if length != 6:
raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))
encoding = ENCODING
return \
encoding[(timestamp[0] & 224) >> 5] + \
encoding[timestamp[0] & 31] + \
encoding[(timestamp[1] & 248) >> 3] + \
encoding[((timestamp[1] & 7) << 2) | ((timestamp[2] & 192) >> 6)] + \
encoding[((timestamp[2] & 62) >> 1)] + \
encoding[((timestamp[2] & 1) << 4) | ((timestamp[3] & 240) >> 4)] + \
encoding[((timestamp[3] & 15) << 1) | ((timestamp[4] & 128) >> 7)] + \
encoding[(timestamp[4] & 124) >> 2] + \
encoding[((timestamp[4] & 3) << 3) | ((timestamp[5] & 224) >> 5)] + \
encoding[timestamp[5] & 31]
|
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
are a timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param timestamp: Bytes to encode
:type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the timestamp is not 6 bytes
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L131-L163
| null |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
DECODING = array.array(
'B',
(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
"""
Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.
.. note:: You should only use this method if you've got a :class:`~bytes` instance
and you are unsure of what it represents. If you know the the _meaning_ of the
:class:`~bytes` instance, you should call the `encode_*` method explicitly for
better performance.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 6, 10, or 16 bytes long
"""
length = len(value)
# Order here is based on assumed hot path.
if length == 16:
return encode_ulid(value)
if length == 6:
return encode_timestamp(value)
if length == 10:
return encode_randomness(value)
raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(length))
def encode_ulid(value: hints.Buffer) -> str:
"""
Encode the given buffer to a :class:`~str` using Base32 encoding.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))
encoding = ENCODING
return \
encoding[(value[0] & 224) >> 5] + \
encoding[value[0] & 31] + \
encoding[(value[1] & 248) >> 3] + \
encoding[((value[1] & 7) << 2) | ((value[2] & 192) >> 6)] + \
encoding[((value[2] & 62) >> 1)] + \
encoding[((value[2] & 1) << 4) | ((value[3] & 240) >> 4)] + \
encoding[((value[3] & 15) << 1) | ((value[4] & 128) >> 7)] + \
encoding[(value[4] & 124) >> 2] + \
encoding[((value[4] & 3) << 3) | ((value[5] & 224) >> 5)] + \
encoding[value[5] & 31] + \
encoding[(value[6] & 248) >> 3] + \
encoding[((value[6] & 7) << 2) | ((value[7] & 192) >> 6)] + \
encoding[(value[7] & 62) >> 1] + \
encoding[((value[7] & 1) << 4) | ((value[8] & 240) >> 4)] + \
encoding[((value[8] & 15) << 1) | ((value[9] & 128) >> 7)] + \
encoding[(value[9] & 124) >> 2] + \
encoding[((value[9] & 3) << 3) | ((value[10] & 224) >> 5)] + \
encoding[value[10] & 31] + \
encoding[(value[11] & 248) >> 3] + \
encoding[((value[11] & 7) << 2) | ((value[12] & 192) >> 6)] + \
encoding[(value[12] & 62) >> 1] + \
encoding[((value[12] & 1) << 4) | ((value[13] & 240) >> 4)] + \
encoding[((value[13] & 15) << 1) | ((value[14] & 128) >> 7)] + \
encoding[(value[14] & 124) >> 2] + \
encoding[((value[14] & 3) << 3) | ((value[15] & 224) >> 5)] + \
encoding[value[15] & 31]
def encode_randomness(randomness: hints.Buffer) -> str:
"""
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param randomness: Bytes to encode
:type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the randomness is not 10 bytes
"""
length = len(randomness)
if length != 10:
raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))
encoding = ENCODING
return \
encoding[(randomness[0] & 248) >> 3] + \
encoding[((randomness[0] & 7) << 2) | ((randomness[1] & 192) >> 6)] + \
encoding[(randomness[1] & 62) >> 1] + \
encoding[((randomness[1] & 1) << 4) | ((randomness[2] & 240) >> 4)] + \
encoding[((randomness[2] & 15) << 1) | ((randomness[3] & 128) >> 7)] + \
encoding[(randomness[3] & 124) >> 2] + \
encoding[((randomness[3] & 3) << 3) | ((randomness[4] & 224) >> 5)] + \
encoding[randomness[4] & 31] + \
encoding[(randomness[5] & 248) >> 3] + \
encoding[((randomness[5] & 7) << 2) | ((randomness[6] & 192) >> 6)] + \
encoding[(randomness[6] & 62) >> 1] + \
encoding[((randomness[6] & 1) << 4) | ((randomness[7] & 240) >> 4)] + \
encoding[((randomness[7] & 15) << 1) | ((randomness[8] & 128) >> 7)] + \
encoding[(randomness[8] & 124) >> 2] + \
encoding[((randomness[8] & 3) << 3) | ((randomness[9] & 224) >> 5)] + \
encoding[randomness[9] & 31]
def decode(value: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
.. note:: You should only use this method if you've got a :class:`~str` instance
and you are unsure of what it represents. If you know the the _meaning_ of the
:class:`~str` instance, you should call the `decode_*` method explicitly for
better performance.
:param value: String to decode
:type value: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 10, 16, or 26 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
length = len(value)
# Order here is based on assumed hot path.
if length == 26:
return decode_ulid(value)
if length == 10:
return decode_timestamp(value)
if length == 16:
return decode_randomness(value)
raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(length))
def decode_ulid(value: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param value: String to decode
:type value: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 26 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(value, 26)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,
((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,
((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,
((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,
((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,
((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,
((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,
((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,
((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,
((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,
((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF
))
def decode_timestamp(timestamp: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
are the timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param timestamp: String to decode
:type timestamp: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 10 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(timestamp, 10)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF
))
def decode_randomness(randomness: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(randomness, 16)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
))
def str_to_bytes(value: str, expected_length: int) -> bytes:
"""
Convert the given string to bytes and validate it is within the Base32 character set.
:param value: String to convert to bytes
:type value: :class:`~str`
:param expected_length: Expected length of the input string
:type expected_length: :class:`~int`
:return: Value converted to bytes.
:rtype: :class:`~bytes`
"""
length = len(value)
if length != expected_length:
raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))
try:
encoded = value.encode('ascii')
except UnicodeEncodeError as ex:
raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))
decoding = DECODING
# Confirm all bytes are valid Base32 decode characters.
# Note: ASCII encoding handles the out of range checking for us.
for byte in encoded:
if decoding[byte] > 31:
raise ValueError('Non-base32 character found: "{}"'.format(chr(byte)))
return encoded
|
ahawker/ulid
|
ulid/base32.py
|
encode_randomness
|
python
|
def encode_randomness(randomness: hints.Buffer) -> str:
length = len(randomness)
if length != 10:
raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))
encoding = ENCODING
return \
encoding[(randomness[0] & 248) >> 3] + \
encoding[((randomness[0] & 7) << 2) | ((randomness[1] & 192) >> 6)] + \
encoding[(randomness[1] & 62) >> 1] + \
encoding[((randomness[1] & 1) << 4) | ((randomness[2] & 240) >> 4)] + \
encoding[((randomness[2] & 15) << 1) | ((randomness[3] & 128) >> 7)] + \
encoding[(randomness[3] & 124) >> 2] + \
encoding[((randomness[3] & 3) << 3) | ((randomness[4] & 224) >> 5)] + \
encoding[randomness[4] & 31] + \
encoding[(randomness[5] & 248) >> 3] + \
encoding[((randomness[5] & 7) << 2) | ((randomness[6] & 192) >> 6)] + \
encoding[(randomness[6] & 62) >> 1] + \
encoding[((randomness[6] & 1) << 4) | ((randomness[7] & 240) >> 4)] + \
encoding[((randomness[7] & 15) << 1) | ((randomness[8] & 128) >> 7)] + \
encoding[(randomness[8] & 124) >> 2] + \
encoding[((randomness[8] & 3) << 3) | ((randomness[9] & 224) >> 5)] + \
encoding[randomness[9] & 31]
|
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param randomness: Bytes to encode
:type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the randomness is not 10 bytes
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L166-L204
| null |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
DECODING = array.array(
'B',
(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
"""
Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.
.. note:: You should only use this method if you've got a :class:`~bytes` instance
and you are unsure of what it represents. If you know the the _meaning_ of the
:class:`~bytes` instance, you should call the `encode_*` method explicitly for
better performance.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 6, 10, or 16 bytes long
"""
length = len(value)
# Order here is based on assumed hot path.
if length == 16:
return encode_ulid(value)
if length == 6:
return encode_timestamp(value)
if length == 10:
return encode_randomness(value)
raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(length))
def encode_ulid(value: hints.Buffer) -> str:
"""
Encode the given buffer to a :class:`~str` using Base32 encoding.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 16 bytes
"""
length = len(value)
if length != 16:
raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))
encoding = ENCODING
return \
encoding[(value[0] & 224) >> 5] + \
encoding[value[0] & 31] + \
encoding[(value[1] & 248) >> 3] + \
encoding[((value[1] & 7) << 2) | ((value[2] & 192) >> 6)] + \
encoding[((value[2] & 62) >> 1)] + \
encoding[((value[2] & 1) << 4) | ((value[3] & 240) >> 4)] + \
encoding[((value[3] & 15) << 1) | ((value[4] & 128) >> 7)] + \
encoding[(value[4] & 124) >> 2] + \
encoding[((value[4] & 3) << 3) | ((value[5] & 224) >> 5)] + \
encoding[value[5] & 31] + \
encoding[(value[6] & 248) >> 3] + \
encoding[((value[6] & 7) << 2) | ((value[7] & 192) >> 6)] + \
encoding[(value[7] & 62) >> 1] + \
encoding[((value[7] & 1) << 4) | ((value[8] & 240) >> 4)] + \
encoding[((value[8] & 15) << 1) | ((value[9] & 128) >> 7)] + \
encoding[(value[9] & 124) >> 2] + \
encoding[((value[9] & 3) << 3) | ((value[10] & 224) >> 5)] + \
encoding[value[10] & 31] + \
encoding[(value[11] & 248) >> 3] + \
encoding[((value[11] & 7) << 2) | ((value[12] & 192) >> 6)] + \
encoding[(value[12] & 62) >> 1] + \
encoding[((value[12] & 1) << 4) | ((value[13] & 240) >> 4)] + \
encoding[((value[13] & 15) << 1) | ((value[14] & 128) >> 7)] + \
encoding[(value[14] & 124) >> 2] + \
encoding[((value[14] & 3) << 3) | ((value[15] & 224) >> 5)] + \
encoding[value[15] & 31]
def encode_timestamp(timestamp: hints.Buffer) -> str:
"""
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
are a timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param timestamp: Bytes to encode
:type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the timestamp is not 6 bytes
"""
length = len(timestamp)
if length != 6:
raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))
encoding = ENCODING
return \
encoding[(timestamp[0] & 224) >> 5] + \
encoding[timestamp[0] & 31] + \
encoding[(timestamp[1] & 248) >> 3] + \
encoding[((timestamp[1] & 7) << 2) | ((timestamp[2] & 192) >> 6)] + \
encoding[((timestamp[2] & 62) >> 1)] + \
encoding[((timestamp[2] & 1) << 4) | ((timestamp[3] & 240) >> 4)] + \
encoding[((timestamp[3] & 15) << 1) | ((timestamp[4] & 128) >> 7)] + \
encoding[(timestamp[4] & 124) >> 2] + \
encoding[((timestamp[4] & 3) << 3) | ((timestamp[5] & 224) >> 5)] + \
encoding[timestamp[5] & 31]
def decode(value: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
.. note:: You should only use this method if you've got a :class:`~str` instance
and you are unsure of what it represents. If you know the the _meaning_ of the
:class:`~str` instance, you should call the `decode_*` method explicitly for
better performance.
:param value: String to decode
:type value: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 10, 16, or 26 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
length = len(value)
# Order here is based on assumed hot path.
if length == 26:
return decode_ulid(value)
if length == 10:
return decode_timestamp(value)
if length == 16:
return decode_randomness(value)
raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(length))
def decode_ulid(value: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param value: String to decode
:type value: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 26 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(value, 26)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,
((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,
((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,
((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,
((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,
((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,
((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,
((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,
((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,
((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,
((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF
))
def decode_timestamp(timestamp: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
are the timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param timestamp: String to decode
:type timestamp: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 10 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(timestamp, 10)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF
))
def decode_randomness(randomness: str) -> bytes:
"""
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
"""
encoded = str_to_bytes(randomness, 16)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
))
def str_to_bytes(value: str, expected_length: int) -> bytes:
"""
Convert the given string to bytes and validate it is within the Base32 character set.
:param value: String to convert to bytes
:type value: :class:`~str`
:param expected_length: Expected length of the input string
:type expected_length: :class:`~int`
:return: Value converted to bytes.
:rtype: :class:`~bytes`
"""
length = len(value)
if length != expected_length:
raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))
try:
encoded = value.encode('ascii')
except UnicodeEncodeError as ex:
raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))
decoding = DECODING
# Confirm all bytes are valid Base32 decode characters.
# Note: ASCII encoding handles the out of range checking for us.
for byte in encoded:
if decoding[byte] > 31:
raise ValueError('Non-base32 character found: "{}"'.format(chr(byte)))
return encoded
|
ahawker/ulid
|
ulid/base32.py
|
decode
|
python
|
def decode(value: str) -> bytes:
length = len(value)
# Order here is based on assumed hot path.
if length == 26:
return decode_ulid(value)
if length == 10:
return decode_timestamp(value)
if length == 16:
return decode_randomness(value)
raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(length))
|
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
.. note:: You should only use this method if you've got a :class:`~str` instance
and you are unsure of what it represents. If you know the the _meaning_ of the
:class:`~str` instance, you should call the `decode_*` method explicitly for
better performance.
:param value: String to decode
:type value: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 10, 16, or 26 characters
:raises ValueError: when value cannot be encoded in ASCII
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L207-L233
|
[
"def decode_ulid(value: str) -> bytes:\n \"\"\"\n Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID\n strings specifically and is not meant for arbitrary decoding.\n\n :param value: String to decode\n :type value: :class:`~str`\n :return: Value decoded from Base32 string\n :rtype: :class:`~bytes`\n :raises ValueError: when value is not 26 characters\n :raises ValueError: when value cannot be encoded in ASCII\n \"\"\"\n encoded = str_to_bytes(value, 26)\n\n decoding = DECODING\n\n return bytes((\n ((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,\n ((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,\n ((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,\n ((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,\n ((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,\n ((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,\n ((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,\n ((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,\n ((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,\n ((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,\n ((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,\n ((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,\n ((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,\n ((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,\n ((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,\n ((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF\n ))\n",
"def decode_timestamp(timestamp: str) -> bytes:\n \"\"\"\n Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.\n\n The given :class:`~str` are expected to represent the first 10 characters of a ULID, which\n are the timestamp in milliseconds.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID\n strings specifically and is not meant for arbitrary decoding.\n\n :param timestamp: String to decode\n :type timestamp: :class:`~str`\n :return: Value decoded from Base32 string\n :rtype: :class:`~bytes`\n :raises ValueError: when value is not 10 characters\n :raises ValueError: when value cannot be encoded in ASCII\n \"\"\"\n encoded = str_to_bytes(timestamp, 10)\n\n decoding = DECODING\n\n return bytes((\n ((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,\n ((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,\n ((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,\n ((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,\n ((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,\n ((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF\n ))\n",
"def decode_randomness(randomness: str) -> bytes:\n \"\"\"\n Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.\n\n The given :class:`~str` are expected to represent the last 16 characters of a ULID, which\n are cryptographically secure random values.\n\n .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID\n strings specifically and is not meant for arbitrary decoding.\n\n :param randomness: String to decode\n :type randomness: :class:`~str`\n :return: Value decoded from Base32 string\n :rtype: :class:`~bytes`\n :raises ValueError: when value is not 16 characters\n :raises ValueError: when value cannot be encoded in ASCII\n \"\"\"\n encoded = str_to_bytes(randomness, 16)\n\n decoding = DECODING\n\n return bytes((\n ((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,\n ((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,\n ((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,\n ((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,\n ((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,\n ((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,\n ((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,\n ((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,\n ((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,\n ((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF\n ))\n"
] |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
DECODING = array.array(
'B',
(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
    """
    Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.

    .. note:: You should only use this method if you've got a :class:`~bytes` instance
    and you are unsure of what it represents. If you know the _meaning_ of the
    :class:`~bytes` instance, you should call the `encode_*` method explicitly for
    better performance.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 6, 10, or 16 bytes long
    """
    # Dispatch on buffer size; keys ordered by assumed hot path (full ULID first).
    handlers = {16: encode_ulid, 6: encode_timestamp, 10: encode_randomness}
    handler = handlers.get(len(value))
    if handler is None:
        raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(len(value)))
    return handler(value)
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    .. note:: This is equivalent to the unrolled `NUlid`-style strategy for encoding
    ULID bytes specifically and is not meant for arbitrary encoding.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    if len(value) != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(len(value)))
    # Treat the 16 bytes as one big-endian 128-bit integer and peel off 5-bit
    # symbols from the top. 26 symbols span 130 bits, so the first symbol only
    # carries the top 3 data bits (the 2 pad bits are implicitly zero).
    number = int.from_bytes(value, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(125, -1, -5))
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
    are a timestamp in milliseconds.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    if len(timestamp) != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(len(timestamp)))
    # 48 bits packed big-endian into 10 five-bit symbols (50 bits); the first
    # symbol holds only the top 3 data bits.
    number = int.from_bytes(timestamp, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(45, -1, -5))
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
    are cryptographically secure random values.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    if len(randomness) != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(len(randomness)))
    # 80 bits map exactly onto 16 five-bit symbols — no padding involved.
    number = int.from_bytes(randomness, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(75, -1, -5))
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(value, 26)
    # Fold the 26 five-bit symbols into one integer. That totals 130 bits for a
    # 128-bit value, so the top 2 bits of the first symbol are discarded — the
    # same truncation the unrolled per-byte `& 0xFF` form performs.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    return (number & ((1 << 128) - 1)).to_bytes(16, byteorder='big')
def decode_timestamp(timestamp: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
    are the timestamp in milliseconds.

    :param timestamp: String to decode
    :type timestamp: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(timestamp, 10)
    # 10 symbols carry 50 bits for a 48-bit timestamp; the top 2 bits of the
    # first symbol are discarded, matching the unrolled `& 0xFF` truncation.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    return (number & ((1 << 48) - 1)).to_bytes(6, byteorder='big')
def decode_randomness(randomness: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
    are cryptographically secure random values.

    :param randomness: String to decode
    :type randomness: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 16 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(randomness, 16)
    # 16 symbols carry exactly 80 bits — a lossless round trip to 10 bytes.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    return number.to_bytes(10, byteorder='big')
def str_to_bytes(value: str, expected_length: int) -> bytes:
    """
    Convert the given string to bytes and validate it is within the Base32 character set.

    :param value: String to convert to bytes
    :type value: :class:`~str`
    :param expected_length: Expected length of the input string
    :type expected_length: :class:`~int`
    :return: Value converted to bytes.
    :rtype: :class:`~bytes`
    """
    if len(value) != expected_length:
        raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, len(value)))
    try:
        encoded = value.encode('ascii')
    except UnicodeEncodeError as ex:
        raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))
    # ASCII encoding keeps every byte inside DECODING's range; a mapping above
    # 31 identifies a character outside the Base32 alphabet.
    invalid = next((byte for byte in encoded if DECODING[byte] > 31), None)
    if invalid is not None:
        raise ValueError('Non-base32 character found: "{}"'.format(chr(invalid)))
    return encoded
|
ahawker/ulid
|
ulid/base32.py
|
decode_ulid
|
python
|
def decode_ulid(value: str) -> bytes:
encoded = str_to_bytes(value, 26)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,
((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,
((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,
((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,
((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,
((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,
((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,
((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,
((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,
((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,
((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF
))
|
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param value: String to decode
:type value: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 26 characters
:raises ValueError: when value cannot be encoded in ASCII
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L236-L271
|
[
"def str_to_bytes(value: str, expected_length: int) -> bytes:\n \"\"\"\n Convert the given string to bytes and validate it is within the Base32 character set.\n\n :param value: String to convert to bytes\n :type value: :class:`~str`\n :param expected_length: Expected length of the input string\n :type expected_length: :class:`~int`\n :return: Value converted to bytes.\n :rtype: :class:`~bytes`\n \"\"\"\n length = len(value)\n if length != expected_length:\n raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))\n\n try:\n encoded = value.encode('ascii')\n except UnicodeEncodeError as ex:\n raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))\n\n decoding = DECODING\n\n # Confirm all bytes are valid Base32 decode characters.\n # Note: ASCII encoding handles the out of range checking for us.\n for byte in encoded:\n if decoding[byte] > 31:\n raise ValueError('Non-base32 character found: \"{}\"'.format(chr(byte)))\n\n return encoded\n"
] |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
DECODING = array.array(
'B',
(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
    """
    Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.

    .. note:: You should only use this method if you've got a :class:`~bytes` instance
    and you are unsure of what it represents. If you know the _meaning_ of the
    :class:`~bytes` instance, you should call the `encode_*` method explicitly for
    better performance.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 6, 10, or 16 bytes long
    """
    # Dispatch on buffer size; keys ordered by assumed hot path (full ULID first).
    handlers = {16: encode_ulid, 6: encode_timestamp, 10: encode_randomness}
    handler = handlers.get(len(value))
    if handler is None:
        raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(len(value)))
    return handler(value)
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    .. note:: This is equivalent to the unrolled `NUlid`-style strategy for encoding
    ULID bytes specifically and is not meant for arbitrary encoding.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    if len(value) != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(len(value)))
    # Treat the 16 bytes as one big-endian 128-bit integer and peel off 5-bit
    # symbols from the top. 26 symbols span 130 bits, so the first symbol only
    # carries the top 3 data bits (the 2 pad bits are implicitly zero).
    number = int.from_bytes(value, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(125, -1, -5))
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
    are a timestamp in milliseconds.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    if len(timestamp) != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(len(timestamp)))
    # 48 bits packed big-endian into 10 five-bit symbols (50 bits); the first
    # symbol holds only the top 3 data bits.
    number = int.from_bytes(timestamp, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(45, -1, -5))
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
    are cryptographically secure random values.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    if len(randomness) != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(len(randomness)))
    # 80 bits map exactly onto 16 five-bit symbols — no padding involved.
    number = int.from_bytes(randomness, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(75, -1, -5))
def decode(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: You should only use this method if you've got a :class:`~str` instance
    and you are unsure of what it represents. If you know the _meaning_ of the
    :class:`~str` instance, you should call the `decode_*` method explicitly for
    better performance.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10, 16, or 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Dispatch on string length; keys ordered by assumed hot path (full ULID first).
    handlers = {26: decode_ulid, 10: decode_timestamp, 16: decode_randomness}
    handler = handlers.get(len(value))
    if handler is None:
        raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(len(value)))
    return handler(value)
def decode_timestamp(timestamp: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
    are the timestamp in milliseconds.

    :param timestamp: String to decode
    :type timestamp: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(timestamp, 10)
    # 10 symbols carry 50 bits for a 48-bit timestamp; the top 2 bits of the
    # first symbol are discarded, matching the unrolled `& 0xFF` truncation.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    return (number & ((1 << 48) - 1)).to_bytes(6, byteorder='big')
def decode_randomness(randomness: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
    are cryptographically secure random values.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param randomness: String to decode
    :type randomness: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 16 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Validate length and character set, and obtain the raw ASCII byte values.
    encoded = str_to_bytes(randomness, 16)
    # Hoist the module-level lookup table into a local for faster access.
    decoding = DECODING
    # Repack 16 five-bit Base32 values into 10 bytes (80 bits, an exact fit:
    # 16 * 5 == 10 * 8, so no bits are discarded here, unlike the timestamp).
    return bytes((
        ((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
        ((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
        ((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
        ((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
        ((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
        ((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
        ((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
        ((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
        ((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
        ((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
    ))
def str_to_bytes(value: str, expected_length: int) -> bytes:
    """
    Convert the given string to bytes and validate it is within the Base32 character set.

    :param value: String to convert to bytes
    :type value: :class:`~str`
    :param expected_length: Expected length of the input string
    :type expected_length: :class:`~int`
    :return: Value converted to bytes.
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not ``expected_length`` characters, cannot be
        encoded in ASCII, or contains a non-Base32 character
    """
    length = len(value)
    if length != expected_length:
        raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))
    try:
        encoded = value.encode('ascii')
    except UnicodeEncodeError as ex:
        # Chain the original encoding error so the full context survives
        # in the traceback (`__cause__`) instead of being discarded.
        raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex)) from ex
    decoding = DECODING
    # Confirm all bytes are valid Base32 decode characters; the table stores
    # 0xFF for anything outside the alphabet, and valid characters map to 0..31.
    # Note: ASCII encoding handles the out of range checking for us.
    for byte in encoded:
        if decoding[byte] > 31:
            raise ValueError('Non-base32 character found: "{}"'.format(chr(byte)))
    return encoded
|
ahawker/ulid
|
ulid/base32.py
|
decode_timestamp
|
python
|
def decode_timestamp(timestamp: str) -> bytes:
encoded = str_to_bytes(timestamp, 10)
decoding = DECODING
return bytes((
((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF
))
|
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the first 10 characters of a ULID, which
are the timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param timestamp: String to decode
:type timestamp: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 10 characters
:raises ValueError: when value cannot be encoded in ASCII
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L274-L302
|
[
"def str_to_bytes(value: str, expected_length: int) -> bytes:\n \"\"\"\n Convert the given string to bytes and validate it is within the Base32 character set.\n\n :param value: String to convert to bytes\n :type value: :class:`~str`\n :param expected_length: Expected length of the input string\n :type expected_length: :class:`~int`\n :return: Value converted to bytes.\n :rtype: :class:`~bytes`\n \"\"\"\n length = len(value)\n if length != expected_length:\n raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))\n\n try:\n encoded = value.encode('ascii')\n except UnicodeEncodeError as ex:\n raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))\n\n decoding = DECODING\n\n # Confirm all bytes are valid Base32 decode characters.\n # Note: ASCII encoding handles the out of range checking for us.\n for byte in encoded:\n if decoding[byte] > 31:\n raise ValueError('Non-base32 character found: \"{}\"'.format(chr(byte)))\n\n return encoded\n"
] |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
#: Entries of 0xFF mark bytes that are not valid Base32 characters. Per
#: Crockford Base32, the aliases "I"/"i"/"L"/"l" decode to 1 and "O"/"o"
#: decode to 0, and both upper- and lower-case letters are accepted (the
#: table repeats the letter mappings for the lower-case byte range).
DECODING = array.array(
    'B',
    (0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
     0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
     0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
     0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
     0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
     0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
     0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
     0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
     0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
    """
    Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.

    .. note:: You should only use this method if you've got a :class:`~bytes` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~bytes` instance, you should call the `encode_*` method explicitly for
        better performance.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 6, 10, or 16 bytes long
    """
    # Dispatch on buffer size; a full 16-byte ULID is the assumed hot path.
    encoder = {
        16: encode_ulid,
        6: encode_timestamp,
        10: encode_randomness,
    }.get(len(value))
    if encoder is None:
        raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(len(value)))
    return encoder(value)
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    length = len(value)
    if length != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))
    # Hoist the module-level alphabet into a local for faster lookups below.
    encoding = ENCODING
    # Emit 26 characters, each carrying 5 bits sliced straight out of the
    # 16-byte (128-bit) buffer. The first character takes only the top 3 bits
    # of value[0] (mask 224 >> 5), so 130 character-bits cover the 128 data bits.
    return \
        encoding[(value[0] & 224) >> 5] + \
        encoding[value[0] & 31] + \
        encoding[(value[1] & 248) >> 3] + \
        encoding[((value[1] & 7) << 2) | ((value[2] & 192) >> 6)] + \
        encoding[((value[2] & 62) >> 1)] + \
        encoding[((value[2] & 1) << 4) | ((value[3] & 240) >> 4)] + \
        encoding[((value[3] & 15) << 1) | ((value[4] & 128) >> 7)] + \
        encoding[(value[4] & 124) >> 2] + \
        encoding[((value[4] & 3) << 3) | ((value[5] & 224) >> 5)] + \
        encoding[value[5] & 31] + \
        encoding[(value[6] & 248) >> 3] + \
        encoding[((value[6] & 7) << 2) | ((value[7] & 192) >> 6)] + \
        encoding[(value[7] & 62) >> 1] + \
        encoding[((value[7] & 1) << 4) | ((value[8] & 240) >> 4)] + \
        encoding[((value[8] & 15) << 1) | ((value[9] & 128) >> 7)] + \
        encoding[(value[9] & 124) >> 2] + \
        encoding[((value[9] & 3) << 3) | ((value[10] & 224) >> 5)] + \
        encoding[value[10] & 31] + \
        encoding[(value[11] & 248) >> 3] + \
        encoding[((value[11] & 7) << 2) | ((value[12] & 192) >> 6)] + \
        encoding[(value[12] & 62) >> 1] + \
        encoding[((value[12] & 1) << 4) | ((value[13] & 240) >> 4)] + \
        encoding[((value[13] & 15) << 1) | ((value[14] & 128) >> 7)] + \
        encoding[(value[14] & 124) >> 2] + \
        encoding[((value[14] & 3) << 3) | ((value[15] & 224) >> 5)] + \
        encoding[value[15] & 31]
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
    are a timestamp in milliseconds.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))
    # Hoist the module-level alphabet into a local for faster lookups below.
    encoding = ENCODING
    # Emit 10 characters of 5 bits each from the 6-byte (48-bit) timestamp.
    # The first character carries only the top 3 bits of timestamp[0]
    # (mask 224 >> 5), so 50 character-bits cover the 48 data bits.
    return \
        encoding[(timestamp[0] & 224) >> 5] + \
        encoding[timestamp[0] & 31] + \
        encoding[(timestamp[1] & 248) >> 3] + \
        encoding[((timestamp[1] & 7) << 2) | ((timestamp[2] & 192) >> 6)] + \
        encoding[((timestamp[2] & 62) >> 1)] + \
        encoding[((timestamp[2] & 1) << 4) | ((timestamp[3] & 240) >> 4)] + \
        encoding[((timestamp[3] & 15) << 1) | ((timestamp[4] & 128) >> 7)] + \
        encoding[(timestamp[4] & 124) >> 2] + \
        encoding[((timestamp[4] & 3) << 3) | ((timestamp[5] & 224) >> 5)] + \
        encoding[timestamp[5] & 31]
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID, which
    are cryptographically secure random values.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    length = len(randomness)
    if length != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))
    # Hoist the module-level alphabet into a local for faster lookups below.
    encoding = ENCODING
    # Emit 16 characters of 5 bits each from the 10-byte (80-bit) randomness.
    # 16 * 5 == 10 * 8, so this is an exact fit with no padding bits.
    return \
        encoding[(randomness[0] & 248) >> 3] + \
        encoding[((randomness[0] & 7) << 2) | ((randomness[1] & 192) >> 6)] + \
        encoding[(randomness[1] & 62) >> 1] + \
        encoding[((randomness[1] & 1) << 4) | ((randomness[2] & 240) >> 4)] + \
        encoding[((randomness[2] & 15) << 1) | ((randomness[3] & 128) >> 7)] + \
        encoding[(randomness[3] & 124) >> 2] + \
        encoding[((randomness[3] & 3) << 3) | ((randomness[4] & 224) >> 5)] + \
        encoding[randomness[4] & 31] + \
        encoding[(randomness[5] & 248) >> 3] + \
        encoding[((randomness[5] & 7) << 2) | ((randomness[6] & 192) >> 6)] + \
        encoding[(randomness[6] & 62) >> 1] + \
        encoding[((randomness[6] & 1) << 4) | ((randomness[7] & 240) >> 4)] + \
        encoding[((randomness[7] & 15) << 1) | ((randomness[8] & 128) >> 7)] + \
        encoding[(randomness[8] & 124) >> 2] + \
        encoding[((randomness[8] & 3) << 3) | ((randomness[9] & 224) >> 5)] + \
        encoding[randomness[9] & 31]
def decode(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: You should only use this method if you've got a :class:`~str` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~str` instance, you should call the `decode_*` method explicitly for
        better performance.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10, 16, or 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Dispatch on string length; a full 26-character ULID is the assumed hot path.
    decoder = {
        26: decode_ulid,
        10: decode_timestamp,
        16: decode_randomness,
    }.get(len(value))
    if decoder is None:
        raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(len(value)))
    return decoder(value)
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Validate length and character set, and obtain the raw ASCII byte values.
    encoded = str_to_bytes(value, 26)
    # Hoist the module-level lookup table into a local for faster access.
    decoding = DECODING
    # Repack 26 five-bit Base32 values into 16 bytes (128 bits). The first 6
    # output bytes are the timestamp, the last 10 are the randomness; the
    # first character contributes only its low 3 bits (masked off by & 0xFF).
    return bytes((
        ((decoding[encoded[0]] << 5) | decoding[encoded[1]]) & 0xFF,
        ((decoding[encoded[2]] << 3) | (decoding[encoded[3]] >> 2)) & 0xFF,
        ((decoding[encoded[3]] << 6) | (decoding[encoded[4]] << 1) | (decoding[encoded[5]] >> 4)) & 0xFF,
        ((decoding[encoded[5]] << 4) | (decoding[encoded[6]] >> 1)) & 0xFF,
        ((decoding[encoded[6]] << 7) | (decoding[encoded[7]] << 2) | (decoding[encoded[8]] >> 3)) & 0xFF,
        ((decoding[encoded[8]] << 5) | (decoding[encoded[9]])) & 0xFF,
        ((decoding[encoded[10]] << 3) | (decoding[encoded[11]] >> 2)) & 0xFF,
        ((decoding[encoded[11]] << 6) | (decoding[encoded[12]] << 1) | (decoding[encoded[13]] >> 4)) & 0xFF,
        ((decoding[encoded[13]] << 4) | (decoding[encoded[14]] >> 1)) & 0xFF,
        ((decoding[encoded[14]] << 7) | (decoding[encoded[15]] << 2) | (decoding[encoded[16]] >> 3)) & 0xFF,
        ((decoding[encoded[16]] << 5) | (decoding[encoded[17]])) & 0xFF,
        ((decoding[encoded[18]] << 3) | (decoding[encoded[19]] >> 2)) & 0xFF,
        ((decoding[encoded[19]] << 6) | (decoding[encoded[20]] << 1) | (decoding[encoded[21]] >> 4)) & 0xFF,
        ((decoding[encoded[21]] << 4) | (decoding[encoded[22]] >> 1)) & 0xFF,
        ((decoding[encoded[22]] << 7) | (decoding[encoded[23]] << 2) | (decoding[encoded[24]] >> 3)) & 0xFF,
        ((decoding[encoded[24]] << 5) | (decoding[encoded[25]])) & 0xFF
    ))
def decode_randomness(randomness: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
    are cryptographically secure random values.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param randomness: String to decode
    :type randomness: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 16 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Validate length and character set, and obtain the raw ASCII byte values.
    encoded = str_to_bytes(randomness, 16)
    # Hoist the module-level lookup table into a local for faster access.
    decoding = DECODING
    # Repack 16 five-bit Base32 values into 10 bytes (80 bits, an exact fit:
    # 16 * 5 == 10 * 8, so no bits are discarded here, unlike the timestamp).
    return bytes((
        ((decoding[encoded[0]] << 3) | (decoding[encoded[1]] >> 2)) & 0xFF,
        ((decoding[encoded[1]] << 6) | (decoding[encoded[2]] << 1) | (decoding[encoded[3]] >> 4)) & 0xFF,
        ((decoding[encoded[3]] << 4) | (decoding[encoded[4]] >> 1)) & 0xFF,
        ((decoding[encoded[4]] << 7) | (decoding[encoded[5]] << 2) | (decoding[encoded[6]] >> 3)) & 0xFF,
        ((decoding[encoded[6]] << 5) | (decoding[encoded[7]])) & 0xFF,
        ((decoding[encoded[8]] << 3) | (decoding[encoded[9]] >> 2)) & 0xFF,
        ((decoding[encoded[9]] << 6) | (decoding[encoded[10]] << 1) | (decoding[encoded[11]] >> 4)) & 0xFF,
        ((decoding[encoded[11]] << 4) | (decoding[encoded[12]] >> 1)) & 0xFF,
        ((decoding[encoded[12]] << 7) | (decoding[encoded[13]] << 2) | (decoding[encoded[14]] >> 3)) & 0xFF,
        ((decoding[encoded[14]] << 5) | (decoding[encoded[15]])) & 0xFF
    ))
def str_to_bytes(value: str, expected_length: int) -> bytes:
    """
    Convert the given string to bytes and validate it is within the Base32 character set.

    :param value: String to convert to bytes
    :type value: :class:`~str`
    :param expected_length: Expected length of the input string
    :type expected_length: :class:`~int`
    :return: Value converted to bytes.
    :rtype: :class:`~bytes`
    """
    actual_length = len(value)
    if actual_length != expected_length:
        raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, actual_length))
    try:
        as_bytes = value.encode('ascii')
    except UnicodeEncodeError as ex:
        raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))
    lookup = DECODING
    # Reject anything outside the Base32 alphabet: valid characters decode to
    # 0..31, while the lookup table holds 0xFF for every other byte value.
    # Note: ASCII encoding handles the out of range checking for us.
    for char_code in as_bytes:
        if lookup[char_code] > 31:
            raise ValueError('Non-base32 character found: "{}"'.format(chr(char_code)))
    return as_bytes
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.