repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
heigeo/climata
|
climata/huc8/__init__.py
|
get_huc8
|
python
|
def get_huc8(prefix):
if not prefix.isdigit():
# Look up hucs by name
name = prefix
prefix = None
for row in hucs:
if row.basin.lower() == name.lower():
# Use most general huc if two have the same name
if prefix is None or len(row.huc) < len(prefix):
prefix = row.huc
if prefix is None:
return []
huc8s = []
for row in hucs:
# Return all 8-digit hucs with given prefix
if len(row.huc) == 8 and row.huc.startswith(prefix):
huc8s.append(row.huc)
return huc8s
|
Return all HUC8s matching the given prefix (e.g. 1801) or basin name
(e.g. Klamath)
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/huc8/__init__.py#L23-L46
| null |
from wq.io import NetLoader, TupleMapper, BaseIO
from climata.parsers import RdbParser
class HucIO(NetLoader, RdbParser, TupleMapper, BaseIO):
url = "http://water.usgs.gov/GIS/new_huc_rdb.txt"
def parse(self):
super(HucIO, self).parse()
# FIXME: new_huc_rdb.txt isn't a valid RDB file; remove non-digit text
# at end of file.
for i in range(0, 100):
val = self.data[-i - 1].get('huc', None) or ''
if val.isdigit():
break
self.data = self.data[:-i]
hucs = list(HucIO())
|
heigeo/climata
|
climata/acis/__init__.py
|
StationMetaIO.parse
|
python
|
def parse(self):
super(AcisIO, self).parse()
# This is more of a "mapping" step than a "parsing" step, but mappers
# only allow one-to-one mapping from input fields to output fields.
for row in self.data:
if 'meta' in row:
row = row['meta']
if 'll' in row:
row['longitude'], row['latitude'] = row['ll']
del row['ll']
|
Convert ACIS 'll' value into separate latitude and longitude.
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L80-L93
| null |
class StationMetaIO(AcisIO):
"""
Retrieves metadata about the climate stations in a region.
See http://data.rcc-acis.org/doc/#title8
"""
namespace = "meta" # For wq.io.parsers.text.JsonParser
path = "StnMeta"
# These options are not required for StationMetaIO
start_date = DateOpt(url_param='sdate')
end_date = DateOpt(url_param='edate')
parameter = ParameterOpt()
def map_value(self, field, value):
"""
Clean up some values returned from the web service.
(overrides wq.io.mappers.BaseMapper)
"""
if field == 'sids':
# Site identifiers are returned as "[id] [auth_id]";
# Map to auth name for easier usability
ids = {}
for idinfo in value:
id, auth = idinfo.split(' ')
auth = AUTHORITY_BY_ID[auth]
ids[auth['name']] = id
return ids
if field == 'valid_daterange':
# Date ranges for each element are returned in an array
# (sorted by the order the elements were were requested);
# Convert to dictionary with element id as key
elems, complex = self.getlist('parameter')
ranges = {}
for elem, val in zip(elems, value):
if val:
start, end = val
ranges[elem] = (parse_date(start), parse_date(end))
else:
ranges[elem] = None, None
return ranges
return value
|
heigeo/climata
|
climata/acis/__init__.py
|
StationMetaIO.map_value
|
python
|
def map_value(self, field, value):
if field == 'sids':
# Site identifiers are returned as "[id] [auth_id]";
# Map to auth name for easier usability
ids = {}
for idinfo in value:
id, auth = idinfo.split(' ')
auth = AUTHORITY_BY_ID[auth]
ids[auth['name']] = id
return ids
if field == 'valid_daterange':
# Date ranges for each element are returned in an array
# (sorted by the order the elements were were requested);
# Convert to dictionary with element id as key
elems, complex = self.getlist('parameter')
ranges = {}
for elem, val in zip(elems, value):
if val:
start, end = val
ranges[elem] = (parse_date(start), parse_date(end))
else:
ranges[elem] = None, None
return ranges
return value
|
Clean up some values returned from the web service.
(overrides wq.io.mappers.BaseMapper)
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L95-L124
| null |
class StationMetaIO(AcisIO):
"""
Retrieves metadata about the climate stations in a region.
See http://data.rcc-acis.org/doc/#title8
"""
namespace = "meta" # For wq.io.parsers.text.JsonParser
path = "StnMeta"
# These options are not required for StationMetaIO
start_date = DateOpt(url_param='sdate')
end_date = DateOpt(url_param='edate')
parameter = ParameterOpt()
def parse(self):
"""
Convert ACIS 'll' value into separate latitude and longitude.
"""
super(AcisIO, self).parse()
# This is more of a "mapping" step than a "parsing" step, but mappers
# only allow one-to-one mapping from input fields to output fields.
for row in self.data:
if 'meta' in row:
row = row['meta']
if 'll' in row:
row['longitude'], row['latitude'] = row['ll']
del row['ll']
|
heigeo/climata
|
climata/acis/__init__.py
|
StationDataIO.get_field_names
|
python
|
def get_field_names(self):
field_names = super(StationDataIO, self).get_field_names()
if set(field_names) == set(['meta', 'data']):
meta_fields = list(self.data[0]['meta'].keys())
if set(meta_fields) < set(self.getvalue('meta')):
meta_fields = self.getvalue('meta')
field_names = list(meta_fields) + ['data']
return field_names
|
ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L147-L158
| null |
class StationDataIO(StationMetaIO):
"""
Retrieve daily time series data from the climate stations in a region.
See http://data.rcc-acis.org/doc/#title19
"""
nested = True
namespace = "data" # For wq.io.parsers.text.JsonParser
path = "MultiStnData"
# Specify ACIS-defined URL parameters for start/end date
start_date = DateOpt(required=True, url_param='sdate')
end_date = DateOpt(required=True, url_param='edate')
parameter = ParameterOpt(required=True)
# Additional information for daily results
add = ChoiceOpt(multi=True, choices=ADD_IDS)
def serialize_params(self, params, complex):
# If set, apply "add" option to each requested element / parameter
# (Rather than as a top-level URL param)
if 'add' in params:
complex = True
elems = []
for elem in params.get('parameter', []):
if not isinstance(elem, dict):
elem = {'name': elem}
elem['add'] = ",".join(params['add'])
elems.append(elem)
params['parameter'] = elems
del params['add']
return super(StationDataIO, self).serialize_params(params, complex)
def usable_item(self, data):
"""
ACIS web service returns "meta" and "data" for each station; use meta
attributes as item values, and add an IO for iterating over "data"
"""
# Use metadata as item
item = data['meta']
# Add nested IO for data
elems, elems_is_complex = self.getlist('parameter')
if elems_is_complex:
elems = [elem['name'] for elem in elems]
add, add_is_complex = self.getlist('add')
item['data'] = DataIO(
data=data['data'],
parameter=elems,
add=add,
start_date=self.getvalue('start_date'),
end_date=self.getvalue('end_date'),
)
# TupleMapper will convert item to namedtuple
return super(StationDataIO, self).usable_item(item)
|
heigeo/climata
|
climata/acis/__init__.py
|
StationDataIO.usable_item
|
python
|
def usable_item(self, data):
# Use metadata as item
item = data['meta']
# Add nested IO for data
elems, elems_is_complex = self.getlist('parameter')
if elems_is_complex:
elems = [elem['name'] for elem in elems]
add, add_is_complex = self.getlist('add')
item['data'] = DataIO(
data=data['data'],
parameter=elems,
add=add,
start_date=self.getvalue('start_date'),
end_date=self.getvalue('end_date'),
)
# TupleMapper will convert item to namedtuple
return super(StationDataIO, self).usable_item(item)
|
ACIS web service returns "meta" and "data" for each station; use meta
attributes as item values, and add an IO for iterating over "data"
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L175-L199
| null |
class StationDataIO(StationMetaIO):
"""
Retrieve daily time series data from the climate stations in a region.
See http://data.rcc-acis.org/doc/#title19
"""
nested = True
namespace = "data" # For wq.io.parsers.text.JsonParser
path = "MultiStnData"
# Specify ACIS-defined URL parameters for start/end date
start_date = DateOpt(required=True, url_param='sdate')
end_date = DateOpt(required=True, url_param='edate')
parameter = ParameterOpt(required=True)
# Additional information for daily results
add = ChoiceOpt(multi=True, choices=ADD_IDS)
def get_field_names(self):
"""
ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names
"""
field_names = super(StationDataIO, self).get_field_names()
if set(field_names) == set(['meta', 'data']):
meta_fields = list(self.data[0]['meta'].keys())
if set(meta_fields) < set(self.getvalue('meta')):
meta_fields = self.getvalue('meta')
field_names = list(meta_fields) + ['data']
return field_names
def serialize_params(self, params, complex):
# If set, apply "add" option to each requested element / parameter
# (Rather than as a top-level URL param)
if 'add' in params:
complex = True
elems = []
for elem in params.get('parameter', []):
if not isinstance(elem, dict):
elem = {'name': elem}
elem['add'] = ",".join(params['add'])
elems.append(elem)
params['parameter'] = elems
del params['add']
return super(StationDataIO, self).serialize_params(params, complex)
|
heigeo/climata
|
climata/acis/__init__.py
|
DataIO.load_data
|
python
|
def load_data(self, data):
dates = fill_date_range(self.start_date, self.end_date)
for row, date in zip(data, dates):
data = {'date': date}
if self.add:
# If self.add is set, results will contain additional
# attributes (e.g. flags). In that case, create one row per
# result, with attributes "date", "elem", "value", and one for
# each item in self.add.
for elem, vals in zip(self.parameter, row):
data['elem'] = elem
for add, val in zip(['value'] + self.add, vals):
data[add] = val
yield data
else:
# Otherwise, return one row per date, with "date" and each
# element's value as attributes.
for elem, val in zip(self.parameter, row):
# namedtuple doesn't like numeric field names
if elem.isdigit():
elem = "e%s" % elem
data[elem] = val
yield data
|
MultiStnData data results are arrays without explicit dates;
Infer time series based on start date.
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L216-L243
|
[
"def fill_date_range(start_date, end_date, date_format=None):\n \"\"\"\n Function accepts start date, end date, and format (if dates are strings)\n and returns a list of Python dates.\n \"\"\"\n\n if date_format:\n start_date = datetime.strptime(start_date, date_format).date()\n end_date = datetime.strptime(end_date, date_format).date()\n date_list = []\n while start_date <= end_date:\n date_list.append(start_date)\n start_date = start_date + timedelta(days=1)\n return date_list\n"
] |
class DataIO(TimeSeriesMapper, BaseIO):
"""
IO for iterating over ACIS time series data.
Created internally by StationDataIO; not meant to be used directly.
"""
# Inherited from parent
parameter = []
add = []
start_date = None
end_date = None
date_formats = [] # For TimeSeriesMapper
def __init__(self, *args, **kwargs):
data = kwargs.pop('data')
super(DataIO, self).__init__(*args, **kwargs)
self.data = list(self.load_data(data))
def get_field_names(self):
"""
Different field names depending on self.add setting (see load_data)
For BaseIO
"""
if self.add:
return ['date', 'elem', 'value'] + [flag for flag in self.add]
else:
field_names = ['date']
for elem in self.parameter:
# namedtuple doesn't like numeric field names
if elem.isdigit():
elem = "e%s" % elem
field_names.append(elem)
return field_names
@property
def key_fields(self):
"""
Different key fields depending on self.add setting (see load_data)
For TimeSeriesMapper
"""
if self.add:
return ['date', 'elem']
else:
return ['date']
|
heigeo/climata
|
climata/acis/__init__.py
|
DataIO.get_field_names
|
python
|
def get_field_names(self):
if self.add:
return ['date', 'elem', 'value'] + [flag for flag in self.add]
else:
field_names = ['date']
for elem in self.parameter:
# namedtuple doesn't like numeric field names
if elem.isdigit():
elem = "e%s" % elem
field_names.append(elem)
return field_names
|
Different field names depending on self.add setting (see load_data)
For BaseIO
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/acis/__init__.py#L250-L264
| null |
class DataIO(TimeSeriesMapper, BaseIO):
"""
IO for iterating over ACIS time series data.
Created internally by StationDataIO; not meant to be used directly.
"""
# Inherited from parent
parameter = []
add = []
start_date = None
end_date = None
date_formats = [] # For TimeSeriesMapper
def load_data(self, data):
"""
MultiStnData data results are arrays without explicit dates;
Infer time series based on start date.
"""
dates = fill_date_range(self.start_date, self.end_date)
for row, date in zip(data, dates):
data = {'date': date}
if self.add:
# If self.add is set, results will contain additional
# attributes (e.g. flags). In that case, create one row per
# result, with attributes "date", "elem", "value", and one for
# each item in self.add.
for elem, vals in zip(self.parameter, row):
data['elem'] = elem
for add, val in zip(['value'] + self.add, vals):
data[add] = val
yield data
else:
# Otherwise, return one row per date, with "date" and each
# element's value as attributes.
for elem, val in zip(self.parameter, row):
# namedtuple doesn't like numeric field names
if elem.isdigit():
elem = "e%s" % elem
data[elem] = val
yield data
def __init__(self, *args, **kwargs):
data = kwargs.pop('data')
super(DataIO, self).__init__(*args, **kwargs)
self.data = list(self.load_data(data))
@property
def key_fields(self):
"""
Different key fields depending on self.add setting (see load_data)
For TimeSeriesMapper
"""
if self.add:
return ['date', 'elem']
else:
return ['date']
|
heigeo/climata
|
climata/base.py
|
fill_date_range
|
python
|
def fill_date_range(start_date, end_date, date_format=None):
if date_format:
start_date = datetime.strptime(start_date, date_format).date()
end_date = datetime.strptime(end_date, date_format).date()
date_list = []
while start_date <= end_date:
date_list.append(start_date)
start_date = start_date + timedelta(days=1)
return date_list
|
Function accepts start date, end date, and format (if dates are strings)
and returns a list of Python dates.
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L252-L265
| null |
from warnings import warn
from .version import VERSION
from datetime import datetime, timedelta
from wq.io import make_date_mapper, NetLoader, Zipper
parse_date = make_date_mapper('%Y-%m-%d')
class FilterOpt(object):
"""
Base class for describing a filter option
"""
name = None # Option name as defined on IO class
url_param = None # Actual URL parameter to use (defaults to name)
required = False # Whether option is equired for valid request
multi = False # Whether multiple values are allowed
ignored = False # Used on subclasses when option does not apply
default = None # Default value
def __init__(self, **kwargs):
"""
Allow setting the above via kwargs
"""
self.__dict__.update(**kwargs)
def get_url_param(self):
return self.url_param or self.name
def parse(self, value):
"""
Enforce rules and return parsed value
"""
if self.required and value is None:
raise ValueError("%s is required!" % self.name)
elif self.ignored and value is not None:
warn("%s is ignored for this class!" % self.name)
elif not self.multi and isinstance(value, (list, tuple)):
if len(value) > 1:
raise ValueError(
"%s does not accept multiple values!" % self.name
)
return value[0]
elif self.multi and value is not None:
if not isinstance(value, (list, tuple)):
return [value]
return value
class DateOpt(FilterOpt):
date_only = True
def parse_date(self, value):
return parse_date(value)
def parse(self, value):
"""
Parse date
"""
value = super(DateOpt, self).parse(value)
if value is None:
return None
if isinstance(value, str):
value = self.parse_date(value)
if isinstance(value, datetime) and self.date_only:
value = value.date()
return value
class ChoiceOpt(FilterOpt):
choices = [] # Valid choices for this option
def parse(self, value):
value = super(ChoiceOpt, self).parse(value)
if value is None:
return None
if isinstance(value, (list, tuple)):
check_values = value
else:
check_values = [value]
for cv in check_values:
if cv not in self.choices:
raise ValueError(
"%s is not a valid choice for %s!" % (cv, self.name)
)
return value
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
def get_filter_options(cls):
"""
List all filter options defined on class (and superclasses)
"""
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
"""
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
"""
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
@property
def params(self):
"""
URL parameters for wq.io.loaders.NetLoader
"""
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
def serialize_params(self, params, complex=False):
"""
Serialize parameter names and values to a dict ready for urlencode()
"""
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
class ZipWebserviceLoader(Zipper, WebserviceLoader):
binary = True
def load(self):
super(ZipWebserviceLoader, self).load()
self.unzip_file()
def as_list(value):
if isinstance(value, (list, tuple)):
return value
else:
return [value]
|
heigeo/climata
|
climata/base.py
|
FilterOpt.parse
|
python
|
def parse(self, value):
if self.required and value is None:
raise ValueError("%s is required!" % self.name)
elif self.ignored and value is not None:
warn("%s is ignored for this class!" % self.name)
elif not self.multi and isinstance(value, (list, tuple)):
if len(value) > 1:
raise ValueError(
"%s does not accept multiple values!" % self.name
)
return value[0]
elif self.multi and value is not None:
if not isinstance(value, (list, tuple)):
return [value]
return value
|
Enforce rules and return parsed value
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L32-L49
| null |
class FilterOpt(object):
"""
Base class for describing a filter option
"""
name = None # Option name as defined on IO class
url_param = None # Actual URL parameter to use (defaults to name)
required = False # Whether option is equired for valid request
multi = False # Whether multiple values are allowed
ignored = False # Used on subclasses when option does not apply
default = None # Default value
def __init__(self, **kwargs):
"""
Allow setting the above via kwargs
"""
self.__dict__.update(**kwargs)
def get_url_param(self):
return self.url_param or self.name
|
heigeo/climata
|
climata/base.py
|
DateOpt.parse
|
python
|
def parse(self, value):
value = super(DateOpt, self).parse(value)
if value is None:
return None
if isinstance(value, str):
value = self.parse_date(value)
if isinstance(value, datetime) and self.date_only:
value = value.date()
return value
|
Parse date
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L58-L69
|
[
"def parse(self, value):\n \"\"\"\n Enforce rules and return parsed value\n \"\"\"\n if self.required and value is None:\n raise ValueError(\"%s is required!\" % self.name)\n elif self.ignored and value is not None:\n warn(\"%s is ignored for this class!\" % self.name)\n elif not self.multi and isinstance(value, (list, tuple)):\n if len(value) > 1:\n raise ValueError(\n \"%s does not accept multiple values!\" % self.name\n )\n return value[0]\n elif self.multi and value is not None:\n if not isinstance(value, (list, tuple)):\n return [value]\n return value\n",
"def parse_date(self, value):\n return parse_date(value)\n"
] |
class DateOpt(FilterOpt):
date_only = True
def parse_date(self, value):
return parse_date(value)
|
heigeo/climata
|
climata/base.py
|
WebserviceLoader.get_filter_options
|
python
|
def get_filter_options(cls):
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
|
List all filter options defined on class (and superclasses)
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L135-L151
| null |
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
"""
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
"""
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
@property
def params(self):
"""
URL parameters for wq.io.loaders.NetLoader
"""
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
def serialize_params(self, params, complex=False):
"""
Serialize parameter names and values to a dict ready for urlencode()
"""
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
|
heigeo/climata
|
climata/base.py
|
WebserviceLoader.getlist
|
python
|
def getlist(self, name):
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
|
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L163-L185
|
[
"def as_list(value):\n if isinstance(value, (list, tuple)):\n return value\n else:\n return [value]\n"
] |
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
def get_filter_options(cls):
"""
List all filter options defined on class (and superclasses)
"""
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
"""
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
"""
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
@property
def params(self):
"""
URL parameters for wq.io.loaders.NetLoader
"""
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
def serialize_params(self, params, complex=False):
"""
Serialize parameter names and values to a dict ready for urlencode()
"""
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
|
heigeo/climata
|
climata/base.py
|
WebserviceLoader.set_param
|
python
|
def set_param(self, into, name):
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
|
Set parameter key, noting whether list value is "complex"
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L187-L194
| null |
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
def get_filter_options(cls):
"""
List all filter options defined on class (and superclasses)
"""
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
"""
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
@property
def params(self):
"""
URL parameters for wq.io.loaders.NetLoader
"""
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
def serialize_params(self, params, complex=False):
"""
Serialize parameter names and values to a dict ready for urlencode()
"""
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
|
heigeo/climata
|
climata/base.py
|
WebserviceLoader.get_params
|
python
|
def get_params(self):
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
|
Get parameters for web service, noting whether any are "complex"
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L196-L208
| null |
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
def get_filter_options(cls):
"""
List all filter options defined on class (and superclasses)
"""
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
"""
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
@property
def params(self):
"""
URL parameters for wq.io.loaders.NetLoader
"""
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
def serialize_params(self, params, complex=False):
"""
Serialize parameter names and values to a dict ready for urlencode()
"""
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
|
heigeo/climata
|
climata/base.py
|
WebserviceLoader.params
|
python
|
def params(self):
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
|
URL parameters for wq.io.loaders.NetLoader
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L211-L218
| null |
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
def get_filter_options(cls):
"""
List all filter options defined on class (and superclasses)
"""
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
"""
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
"""
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
@property
def serialize_params(self, params, complex=False):
"""
Serialize parameter names and values to a dict ready for urlencode()
"""
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
|
heigeo/climata
|
climata/base.py
|
WebserviceLoader.serialize_params
|
python
|
def serialize_params(self, params, complex=False):
if complex:
# See climata.acis for an example implementation
raise NotImplementedError("Cannot serialize %s!" % params)
else:
# Simpler queries can use traditional URL parameters
return {
self.get_url_param(key): ','.join(val)
for key, val in params.items()
}
|
Serialize parameter names and values to a dict ready for urlencode()
|
train
|
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L220-L232
| null |
class WebserviceLoader(NetLoader):
"""
NetLoader subclass with enhanced functionality for enumerating and
validating URL arguments.
"""
# Default filter options, common to most web services for climate data.
# Every climata IO class is assumed to support at least these options; if
# any are not applicable they should be overridden with ignored=True.
# Time period filters
start_date = DateOpt(required=True)
end_date = DateOpt(required=True)
# Region filters
state = FilterOpt(multi=True)
county = FilterOpt(multi=True)
basin = FilterOpt(multi=True)
# Station and parameter code filters
station = FilterOpt(multi=True)
parameter = FilterOpt(multi=True)
# URL params that apply to every request (if any)
default_params = {}
def __init__(self, *args, **kwargs):
"""
Initialize web service (and general IO) options
"""
# Validate web service parameters using FilterOpt information
self._values = {}
for name, opt in self.filter_options.items():
opt.name = name
val = kwargs.pop(name, opt.default)
self._values[name] = opt.parse(val)
# Mimic BaseIO behavior since it's not a super class of NetLoader
if kwargs:
self.__dict__.update(**kwargs)
self.refresh()
@classmethod
def get_filter_options(cls):
"""
List all filter options defined on class (and superclasses)
"""
attr = '_filter_options_%s' % id(cls)
options = getattr(cls, attr, {})
if options:
return options
for key in dir(cls):
val = getattr(cls, key)
if isinstance(val, FilterOpt):
options[key] = val
setattr(cls, attr, options)
return options
@property
def filter_options(self):
return type(self).get_filter_options()
def get_url_param(self, key):
return self.filter_options[key].get_url_param()
def getvalue(self, name):
return self._values[name]
def getlist(self, name):
"""
Retrieve given property from class/instance, ensuring it is a list.
Also determine whether the list contains simple text/numeric values or
nested dictionaries (a "complex" list)
"""
value = self.getvalue(name)
complex = {}
def str_value(val):
# TODO: nonlocal complex
if isinstance(val, dict):
complex['complex'] = True
return val
else:
return str(val)
if value is None:
pass
else:
value = [str_value(val) for val in as_list(value)]
return value, bool(complex)
def set_param(self, into, name):
"""
Set parameter key, noting whether list value is "complex"
"""
value, complex = self.getlist(name)
if value is not None:
into[name] = value
return complex
def get_params(self):
"""
Get parameters for web service, noting whether any are "complex"
"""
params = {}
complex = False
for name, opt in self.filter_options.items():
if opt.ignored:
continue
if self.set_param(params, name):
complex = True
return params, complex
@property
def params(self):
"""
URL parameters for wq.io.loaders.NetLoader
"""
params, complex = self.get_params()
url_params = self.default_params.copy()
url_params.update(self.serialize_params(params, complex))
return url_params
@property
def user_agent(self):
agent = "climata/%s %s %s" % (
VERSION,
super(WebserviceLoader, self).user_agent,
"https://github.com/heigeo/climata",
)
return agent
|
cpburnz/python-path-specification
|
pathspec/pathspec.py
|
PathSpec.from_lines
|
python
|
def from_lines(cls, pattern_factory, lines):
if isinstance(pattern_factory, string_types):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if isinstance(lines, (bytes, unicode)):
raise TypeError("lines:{!r} is not an iterable.".format(lines))
lines = [pattern_factory(line) for line in lines if line]
return cls(lines)
|
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L50-L75
|
[
"def lookup_pattern(name):\n\t\"\"\"\n\tLookups a registered pattern factory by name.\n\n\t*name* (:class:`str`) is the name of the pattern factory.\n\n\tReturns the registered pattern factory (:class:`~collections.abc.Callable`).\n\tIf no pattern factory is registered, raises :exc:`KeyError`.\n\t\"\"\"\n\treturn _registered_patterns[name]\n"
] |
class PathSpec(object):
"""
The :class:`PathSpec` class is a wrapper around a list of compiled
:class:`.Pattern` instances.
"""
def __init__(self, patterns):
"""
Initializes the :class:`PathSpec` instance.
*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
yields each compiled pattern (:class:`.Pattern`).
"""
self.patterns = patterns if isinstance(patterns, collection_type) else list(patterns)
"""
*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
contains the compiled patterns.
"""
def __eq__(self, other):
"""
Tests the equality of this path-spec with *other* (:class:`PathSpec`)
by comparing their :attr:`~PathSpec.patterns` attributes.
"""
if isinstance(other, PathSpec):
paired_patterns = izip_longest(self.patterns, other.patterns)
return all(a == b for a, b in paired_patterns)
else:
return NotImplemented
def __len__(self):
"""
Returns the number of compiled patterns this path-spec contains
(:class:`int`).
"""
return len(self.patterns)
@classmethod
def match_file(self, file, separators=None):
"""
Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
norm_file = util.normalize_file(file, separators=separators)
return util.match_file(self.patterns, norm_file)
def match_files(self, files, separators=None):
"""
Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
if isinstance(files, (bytes, unicode)):
raise TypeError("files:{!r} is not an iterable.".format(files))
file_map = util.normalize_files(files, separators=separators)
matched_files = util.match_files(self.patterns, iterkeys(file_map))
for path in matched_files:
yield file_map[path]
def match_tree(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. See
:func:`~pathspec.util.iter_tree` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
return self.match_files(files)
|
cpburnz/python-path-specification
|
pathspec/pathspec.py
|
PathSpec.match_file
|
python
|
def match_file(self, file, separators=None):
norm_file = util.normalize_file(file, separators=separators)
return util.match_file(self.patterns, norm_file)
|
Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L77-L91
|
[
"def match_file(patterns, file):\n\t\"\"\"\n\tMatches the file to the patterns.\n\n\t*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n\tcontains the patterns to use.\n\n\t*file* (:class:`str`) is the normalized file path to be matched\n\tagainst *patterns*.\n\n\tReturns :data:`True` if *file* matched; otherwise, :data:`False`.\n\t\"\"\"\n\tmatched = False\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tif file in pattern.match((file,)):\n\t\t\t\tmatched = pattern.include\n\treturn matched\n",
"def normalize_file(file, separators=None):\n\t\"\"\"\n\tNormalizes the file path to use the POSIX path separator (i.e., ``'/'``).\n\n\t*file* (:class:`str`) is the file path.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tThis does not need to include the POSIX path separator (``'/'``), but\n\tincluding it will not affect the results. Default is :data:`None` for\n\t:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty\n\tcontainer (e.g., an empty tuple ``()``).\n\n\tReturns the normalized file path (:class:`str`).\n\t\"\"\"\n\t# Normalize path separators.\n\tif separators is None:\n\t\tseparators = NORMALIZE_PATH_SEPS\n\tnorm_file = file\n\tfor sep in separators:\n\t\tnorm_file = norm_file.replace(sep, posixpath.sep)\n\n\t# Remove current directory prefix.\n\tif norm_file.startswith('./'):\n\t\tnorm_file = norm_file[2:]\n\n\treturn norm_file\n"
] |
class PathSpec(object):
"""
The :class:`PathSpec` class is a wrapper around a list of compiled
:class:`.Pattern` instances.
"""
def __init__(self, patterns):
"""
Initializes the :class:`PathSpec` instance.
*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
yields each compiled pattern (:class:`.Pattern`).
"""
self.patterns = patterns if isinstance(patterns, collection_type) else list(patterns)
"""
*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
contains the compiled patterns.
"""
def __eq__(self, other):
"""
Tests the equality of this path-spec with *other* (:class:`PathSpec`)
by comparing their :attr:`~PathSpec.patterns` attributes.
"""
if isinstance(other, PathSpec):
paired_patterns = izip_longest(self.patterns, other.patterns)
return all(a == b for a, b in paired_patterns)
else:
return NotImplemented
def __len__(self):
"""
Returns the number of compiled patterns this path-spec contains
(:class:`int`).
"""
return len(self.patterns)
@classmethod
def from_lines(cls, pattern_factory, lines):
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
"""
if isinstance(pattern_factory, string_types):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if isinstance(lines, (bytes, unicode)):
raise TypeError("lines:{!r} is not an iterable.".format(lines))
lines = [pattern_factory(line) for line in lines if line]
return cls(lines)
def match_files(self, files, separators=None):
"""
Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
if isinstance(files, (bytes, unicode)):
raise TypeError("files:{!r} is not an iterable.".format(files))
file_map = util.normalize_files(files, separators=separators)
matched_files = util.match_files(self.patterns, iterkeys(file_map))
for path in matched_files:
yield file_map[path]
def match_tree(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. See
:func:`~pathspec.util.iter_tree` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
return self.match_files(files)
|
cpburnz/python-path-specification
|
pathspec/pathspec.py
|
PathSpec.match_files
|
python
|
def match_files(self, files, separators=None):
if isinstance(files, (bytes, unicode)):
raise TypeError("files:{!r} is not an iterable.".format(files))
file_map = util.normalize_files(files, separators=separators)
matched_files = util.match_files(self.patterns, iterkeys(file_map))
for path in matched_files:
yield file_map[path]
|
Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L93-L115
|
[
"def match_files(patterns, files):\n\t\"\"\"\n\tMatches the files to the patterns.\n\n\t*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n\tcontains the patterns to use.\n\n\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n\tthe normalized file paths to be matched against *patterns*.\n\n\tReturns the matched files (:class:`set` of :class:`str`).\n\t\"\"\"\n\tall_files = files if isinstance(files, collection_type) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn_files.update(result_files)\n\t\t\telse:\n\t\t\t\treturn_files.difference_update(result_files)\n\treturn return_files\n",
"def iterkeys(mapping):\n\treturn mapping.keys()\n",
"def normalize_files(files, separators=None):\n\t\"\"\"\n\tNormalizes the file paths to use the POSIX path separator.\n\n\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n\tthe file paths to be normalized.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tSee :func:`normalize_file` for more information.\n\n\tReturns a :class:`dict` mapping the each normalized file path (:class:`str`)\n\tto the original file path (:class:`str`)\n\t\"\"\"\n\tnorm_files = {}\n\tfor path in files:\n\t\tnorm_files[normalize_file(path, separators=separators)] = path\n\treturn norm_files\n"
] |
class PathSpec(object):
"""
The :class:`PathSpec` class is a wrapper around a list of compiled
:class:`.Pattern` instances.
"""
def __init__(self, patterns):
"""
Initializes the :class:`PathSpec` instance.
*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
yields each compiled pattern (:class:`.Pattern`).
"""
self.patterns = patterns if isinstance(patterns, collection_type) else list(patterns)
"""
*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
contains the compiled patterns.
"""
def __eq__(self, other):
"""
Tests the equality of this path-spec with *other* (:class:`PathSpec`)
by comparing their :attr:`~PathSpec.patterns` attributes.
"""
if isinstance(other, PathSpec):
paired_patterns = izip_longest(self.patterns, other.patterns)
return all(a == b for a, b in paired_patterns)
else:
return NotImplemented
def __len__(self):
"""
Returns the number of compiled patterns this path-spec contains
(:class:`int`).
"""
return len(self.patterns)
@classmethod
def from_lines(cls, pattern_factory, lines):
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
"""
if isinstance(pattern_factory, string_types):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if isinstance(lines, (bytes, unicode)):
raise TypeError("lines:{!r} is not an iterable.".format(lines))
lines = [pattern_factory(line) for line in lines if line]
return cls(lines)
def match_file(self, file, separators=None):
"""
Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
norm_file = util.normalize_file(file, separators=separators)
return util.match_file(self.patterns, norm_file)
def match_tree(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. See
:func:`~pathspec.util.iter_tree` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
return self.match_files(files)
|
cpburnz/python-path-specification
|
pathspec/pathspec.py
|
PathSpec.match_tree
|
python
|
def match_tree(self, root, on_error=None, follow_links=None):
files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
return self.match_files(files)
|
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. See
:func:`~pathspec.util.iter_tree` for more information.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. See
:func:`~pathspec.util.iter_tree` for more information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L117-L137
|
[
"def iter_tree(root, on_error=None, follow_links=None):\n\t\"\"\"\n\tWalks the specified directory for all files.\n\n\t*root* (:class:`str`) is the root directory to search for files.\n\n\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\toptionally is the error handler for file-system exceptions. It will be\n\tcalled with the exception (:exc:`OSError`). Reraise the exception to\n\tabort the walk. Default is :data:`None` to ignore file-system\n\texceptions.\n\n\t*follow_links* (:class:`bool` or :data:`None`) optionally is whether\n\tto walk symbolik links that resolve to directories. Default is\n\t:data:`None` for :data:`True`.\n\n\tRaises :exc:`RecursionError` if recursion is detected.\n\n\tReturns an :class:`~collections.abc.Iterable` yielding the path to\n\teach file (:class:`str`) relative to *root*.\n\t\"\"\"\n\tif on_error is not None and not callable(on_error):\n\t\traise TypeError(\"on_error:{!r} is not callable.\".format(on_error))\n\n\tif follow_links is None:\n\t\tfollow_links = True\n\n\tfor file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):\n\t\tyield file_rel\n",
"def match_files(self, files, separators=None):\n\t\"\"\"\n\tMatches the files to this path-spec.\n\n\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n\tthe file paths to be matched against :attr:`self.patterns\n\t<PathSpec.patterns>`.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`;\n\tor :data:`None`) optionally contains the path separators to\n\tnormalize. See :func:`~pathspec.util.normalize_file` for more\n\tinformation.\n\n\tReturns the matched files (:class:`~collections.abc.Iterable` of\n\t:class:`str`).\n\t\"\"\"\n\tif isinstance(files, (bytes, unicode)):\n\t\traise TypeError(\"files:{!r} is not an iterable.\".format(files))\n\n\tfile_map = util.normalize_files(files, separators=separators)\n\tmatched_files = util.match_files(self.patterns, iterkeys(file_map))\n\tfor path in matched_files:\n\t\tyield file_map[path]\n"
] |
class PathSpec(object):
"""
The :class:`PathSpec` class is a wrapper around a list of compiled
:class:`.Pattern` instances.
"""
def __init__(self, patterns):
"""
Initializes the :class:`PathSpec` instance.
*patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
yields each compiled pattern (:class:`.Pattern`).
"""
self.patterns = patterns if isinstance(patterns, collection_type) else list(patterns)
"""
*patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
contains the compiled patterns.
"""
def __eq__(self, other):
"""
Tests the equality of this path-spec with *other* (:class:`PathSpec`)
by comparing their :attr:`~PathSpec.patterns` attributes.
"""
if isinstance(other, PathSpec):
paired_patterns = izip_longest(self.patterns, other.patterns)
return all(a == b for a, b in paired_patterns)
else:
return NotImplemented
def __len__(self):
"""
Returns the number of compiled patterns this path-spec contains
(:class:`int`).
"""
return len(self.patterns)
@classmethod
def from_lines(cls, pattern_factory, lines):
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
pattern (:class:`str`). This simply has to yield each line so it can
be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
or the result from :meth:`str.splitlines`.
Returns the :class:`PathSpec` instance.
"""
if isinstance(pattern_factory, string_types):
pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if isinstance(lines, (bytes, unicode)):
raise TypeError("lines:{!r} is not an iterable.".format(lines))
lines = [pattern_factory(line) for line in lines if line]
return cls(lines)
def match_file(self, file, separators=None):
"""
Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normalize. See
:func:`~pathspec.util.normalize_file` for more information.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
norm_file = util.normalize_file(file, separators=separators)
return util.match_file(self.patterns, norm_file)
def match_files(self, files, separators=None):
"""
Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
or :data:`None`) optionally contains the path separators to
normalize. See :func:`~pathspec.util.normalize_file` for more
information.
Returns the matched files (:class:`~collections.abc.Iterable` of
:class:`str`).
"""
if isinstance(files, (bytes, unicode)):
raise TypeError("files:{!r} is not an iterable.".format(files))
file_map = util.normalize_files(files, separators=separators)
matched_files = util.match_files(self.patterns, iterkeys(file_map))
for path in matched_files:
yield file_map[path]
|
cpburnz/python-path-specification
|
pathspec/patterns/gitwildmatch.py
|
GitWildMatchPattern.pattern_to_regex
|
python
|
def pattern_to_regex(cls, pattern):
if isinstance(pattern, unicode):
return_type = unicode
elif isinstance(pattern, bytes):
return_type = bytes
pattern = pattern.decode(_BYTES_ENCODING)
else:
raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))
pattern = pattern.strip()
if pattern.startswith('#'):
# A pattern starting with a hash ('#') serves as a comment
# (neither includes nor excludes files). Escape the hash with a
# back-slash to match a literal hash (i.e., '\#').
regex = None
include = None
elif pattern == '/':
# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
# '/' does not match any file.
regex = None
include = None
elif pattern:
if pattern.startswith('!'):
# A pattern starting with an exclamation mark ('!') negates the
# pattern (exclude instead of include). Escape the exclamation
# mark with a back-slash to match a literal exclamation mark
# (i.e., '\!').
include = False
# Remove leading exclamation mark.
pattern = pattern[1:]
else:
include = True
if pattern.startswith('\\'):
# Remove leading back-slash escape for escaped hash ('#') or
# exclamation mark ('!').
pattern = pattern[1:]
# Split pattern into segments.
pattern_segs = pattern.split('/')
# Normalize pattern to make processing easier.
if not pattern_segs[0]:
# A pattern beginning with a slash ('/') will only match paths
# directly on the root directory instead of any descendant
# paths. So, remove empty first segment to make pattern relative
# to root.
del pattern_segs[0]
elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
# A single pattern without a beginning slash ('/') will match
# any descendant path. This is equivalent to "**/{pattern}". So,
# prepend with double-asterisks to make pattern relative to
# root.
# EDGE CASE: This also holds for a single pattern with a
# trailing slash (e.g. dir/).
if pattern_segs[0] != '**':
pattern_segs.insert(0, '**')
else:
# EDGE CASE: A pattern without a beginning slash ('/') but
# contains at least one prepended directory (e.g.
# "dir/{pattern}") should not match "**/dir/{pattern}",
# according to `git check-ignore` (v2.4.1).
pass
if not pattern_segs[-1] and len(pattern_segs) > 1:
# A pattern ending with a slash ('/') will match all descendant
# paths if it is a directory but not if it is a regular file.
# This is equivilent to "{pattern}/**". So, set last segment to
# double asterisks to include all descendants.
pattern_segs[-1] = '**'
# Build regular expression from pattern.
output = ['^']
need_slash = False
end = len(pattern_segs) - 1
for i, seg in enumerate(pattern_segs):
if seg == '**':
if i == 0 and i == end:
# A pattern consisting solely of double-asterisks ('**')
# will match every path.
output.append('.+')
elif i == 0:
# A normalized pattern beginning with double-asterisks
# ('**') will match any leading path segments.
output.append('(?:.+/)?')
need_slash = False
elif i == end:
# A normalized pattern ending with double-asterisks ('**')
# will match any trailing path segments.
output.append('/.*')
else:
# A pattern with inner double-asterisks ('**') will match
# multiple (or zero) inner path segments.
output.append('(?:/.+)?')
need_slash = True
elif seg == '*':
# Match single path segment.
if need_slash:
output.append('/')
output.append('[^/]+')
need_slash = True
else:
# Match segment glob pattern.
if need_slash:
output.append('/')
output.append(cls._translate_segment_glob(seg))
if i == end and include is True:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
# EDGE CASE: However, this does not hold for exclusion cases
# according to `git check-ignore` (v2.4.1).
output.append('(?:/.*)?')
need_slash = True
output.append('$')
regex = ''.join(output)
else:
# A blank pattern is a null-operation (neither includes nor
# excludes files).
regex = None
include = None
if regex is not None and return_type is bytes:
regex = regex.encode(_BYTES_ENCODING)
return regex, include
|
Convert the pattern into a regular expression.
*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
convert into a regular expression.
Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
or :data:`None`), and whether matched files should be included
(:data:`True`), excluded (:data:`False`), or if it is a
null-operation (:data:`None`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L30-L174
| null |
class GitWildMatchPattern(RegexPattern):
"""
The :class:`GitWildMatchPattern` class represents a compiled Git
wildmatch pattern.
"""
# Keep the dict-less class hierarchy.
__slots__ = ()
@classmethod
@staticmethod
def _translate_segment_glob(pattern):
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
"""
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ''
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == '\\':
# Escape character, escape next character.
escape = True
elif char == '*':
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += '[^/]*'
elif char == '?':
# Single-character wildcard. Match any single character (except
# a slash).
regex += '[^/]'
elif char == '[':
# Braket expression wildcard. Except for the beginning
# exclamation mark, the whole braket expression can be used
# directly as regex but we have to find where the expression
# ends.
# - "[][!]" matchs ']', '[' and '!'.
# - "[]-]" matchs ']' and '-'.
# - "[!]a-]" matchs any character except ']', 'a' and '-'.
j = i
# Pass brack expression negation.
if j < end and pattern[j] == '!':
j += 1
# Pass first closing braket if it is at the beginning of the
# expression.
if j < end and pattern[j] == ']':
j += 1
# Find closing braket. Stop once we reach the end or find it.
while j < end and pattern[j] != ']':
j += 1
if j < end:
# Found end of braket expression. Increment j to be one past
# the closing braket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = '['
if pattern[i] == '!':
# Braket expression needs to be negated.
expr += '^'
i += 1
elif pattern[i] == '^':
# POSIX declares that the regex braket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += '\\^'
i += 1
# Build regex braket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace('\\', '\\\\')
# Add regex braket expression to regex result.
regex += expr
# Set i to one past the closing braket.
i = j
else:
# Failed to find closing braket, treat opening braket as a
# braket literal instead of as an expression.
regex += '\\['
else:
# Regular character, escape it for regex.
regex += re.escape(char)
return regex
|
cpburnz/python-path-specification
|
pathspec/patterns/gitwildmatch.py
|
GitWildMatchPattern._translate_segment_glob
|
python
|
def _translate_segment_glob(pattern):
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ''
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == '\\':
# Escape character, escape next character.
escape = True
elif char == '*':
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += '[^/]*'
elif char == '?':
# Single-character wildcard. Match any single character (except
# a slash).
regex += '[^/]'
elif char == '[':
# Braket expression wildcard. Except for the beginning
# exclamation mark, the whole braket expression can be used
# directly as regex but we have to find where the expression
# ends.
# - "[][!]" matchs ']', '[' and '!'.
# - "[]-]" matchs ']' and '-'.
# - "[!]a-]" matchs any character except ']', 'a' and '-'.
j = i
# Pass brack expression negation.
if j < end and pattern[j] == '!':
j += 1
# Pass first closing braket if it is at the beginning of the
# expression.
if j < end and pattern[j] == ']':
j += 1
# Find closing braket. Stop once we reach the end or find it.
while j < end and pattern[j] != ']':
j += 1
if j < end:
# Found end of braket expression. Increment j to be one past
# the closing braket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = '['
if pattern[i] == '!':
# Braket expression needs to be negated.
expr += '^'
i += 1
elif pattern[i] == '^':
# POSIX declares that the regex braket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += '\\^'
i += 1
# Build regex braket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace('\\', '\\\\')
# Add regex braket expression to regex result.
regex += expr
# Set i to one past the closing braket.
i = j
else:
# Failed to find closing braket, treat opening braket as a
# braket literal instead of as an expression.
regex += '\\['
else:
# Regular character, escape it for regex.
regex += re.escape(char)
return regex
|
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L177-L280
| null |
class GitWildMatchPattern(RegexPattern):
"""
The :class:`GitWildMatchPattern` class represents a compiled Git
wildmatch pattern.
"""
# Keep the dict-less class hierarchy.
__slots__ = ()
@classmethod
def pattern_to_regex(cls, pattern):
"""
Convert the pattern into a regular expression.
*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
convert into a regular expression.
Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
or :data:`None`), and whether matched files should be included
(:data:`True`), excluded (:data:`False`), or if it is a
null-operation (:data:`None`).
"""
if isinstance(pattern, unicode):
return_type = unicode
elif isinstance(pattern, bytes):
return_type = bytes
pattern = pattern.decode(_BYTES_ENCODING)
else:
raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))
pattern = pattern.strip()
if pattern.startswith('#'):
# A pattern starting with a hash ('#') serves as a comment
# (neither includes nor excludes files). Escape the hash with a
# back-slash to match a literal hash (i.e., '\#').
regex = None
include = None
elif pattern == '/':
# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
# '/' does not match any file.
regex = None
include = None
elif pattern:
if pattern.startswith('!'):
# A pattern starting with an exclamation mark ('!') negates the
# pattern (exclude instead of include). Escape the exclamation
# mark with a back-slash to match a literal exclamation mark
# (i.e., '\!').
include = False
# Remove leading exclamation mark.
pattern = pattern[1:]
else:
include = True
if pattern.startswith('\\'):
# Remove leading back-slash escape for escaped hash ('#') or
# exclamation mark ('!').
pattern = pattern[1:]
# Split pattern into segments.
pattern_segs = pattern.split('/')
# Normalize pattern to make processing easier.
if not pattern_segs[0]:
# A pattern beginning with a slash ('/') will only match paths
# directly on the root directory instead of any descendant
# paths. So, remove empty first segment to make pattern relative
# to root.
del pattern_segs[0]
elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
# A single pattern without a beginning slash ('/') will match
# any descendant path. This is equivalent to "**/{pattern}". So,
# prepend with double-asterisks to make pattern relative to
# root.
# EDGE CASE: This also holds for a single pattern with a
# trailing slash (e.g. dir/).
if pattern_segs[0] != '**':
pattern_segs.insert(0, '**')
else:
# EDGE CASE: A pattern without a beginning slash ('/') but
# contains at least one prepended directory (e.g.
# "dir/{pattern}") should not match "**/dir/{pattern}",
# according to `git check-ignore` (v2.4.1).
pass
if not pattern_segs[-1] and len(pattern_segs) > 1:
# A pattern ending with a slash ('/') will match all descendant
# paths if it is a directory but not if it is a regular file.
# This is equivilent to "{pattern}/**". So, set last segment to
# double asterisks to include all descendants.
pattern_segs[-1] = '**'
# Build regular expression from pattern.
output = ['^']
need_slash = False
end = len(pattern_segs) - 1
for i, seg in enumerate(pattern_segs):
if seg == '**':
if i == 0 and i == end:
# A pattern consisting solely of double-asterisks ('**')
# will match every path.
output.append('.+')
elif i == 0:
# A normalized pattern beginning with double-asterisks
# ('**') will match any leading path segments.
output.append('(?:.+/)?')
need_slash = False
elif i == end:
# A normalized pattern ending with double-asterisks ('**')
# will match any trailing path segments.
output.append('/.*')
else:
# A pattern with inner double-asterisks ('**') will match
# multiple (or zero) inner path segments.
output.append('(?:/.+)?')
need_slash = True
elif seg == '*':
# Match single path segment.
if need_slash:
output.append('/')
output.append('[^/]+')
need_slash = True
else:
# Match segment glob pattern.
if need_slash:
output.append('/')
output.append(cls._translate_segment_glob(seg))
if i == end and include is True:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
# EDGE CASE: However, this does not hold for exclusion cases
# according to `git check-ignore` (v2.4.1).
output.append('(?:/.*)?')
need_slash = True
output.append('$')
regex = ''.join(output)
else:
# A blank pattern is a null-operation (neither includes nor
# excludes files).
regex = None
include = None
if regex is not None and return_type is bytes:
regex = regex.encode(_BYTES_ENCODING)
return regex, include
@staticmethod
|
cpburnz/python-path-specification
|
pathspec/patterns/gitwildmatch.py
|
GitIgnorePattern.pattern_to_regex
|
python
|
def pattern_to_regex(cls, *args, **kw):
cls._deprecated()
return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
|
Warn about deprecation.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L306-L311
| null |
class GitIgnorePattern(GitWildMatchPattern):
"""
The :class:`GitIgnorePattern` class is deprecated by :class:`GitWildMatchPattern`.
This class only exists to maintain compatibility with v0.4.
"""
def __init__(self, *args, **kw):
"""
Warn about deprecation.
"""
self._deprecated()
return super(GitIgnorePattern, self).__init__(*args, **kw)
@staticmethod
def _deprecated():
"""
Warn about deprecation.
"""
warnings.warn("GitIgnorePattern ('gitignore') is deprecated. Use GitWildMatchPattern ('gitwildmatch') instead.", DeprecationWarning, stacklevel=3)
@classmethod
|
cpburnz/python-path-specification
|
pathspec/util.py
|
iter_tree
|
python
|
def iter_tree(root, on_error=None, follow_links=None):
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
|
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L27-L55
|
[
"def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):\n\t\"\"\"\n\tScan the directory for all descendant files.\n\n\t*root_full* (:class:`str`) the absolute path to the root directory.\n\n\t*dir_rel* (:class:`str`) the path to the directory to scan relative to\n\t*root_full*.\n\n\t*memo* (:class:`dict`) keeps track of ancestor directories\n\tencountered. Maps each ancestor real path (:class:`str``) to relative\n\tpath (:class:`str`).\n\n\t*on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n\toptionally is the error handler for file-system exceptions.\n\n\t*follow_links* (:class:`bool`) is whether to walk symbolik links that\n\tresolve to directories.\n\t\"\"\"\n\tdir_full = os.path.join(root_full, dir_rel)\n\tdir_real = os.path.realpath(dir_full)\n\n\t# Remember each encountered ancestor directory and its canonical\n\t# (real) path. If a canonical path is encountered more than once,\n\t# recursion has occurred.\n\tif dir_real not in memo:\n\t\tmemo[dir_real] = dir_rel\n\telse:\n\t\traise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)\n\n\tfor node in os.listdir(dir_full):\n\t\tnode_rel = os.path.join(dir_rel, node)\n\t\tnode_full = os.path.join(root_full, node_rel)\n\n\t\t# Inspect child node.\n\t\ttry:\n\t\t\tnode_stat = os.lstat(node_full)\n\t\texcept OSError as e:\n\t\t\tif on_error is not None:\n\t\t\t\ton_error(e)\n\t\t\tcontinue\n\n\t\tif stat.S_ISLNK(node_stat.st_mode):\n\t\t\t# Child node is a link, inspect the target node.\n\t\t\tis_link = True\n\t\t\ttry:\n\t\t\t\tnode_stat = os.stat(node_full)\n\t\t\texcept OSError as e:\n\t\t\t\tif on_error is not None:\n\t\t\t\t\ton_error(e)\n\t\t\t\tcontinue\n\t\telse:\n\t\t\tis_link = False\n\n\t\tif stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):\n\t\t\t# Child node is a directory, recurse into it and yield its\n\t\t\t# decendant files.\n\t\t\tfor file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, 
follow_links):\n\t\t\t\tyield file_rel\n\n\t\telif stat.S_ISREG(node_stat.st_mode):\n\t\t\t# Child node is a file, yield it.\n\t\t\tyield node_rel\n\n\t# NOTE: Make sure to remove the canonical (real) path of the directory\n\t# from the ancestors memo once we are done with it. This allows the\n\t# same directory to appear multiple times. If this is not done, the\n\t# second occurance of the directory will be incorrectly interpreted as\n\t# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.\n\tdel memo[dir_real]\n"
] |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
_iter_tree_next
|
python
|
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
|
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L57-L126
| null |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
match_file
|
python
|
def match_file(patterns, file):
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
|
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L139-L156
| null |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
match_files
|
python
|
def match_files(patterns, files):
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
|
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L158-L179
| null |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
normalize_file
|
python
|
def normalize_file(file, separators=None):
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
|
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L181-L207
| null |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
normalize_files
|
python
|
def normalize_files(files, separators=None):
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
|
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L209-L226
|
[
"def normalize_file(file, separators=None):\n\t\"\"\"\n\tNormalizes the file path to use the POSIX path separator (i.e., ``'/'``).\n\n\t*file* (:class:`str`) is the file path.\n\n\t*separators* (:class:`~collections.abc.Collection` of :class:`str`; or\n\t:data:`None`) optionally contains the path separators to normalize.\n\tThis does not need to include the POSIX path separator (``'/'``), but\n\tincluding it will not affect the results. Default is :data:`None` for\n\t:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty\n\tcontainer (e.g., an empty tuple ``()``).\n\n\tReturns the normalized file path (:class:`str`).\n\t\"\"\"\n\t# Normalize path separators.\n\tif separators is None:\n\t\tseparators = NORMALIZE_PATH_SEPS\n\tnorm_file = file\n\tfor sep in separators:\n\t\tnorm_file = norm_file.replace(sep, posixpath.sep)\n\n\t# Remove current directory prefix.\n\tif norm_file.startswith('./'):\n\t\tnorm_file = norm_file[2:]\n\n\treturn norm_file\n"
] |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
"""
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
register_pattern
|
python
|
def register_pattern(name, pattern_factory, override=None):
if not isinstance(name, string_types):
raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
|
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled pattern (:class:`.Pattern`).
*override* (:class:`bool` or :data:`None`) optionally is whether to
allow overriding an already registered pattern under the same name
(:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
(:data:`False`). Default is :data:`None` for :data:`False`.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L228-L250
| null |
# encoding: utf-8
"""
This module provides utility methods for dealing with path-specs.
"""
import os
import os.path
import posixpath
import stat
from .compat import collection_type, string_types
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
"""
*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
separators that need to be normalized to the POSIX separator for the
current operating system. The separators are determined by examining
:data:`os.sep` and :data:`os.altsep`.
"""
_registered_patterns = {}
"""
*_registered_patterns* (``dict``) maps a name (``str``) to the
registered pattern factory (``callable``).
"""
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called with the exception (:exc:`OSError`). Reraise the exception to
abort the walk. Default is :data:`None` to ignore file-system
exceptions.
*follow_links* (:class:`bool` or :data:`None`) optionally is whether
to walk symbolik links that resolve to directories. Default is
:data:`None` for :data:`True`.
Raises :exc:`RecursionError` if recursion is detected.
Returns an :class:`~collections.abc.Iterable` yielding the path to
each file (:class:`str`) relative to *root*.
"""
if on_error is not None and not callable(on_error):
raise TypeError("on_error:{!r} is not callable.".format(on_error))
if follow_links is None:
follow_links = True
for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
yield file_rel
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of ancestor directories
encountered. Maps each ancestor real path (:class:`str``) to relative
path (:class:`str`).
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions.
*follow_links* (:class:`bool`) is whether to walk symbolik links that
resolve to directories.
"""
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
for node in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_stat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_stat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# decendant files.
for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
yield file_rel
elif stat.S_ISREG(node_stat.st_mode):
# Child node is a file, yield it.
yield node_rel
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurance of the directory will be incorrectly interpreted as
# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
def lookup_pattern(name):
"""
Lookups a registered pattern factory by name.
*name* (:class:`str`) is the name of the pattern factory.
Returns the registered pattern factory (:class:`~collections.abc.Callable`).
If no pattern factory is registered, raises :exc:`KeyError`.
"""
return _registered_patterns[name]
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patterns*.
Returns the matched files (:class:`set` of :class:`str`).
"""
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
if pattern.include is not None:
result_files = pattern.match(all_files)
if pattern.include:
return_files.update(result_files)
else:
return_files.difference_update(result_files)
return return_files
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
norm_file = file
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
# Remove current directory prefix.
if norm_file.startswith('./'):
norm_file = norm_file[2:]
return norm_file
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
See :func:`normalize_file` for more information.
Returns a :class:`dict` mapping the each normalized file path (:class:`str`)
to the original file path (:class:`str`)
"""
norm_files = {}
for path in files:
norm_files[normalize_file(path, separators=separators)] = path
return norm_files
class AlreadyRegisteredError(Exception):
"""
The :exc:`AlreadyRegisteredError` exception is raised when a pattern
factory is registered under a name already in use.
"""
def __init__(self, name, pattern_factory):
"""
Initializes the :exc:`AlreadyRegisteredError` instance.
*name* (:class:`str`) is the name of the registered pattern.
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
name=self.name,
pattern_factory=self.pattern_factory,
)
@property
def name(self):
"""
*name* (:class:`str`) is the name of the registered pattern.
"""
return self.args[0]
@property
def pattern_factory(self):
"""
*pattern_factory* (:class:`~collections.abc.Callable`) is the
registered pattern factory.
"""
return self.args[1]
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
def message(self):
"""
*message* (:class:`str`) is the error message.
"""
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/util.py
|
RecursionError.message
|
python
|
def message(self):
return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
real=self.real_path,
first=self.first_path,
second=self.second_path,
)
|
*message* (:class:`str`) is the error message.
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L326-L334
| null |
class RecursionError(Exception):
"""
The :exc:`RecursionError` exception is raised when recursion is
detected.
"""
def __init__(self, real_path, first_path, second_path):
"""
Initializes the :exc:`RecursionError` instance.
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
*first_path* (:class:`str`) is the first path encountered for
*real_path*.
*second_path* (:class:`str`) is the second path encountered for
*real_path*.
"""
super(RecursionError, self).__init__(real_path, first_path, second_path)
@property
def first_path(self):
"""
*first_path* (:class:`str`) is the first path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[1]
@property
@property
def real_path(self):
"""
*real_path* (:class:`str`) is the real path that recursion was
encountered on.
"""
return self.args[0]
@property
def second_path(self):
"""
*second_path* (:class:`str`) is the second path encountered for
:attr:`self.real_path <RecursionError.real_path>`.
"""
return self.args[2]
|
cpburnz/python-path-specification
|
pathspec/pattern.py
|
Pattern.match
|
python
|
def match(self, files):
raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
|
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
each file relative to the root directory (e.g., ``"relative/path/to/file"``).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pattern.py#L35-L45
| null |
class Pattern(object):
"""
The :class:`Pattern` class is the abstract definition of a pattern.
"""
# Make the class dict-less.
__slots__ = ('include',)
def __init__(self, include):
"""
Initializes the :class:`Pattern` instance.
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
self.include = include
"""
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
|
cpburnz/python-path-specification
|
pathspec/pattern.py
|
RegexPattern.match
|
python
|
def match(self, files):
if self.include is not None:
for path in files:
if self.regex.match(path) is not None:
yield path
|
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`)
contains each file relative to the root directory (e.g., "relative/path/to/file").
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
|
train
|
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pattern.py#L116-L129
| null |
class RegexPattern(Pattern):
"""
The :class:`RegexPattern` class is an implementation of a pattern
using regular expressions.
"""
# Make the class dict-less.
__slots__ = ('regex',)
def __init__(self, pattern, include=None):
"""
Initializes the :class:`RegexPattern` instance.
*pattern* (:class:`unicode`, :class:`bytes`, :class:`re.RegexObject`,
or :data:`None`) is the pattern to compile into a regular
expression.
*include* (:class:`bool` or :data:`None`) must be :data:`None`
unless *pattern* is a precompiled regular expression (:class:`re.RegexObject`)
in which case it is whether matched files should be included
(:data:`True`), excluded (:data:`False`), or is a null operation
(:data:`None`).
.. NOTE:: Subclasses do not need to support the *include*
parameter.
"""
self.regex = None
"""
*regex* (:class:`re.RegexObject`) is the regular expression for the
pattern.
"""
if isinstance(pattern, (unicode, bytes)):
assert include is None, "include:{!r} must be null when pattern:{!r} is a string.".format(include, pattern)
regex, include = self.pattern_to_regex(pattern)
# NOTE: Make sure to allow a null regular expression to be
# returned for a null-operation.
if include is not None:
regex = re.compile(regex)
elif pattern is not None and hasattr(pattern, 'match'):
# Assume pattern is a precompiled regular expression.
# - NOTE: Used specified *include*.
regex = pattern
elif pattern is None:
# NOTE: Make sure to allow a null pattern to be passed for a
# null-operation.
assert include is None, "include:{!r} must be null when pattern:{!r} is null.".format(include, pattern)
else:
raise TypeError("pattern:{!r} is not a string, RegexObject, or None.".format(pattern))
super(RegexPattern, self).__init__(include)
self.regex = regex
def __eq__(self, other):
"""
Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
attributes.
"""
if isinstance(other, RegexPattern):
return self.include == other.include and self.regex == other.regex
else:
return NotImplemented
@classmethod
def pattern_to_regex(cls, pattern):
"""
Convert the pattern into an uncompiled regular expression.
*pattern* (:class:`str`) is the pattern to convert into a regular
expression.
Returns the uncompiled regular expression (:class:`str` or :data:`None`),
and whether matched files should be included (:data:`True`),
excluded (:data:`False`), or is a null-operation (:data:`None`).
.. NOTE:: The default implementation simply returns *pattern* and
:data:`True`.
"""
return pattern, True
|
willkg/socorro-siggen
|
siggen/siglists_utils.py
|
_get_file_content
|
python
|
def _get_file_content(source):
filepath = os.path.join('siglists', source + '.txt')
lines = []
with resource_stream(__name__, filepath) as f:
for i, line in enumerate(f):
line = line.decode('utf-8', 'strict').strip()
if not line or line.startswith('#'):
continue
try:
re.compile(line)
except Exception as ex:
raise BadRegularExpressionLineError(
'Regex error: {} in file {} at line {}'.format(
str(ex),
filepath,
i
)
)
lines.append(line)
if source in _SPECIAL_EXTENDED_VALUES:
lines = lines + _SPECIAL_EXTENDED_VALUES[source]
return tuple(lines)
|
Return a tuple, each value being a line of the source file.
Remove empty lines and comments (lines starting with a '#').
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/siglists_utils.py#L30-L61
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
from pkg_resources import resource_stream
# This is a hack because sentinels can be a tuple, with the second item being
# a function to verify if the sentinel applies. It's quite hard to express
# that in a .txt file, so this special value is here. This list should not
# grow much, and if it does, we should find a better solution for handling
# these special values.
_SPECIAL_EXTENDED_VALUES = {
'signature_sentinels': [
(
'mozilla::ipc::RPCChannel::Call(IPC::Message*, IPC::Message*)',
lambda x: 'CrashReporter::CreatePairedMinidumps(void*, unsigned long, nsAString_internal*, nsILocalFile**, nsILocalFile**)' in x # noqa
),
],
}
class BadRegularExpressionLineError(Exception):
"""Raised when a file contains an invalid regular expression."""
IRRELEVANT_SIGNATURE_RE = _get_file_content('irrelevant_signature_re')
PREFIX_SIGNATURE_RE = _get_file_content('prefix_signature_re')
SIGNATURE_SENTINELS = _get_file_content('signature_sentinels')
SIGNATURES_WITH_LINE_NUMBERS_RE = _get_file_content('signatures_with_line_numbers_re')
|
willkg/socorro-siggen
|
siggen/cmd_signify.py
|
main
|
python
|
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
args = parser.parse_args()
generator = SignatureGenerator(debug=args.verbose)
crash_data = json.loads(sys.stdin.read())
ret = generator.generate(crash_data)
print(json.dumps(ret, indent=2))
|
Takes crash data via stdin and generates a Socorro signature
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/cmd_signify.py#L19-L33
|
[
"def generate(self, signature_data):\n \"\"\"Takes data and returns a signature\n\n :arg dict signature_data: data to use to generate a signature\n\n :returns: ``Result`` instance\n\n \"\"\"\n result = Result()\n\n for rule in self.pipeline:\n rule_name = rule.__class__.__name__\n\n try:\n if rule.predicate(signature_data, result):\n rule.action(signature_data, result)\n\n except Exception as exc:\n if self.error_handler:\n self.error_handler(\n signature_data,\n exc_info=sys.exc_info(),\n extra={'rule': rule_name}\n )\n result.info(rule_name, 'Rule failed: %s', exc)\n\n return result\n"
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import argparse
import json
import sys
from .generator import SignatureGenerator
DESCRIPTION = """
Given a signature data structure as JSON via stdin, generates the signature.
"""
|
willkg/socorro-siggen
|
siggen/utils.py
|
convert_to_crash_data
|
python
|
def convert_to_crash_data(raw_crash, processed_crash):
# We want to generate fresh signatures, so we remove the "normalized" field
# from stack frames from the processed crash because this is essentially
# cached data from previous processing
for thread in glom(processed_crash, 'json_dump.threads', default=[]):
for frame in thread.get('frames', []):
if 'normalized' in frame:
del frame['normalized']
crash_data = {
# JavaStackTrace or None
'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
# int or None
'crashing_thread': glom(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
# list of CStackTrace or None
'threads': glom(processed_crash, 'json_dump.threads', default=None),
# int or None
'hang_type': glom(processed_crash, 'hang_type', default=None),
# text or None
'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
# int or None
'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
# text or None
'abort_message': glom(raw_crash, 'AbortMessage', default=None),
# text or None
'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
# text json with "phase", "conditions" (complicated--see code) or None
'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
# text or None
'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
# text or None
'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
# text or None
'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
# text
'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
# text; comma-delimited e.g. "browser,flash1,flash2"
'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
# pull out the original signature if there was one
'original_signature': glom(processed_crash, 'signature', default='')
}
return crash_data
|
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L16-L84
|
[
"def int_or_none(data):\n try:\n return int(data)\n except (TypeError, ValueError):\n return None\n"
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from glom import glom
def int_or_none(data):
try:
return int(data)
except (TypeError, ValueError):
return None
#: List of allowed characters: ascii, printable, and non-whitespace except space
ALLOWED_CHARS = [chr(c) for c in range(32, 127)]
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text
def parse_source_file(source_file):
"""Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
"""
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None
def _is_exception(exceptions, before_token, after_token, token):
"""Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool
"""
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False
def collapse(
function,
open_string,
close_string,
replacement='',
exceptions=None,
):
"""Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occuring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed
"""
collapsed = []
open_count = 0
open_token = []
for i, char in enumerate(function):
if not open_count:
if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa
open_count += 1
open_token = [char]
else:
collapsed.append(char)
else:
if char == open_string:
open_count += 1
open_token.append(char)
elif char == close_string:
open_count -= 1
open_token.append(char)
if open_count == 0:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
open_token = []
else:
open_token.append(char)
if open_count:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
return ''.join(collapsed)
def drop_prefix_and_return_type(function):
"""Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
"""
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'"
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# The list of tokens accumulated so far
tokens = []
# Keeps track of open delimiters so we can match and close them
levels = []
# The current token we're building
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
# This is an unmatched close.
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
# It's possible for the function signature to have a space between
# the function name and the parenthesized arguments or [clone ...]
# thing. If that's the case, we join the last two tokens. We keep doing
# that until the last token is nice.
#
# Example:
#
# somefunc (int arg1, int arg2)
# ^
# somefunc(int arg1, int arg2) [clone .cold.111]
# ^
# somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]
# ^ ^
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1]
|
willkg/socorro-siggen
|
siggen/utils.py
|
drop_bad_characters
|
python
|
def drop_bad_characters(text):
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text
|
Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L91-L102
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from glom import glom
def int_or_none(data):
try:
return int(data)
except (TypeError, ValueError):
return None
def convert_to_crash_data(raw_crash, processed_crash):
"""
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
"""
# We want to generate fresh signatures, so we remove the "normalized" field
# from stack frames from the processed crash because this is essentially
# cached data from previous processing
for thread in glom(processed_crash, 'json_dump.threads', default=[]):
for frame in thread.get('frames', []):
if 'normalized' in frame:
del frame['normalized']
crash_data = {
# JavaStackTrace or None
'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
# int or None
'crashing_thread': glom(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
# list of CStackTrace or None
'threads': glom(processed_crash, 'json_dump.threads', default=None),
# int or None
'hang_type': glom(processed_crash, 'hang_type', default=None),
# text or None
'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
# int or None
'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
# text or None
'abort_message': glom(raw_crash, 'AbortMessage', default=None),
# text or None
'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
# text json with "phase", "conditions" (complicated--see code) or None
'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
# text or None
'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
# text or None
'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
# text or None
'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
# text
'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
# text; comma-delimited e.g. "browser,flash1,flash2"
'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
# pull out the original signature if there was one
'original_signature': glom(processed_crash, 'signature', default='')
}
return crash_data
#: List of allowed characters: ascii, printable, and non-whitespace except space
ALLOWED_CHARS = [chr(c) for c in range(32, 127)]
def parse_source_file(source_file):
"""Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
"""
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None
def _is_exception(exceptions, before_token, after_token, token):
"""Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool
"""
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False
def collapse(
function,
open_string,
close_string,
replacement='',
exceptions=None,
):
"""Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occuring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed
"""
collapsed = []
open_count = 0
open_token = []
for i, char in enumerate(function):
if not open_count:
if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa
open_count += 1
open_token = [char]
else:
collapsed.append(char)
else:
if char == open_string:
open_count += 1
open_token.append(char)
elif char == close_string:
open_count -= 1
open_token.append(char)
if open_count == 0:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
open_token = []
else:
open_token.append(char)
if open_count:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
return ''.join(collapsed)
def drop_prefix_and_return_type(function):
"""Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
"""
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'"
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# The list of tokens accumulated so far
tokens = []
# Keeps track of open delimiters so we can match and close them
levels = []
# The current token we're building
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
# This is an unmatched close.
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
# It's possible for the function signature to have a space between
# the function name and the parenthesized arguments or [clone ...]
# thing. If that's the case, we join the last two tokens. We keep doing
# that until the last token is nice.
#
# Example:
#
# somefunc (int arg1, int arg2)
# ^
# somefunc(int arg1, int arg2) [clone .cold.111]
# ^
# somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]
# ^ ^
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1]
|
willkg/socorro-siggen
|
siggen/utils.py
|
parse_source_file
|
python
|
def parse_source_file(source_file):
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None
|
Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L105-L138
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from glom import glom
def int_or_none(data):
try:
return int(data)
except (TypeError, ValueError):
return None
def convert_to_crash_data(raw_crash, processed_crash):
"""
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
"""
# We want to generate fresh signatures, so we remove the "normalized" field
# from stack frames from the processed crash because this is essentially
# cached data from previous processing
for thread in glom(processed_crash, 'json_dump.threads', default=[]):
for frame in thread.get('frames', []):
if 'normalized' in frame:
del frame['normalized']
crash_data = {
# JavaStackTrace or None
'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
# int or None
'crashing_thread': glom(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
# list of CStackTrace or None
'threads': glom(processed_crash, 'json_dump.threads', default=None),
# int or None
'hang_type': glom(processed_crash, 'hang_type', default=None),
# text or None
'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
# int or None
'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
# text or None
'abort_message': glom(raw_crash, 'AbortMessage', default=None),
# text or None
'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
# text json with "phase", "conditions" (complicated--see code) or None
'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
# text or None
'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
# text or None
'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
# text or None
'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
# text
'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
# text; comma-delimited e.g. "browser,flash1,flash2"
'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
# pull out the original signature if there was one
'original_signature': glom(processed_crash, 'signature', default='')
}
return crash_data
#: List of allowed characters: ascii, printable, and non-whitespace except space
ALLOWED_CHARS = [chr(c) for c in range(32, 127)]
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text
def _is_exception(exceptions, before_token, after_token, token):
"""Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool
"""
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False
def collapse(
    function,
    open_string,
    close_string,
    replacement='',
    exceptions=None,
):
    """Collapses the text between two delimiters in a frame function value

    This collapses the text between two delimiters and either removes the text
    altogether or replaces it with a replacement string.

    There are certain contexts in which we might not want to collapse the text
    between two delimiters. These are denoted as "exceptions" and collapse will
    check for those exception strings occurring before the token to be replaced
    or inside the token to be replaced.

    Before::

        IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
                       ^                    ^ open token
                       exception string occurring before open token

    Inside::

        <rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
        ^                               ^^^^ exception string inside token
        open token

    :arg function: the function value from a frame to collapse tokens in
    :arg open_string: the open delimiter; e.g. ``(``
    :arg close_string: the close delimiter; e.g. ``)``
    :arg replacement: what to replace the token with; e.g. ``<T>``
    :arg exceptions: list of strings denoting exceptions where we don't want
        to collapse the token

    :returns: new function string with tokens collapsed

    """
    # Output characters emitted so far
    collapsed = []
    # Nesting depth of currently-open delimiters; 0 means we are outside a token
    open_count = 0
    # Characters of the (possibly nested) token currently being scanned
    open_token = []
    for i, char in enumerate(function):
        if not open_count:
            # Outside any token: an opening delimiter starts one, unless an
            # exception string immediately precedes it.
            if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''):  # noqa
                open_count += 1
                open_token = [char]
            else:
                collapsed.append(char)
        else:
            if char == open_string:
                # Nested open delimiter--bump the depth so we find the
                # matching close rather than the first close.
                open_count += 1
                open_token.append(char)
            elif char == close_string:
                open_count -= 1
                open_token.append(char)
                if open_count == 0:
                    # Token complete: keep it verbatim if it matches an
                    # exception, otherwise collapse it to the replacement.
                    token = ''.join(open_token)
                    if _is_exception(exceptions, function[:i], function[i + 1:], token):
                        collapsed.append(''.join(open_token))
                    else:
                        collapsed.append(replacement)
                    open_token = []
            else:
                open_token.append(char)

    if open_count:
        # Unbalanced input: the final token never closed. Apply the same
        # exception/replacement decision to whatever was accumulated.
        token = ''.join(open_token)
        if _is_exception(exceptions, function[:i], function[i + 1:], token):
            collapsed.append(''.join(open_token))
        else:
            collapsed.append(replacement)

    return ''.join(collapsed)
def drop_prefix_and_return_type(function):
    """Takes the function value from a frame and drops prefix and return type

    For example::

        static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
        ^      ^^^^^^ return type
        prefix

    This gets changes to this::

        Allocator<MozJemallocBase>::malloc(unsigned __int64)

    This tokenizes on space, but takes into account types, generics, traits,
    function arguments, and other parts of the function signature delimited by
    things like `', <>, {}, [], and () for both C/C++ and Rust.

    After tokenizing, this returns the last token since that's comprised of the
    function name and its arguments.

    :arg function: the function value in a frame to drop bits from

    :returns: adjusted function value

    """
    pairs = {
        '(': ')',
        '{': '}',
        '[': ']',
        '<': '>',
        '`': "'",
    }
    closers = set(pairs.values())

    tokens = []  # completed space-separated tokens
    stack = []   # open delimiters still awaiting their matching close
    buf = []     # characters of the token currently being built

    for ch in function:
        if ch in pairs:
            stack.append(ch)
            buf.append(ch)
        elif ch in closers:
            # Pop only when this closes the most recent open delimiter; an
            # unmatched close is kept in the token but doesn't change nesting.
            if stack and pairs[stack[-1]] == ch:
                stack.pop()
            buf.append(ch)
        elif ch == ' ' and not stack:
            # Spaces split tokens only at the top nesting level
            tokens.append(''.join(buf))
            buf = []
        else:
            buf.append(ch)

    if buf:
        tokens.append(''.join(buf))

    # A space may separate the function name from its parenthesized
    # arguments or from "[clone ...]" annotations:
    #
    #   somefunc (int arg1, int arg2)
    #   somefunc(int arg1, int arg2) [clone .cold.111]
    #
    # Glue such trailing pieces back onto the preceding token until the
    # final token is well-formed.
    while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
        tokens[-2:] = [' '.join(tokens[-2:])]

    return tokens[-1]
|
willkg/socorro-siggen
|
siggen/utils.py
|
_is_exception
|
python
|
def _is_exception(exceptions, before_token, after_token, token):
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False
|
Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter)
:returns: bool
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L141-L159
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from glom import glom
def int_or_none(data):
    """Convert ``data`` to an int, returning ``None`` when conversion fails."""
    try:
        value = int(data)
    except (TypeError, ValueError):
        # Non-numeric strings raise ValueError; None and other
        # unconvertible types raise TypeError.
        return None
    return value
def convert_to_crash_data(raw_crash, processed_crash):
    """
    Takes a raw crash and a processed crash (these are Socorro-centric
    data structures) and converts them to a crash data structure used
    by signature generation.

    :arg raw_crash: raw crash data from Socorro
    :arg processed_crash: processed crash data from Socorro

    :returns: crash data structure that conforms to the schema

    """
    def from_processed(path, default=None):
        # Dotted-path lookup in the processed crash with a fallback
        return glom(processed_crash, path, default=default)

    def from_raw(path, default=None):
        # Dotted-path lookup in the raw crash with a fallback
        return glom(raw_crash, path, default=default)

    # Signatures are regenerated from scratch, so drop the "normalized"
    # values cached in stack frames by previous processing runs.
    for thread in from_processed('json_dump.threads', default=[]):
        for frame in thread.get('frames', []):
            frame.pop('normalized', None)

    return {
        # JavaStackTrace or None
        'java_stack_trace': from_processed('java_stack_trace'),
        # int or None
        'crashing_thread': from_processed('json_dump.crash_info.crashing_thread'),
        # list of CStackTrace or None
        'threads': from_processed('json_dump.threads'),
        # int or None
        'hang_type': from_processed('hang_type'),
        # text or None
        'os': from_processed('json_dump.system_info.os'),
        # int or None
        'oom_allocation_size': int_or_none(from_raw('OOMAllocationSize')),
        # text or None
        'abort_message': from_raw('AbortMessage'),
        # text or None
        'mdsw_status_string': from_processed('mdsw_status_string'),
        # text json with "phase", "conditions" (complicated--see code) or None
        'async_shutdown_timeout': from_raw('AsyncShutdownTimeout'),
        # text or None
        'jit_category': from_processed('classifications.jit.category'),
        # text or None
        'ipc_channel_error': from_raw('ipc_channel_error'),
        # text or None
        'ipc_message_name': from_raw('IPCMessageName'),
        # text
        'moz_crash_reason': from_processed('moz_crash_reason'),
        # text; comma-delimited e.g. "browser,flash1,flash2"
        'additional_minidumps': from_raw('additional_minidumps', default=''),
        # pull out the original signature if there was one
        'original_signature': from_processed('signature', default='')
    }
#: List of allowed characters: ascii, printable, and non-whitespace except space
ALLOWED_CHARS = [chr(c) for c in range(32, 127)]
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text
def parse_source_file(source_file):
    """Parses a source file thing and returns the file name

    Example:

    >>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
    'js/src/jit/MIR.h'

    :arg str source_file: the source file ("file") from a stack frame

    :returns: the filename or ``None`` if it couldn't determine one

    """
    if not source_file:
        return None

    parts = source_file.split(':')
    if len(parts) == 4:
        # "vcstype:root:path:revision" -- repositories or cloud file
        # systems (e.g. hg, git, s3); the path is the third component.
        return parts[2]
    if len(parts) == 2:
        # "drive:path" -- a directory on someone's Windows computer where
        # the first component is a drive letter (e.g. "c:", "d:", "f:")
        return parts[1]
    if source_file.startswith('/'):
        # An absolute directory on OSX or Linux
        return source_file

    # We have no idea what this is, so return None
    return None
def collapse(
function,
open_string,
close_string,
replacement='',
exceptions=None,
):
"""Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occuring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed
"""
collapsed = []
open_count = 0
open_token = []
for i, char in enumerate(function):
if not open_count:
if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa
open_count += 1
open_token = [char]
else:
collapsed.append(char)
else:
if char == open_string:
open_count += 1
open_token.append(char)
elif char == close_string:
open_count -= 1
open_token.append(char)
if open_count == 0:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
open_token = []
else:
open_token.append(char)
if open_count:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
return ''.join(collapsed)
def drop_prefix_and_return_type(function):
"""Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
"""
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'"
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# The list of tokens accumulated so far
tokens = []
# Keeps track of open delimiters so we can match and close them
levels = []
# The current token we're building
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
# This is an unmatched close.
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
# It's possible for the function signature to have a space between
# the function name and the parenthesized arguments or [clone ...]
# thing. If that's the case, we join the last two tokens. We keep doing
# that until the last token is nice.
#
# Example:
#
# somefunc (int arg1, int arg2)
# ^
# somefunc(int arg1, int arg2) [clone .cold.111]
# ^
# somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]
# ^ ^
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1]
|
willkg/socorro-siggen
|
siggen/utils.py
|
collapse
|
python
|
def collapse(
function,
open_string,
close_string,
replacement='',
exceptions=None,
):
collapsed = []
open_count = 0
open_token = []
for i, char in enumerate(function):
if not open_count:
if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa
open_count += 1
open_token = [char]
else:
collapsed.append(char)
else:
if char == open_string:
open_count += 1
open_token.append(char)
elif char == close_string:
open_count -= 1
open_token.append(char)
if open_count == 0:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
open_token = []
else:
open_token.append(char)
if open_count:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
return ''.join(collapsed)
|
Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occurring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L162-L239
|
[
"def _is_exception(exceptions, before_token, after_token, token):\n \"\"\"Predicate for whether the open token is in an exception context\n\n :arg exceptions: list of strings or None\n :arg before_token: the text of the function up to the token delimiter\n :arg after_token: the text of the function after the token delimiter\n :arg token: the token (only if we're looking at a close delimiter\n\n :returns: bool\n\n \"\"\"\n if not exceptions:\n return False\n for s in exceptions:\n if before_token.endswith(s):\n return True\n if s in token:\n return True\n return False\n"
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from glom import glom
def int_or_none(data):
try:
return int(data)
except (TypeError, ValueError):
return None
def convert_to_crash_data(raw_crash, processed_crash):
"""
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
"""
# We want to generate fresh signatures, so we remove the "normalized" field
# from stack frames from the processed crash because this is essentially
# cached data from previous processing
for thread in glom(processed_crash, 'json_dump.threads', default=[]):
for frame in thread.get('frames', []):
if 'normalized' in frame:
del frame['normalized']
crash_data = {
# JavaStackTrace or None
'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
# int or None
'crashing_thread': glom(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
# list of CStackTrace or None
'threads': glom(processed_crash, 'json_dump.threads', default=None),
# int or None
'hang_type': glom(processed_crash, 'hang_type', default=None),
# text or None
'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
# int or None
'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
# text or None
'abort_message': glom(raw_crash, 'AbortMessage', default=None),
# text or None
'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
# text json with "phase", "conditions" (complicated--see code) or None
'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
# text or None
'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
# text or None
'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
# text or None
'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
# text
'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
# text; comma-delimited e.g. "browser,flash1,flash2"
'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
# pull out the original signature if there was one
'original_signature': glom(processed_crash, 'signature', default='')
}
return crash_data
#: List of allowed characters: ascii, printable, and non-whitespace except space
ALLOWED_CHARS = [chr(c) for c in range(32, 127)]
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text
def parse_source_file(source_file):
"""Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
"""
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None
def _is_exception(exceptions, before_token, after_token, token):
"""Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool
"""
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False
def drop_prefix_and_return_type(function):
"""Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
"""
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'"
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# The list of tokens accumulated so far
tokens = []
# Keeps track of open delimiters so we can match and close them
levels = []
# The current token we're building
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
# This is an unmatched close.
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
# It's possible for the function signature to have a space between
# the function name and the parenthesized arguments or [clone ...]
# thing. If that's the case, we join the last two tokens. We keep doing
# that until the last token is nice.
#
# Example:
#
# somefunc (int arg1, int arg2)
# ^
# somefunc(int arg1, int arg2) [clone .cold.111]
# ^
# somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]
# ^ ^
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1]
|
willkg/socorro-siggen
|
siggen/utils.py
|
drop_prefix_and_return_type
|
python
|
def drop_prefix_and_return_type(function):
DELIMITERS = {
'(': ')',
'{': '}',
'[': ']',
'<': '>',
'`': "'"
}
OPEN = DELIMITERS.keys()
CLOSE = DELIMITERS.values()
# The list of tokens accumulated so far
tokens = []
# Keeps track of open delimiters so we can match and close them
levels = []
# The current token we're building
current = []
for i, char in enumerate(function):
if char in OPEN:
levels.append(char)
current.append(char)
elif char in CLOSE:
if levels and DELIMITERS[levels[-1]] == char:
levels.pop()
current.append(char)
else:
# This is an unmatched close.
current.append(char)
elif levels:
current.append(char)
elif char == ' ':
tokens.append(''.join(current))
current = []
else:
current.append(char)
if current:
tokens.append(''.join(current))
while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):
# It's possible for the function signature to have a space between
# the function name and the parenthesized arguments or [clone ...]
# thing. If that's the case, we join the last two tokens. We keep doing
# that until the last token is nice.
#
# Example:
#
# somefunc (int arg1, int arg2)
# ^
# somefunc(int arg1, int arg2) [clone .cold.111]
# ^
# somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]
# ^ ^
tokens = tokens[:-2] + [' '.join(tokens[-2:])]
return tokens[-1]
|
Takes the function value from a frame and drops prefix and return type
For example::
static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)
^ ^^^^^^ return type
prefix
This gets changes to this::
Allocator<MozJemallocBase>::malloc(unsigned __int64)
This tokenizes on space, but takes into account types, generics, traits,
function arguments, and other parts of the function signature delimited by
things like `', <>, {}, [], and () for both C/C++ and Rust.
After tokenizing, this returns the last token since that's comprised of the
function name and its arguments.
:arg function: the function value in a frame to drop bits from
:returns: adjusted function value
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L242-L324
| null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from glom import glom
def int_or_none(data):
try:
return int(data)
except (TypeError, ValueError):
return None
def convert_to_crash_data(raw_crash, processed_crash):
"""
Takes a raw crash and a processed crash (these are Socorro-centric
data structures) and converts them to a crash data structure used
by signature generation.
:arg raw_crash: raw crash data from Socorro
:arg processed_crash: processed crash data from Socorro
:returns: crash data structure that conforms to the schema
"""
# We want to generate fresh signatures, so we remove the "normalized" field
# from stack frames from the processed crash because this is essentially
# cached data from previous processing
for thread in glom(processed_crash, 'json_dump.threads', default=[]):
for frame in thread.get('frames', []):
if 'normalized' in frame:
del frame['normalized']
crash_data = {
# JavaStackTrace or None
'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),
# int or None
'crashing_thread': glom(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
# list of CStackTrace or None
'threads': glom(processed_crash, 'json_dump.threads', default=None),
# int or None
'hang_type': glom(processed_crash, 'hang_type', default=None),
# text or None
'os': glom(processed_crash, 'json_dump.system_info.os', default=None),
# int or None
'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),
# text or None
'abort_message': glom(raw_crash, 'AbortMessage', default=None),
# text or None
'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),
# text json with "phase", "conditions" (complicated--see code) or None
'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),
# text or None
'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),
# text or None
'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),
# text or None
'ipc_message_name': glom(raw_crash, 'IPCMessageName', default=None),
# text
'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),
# text; comma-delimited e.g. "browser,flash1,flash2"
'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),
# pull out the original signature if there was one
'original_signature': glom(processed_crash, 'signature', default='')
}
return crash_data
#: List of allowed characters: ascii, printable, and non-whitespace except space
ALLOWED_CHARS = [chr(c) for c in range(32, 127)]
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text
def parse_source_file(source_file):
"""Parses a source file thing and returns the file name
Example:
>>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')
'js/src/jit/MIR.h'
:arg str source_file: the source file ("file") from a stack frame
:returns: the filename or ``None`` if it couldn't determine one
"""
if not source_file:
return None
vcsinfo = source_file.split(':')
if len(vcsinfo) == 4:
# These are repositories or cloud file systems (e.g. hg, git, s3)
vcstype, root, vcs_source_file, revision = vcsinfo
return vcs_source_file
if len(vcsinfo) == 2:
# These are directories on someone's Windows computer and vcstype is a
# file system (e.g. "c:", "d:", "f:")
vcstype, vcs_source_file = vcsinfo
return vcs_source_file
if source_file.startswith('/'):
# These are directories on OSX or Linux
return source_file
# We have no idea what this is, so return None
return None
def _is_exception(exceptions, before_token, after_token, token):
"""Predicate for whether the open token is in an exception context
:arg exceptions: list of strings or None
:arg before_token: the text of the function up to the token delimiter
:arg after_token: the text of the function after the token delimiter
:arg token: the token (only if we're looking at a close delimiter
:returns: bool
"""
if not exceptions:
return False
for s in exceptions:
if before_token.endswith(s):
return True
if s in token:
return True
return False
def collapse(
function,
open_string,
close_string,
replacement='',
exceptions=None,
):
"""Collapses the text between two delimiters in a frame function value
This collapses the text between two delimiters and either removes the text
altogether or replaces it with a replacement string.
There are certain contexts in which we might not want to collapse the text
between two delimiters. These are denoted as "exceptions" and collapse will
check for those exception strings occuring before the token to be replaced
or inside the token to be replaced.
Before::
IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)
^ ^ open token
exception string occurring before open token
Inside::
<rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute
^ ^^^^ exception string inside token
open token
:arg function: the function value from a frame to collapse tokens in
:arg open_string: the open delimiter; e.g. ``(``
:arg close_string: the close delimiter; e.g. ``)``
:arg replacement: what to replace the token with; e.g. ``<T>``
:arg exceptions: list of strings denoting exceptions where we don't want
to collapse the token
:returns: new function string with tokens collapsed
"""
collapsed = []
open_count = 0
open_token = []
for i, char in enumerate(function):
if not open_count:
if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa
open_count += 1
open_token = [char]
else:
collapsed.append(char)
else:
if char == open_string:
open_count += 1
open_token.append(char)
elif char == close_string:
open_count -= 1
open_token.append(char)
if open_count == 0:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
open_token = []
else:
open_token.append(char)
if open_count:
token = ''.join(open_token)
if _is_exception(exceptions, function[:i], function[i + 1:], token):
collapsed.append(''.join(open_token))
else:
collapsed.append(replacement)
return ''.join(collapsed)
|
willkg/socorro-siggen
|
siggen/cmd_signature.py
|
main
|
python
|
def main(argv=None):
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
parser.add_argument(
'--format', help='specify output format: csv, text (default)'
)
parser.add_argument(
'--different-only', dest='different', action='store_true',
help='limit output to just the signatures that changed',
)
parser.add_argument(
'crashids', metavar='crashid', nargs='*', help='crash id to generate signatures for'
)
if argv is None:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
if args.format == 'csv':
outputter = CSVOutput
else:
outputter = TextOutput
api_token = os.environ.get('SOCORRO_API_TOKEN', '')
generator = SignatureGenerator()
if args.crashids:
crashids_iterable = args.crashids
elif not sys.stdin.isatty():
# If a script is piping to this script, then isatty() returns False. If
# there is no script piping to this script, then isatty() returns True
# and if we do list(sys.stdin), it'll block waiting for input.
crashids_iterable = list(sys.stdin)
else:
crashids_iterable = []
if not crashids_iterable:
parser.print_help()
return 0
with outputter() as out:
for crash_id in crashids_iterable:
crash_id = crash_id.strip()
resp = fetch('/RawCrash/', crash_id, api_token)
if resp.status_code == 404:
out.warning('%s: does not exist.' % crash_id)
continue
if resp.status_code == 429:
out.warning('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
out.warning('HTTP 500: %s' % resp.content)
continue
raw_crash = resp.json()
# If there's an error in the raw crash, then something is wrong--probably with the API
# token. So print that out and exit.
if 'error' in raw_crash:
out.warning('Error fetching raw crash: %s' % raw_crash['error'])
return 1
resp = fetch('/ProcessedCrash/', crash_id, api_token)
if resp.status_code == 404:
out.warning('%s: does not have processed crash.' % crash_id)
continue
if resp.status_code == 429:
out.warning('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
out.warning('HTTP 500: %s' % resp.content)
continue
processed_crash = resp.json()
# If there's an error in the processed crash, then something is wrong--probably with the
# API token. So print that out and exit.
if 'error' in processed_crash:
out.warning('Error fetching processed crash: %s' % processed_crash['error'])
return 1
old_signature = processed_crash['signature']
crash_data = convert_to_crash_data(raw_crash, processed_crash)
result = generator.generate(crash_data)
if not args.different or old_signature != result.signature:
out.data(crash_id, old_signature, result, args.verbose)
|
Takes crash data via args and generates a Socorro signature
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/cmd_signature.py#L111-L209
|
[
"def convert_to_crash_data(raw_crash, processed_crash):\n \"\"\"\n Takes a raw crash and a processed crash (these are Socorro-centric\n data structures) and converts them to a crash data structure used\n by signature generation.\n\n :arg raw_crash: raw crash data from Socorro\n :arg processed_crash: processed crash data from Socorro\n\n :returns: crash data structure that conforms to the schema\n\n \"\"\"\n # We want to generate fresh signatures, so we remove the \"normalized\" field\n # from stack frames from the processed crash because this is essentially\n # cached data from previous processing\n for thread in glom(processed_crash, 'json_dump.threads', default=[]):\n for frame in thread.get('frames', []):\n if 'normalized' in frame:\n del frame['normalized']\n\n crash_data = {\n # JavaStackTrace or None\n 'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),\n\n # int or None\n 'crashing_thread': glom(\n processed_crash, 'json_dump.crash_info.crashing_thread', default=None\n ),\n\n # list of CStackTrace or None\n 'threads': glom(processed_crash, 'json_dump.threads', default=None),\n\n # int or None\n 'hang_type': glom(processed_crash, 'hang_type', default=None),\n\n # text or None\n 'os': glom(processed_crash, 'json_dump.system_info.os', default=None),\n\n # int or None\n 'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),\n\n # text or None\n 'abort_message': glom(raw_crash, 'AbortMessage', default=None),\n\n # text or None\n 'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),\n\n # text json with \"phase\", \"conditions\" (complicated--see code) or None\n 'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),\n\n # text or None\n 'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),\n\n # text or None\n 'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),\n\n # text or None\n 'ipc_message_name': 
glom(raw_crash, 'IPCMessageName', default=None),\n\n # text\n 'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),\n\n # text; comma-delimited e.g. \"browser,flash1,flash2\"\n 'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),\n\n # pull out the original signature if there was one\n 'original_signature': glom(processed_crash, 'signature', default='')\n }\n return crash_data\n",
"def fetch(endpoint, crash_id, api_token=None):\n kwargs = {\n 'params': {\n 'crash_id': crash_id\n }\n }\n if api_token:\n kwargs['headers'] = {\n 'Auth-Token': api_token\n }\n\n return requests.get(API_URL + endpoint, **kwargs)\n",
"def generate(self, signature_data):\n \"\"\"Takes data and returns a signature\n\n :arg dict signature_data: data to use to generate a signature\n\n :returns: ``Result`` instance\n\n \"\"\"\n result = Result()\n\n for rule in self.pipeline:\n rule_name = rule.__class__.__name__\n\n try:\n if rule.predicate(signature_data, result):\n rule.action(signature_data, result)\n\n except Exception as exc:\n if self.error_handler:\n self.error_handler(\n signature_data,\n exc_info=sys.exc_info(),\n extra={'rule': rule_name}\n )\n result.info(rule_name, 'Rule failed: %s', exc)\n\n return result\n"
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import csv
import os
import sys
import requests
from .generator import SignatureGenerator
from .utils import convert_to_crash_data
DESCRIPTION = """
Given one or more crash ids via command line or stdin (one per line), pulls down information from
Socorro, generates signatures, and prints signature information.
"""
EPILOG = """
Note: In order for the SignatureJitCategory rule to work, you need a valid API token from
Socorro that has "View Personally Identifiable Information" permission.
"""
# FIXME(willkg): This hits production. We might want it configurable.
API_URL = 'https://crash-stats.mozilla.com/api'
class OutputBase:
"""Base class for outputter classes
Outputter classes are context managers. If they require start/top or begin/end semantics, they
should implement ``__enter__`` and ``__exit__``.
Otherwise they can just implement ``data`` and should be fine.
"""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def warning(self, line):
"""Prints out a warning line to stderr
:arg str line: the line to print to stderr
"""
print('WARNING: %s' % line, file=sys.stderr)
def data(self, crash_id, old_sig, result, verbose):
"""Outputs a data point
:arg str crash_id: the crash id for the signature generated
:arg str old_sig: the old signature retrieved in the processed crash
:arg Result result: the signature result
:arg boolean verbose: whether or not to be verbose
"""
pass
class TextOutput(OutputBase):
def data(self, crash_id, old_sig, result, verbose):
print('Crash id: %s' % crash_id)
print('Original: %s' % old_sig)
print('New: %s' % result.signature)
print('Same?: %s' % (old_sig == result.signature))
if result.notes:
print('Notes: (%d)' % len(result.notes))
for note in result.notes:
print(' %s' % note)
if verbose and result.debug_log:
print('Debug: (%d)' % len(result.debug_log))
for item in result.debug_log:
print(' %s' % item)
class CSVOutput(OutputBase):
def __enter__(self):
self.out = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
self.out.writerow(['crashid', 'old', 'new', 'same?', 'notes'])
return self
def __exit__(self, exc_type, exc_value, traceback):
self.out = None
def data(self, crash_id, old_sig, result, verbose):
self.out.writerow(
[crash_id, old_sig, result.signature, str(old_sig == result.signature), result.notes]
)
def fetch(endpoint, crash_id, api_token=None):
kwargs = {
'params': {
'crash_id': crash_id
}
}
if api_token:
kwargs['headers'] = {
'Auth-Token': api_token
}
return requests.get(API_URL + endpoint, **kwargs)
|
willkg/socorro-siggen
|
siggen/cmd_fetch_data.py
|
main
|
python
|
def main():
parser = argparse.ArgumentParser(
formatter_class=WrappedTextHelpFormatter,
description=DESCRIPTION
)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
parser.add_argument(
'crashid', help='crash id to generate signatures for'
)
args = parser.parse_args()
api_token = os.environ.get('SOCORRO_API_TOKEN', '')
crash_id = args.crashid.strip()
resp = fetch('/RawCrash/', crash_id, api_token)
if resp.status_code == 404:
printerr('%s: does not exist.' % crash_id)
return 1
if resp.status_code == 429:
printerr('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
printerr('HTTP 500: %s' % resp.content)
return 1
raw_crash = resp.json()
# If there's an error in the raw crash, then something is wrong--probably with the API
# token. So print that out and exit.
if 'error' in raw_crash:
print('Error fetching raw crash: %s' % raw_crash['error'], file=sys.stderr)
return 1
resp = fetch('/ProcessedCrash/', crash_id, api_token)
if resp.status_code == 404:
printerr('%s: does not have processed crash.' % crash_id)
return 1
if resp.status_code == 429:
printerr('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
printerr('HTTP 500: %s' % resp.content)
return 1
processed_crash = resp.json()
# If there's an error in the processed crash, then something is wrong--probably with the
# API token. So print that out and exit.
if 'error' in processed_crash:
printerr('Error fetching processed crash: %s' % processed_crash['error'])
return 1
crash_data = convert_to_crash_data(raw_crash, processed_crash)
print(json.dumps(crash_data, indent=2))
|
Takes a crash id, pulls down data from Socorro, generates signature data
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/cmd_fetch_data.py#L73-L136
|
[
"def convert_to_crash_data(raw_crash, processed_crash):\n \"\"\"\n Takes a raw crash and a processed crash (these are Socorro-centric\n data structures) and converts them to a crash data structure used\n by signature generation.\n\n :arg raw_crash: raw crash data from Socorro\n :arg processed_crash: processed crash data from Socorro\n\n :returns: crash data structure that conforms to the schema\n\n \"\"\"\n # We want to generate fresh signatures, so we remove the \"normalized\" field\n # from stack frames from the processed crash because this is essentially\n # cached data from previous processing\n for thread in glom(processed_crash, 'json_dump.threads', default=[]):\n for frame in thread.get('frames', []):\n if 'normalized' in frame:\n del frame['normalized']\n\n crash_data = {\n # JavaStackTrace or None\n 'java_stack_trace': glom(processed_crash, 'java_stack_trace', default=None),\n\n # int or None\n 'crashing_thread': glom(\n processed_crash, 'json_dump.crash_info.crashing_thread', default=None\n ),\n\n # list of CStackTrace or None\n 'threads': glom(processed_crash, 'json_dump.threads', default=None),\n\n # int or None\n 'hang_type': glom(processed_crash, 'hang_type', default=None),\n\n # text or None\n 'os': glom(processed_crash, 'json_dump.system_info.os', default=None),\n\n # int or None\n 'oom_allocation_size': int_or_none(glom(raw_crash, 'OOMAllocationSize', default=None)),\n\n # text or None\n 'abort_message': glom(raw_crash, 'AbortMessage', default=None),\n\n # text or None\n 'mdsw_status_string': glom(processed_crash, 'mdsw_status_string', default=None),\n\n # text json with \"phase\", \"conditions\" (complicated--see code) or None\n 'async_shutdown_timeout': glom(raw_crash, 'AsyncShutdownTimeout', default=None),\n\n # text or None\n 'jit_category': glom(processed_crash, 'classifications.jit.category', default=None),\n\n # text or None\n 'ipc_channel_error': glom(raw_crash, 'ipc_channel_error', default=None),\n\n # text or None\n 'ipc_message_name': 
glom(raw_crash, 'IPCMessageName', default=None),\n\n # text\n 'moz_crash_reason': glom(processed_crash, 'moz_crash_reason', default=None),\n\n # text; comma-delimited e.g. \"browser,flash1,flash2\"\n 'additional_minidumps': glom(raw_crash, 'additional_minidumps', default=''),\n\n # pull out the original signature if there was one\n 'original_signature': glom(processed_crash, 'signature', default='')\n }\n return crash_data\n",
"def printerr(s, **kwargs):\n kwargs['file'] = sys.stderr\n print(s, **kwargs)\n",
"def fetch(endpoint, crash_id, api_token=None):\n kwargs = {\n 'params': {\n 'crash_id': crash_id\n }\n }\n if api_token:\n kwargs['headers'] = {\n 'Auth-Token': api_token\n }\n\n return requests.get(API_URL + endpoint, **kwargs)\n"
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import argparse
import json
import os
import sys
import requests
from .utils import convert_to_crash_data
class WrappedTextHelpFormatter(argparse.HelpFormatter):
"""Subclass that wraps description and epilog text taking paragraphs into account"""
def _fill_text(self, text, width, indent):
"""Wraps text like HelpFormatter, but doesn't squash lines
This makes it easier to do lists and paragraphs.
"""
parts = text.split('\n\n')
for i, part in enumerate(parts):
# Check to see if it's a bulleted list--if so, then fill each line
if part.startswith('* '):
subparts = part.split('\n')
for j, subpart in enumerate(subparts):
subparts[j] = super(WrappedTextHelpFormatter, self)._fill_text(
subpart, width, indent
)
parts[i] = '\n'.join(subparts)
else:
parts[i] = super(WrappedTextHelpFormatter, self)._fill_text(part, width, indent)
return '\n\n'.join(parts)
DESCRIPTION = """
Takes a crash id via the command line, pulls down the crash information, and
outputs JSON for signature generation.
To use an API token from Crash Stats to alleviate issues with rate limiting,
set SOCORRO_API_TOKEN in the environment.
"""
# FIXME(willkg): This hits production. We might want it configurable.
API_URL = 'https://crash-stats.mozilla.com/api'
def printerr(s, **kwargs):
kwargs['file'] = sys.stderr
print(s, **kwargs)
def fetch(endpoint, crash_id, api_token=None):
kwargs = {
'params': {
'crash_id': crash_id
}
}
if api_token:
kwargs['headers'] = {
'Auth-Token': api_token
}
return requests.get(API_URL + endpoint, **kwargs)
|
willkg/socorro-siggen
|
siggen/cmd_fetch_data.py
|
WrappedTextHelpFormatter._fill_text
|
python
|
def _fill_text(self, text, width, indent):
parts = text.split('\n\n')
for i, part in enumerate(parts):
# Check to see if it's a bulleted list--if so, then fill each line
if part.startswith('* '):
subparts = part.split('\n')
for j, subpart in enumerate(subparts):
subparts[j] = super(WrappedTextHelpFormatter, self)._fill_text(
subpart, width, indent
)
parts[i] = '\n'.join(subparts)
else:
parts[i] = super(WrappedTextHelpFormatter, self)._fill_text(part, width, indent)
return '\n\n'.join(parts)
|
Wraps text like HelpFormatter, but doesn't squash lines
This makes it easier to do lists and paragraphs.
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/cmd_fetch_data.py#L20-L39
| null |
class WrappedTextHelpFormatter(argparse.HelpFormatter):
"""Subclass that wraps description and epilog text taking paragraphs into account"""
|
willkg/socorro-siggen
|
siggen/generator.py
|
SignatureGenerator.generate
|
python
|
def generate(self, signature_data):
result = Result()
for rule in self.pipeline:
rule_name = rule.__class__.__name__
try:
if rule.predicate(signature_data, result):
rule.action(signature_data, result)
except Exception as exc:
if self.error_handler:
self.error_handler(
signature_data,
exc_info=sys.exc_info(),
extra={'rule': rule_name}
)
result.info(rule_name, 'Rule failed: %s', exc)
return result
|
Takes data and returns a signature
:arg dict signature_data: data to use to generate a signature
:returns: ``Result`` instance
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/generator.py#L74-L100
|
[
"def info(self, rule, msg, *args):\n if args:\n msg = msg % args\n self.notes.append('%s: %s' % (rule, msg))\n"
] |
class SignatureGenerator:
def __init__(self, pipeline=None, error_handler=None):
"""
:arg pipeline: list of rules to use for signature generation
:arg error_handler: error handling function with signature
``fun(signature_data, exc_info, extra)``
"""
self.pipeline = pipeline or list(DEFAULT_PIPELINE)
self.error_handler = error_handler
|
willkg/socorro-siggen
|
siggen/cmd_doc.py
|
main
|
python
|
def main(argv=None):
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'pipeline',
help='Python dotted path to rules pipeline to document'
)
parser.add_argument('output', help='output file')
if argv is None:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
print('Generating documentation for %s in %s...' % (args.pipeline, args.output))
rules = import_rules(args.pipeline)
with open(args.output, 'w') as fp:
fp.write('.. THIS IS AUTOGEMERATED USING:\n')
fp.write(' \n')
fp.write(' %s\n' % (' '.join(sys.argv)))
fp.write(' \n')
fp.write('Signature generation rules pipeline\n')
fp.write('===================================\n')
fp.write('\n')
fp.write('\n')
fp.write(
'This is the signature generation pipeline defined at ``%s``:\n' %
args.pipeline
)
fp.write('\n')
for i, rule in enumerate(rules):
li = '%s. ' % (i + 1)
fp.write('%s%s\n' % (
li,
indent(get_doc(rule), ' ' * len(li))
))
fp.write('\n')
|
Generates documentation for signature generation pipeline
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/cmd_doc.py#L68-L107
|
[
"def indent(text, prefix):\n text = text.replace('\\n', '\\n' + prefix)\n return text.strip()\n",
"def import_rules(rules):\n module_path, attr = rules.rsplit('.', 1)\n module = importlib.import_module(module_path)\n return getattr(module, attr)\n",
"def get_doc(cls):\n return 'Rule: %s\\n\\n%s' % (\n cls.__class__.__name__,\n dedent_docstring(cls.__doc__)\n )\n"
] |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import importlib
import re
import sys
DESCRIPTION = """
Generates documentation for the specified signature generation pipeline.
Outputs the documentation in restructured text format.
"""
def import_rules(rules):
module_path, attr = rules.rsplit('.', 1)
module = importlib.import_module(module_path)
return getattr(module, attr)
def indent(text, prefix):
text = text.replace('\n', '\n' + prefix)
return text.strip()
LEAD_WHITESPACE = re.compile(r'^[ \t]*')
def dedent_docstring(text):
text_lines = text.splitlines()
# Figure out the indentation of all the lines to figure out how much to
# dedent by
leads = []
for line in text_lines:
if len(line.strip()) == 0:
continue
leads.append(LEAD_WHITESPACE.match(line).group(0))
if leads and len(leads[0]) == 0:
leads.pop(0)
if not leads:
return text
# Let's use the first non-empty line to dedent the text with. It's
# possible this isn't a great idea. If that's the case, we can figure
# out a different way to do it.
dedent_str = leads[0]
dedent_amount = len(dedent_str)
for i, line in enumerate(text_lines):
if line.startswith(dedent_str):
text_lines[i] = text_lines[i][dedent_amount:]
return '\n'.join(text_lines)
def get_doc(cls):
return 'Rule: %s\n\n%s' % (
cls.__class__.__name__,
dedent_docstring(cls.__doc__)
)
|
willkg/socorro-siggen
|
siggen/rules.py
|
CSignatureTool.normalize_rust_function
|
python
|
def normalize_rust_function(self, function, line):
# Drop the prefix and return type if there is any
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=(' as ',)
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
# Remove rust-generated uniqueness hashes
function = self.fixup_hash.sub('', function)
return function
|
Normalizes a single rust frame with a function
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L121-L156
|
[
"def collapse(\n function,\n open_string,\n close_string,\n replacement='',\n exceptions=None,\n):\n \"\"\"Collapses the text between two delimiters in a frame function value\n\n This collapses the text between two delimiters and either removes the text\n altogether or replaces it with a replacement string.\n\n There are certain contexts in which we might not want to collapse the text\n between two delimiters. These are denoted as \"exceptions\" and collapse will\n check for those exception strings occuring before the token to be replaced\n or inside the token to be replaced.\n\n Before::\n\n IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)\n ^ ^ open token\n exception string occurring before open token\n\n Inside::\n\n <rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute\n ^ ^^^^ exception string inside token\n open token\n\n :arg function: the function value from a frame to collapse tokens in\n :arg open_string: the open delimiter; e.g. ``(``\n :arg close_string: the close delimiter; e.g. ``)``\n :arg replacement: what to replace the token with; e.g. 
``<T>``\n :arg exceptions: list of strings denoting exceptions where we don't want\n to collapse the token\n\n :returns: new function string with tokens collapsed\n\n \"\"\"\n collapsed = []\n open_count = 0\n open_token = []\n\n for i, char in enumerate(function):\n if not open_count:\n if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa\n open_count += 1\n open_token = [char]\n else:\n collapsed.append(char)\n\n else:\n if char == open_string:\n open_count += 1\n open_token.append(char)\n\n elif char == close_string:\n open_count -= 1\n open_token.append(char)\n\n if open_count == 0:\n token = ''.join(open_token)\n if _is_exception(exceptions, function[:i], function[i + 1:], token):\n collapsed.append(''.join(open_token))\n else:\n collapsed.append(replacement)\n open_token = []\n else:\n open_token.append(char)\n\n if open_count:\n token = ''.join(open_token)\n if _is_exception(exceptions, function[:i], function[i + 1:], token):\n collapsed.append(''.join(open_token))\n else:\n collapsed.append(replacement)\n\n return ''.join(collapsed)\n",
"def drop_prefix_and_return_type(function):\n \"\"\"Takes the function value from a frame and drops prefix and return type\n\n For example::\n\n static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)\n ^ ^^^^^^ return type\n prefix\n\n This gets changes to this::\n\n Allocator<MozJemallocBase>::malloc(unsigned __int64)\n\n This tokenizes on space, but takes into account types, generics, traits,\n function arguments, and other parts of the function signature delimited by\n things like `', <>, {}, [], and () for both C/C++ and Rust.\n\n After tokenizing, this returns the last token since that's comprised of the\n function name and its arguments.\n\n :arg function: the function value in a frame to drop bits from\n\n :returns: adjusted function value\n\n \"\"\"\n DELIMITERS = {\n '(': ')',\n '{': '}',\n '[': ']',\n '<': '>',\n '`': \"'\"\n }\n OPEN = DELIMITERS.keys()\n CLOSE = DELIMITERS.values()\n\n # The list of tokens accumulated so far\n tokens = []\n\n # Keeps track of open delimiters so we can match and close them\n levels = []\n\n # The current token we're building\n current = []\n\n for i, char in enumerate(function):\n if char in OPEN:\n levels.append(char)\n current.append(char)\n elif char in CLOSE:\n if levels and DELIMITERS[levels[-1]] == char:\n levels.pop()\n current.append(char)\n else:\n # This is an unmatched close.\n current.append(char)\n elif levels:\n current.append(char)\n elif char == ' ':\n tokens.append(''.join(current))\n current = []\n else:\n current.append(char)\n\n if current:\n tokens.append(''.join(current))\n\n while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):\n # It's possible for the function signature to have a space between\n # the function name and the parenthesized arguments or [clone ...]\n # thing. If that's the case, we join the last two tokens. 
We keep doing\n # that until the last token is nice.\n #\n # Example:\n #\n # somefunc (int arg1, int arg2)\n # ^\n # somefunc(int arg1, int arg2) [clone .cold.111]\n # ^\n # somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]\n # ^ ^\n tokens = tokens[:-2] + [' '.join(tokens[-2:])]\n\n return tokens[-1]\n"
] |
class CSignatureTool(SignatureTool):
"""Generates signature from C/C++/Rust stacks.
This is the class for signature generation tools that work on breakpad
C/C++ stacks. It normalizes frames and then runs them through the siglists
to determine which frames should be part of the signature.
"""
hang_prefixes = {
-1: "hang",
1: "chromehang"
}
def __init__(self, quit_check_callback=None):
super(CSignatureTool, self).__init__(quit_check_callback)
self.irrelevant_signature_re = re.compile(
'|'.join(siglists_utils.IRRELEVANT_SIGNATURE_RE)
)
self.prefix_signature_re = re.compile(
'|'.join(siglists_utils.PREFIX_SIGNATURE_RE)
)
self.signatures_with_line_numbers_re = re.compile(
'|'.join(siglists_utils.SIGNATURES_WITH_LINE_NUMBERS_RE)
)
self.signature_sentinels = siglists_utils.SIGNATURE_SENTINELS
self.collapse_arguments = True
self.fixup_space = re.compile(r' (?=[\*&,])')
self.fixup_comma = re.compile(r',(?! )')
self.fixup_hash = re.compile(r'::h[0-9a-fA-F]+$')
def normalize_cpp_function(self, function, line):
"""Normalizes a single cpp frame with a function"""
# Drop member function cv/ref qualifiers like const, const&, &, and &&
for ref in ('const', 'const&', '&&', '&'):
if function.endswith(ref):
function = function[:-len(ref)].strip()
# Drop the prefix and return type if there is any if it's not operator
# overloading--operator overloading syntax doesn't have the things
# we're dropping here and can look curious, so don't try
if '::operator' not in function:
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=('name omitted', 'IPC::ParamTraits')
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement='',
exceptions=('anonymous namespace', 'operator')
)
# Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
if 'clone .cold' in function:
function = collapse(
function,
open_string='[',
close_string=']',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
return function
def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
"""Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
"""
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset)
def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
"""
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
"""
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes
|
willkg/socorro-siggen
|
siggen/rules.py
|
CSignatureTool.normalize_cpp_function
|
python
|
def normalize_cpp_function(self, function, line):
# Drop member function cv/ref qualifiers like const, const&, &, and &&
for ref in ('const', 'const&', '&&', '&'):
if function.endswith(ref):
function = function[:-len(ref)].strip()
# Drop the prefix and return type if there is any if it's not operator
# overloading--operator overloading syntax doesn't have the things
# we're dropping here and can look curious, so don't try
if '::operator' not in function:
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=('name omitted', 'IPC::ParamTraits')
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement='',
exceptions=('anonymous namespace', 'operator')
)
# Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
if 'clone .cold' in function:
function = collapse(
function,
open_string='[',
close_string=']',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
return function
|
Normalizes a single cpp frame with a function
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L158-L208
|
[
"def collapse(\n function,\n open_string,\n close_string,\n replacement='',\n exceptions=None,\n):\n \"\"\"Collapses the text between two delimiters in a frame function value\n\n This collapses the text between two delimiters and either removes the text\n altogether or replaces it with a replacement string.\n\n There are certain contexts in which we might not want to collapse the text\n between two delimiters. These are denoted as \"exceptions\" and collapse will\n check for those exception strings occuring before the token to be replaced\n or inside the token to be replaced.\n\n Before::\n\n IPC::ParamTraits<nsTSubstring<char> >::Write(IPC::Message *,nsTSubstring<char> const &)\n ^ ^ open token\n exception string occurring before open token\n\n Inside::\n\n <rayon_core::job::HeapJob<BODY> as rayon_core::job::Job>::execute\n ^ ^^^^ exception string inside token\n open token\n\n :arg function: the function value from a frame to collapse tokens in\n :arg open_string: the open delimiter; e.g. ``(``\n :arg close_string: the close delimiter; e.g. ``)``\n :arg replacement: what to replace the token with; e.g. 
``<T>``\n :arg exceptions: list of strings denoting exceptions where we don't want\n to collapse the token\n\n :returns: new function string with tokens collapsed\n\n \"\"\"\n collapsed = []\n open_count = 0\n open_token = []\n\n for i, char in enumerate(function):\n if not open_count:\n if char == open_string and not _is_exception(exceptions, function[:i], function[i + 1:], ''): # noqa\n open_count += 1\n open_token = [char]\n else:\n collapsed.append(char)\n\n else:\n if char == open_string:\n open_count += 1\n open_token.append(char)\n\n elif char == close_string:\n open_count -= 1\n open_token.append(char)\n\n if open_count == 0:\n token = ''.join(open_token)\n if _is_exception(exceptions, function[:i], function[i + 1:], token):\n collapsed.append(''.join(open_token))\n else:\n collapsed.append(replacement)\n open_token = []\n else:\n open_token.append(char)\n\n if open_count:\n token = ''.join(open_token)\n if _is_exception(exceptions, function[:i], function[i + 1:], token):\n collapsed.append(''.join(open_token))\n else:\n collapsed.append(replacement)\n\n return ''.join(collapsed)\n",
"def drop_prefix_and_return_type(function):\n \"\"\"Takes the function value from a frame and drops prefix and return type\n\n For example::\n\n static void * Allocator<MozJemallocBase>::malloc(unsigned __int64)\n ^ ^^^^^^ return type\n prefix\n\n This gets changes to this::\n\n Allocator<MozJemallocBase>::malloc(unsigned __int64)\n\n This tokenizes on space, but takes into account types, generics, traits,\n function arguments, and other parts of the function signature delimited by\n things like `', <>, {}, [], and () for both C/C++ and Rust.\n\n After tokenizing, this returns the last token since that's comprised of the\n function name and its arguments.\n\n :arg function: the function value in a frame to drop bits from\n\n :returns: adjusted function value\n\n \"\"\"\n DELIMITERS = {\n '(': ')',\n '{': '}',\n '[': ']',\n '<': '>',\n '`': \"'\"\n }\n OPEN = DELIMITERS.keys()\n CLOSE = DELIMITERS.values()\n\n # The list of tokens accumulated so far\n tokens = []\n\n # Keeps track of open delimiters so we can match and close them\n levels = []\n\n # The current token we're building\n current = []\n\n for i, char in enumerate(function):\n if char in OPEN:\n levels.append(char)\n current.append(char)\n elif char in CLOSE:\n if levels and DELIMITERS[levels[-1]] == char:\n levels.pop()\n current.append(char)\n else:\n # This is an unmatched close.\n current.append(char)\n elif levels:\n current.append(char)\n elif char == ' ':\n tokens.append(''.join(current))\n current = []\n else:\n current.append(char)\n\n if current:\n tokens.append(''.join(current))\n\n while len(tokens) > 1 and tokens[-1].startswith(('(', '[clone')):\n # It's possible for the function signature to have a space between\n # the function name and the parenthesized arguments or [clone ...]\n # thing. If that's the case, we join the last two tokens. 
We keep doing\n # that until the last token is nice.\n #\n # Example:\n #\n # somefunc (int arg1, int arg2)\n # ^\n # somefunc(int arg1, int arg2) [clone .cold.111]\n # ^\n # somefunc(int arg1, int arg2) [clone .cold.111] [clone .cold.222]\n # ^ ^\n tokens = tokens[:-2] + [' '.join(tokens[-2:])]\n\n return tokens[-1]\n"
] |
class CSignatureTool(SignatureTool):
"""Generates signature from C/C++/Rust stacks.
This is the class for signature generation tools that work on breakpad
C/C++ stacks. It normalizes frames and then runs them through the siglists
to determine which frames should be part of the signature.
"""
hang_prefixes = {
-1: "hang",
1: "chromehang"
}
def __init__(self, quit_check_callback=None):
super(CSignatureTool, self).__init__(quit_check_callback)
self.irrelevant_signature_re = re.compile(
'|'.join(siglists_utils.IRRELEVANT_SIGNATURE_RE)
)
self.prefix_signature_re = re.compile(
'|'.join(siglists_utils.PREFIX_SIGNATURE_RE)
)
self.signatures_with_line_numbers_re = re.compile(
'|'.join(siglists_utils.SIGNATURES_WITH_LINE_NUMBERS_RE)
)
self.signature_sentinels = siglists_utils.SIGNATURE_SENTINELS
self.collapse_arguments = True
self.fixup_space = re.compile(r' (?=[\*&,])')
self.fixup_comma = re.compile(r',(?! )')
self.fixup_hash = re.compile(r'::h[0-9a-fA-F]+$')
def normalize_rust_function(self, function, line):
"""Normalizes a single rust frame with a function"""
# Drop the prefix and return type if there is any
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=(' as ',)
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
# Remove rust-generated uniqueness hashes
function = self.fixup_hash.sub('', function)
return function
def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
"""Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
"""
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset)
def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
"""
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
"""
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes
|
willkg/socorro-siggen
|
siggen/rules.py
|
CSignatureTool.normalize_frame
|
python
|
def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset)
|
Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L210-L266
|
[
"def parse_source_file(source_file):\n \"\"\"Parses a source file thing and returns the file name\n\n Example:\n\n >>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06')\n 'js/src/jit/MIR.h'\n\n :arg str source_file: the source file (\"file\") from a stack frame\n\n :returns: the filename or ``None`` if it couldn't determine one\n\n \"\"\"\n if not source_file:\n return None\n\n vcsinfo = source_file.split(':')\n if len(vcsinfo) == 4:\n # These are repositories or cloud file systems (e.g. hg, git, s3)\n vcstype, root, vcs_source_file, revision = vcsinfo\n return vcs_source_file\n\n if len(vcsinfo) == 2:\n # These are directories on someone's Windows computer and vcstype is a\n # file system (e.g. \"c:\", \"d:\", \"f:\")\n vcstype, vcs_source_file = vcsinfo\n return vcs_source_file\n\n if source_file.startswith('/'):\n # These are directories on OSX or Linux\n return source_file\n\n # We have no idea what this is, so return None\n return None\n",
"def normalize_cpp_function(self, function, line):\n \"\"\"Normalizes a single cpp frame with a function\"\"\"\n # Drop member function cv/ref qualifiers like const, const&, &, and &&\n for ref in ('const', 'const&', '&&', '&'):\n if function.endswith(ref):\n function = function[:-len(ref)].strip()\n\n # Drop the prefix and return type if there is any if it's not operator\n # overloading--operator overloading syntax doesn't have the things\n # we're dropping here and can look curious, so don't try\n if '::operator' not in function:\n function = drop_prefix_and_return_type(function)\n\n # Collapse types\n function = collapse(\n function,\n open_string='<',\n close_string='>',\n replacement='<T>',\n exceptions=('name omitted', 'IPC::ParamTraits')\n )\n\n # Collapse arguments\n if self.collapse_arguments:\n function = collapse(\n function,\n open_string='(',\n close_string=')',\n replacement='',\n exceptions=('anonymous namespace', 'operator')\n )\n\n # Remove PGO cold block labels like \"[clone .cold.222]\". bug #1397926\n if 'clone .cold' in function:\n function = collapse(\n function,\n open_string='[',\n close_string=']',\n replacement=''\n )\n\n if self.signatures_with_line_numbers_re.match(function):\n function = '{}:{}'.format(function, line)\n\n # Remove spaces before all stars, ampersands, and commas\n function = self.fixup_space.sub('', function)\n\n # Ensure a space after commas\n function = self.fixup_comma.sub(', ', function)\n\n return function\n"
] |
class CSignatureTool(SignatureTool):
"""Generates signature from C/C++/Rust stacks.
This is the class for signature generation tools that work on breakpad
C/C++ stacks. It normalizes frames and then runs them through the siglists
to determine which frames should be part of the signature.
"""
hang_prefixes = {
-1: "hang",
1: "chromehang"
}
def __init__(self, quit_check_callback=None):
super(CSignatureTool, self).__init__(quit_check_callback)
self.irrelevant_signature_re = re.compile(
'|'.join(siglists_utils.IRRELEVANT_SIGNATURE_RE)
)
self.prefix_signature_re = re.compile(
'|'.join(siglists_utils.PREFIX_SIGNATURE_RE)
)
self.signatures_with_line_numbers_re = re.compile(
'|'.join(siglists_utils.SIGNATURES_WITH_LINE_NUMBERS_RE)
)
self.signature_sentinels = siglists_utils.SIGNATURE_SENTINELS
self.collapse_arguments = True
self.fixup_space = re.compile(r' (?=[\*&,])')
self.fixup_comma = re.compile(r',(?! )')
self.fixup_hash = re.compile(r'::h[0-9a-fA-F]+$')
def normalize_rust_function(self, function, line):
"""Normalizes a single rust frame with a function"""
# Drop the prefix and return type if there is any
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=(' as ',)
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
# Remove rust-generated uniqueness hashes
function = self.fixup_hash.sub('', function)
return function
def normalize_cpp_function(self, function, line):
"""Normalizes a single cpp frame with a function"""
# Drop member function cv/ref qualifiers like const, const&, &, and &&
for ref in ('const', 'const&', '&&', '&'):
if function.endswith(ref):
function = function[:-len(ref)].strip()
# Drop the prefix and return type if there is any if it's not operator
# overloading--operator overloading syntax doesn't have the things
# we're dropping here and can look curious, so don't try
if '::operator' not in function:
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=('name omitted', 'IPC::ParamTraits')
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement='',
exceptions=('anonymous namespace', 'operator')
)
# Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
if 'clone .cold' in function:
function = collapse(
function,
open_string='[',
close_string=']',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
return function
def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
"""
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
"""
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes
|
willkg/socorro-siggen
|
siggen/rules.py
|
CSignatureTool._do_generate
|
python
|
def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
notes = []
debug_notes = []
# shorten source_list to the first signatureSentinel
sentinel_locations = []
for a_sentinel in self.signature_sentinels:
if type(a_sentinel) == tuple:
a_sentinel, condition_fn = a_sentinel
if not condition_fn(source_list):
continue
try:
sentinel_locations.append(source_list.index(a_sentinel))
except ValueError:
pass
if sentinel_locations:
min_index = min(sentinel_locations)
debug_notes.append(
'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
)
source_list = source_list[min_index:]
# Get all the relevant frame signatures. Note that these function signatures
# have already been normalized at this point.
new_signature_list = []
for a_signature in source_list:
# If the signature matches the irrelevant signatures regex, skip to the next frame.
if self.irrelevant_signature_re.match(a_signature):
debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
continue
# If the frame signature is a dll, remove the @xxxxx part.
if '.dll' in a_signature.lower():
a_signature = a_signature.split('@')[0]
# If this trimmed DLL signature is the same as the previous frame's, skip it.
if new_signature_list and a_signature == new_signature_list[-1]:
continue
new_signature_list.append(a_signature)
# If the signature does not match the prefix signatures regex, then it is the last
# one we add to the list.
if not self.prefix_signature_re.match(a_signature):
debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
break
debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))
# Add a special marker for hang crash reports.
if hang_type:
debug_notes.append(
'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
)
new_signature_list.insert(0, self.hang_prefixes[hang_type])
signature = delimiter.join(new_signature_list)
# Handle empty signatures to explain why we failed generating them.
if signature == '' or signature is None:
if crashed_thread is None:
notes.append(
"CSignatureTool: No signature could be created because we do not know which "
"thread crashed"
)
signature = "EMPTY: no crashing thread identified"
else:
notes.append(
"CSignatureTool: No proper signature could be created because no good data "
"for the crashing thread ({}) was found".format(crashed_thread)
)
try:
signature = source_list[0]
except IndexError:
signature = "EMPTY: no frame data available"
return signature, notes, debug_notes
|
each element of signatureList names a frame in the crash stack; and is:
- a prefix of a relevant frame: Append this element to the signature
- a relevant frame: Append this element and stop looking
- irrelevant: Append this element only after seeing a prefix frame
The signature is a ' | ' separated string of frame names.
|
train
|
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L268-L351
| null |
class CSignatureTool(SignatureTool):
"""Generates signature from C/C++/Rust stacks.
This is the class for signature generation tools that work on breakpad
C/C++ stacks. It normalizes frames and then runs them through the siglists
to determine which frames should be part of the signature.
"""
hang_prefixes = {
-1: "hang",
1: "chromehang"
}
def __init__(self, quit_check_callback=None):
super(CSignatureTool, self).__init__(quit_check_callback)
self.irrelevant_signature_re = re.compile(
'|'.join(siglists_utils.IRRELEVANT_SIGNATURE_RE)
)
self.prefix_signature_re = re.compile(
'|'.join(siglists_utils.PREFIX_SIGNATURE_RE)
)
self.signatures_with_line_numbers_re = re.compile(
'|'.join(siglists_utils.SIGNATURES_WITH_LINE_NUMBERS_RE)
)
self.signature_sentinels = siglists_utils.SIGNATURE_SENTINELS
self.collapse_arguments = True
self.fixup_space = re.compile(r' (?=[\*&,])')
self.fixup_comma = re.compile(r',(?! )')
self.fixup_hash = re.compile(r'::h[0-9a-fA-F]+$')
def normalize_rust_function(self, function, line):
"""Normalizes a single rust frame with a function"""
# Drop the prefix and return type if there is any
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=(' as ',)
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
# Remove rust-generated uniqueness hashes
function = self.fixup_hash.sub('', function)
return function
def normalize_cpp_function(self, function, line):
"""Normalizes a single cpp frame with a function"""
# Drop member function cv/ref qualifiers like const, const&, &, and &&
for ref in ('const', 'const&', '&&', '&'):
if function.endswith(ref):
function = function[:-len(ref)].strip()
# Drop the prefix and return type if there is any if it's not operator
# overloading--operator overloading syntax doesn't have the things
# we're dropping here and can look curious, so don't try
if '::operator' not in function:
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=('name omitted', 'IPC::ParamTraits')
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement='',
exceptions=('anonymous namespace', 'operator')
)
# Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
if 'clone .cold' in function:
function = collapse(
function,
open_string='[',
close_string=']',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
return function
def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
"""Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
"""
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset)
|
inveniosoftware/invenio-db
|
invenio_db/ext.py
|
InvenioDB.init_app
|
python
|
def init_app(self, app, **kwargs):
self.init_db(app, **kwargs)
app.config.setdefault('ALEMBIC', {
'script_location': pkg_resources.resource_filename(
'invenio_db', 'alembic'
),
'version_locations': [
(base_entry.name, pkg_resources.resource_filename(
base_entry.module_name, os.path.join(*base_entry.attrs)
)) for base_entry in pkg_resources.iter_entry_points(
'invenio_db.alembic'
)
],
})
self.alembic.init_app(app)
app.extensions['invenio-db'] = self
app.cli.add_command(db_cmd)
|
Initialize application object.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/ext.py#L34-L53
|
[
"def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):\n \"\"\"Initialize Flask-SQLAlchemy extension.\"\"\"\n # Setup SQLAlchemy\n app.config.setdefault(\n 'SQLALCHEMY_DATABASE_URI',\n 'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')\n )\n app.config.setdefault('SQLALCHEMY_ECHO', False)\n\n # Initialize Flask-SQLAlchemy extension.\n database = kwargs.get('db', db)\n database.init_app(app)\n\n # Initialize versioning support.\n self.init_versioning(app, database, kwargs.get('versioning_manager'))\n\n # Initialize model bases\n if entry_point_group:\n for base_entry in pkg_resources.iter_entry_points(\n entry_point_group):\n base_entry.load()\n\n # All models should be loaded by now.\n sa.orm.configure_mappers()\n # Ensure that versioning classes have been built.\n if app.config['DB_VERSIONING']:\n manager = self.versioning_manager\n if manager.pending_classes:\n if not versioning_models_registered(manager, database.Model):\n manager.builder.configure_versioned_classes()\n elif 'transaction' not in database.metadata.tables:\n manager.declarative_base = database.Model\n manager.create_transaction_model()\n manager.plugins.after_build_tx_class(manager)\n"
] |
class InvenioDB(object):
"""Invenio database extension."""
def __init__(self, app=None, **kwargs):
"""Extension initialization."""
self.alembic = Alembic(run_mkdir=False, command_name='alembic')
if app:
self.init_app(app, **kwargs)
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
"""Initialize Flask-SQLAlchemy extension."""
# Setup SQLAlchemy
app.config.setdefault(
'SQLALCHEMY_DATABASE_URI',
'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
)
app.config.setdefault('SQLALCHEMY_ECHO', False)
# Initialize Flask-SQLAlchemy extension.
database = kwargs.get('db', db)
database.init_app(app)
# Initialize versioning support.
self.init_versioning(app, database, kwargs.get('versioning_manager'))
# Initialize model bases
if entry_point_group:
for base_entry in pkg_resources.iter_entry_points(
entry_point_group):
base_entry.load()
# All models should be loaded by now.
sa.orm.configure_mappers()
# Ensure that versioning classes have been built.
if app.config['DB_VERSIONING']:
manager = self.versioning_manager
if manager.pending_classes:
if not versioning_models_registered(manager, database.Model):
manager.builder.configure_versioned_classes()
elif 'transaction' not in database.metadata.tables:
manager.declarative_base = database.Model
manager.create_transaction_model()
manager.plugins.after_build_tx_class(manager)
def init_versioning(self, app, database, versioning_manager=None):
"""Initialize the versioning support using SQLAlchemy-Continuum."""
try:
pkg_resources.get_distribution('sqlalchemy_continuum')
except pkg_resources.DistributionNotFound: # pragma: no cover
default_versioning = False
else:
default_versioning = True
app.config.setdefault('DB_VERSIONING', default_versioning)
if not app.config['DB_VERSIONING']:
return
if not default_versioning: # pragma: no cover
raise RuntimeError(
'Please install extra versioning support first by running '
'pip install invenio-db[versioning].'
)
# Now we can import SQLAlchemy-Continuum.
from sqlalchemy_continuum import make_versioned
from sqlalchemy_continuum import versioning_manager as default_vm
from sqlalchemy_continuum.plugins import FlaskPlugin
# Try to guess user model class:
if 'DB_VERSIONING_USER_MODEL' not in app.config: # pragma: no cover
try:
pkg_resources.get_distribution('invenio_accounts')
except pkg_resources.DistributionNotFound:
user_cls = None
else:
user_cls = 'User'
else:
user_cls = app.config.get('DB_VERSIONING_USER_MODEL')
plugins = [FlaskPlugin()] if user_cls else []
# Call make_versioned() before your models are defined.
self.versioning_manager = versioning_manager or default_vm
make_versioned(
user_cls=user_cls,
manager=self.versioning_manager,
plugins=plugins,
)
# Register models that have been loaded beforehand.
builder = self.versioning_manager.builder
for tbl in database.metadata.tables.values():
builder.instrument_versioned_classes(
database.mapper, get_class_by_table(database.Model, tbl)
)
|
inveniosoftware/invenio-db
|
invenio_db/ext.py
|
InvenioDB.init_db
|
python
|
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
# Setup SQLAlchemy
app.config.setdefault(
'SQLALCHEMY_DATABASE_URI',
'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
)
app.config.setdefault('SQLALCHEMY_ECHO', False)
# Initialize Flask-SQLAlchemy extension.
database = kwargs.get('db', db)
database.init_app(app)
# Initialize versioning support.
self.init_versioning(app, database, kwargs.get('versioning_manager'))
# Initialize model bases
if entry_point_group:
for base_entry in pkg_resources.iter_entry_points(
entry_point_group):
base_entry.load()
# All models should be loaded by now.
sa.orm.configure_mappers()
# Ensure that versioning classes have been built.
if app.config['DB_VERSIONING']:
manager = self.versioning_manager
if manager.pending_classes:
if not versioning_models_registered(manager, database.Model):
manager.builder.configure_versioned_classes()
elif 'transaction' not in database.metadata.tables:
manager.declarative_base = database.Model
manager.create_transaction_model()
manager.plugins.after_build_tx_class(manager)
|
Initialize Flask-SQLAlchemy extension.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/ext.py#L55-L88
|
[
"def versioning_models_registered(manager, base):\n \"\"\"Return True if all versioning models have been registered.\"\"\"\n declared_models = base._decl_class_registry.keys()\n return all(versioning_model_classname(manager, c) in declared_models\n for c in manager.pending_classes)\n",
"def init_versioning(self, app, database, versioning_manager=None):\n \"\"\"Initialize the versioning support using SQLAlchemy-Continuum.\"\"\"\n try:\n pkg_resources.get_distribution('sqlalchemy_continuum')\n except pkg_resources.DistributionNotFound: # pragma: no cover\n default_versioning = False\n else:\n default_versioning = True\n\n app.config.setdefault('DB_VERSIONING', default_versioning)\n\n if not app.config['DB_VERSIONING']:\n return\n\n if not default_versioning: # pragma: no cover\n raise RuntimeError(\n 'Please install extra versioning support first by running '\n 'pip install invenio-db[versioning].'\n )\n\n # Now we can import SQLAlchemy-Continuum.\n from sqlalchemy_continuum import make_versioned\n from sqlalchemy_continuum import versioning_manager as default_vm\n from sqlalchemy_continuum.plugins import FlaskPlugin\n\n # Try to guess user model class:\n if 'DB_VERSIONING_USER_MODEL' not in app.config: # pragma: no cover\n try:\n pkg_resources.get_distribution('invenio_accounts')\n except pkg_resources.DistributionNotFound:\n user_cls = None\n else:\n user_cls = 'User'\n else:\n user_cls = app.config.get('DB_VERSIONING_USER_MODEL')\n\n plugins = [FlaskPlugin()] if user_cls else []\n\n # Call make_versioned() before your models are defined.\n self.versioning_manager = versioning_manager or default_vm\n make_versioned(\n user_cls=user_cls,\n manager=self.versioning_manager,\n plugins=plugins,\n )\n\n # Register models that have been loaded beforehand.\n builder = self.versioning_manager.builder\n\n for tbl in database.metadata.tables.values():\n builder.instrument_versioned_classes(\n database.mapper, get_class_by_table(database.Model, tbl)\n )\n"
] |
class InvenioDB(object):
"""Invenio database extension."""
def __init__(self, app=None, **kwargs):
"""Extension initialization."""
self.alembic = Alembic(run_mkdir=False, command_name='alembic')
if app:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
"""Initialize application object."""
self.init_db(app, **kwargs)
app.config.setdefault('ALEMBIC', {
'script_location': pkg_resources.resource_filename(
'invenio_db', 'alembic'
),
'version_locations': [
(base_entry.name, pkg_resources.resource_filename(
base_entry.module_name, os.path.join(*base_entry.attrs)
)) for base_entry in pkg_resources.iter_entry_points(
'invenio_db.alembic'
)
],
})
self.alembic.init_app(app)
app.extensions['invenio-db'] = self
app.cli.add_command(db_cmd)
def init_versioning(self, app, database, versioning_manager=None):
"""Initialize the versioning support using SQLAlchemy-Continuum."""
try:
pkg_resources.get_distribution('sqlalchemy_continuum')
except pkg_resources.DistributionNotFound: # pragma: no cover
default_versioning = False
else:
default_versioning = True
app.config.setdefault('DB_VERSIONING', default_versioning)
if not app.config['DB_VERSIONING']:
return
if not default_versioning: # pragma: no cover
raise RuntimeError(
'Please install extra versioning support first by running '
'pip install invenio-db[versioning].'
)
# Now we can import SQLAlchemy-Continuum.
from sqlalchemy_continuum import make_versioned
from sqlalchemy_continuum import versioning_manager as default_vm
from sqlalchemy_continuum.plugins import FlaskPlugin
# Try to guess user model class:
if 'DB_VERSIONING_USER_MODEL' not in app.config: # pragma: no cover
try:
pkg_resources.get_distribution('invenio_accounts')
except pkg_resources.DistributionNotFound:
user_cls = None
else:
user_cls = 'User'
else:
user_cls = app.config.get('DB_VERSIONING_USER_MODEL')
plugins = [FlaskPlugin()] if user_cls else []
# Call make_versioned() before your models are defined.
self.versioning_manager = versioning_manager or default_vm
make_versioned(
user_cls=user_cls,
manager=self.versioning_manager,
plugins=plugins,
)
# Register models that have been loaded beforehand.
builder = self.versioning_manager.builder
for tbl in database.metadata.tables.values():
builder.instrument_versioned_classes(
database.mapper, get_class_by_table(database.Model, tbl)
)
|
inveniosoftware/invenio-db
|
invenio_db/ext.py
|
InvenioDB.init_versioning
|
python
|
def init_versioning(self, app, database, versioning_manager=None):
try:
pkg_resources.get_distribution('sqlalchemy_continuum')
except pkg_resources.DistributionNotFound: # pragma: no cover
default_versioning = False
else:
default_versioning = True
app.config.setdefault('DB_VERSIONING', default_versioning)
if not app.config['DB_VERSIONING']:
return
if not default_versioning: # pragma: no cover
raise RuntimeError(
'Please install extra versioning support first by running '
'pip install invenio-db[versioning].'
)
# Now we can import SQLAlchemy-Continuum.
from sqlalchemy_continuum import make_versioned
from sqlalchemy_continuum import versioning_manager as default_vm
from sqlalchemy_continuum.plugins import FlaskPlugin
# Try to guess user model class:
if 'DB_VERSIONING_USER_MODEL' not in app.config: # pragma: no cover
try:
pkg_resources.get_distribution('invenio_accounts')
except pkg_resources.DistributionNotFound:
user_cls = None
else:
user_cls = 'User'
else:
user_cls = app.config.get('DB_VERSIONING_USER_MODEL')
plugins = [FlaskPlugin()] if user_cls else []
# Call make_versioned() before your models are defined.
self.versioning_manager = versioning_manager or default_vm
make_versioned(
user_cls=user_cls,
manager=self.versioning_manager,
plugins=plugins,
)
# Register models that have been loaded beforehand.
builder = self.versioning_manager.builder
for tbl in database.metadata.tables.values():
builder.instrument_versioned_classes(
database.mapper, get_class_by_table(database.Model, tbl)
)
|
Initialize the versioning support using SQLAlchemy-Continuum.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/ext.py#L90-L142
| null |
class InvenioDB(object):
"""Invenio database extension."""
def __init__(self, app=None, **kwargs):
"""Extension initialization."""
self.alembic = Alembic(run_mkdir=False, command_name='alembic')
if app:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
"""Initialize application object."""
self.init_db(app, **kwargs)
app.config.setdefault('ALEMBIC', {
'script_location': pkg_resources.resource_filename(
'invenio_db', 'alembic'
),
'version_locations': [
(base_entry.name, pkg_resources.resource_filename(
base_entry.module_name, os.path.join(*base_entry.attrs)
)) for base_entry in pkg_resources.iter_entry_points(
'invenio_db.alembic'
)
],
})
self.alembic.init_app(app)
app.extensions['invenio-db'] = self
app.cli.add_command(db_cmd)
def init_db(self, app, entry_point_group='invenio_db.models', **kwargs):
"""Initialize Flask-SQLAlchemy extension."""
# Setup SQLAlchemy
app.config.setdefault(
'SQLALCHEMY_DATABASE_URI',
'sqlite:///' + os.path.join(app.instance_path, app.name + '.db')
)
app.config.setdefault('SQLALCHEMY_ECHO', False)
# Initialize Flask-SQLAlchemy extension.
database = kwargs.get('db', db)
database.init_app(app)
# Initialize versioning support.
self.init_versioning(app, database, kwargs.get('versioning_manager'))
# Initialize model bases
if entry_point_group:
for base_entry in pkg_resources.iter_entry_points(
entry_point_group):
base_entry.load()
# All models should be loaded by now.
sa.orm.configure_mappers()
# Ensure that versioning classes have been built.
if app.config['DB_VERSIONING']:
manager = self.versioning_manager
if manager.pending_classes:
if not versioning_models_registered(manager, database.Model):
manager.builder.configure_versioned_classes()
elif 'transaction' not in database.metadata.tables:
manager.declarative_base = database.Model
manager.create_transaction_model()
manager.plugins.after_build_tx_class(manager)
|
inveniosoftware/invenio-db
|
invenio_db/shared.py
|
do_sqlite_connect
|
python
|
def do_sqlite_connect(dbapi_connection, connection_record):
# Enable foreign key constraint checking
cursor = dbapi_connection.cursor()
cursor.execute('PRAGMA foreign_keys=ON')
cursor.close()
|
Ensure SQLite checks foreign key constraints.
For further details see "Foreign key support" sections on
https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/shared.py#L82-L91
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Shared database object for Invenio."""
from flask_sqlalchemy import SQLAlchemy as FlaskSQLAlchemy
from sqlalchemy import MetaData, event, util
from sqlalchemy.engine import Engine
from werkzeug.local import LocalProxy
NAMING_CONVENTION = util.immutabledict({
'ix': 'ix_%(column_0_label)s',
'uq': 'uq_%(table_name)s_%(column_0_name)s',
'ck': 'ck_%(table_name)s_%(constraint_name)s',
'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
'pk': 'pk_%(table_name)s',
})
"""Configuration for constraint naming conventions."""
metadata = MetaData(naming_convention=NAMING_CONVENTION)
"""Default database metadata object holding associated schema constructs."""
class SQLAlchemy(FlaskSQLAlchemy):
"""Implement or overide extension methods."""
def apply_driver_hacks(self, app, info, options):
"""Call before engine creation."""
# Don't forget to apply hacks defined on parent object.
super(SQLAlchemy, self).apply_driver_hacks(app, info, options)
if info.drivername == 'sqlite':
connect_args = options.setdefault('connect_args', {})
if 'isolation_level' not in connect_args:
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
connect_args['isolation_level'] = None
if not event.contains(Engine, 'connect', do_sqlite_connect):
event.listen(Engine, 'connect', do_sqlite_connect)
if not event.contains(Engine, 'begin', do_sqlite_begin):
event.listen(Engine, 'begin', do_sqlite_begin)
from sqlite3 import register_adapter
def adapt_proxy(proxy):
"""Get current object and try to adapt it again."""
return proxy._get_current_object()
register_adapter(LocalProxy, adapt_proxy)
elif info.drivername == 'postgresql+psycopg2': # pragma: no cover
from psycopg2.extensions import adapt, register_adapter
def adapt_proxy(proxy):
"""Get current object and try to adapt it again."""
return adapt(proxy._get_current_object())
register_adapter(LocalProxy, adapt_proxy)
elif info.drivername == 'mysql+pymysql': # pragma: no cover
from pymysql import converters
def escape_local_proxy(val, mapping):
"""Get current object and try to adapt it again."""
return converters.escape_item(
val._get_current_object(),
self.engine.dialect.encoding,
mapping=mapping,
)
converters.conversions[LocalProxy] = escape_local_proxy
converters.encoders[LocalProxy] = escape_local_proxy
def do_sqlite_begin(dbapi_connection):
"""Ensure SQLite transaction are started properly.
For further details see "Foreign key support" sections on
https://docs.sqlalchemy.org/en/rel_1_0/dialects/sqlite.html#pysqlite-serializable # noqa
"""
# emit our own BEGIN
dbapi_connection.execute('BEGIN')
db = SQLAlchemy(metadata=metadata)
"""Shared database instance using Flask-SQLAlchemy extension.
This object is initialized during initialization of ``InvenioDB``
extenstion that takes care about loading all entrypoints from key
``invenio_db.models``.
"""
|
inveniosoftware/invenio-db
|
invenio_db/shared.py
|
SQLAlchemy.apply_driver_hacks
|
python
|
def apply_driver_hacks(self, app, info, options):
# Don't forget to apply hacks defined on parent object.
super(SQLAlchemy, self).apply_driver_hacks(app, info, options)
if info.drivername == 'sqlite':
connect_args = options.setdefault('connect_args', {})
if 'isolation_level' not in connect_args:
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
connect_args['isolation_level'] = None
if not event.contains(Engine, 'connect', do_sqlite_connect):
event.listen(Engine, 'connect', do_sqlite_connect)
if not event.contains(Engine, 'begin', do_sqlite_begin):
event.listen(Engine, 'begin', do_sqlite_begin)
from sqlite3 import register_adapter
def adapt_proxy(proxy):
"""Get current object and try to adapt it again."""
return proxy._get_current_object()
register_adapter(LocalProxy, adapt_proxy)
elif info.drivername == 'postgresql+psycopg2': # pragma: no cover
from psycopg2.extensions import adapt, register_adapter
def adapt_proxy(proxy):
"""Get current object and try to adapt it again."""
return adapt(proxy._get_current_object())
register_adapter(LocalProxy, adapt_proxy)
elif info.drivername == 'mysql+pymysql': # pragma: no cover
from pymysql import converters
def escape_local_proxy(val, mapping):
"""Get current object and try to adapt it again."""
return converters.escape_item(
val._get_current_object(),
self.engine.dialect.encoding,
mapping=mapping,
)
converters.conversions[LocalProxy] = escape_local_proxy
converters.encoders[LocalProxy] = escape_local_proxy
|
Call before engine creation.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/shared.py#L32-L79
| null |
class SQLAlchemy(FlaskSQLAlchemy):
"""Implement or overide extension methods."""
|
inveniosoftware/invenio-db
|
invenio_db/cli.py
|
create
|
python
|
def create(verbose):
click.secho('Creating all tables!', fg='yellow', bold=True)
with click.progressbar(_db.metadata.sorted_tables) as bar:
for table in bar:
if verbose:
click.echo(' Creating table {0}'.format(table))
table.create(bind=_db.engine, checkfirst=True)
create_alembic_version_table()
click.secho('Created all tables!', fg='green')
|
Create tables.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/cli.py#L49-L58
|
[
"def create_alembic_version_table():\n \"\"\"Create alembic_version table.\"\"\"\n alembic = current_app.extensions['invenio-db'].alembic\n if not alembic.migration_context._has_version_table():\n alembic.migration_context._ensure_version_table()\n for head in alembic.script_directory.revision_map._real_heads:\n alembic.migration_context.stamp(alembic.script_directory, head)\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for database management."""
from __future__ import absolute_import, print_function
import sys
import click
from click import _termui_impl
from flask import current_app
from flask.cli import with_appcontext
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from werkzeug.local import LocalProxy
from .utils import create_alembic_version_table, drop_alembic_version_table
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
# Fix Python 3 compatibility issue in click
if sys.version_info > (3,):
_termui_impl.long = int # pragma: no cover
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
#
# Database commands
#
@click.group(chain=True)
def db():
"""Database commands."""
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@with_appcontext
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to drop the db?')
@with_appcontext
def drop(verbose):
"""Drop tables."""
click.secho('Dropping all tables!', fg='red', bold=True)
with click.progressbar(reversed(_db.metadata.sorted_tables)) as bar:
for table in bar:
if verbose:
click.echo(' Dropping table {0}'.format(table))
table.drop(bind=_db.engine, checkfirst=True)
drop_alembic_version_table()
click.secho('Dropped all tables!', fg='green')
@db.command()
@with_appcontext
def init():
"""Create database."""
click.secho('Creating database {0}'.format(_db.engine.url),
fg='green')
if not database_exists(str(_db.engine.url)):
create_database(str(_db.engine.url))
@db.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to destroy the db?')
@with_appcontext
def destroy():
"""Drop database."""
click.secho('Destroying database {0}'.format(_db.engine.url),
fg='red', bold=True)
if _db.engine.name == 'sqlite':
try:
drop_database(_db.engine.url)
except FileNotFoundError as e:
click.secho('Sqlite database has not been initialised',
fg='red', bold=True)
else:
drop_database(_db.engine.url)
|
inveniosoftware/invenio-db
|
invenio_db/cli.py
|
drop
|
python
|
def drop(verbose):
click.secho('Dropping all tables!', fg='red', bold=True)
with click.progressbar(reversed(_db.metadata.sorted_tables)) as bar:
for table in bar:
if verbose:
click.echo(' Dropping table {0}'.format(table))
table.drop(bind=_db.engine, checkfirst=True)
drop_alembic_version_table()
click.secho('Dropped all tables!', fg='green')
|
Drop tables.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/cli.py#L67-L76
|
[
"def drop_alembic_version_table():\n \"\"\"Drop alembic_version table.\"\"\"\n if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):\n alembic_version = _db.Table('alembic_version', _db.metadata,\n autoload_with=_db.engine)\n alembic_version.drop(bind=_db.engine)\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for database management."""
from __future__ import absolute_import, print_function
import sys
import click
from click import _termui_impl
from flask import current_app
from flask.cli import with_appcontext
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from werkzeug.local import LocalProxy
from .utils import create_alembic_version_table, drop_alembic_version_table
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
# Fix Python 3 compatibility issue in click
if sys.version_info > (3,):
_termui_impl.long = int # pragma: no cover
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
#
# Database commands
#
@click.group(chain=True)
def db():
"""Database commands."""
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@with_appcontext
def create(verbose):
"""Create tables."""
click.secho('Creating all tables!', fg='yellow', bold=True)
with click.progressbar(_db.metadata.sorted_tables) as bar:
for table in bar:
if verbose:
click.echo(' Creating table {0}'.format(table))
table.create(bind=_db.engine, checkfirst=True)
create_alembic_version_table()
click.secho('Created all tables!', fg='green')
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to drop the db?')
@with_appcontext
@db.command()
@with_appcontext
def init():
"""Create database."""
click.secho('Creating database {0}'.format(_db.engine.url),
fg='green')
if not database_exists(str(_db.engine.url)):
create_database(str(_db.engine.url))
@db.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to destroy the db?')
@with_appcontext
def destroy():
"""Drop database."""
click.secho('Destroying database {0}'.format(_db.engine.url),
fg='red', bold=True)
if _db.engine.name == 'sqlite':
try:
drop_database(_db.engine.url)
except FileNotFoundError as e:
click.secho('Sqlite database has not been initialised',
fg='red', bold=True)
else:
drop_database(_db.engine.url)
|
inveniosoftware/invenio-db
|
invenio_db/cli.py
|
init
|
python
|
def init():
click.secho('Creating database {0}'.format(_db.engine.url),
fg='green')
if not database_exists(str(_db.engine.url)):
create_database(str(_db.engine.url))
|
Create database.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/cli.py#L81-L86
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for database management."""
from __future__ import absolute_import, print_function
import sys
import click
from click import _termui_impl
from flask import current_app
from flask.cli import with_appcontext
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from werkzeug.local import LocalProxy
from .utils import create_alembic_version_table, drop_alembic_version_table
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
# Fix Python 3 compatibility issue in click
if sys.version_info > (3,):
_termui_impl.long = int # pragma: no cover
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
#
# Database commands
#
@click.group(chain=True)
def db():
"""Database commands."""
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@with_appcontext
def create(verbose):
"""Create tables."""
click.secho('Creating all tables!', fg='yellow', bold=True)
with click.progressbar(_db.metadata.sorted_tables) as bar:
for table in bar:
if verbose:
click.echo(' Creating table {0}'.format(table))
table.create(bind=_db.engine, checkfirst=True)
create_alembic_version_table()
click.secho('Created all tables!', fg='green')
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to drop the db?')
@with_appcontext
def drop(verbose):
"""Drop tables."""
click.secho('Dropping all tables!', fg='red', bold=True)
with click.progressbar(reversed(_db.metadata.sorted_tables)) as bar:
for table in bar:
if verbose:
click.echo(' Dropping table {0}'.format(table))
table.drop(bind=_db.engine, checkfirst=True)
drop_alembic_version_table()
click.secho('Dropped all tables!', fg='green')
@db.command()
@with_appcontext
@db.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to destroy the db?')
@with_appcontext
def destroy():
"""Drop database."""
click.secho('Destroying database {0}'.format(_db.engine.url),
fg='red', bold=True)
if _db.engine.name == 'sqlite':
try:
drop_database(_db.engine.url)
except FileNotFoundError as e:
click.secho('Sqlite database has not been initialised',
fg='red', bold=True)
else:
drop_database(_db.engine.url)
|
inveniosoftware/invenio-db
|
invenio_db/cli.py
|
destroy
|
python
|
def destroy():
click.secho('Destroying database {0}'.format(_db.engine.url),
fg='red', bold=True)
if _db.engine.name == 'sqlite':
try:
drop_database(_db.engine.url)
except FileNotFoundError as e:
click.secho('Sqlite database has not been initialised',
fg='red', bold=True)
else:
drop_database(_db.engine.url)
|
Drop database.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/cli.py#L94-L105
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line interface for database management."""
from __future__ import absolute_import, print_function
import sys
import click
from click import _termui_impl
from flask import current_app
from flask.cli import with_appcontext
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from werkzeug.local import LocalProxy
from .utils import create_alembic_version_table, drop_alembic_version_table
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
# Fix Python 3 compatibility issue in click
if sys.version_info > (3,):
_termui_impl.long = int # pragma: no cover
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
#
# Database commands
#
@click.group(chain=True)
def db():
"""Database commands."""
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@with_appcontext
def create(verbose):
"""Create tables."""
click.secho('Creating all tables!', fg='yellow', bold=True)
with click.progressbar(_db.metadata.sorted_tables) as bar:
for table in bar:
if verbose:
click.echo(' Creating table {0}'.format(table))
table.create(bind=_db.engine, checkfirst=True)
create_alembic_version_table()
click.secho('Created all tables!', fg='green')
@db.command()
@click.option('-v', '--verbose', is_flag=True, default=False)
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to drop the db?')
@with_appcontext
def drop(verbose):
"""Drop tables."""
click.secho('Dropping all tables!', fg='red', bold=True)
with click.progressbar(reversed(_db.metadata.sorted_tables)) as bar:
for table in bar:
if verbose:
click.echo(' Dropping table {0}'.format(table))
table.drop(bind=_db.engine, checkfirst=True)
drop_alembic_version_table()
click.secho('Dropped all tables!', fg='green')
@db.command()
@with_appcontext
def init():
"""Create database."""
click.secho('Creating database {0}'.format(_db.engine.url),
fg='green')
if not database_exists(str(_db.engine.url)):
create_database(str(_db.engine.url))
@db.command()
@click.option('--yes-i-know', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Do you know that you are going to destroy the db?')
@with_appcontext
|
inveniosoftware/invenio-db
|
invenio_db/utils.py
|
rebuild_encrypted_properties
|
python
|
def rebuild_encrypted_properties(old_key, model, properties):
inspector = reflection.Inspector.from_engine(db.engine)
primary_key_names = inspector.get_primary_keys(model.__tablename__)
new_secret_key = current_app.secret_key
db.session.expunge_all()
try:
with db.session.begin_nested():
current_app.secret_key = old_key
db_columns = []
for primary_key in primary_key_names:
db_columns.append(getattr(model, primary_key))
for prop in properties:
db_columns.append(getattr(model, prop))
old_rows = db.session.query(*db_columns).all()
except Exception as e:
current_app.logger.error(
'Exception occurred while reading encrypted properties. '
'Try again before starting the server with the new secret key.')
raise e
finally:
current_app.secret_key = new_secret_key
db.session.expunge_all()
for old_row in old_rows:
primary_keys, old_entries = old_row[:len(primary_key_names)], \
old_row[len(primary_key_names):]
primary_key_fields = dict(zip(primary_key_names, primary_keys))
update_values = dict(zip(properties, old_entries))
model.query.filter_by(**primary_key_fields).\
update(update_values)
db.session.commit()
|
Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/utils.py#L21-L58
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
# from .signals import secret_key_changed
"""Invenio-DB utility functions."""
from flask import current_app
from sqlalchemy.engine import reflection
from werkzeug.local import LocalProxy
from .shared import db
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
def create_alembic_version_table():
"""Create alembic_version table."""
alembic = current_app.extensions['invenio-db'].alembic
if not alembic.migration_context._has_version_table():
alembic.migration_context._ensure_version_table()
for head in alembic.script_directory.revision_map._real_heads:
alembic.migration_context.stamp(alembic.script_directory, head)
def drop_alembic_version_table():
"""Drop alembic_version table."""
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
alembic_version = _db.Table('alembic_version', _db.metadata,
autoload_with=_db.engine)
alembic_version.drop(bind=_db.engine)
def versioning_model_classname(manager, model):
"""Get the name of the versioned model class."""
if manager.options.get('use_module_name', True):
return '%s%sVersion' % (
model.__module__.title().replace('.', ''), model.__name__)
else:
return '%sVersion' % (model.__name__,)
def versioning_models_registered(manager, base):
"""Return True if all versioning models have been registered."""
declared_models = base._decl_class_registry.keys()
return all(versioning_model_classname(manager, c) in declared_models
for c in manager.pending_classes)
|
inveniosoftware/invenio-db
|
invenio_db/utils.py
|
create_alembic_version_table
|
python
|
def create_alembic_version_table():
alembic = current_app.extensions['invenio-db'].alembic
if not alembic.migration_context._has_version_table():
alembic.migration_context._ensure_version_table()
for head in alembic.script_directory.revision_map._real_heads:
alembic.migration_context.stamp(alembic.script_directory, head)
|
Create alembic_version table.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/utils.py#L61-L67
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
# from .signals import secret_key_changed
"""Invenio-DB utility functions."""
from flask import current_app
from sqlalchemy.engine import reflection
from werkzeug.local import LocalProxy
from .shared import db
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
def rebuild_encrypted_properties(old_key, model, properties):
"""Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
"""
inspector = reflection.Inspector.from_engine(db.engine)
primary_key_names = inspector.get_primary_keys(model.__tablename__)
new_secret_key = current_app.secret_key
db.session.expunge_all()
try:
with db.session.begin_nested():
current_app.secret_key = old_key
db_columns = []
for primary_key in primary_key_names:
db_columns.append(getattr(model, primary_key))
for prop in properties:
db_columns.append(getattr(model, prop))
old_rows = db.session.query(*db_columns).all()
except Exception as e:
current_app.logger.error(
'Exception occurred while reading encrypted properties. '
'Try again before starting the server with the new secret key.')
raise e
finally:
current_app.secret_key = new_secret_key
db.session.expunge_all()
for old_row in old_rows:
primary_keys, old_entries = old_row[:len(primary_key_names)], \
old_row[len(primary_key_names):]
primary_key_fields = dict(zip(primary_key_names, primary_keys))
update_values = dict(zip(properties, old_entries))
model.query.filter_by(**primary_key_fields).\
update(update_values)
db.session.commit()
def drop_alembic_version_table():
"""Drop alembic_version table."""
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
alembic_version = _db.Table('alembic_version', _db.metadata,
autoload_with=_db.engine)
alembic_version.drop(bind=_db.engine)
def versioning_model_classname(manager, model):
"""Get the name of the versioned model class."""
if manager.options.get('use_module_name', True):
return '%s%sVersion' % (
model.__module__.title().replace('.', ''), model.__name__)
else:
return '%sVersion' % (model.__name__,)
def versioning_models_registered(manager, base):
"""Return True if all versioning models have been registered."""
declared_models = base._decl_class_registry.keys()
return all(versioning_model_classname(manager, c) in declared_models
for c in manager.pending_classes)
|
inveniosoftware/invenio-db
|
invenio_db/utils.py
|
drop_alembic_version_table
|
python
|
def drop_alembic_version_table():
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
alembic_version = _db.Table('alembic_version', _db.metadata,
autoload_with=_db.engine)
alembic_version.drop(bind=_db.engine)
|
Drop alembic_version table.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/utils.py#L70-L75
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
# from .signals import secret_key_changed
"""Invenio-DB utility functions."""
from flask import current_app
from sqlalchemy.engine import reflection
from werkzeug.local import LocalProxy
from .shared import db
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
def rebuild_encrypted_properties(old_key, model, properties):
"""Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
"""
inspector = reflection.Inspector.from_engine(db.engine)
primary_key_names = inspector.get_primary_keys(model.__tablename__)
new_secret_key = current_app.secret_key
db.session.expunge_all()
try:
with db.session.begin_nested():
current_app.secret_key = old_key
db_columns = []
for primary_key in primary_key_names:
db_columns.append(getattr(model, primary_key))
for prop in properties:
db_columns.append(getattr(model, prop))
old_rows = db.session.query(*db_columns).all()
except Exception as e:
current_app.logger.error(
'Exception occurred while reading encrypted properties. '
'Try again before starting the server with the new secret key.')
raise e
finally:
current_app.secret_key = new_secret_key
db.session.expunge_all()
for old_row in old_rows:
primary_keys, old_entries = old_row[:len(primary_key_names)], \
old_row[len(primary_key_names):]
primary_key_fields = dict(zip(primary_key_names, primary_keys))
update_values = dict(zip(properties, old_entries))
model.query.filter_by(**primary_key_fields).\
update(update_values)
db.session.commit()
def create_alembic_version_table():
"""Create alembic_version table."""
alembic = current_app.extensions['invenio-db'].alembic
if not alembic.migration_context._has_version_table():
alembic.migration_context._ensure_version_table()
for head in alembic.script_directory.revision_map._real_heads:
alembic.migration_context.stamp(alembic.script_directory, head)
def versioning_model_classname(manager, model):
"""Get the name of the versioned model class."""
if manager.options.get('use_module_name', True):
return '%s%sVersion' % (
model.__module__.title().replace('.', ''), model.__name__)
else:
return '%sVersion' % (model.__name__,)
def versioning_models_registered(manager, base):
"""Return True if all versioning models have been registered."""
declared_models = base._decl_class_registry.keys()
return all(versioning_model_classname(manager, c) in declared_models
for c in manager.pending_classes)
|
inveniosoftware/invenio-db
|
invenio_db/utils.py
|
versioning_model_classname
|
python
|
def versioning_model_classname(manager, model):
if manager.options.get('use_module_name', True):
return '%s%sVersion' % (
model.__module__.title().replace('.', ''), model.__name__)
else:
return '%sVersion' % (model.__name__,)
|
Get the name of the versioned model class.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/utils.py#L78-L84
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
# from .signals import secret_key_changed
"""Invenio-DB utility functions."""
from flask import current_app
from sqlalchemy.engine import reflection
from werkzeug.local import LocalProxy
from .shared import db
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
def rebuild_encrypted_properties(old_key, model, properties):
"""Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
"""
inspector = reflection.Inspector.from_engine(db.engine)
primary_key_names = inspector.get_primary_keys(model.__tablename__)
new_secret_key = current_app.secret_key
db.session.expunge_all()
try:
with db.session.begin_nested():
current_app.secret_key = old_key
db_columns = []
for primary_key in primary_key_names:
db_columns.append(getattr(model, primary_key))
for prop in properties:
db_columns.append(getattr(model, prop))
old_rows = db.session.query(*db_columns).all()
except Exception as e:
current_app.logger.error(
'Exception occurred while reading encrypted properties. '
'Try again before starting the server with the new secret key.')
raise e
finally:
current_app.secret_key = new_secret_key
db.session.expunge_all()
for old_row in old_rows:
primary_keys, old_entries = old_row[:len(primary_key_names)], \
old_row[len(primary_key_names):]
primary_key_fields = dict(zip(primary_key_names, primary_keys))
update_values = dict(zip(properties, old_entries))
model.query.filter_by(**primary_key_fields).\
update(update_values)
db.session.commit()
def create_alembic_version_table():
"""Create alembic_version table."""
alembic = current_app.extensions['invenio-db'].alembic
if not alembic.migration_context._has_version_table():
alembic.migration_context._ensure_version_table()
for head in alembic.script_directory.revision_map._real_heads:
alembic.migration_context.stamp(alembic.script_directory, head)
def drop_alembic_version_table():
"""Drop alembic_version table."""
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
alembic_version = _db.Table('alembic_version', _db.metadata,
autoload_with=_db.engine)
alembic_version.drop(bind=_db.engine)
def versioning_models_registered(manager, base):
"""Return True if all versioning models have been registered."""
declared_models = base._decl_class_registry.keys()
return all(versioning_model_classname(manager, c) in declared_models
for c in manager.pending_classes)
|
inveniosoftware/invenio-db
|
invenio_db/utils.py
|
versioning_models_registered
|
python
|
def versioning_models_registered(manager, base):
declared_models = base._decl_class_registry.keys()
return all(versioning_model_classname(manager, c) in declared_models
for c in manager.pending_classes)
|
Return True if all versioning models have been registered.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/utils.py#L87-L91
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
# from .signals import secret_key_changed
"""Invenio-DB utility functions."""
from flask import current_app
from sqlalchemy.engine import reflection
from werkzeug.local import LocalProxy
from .shared import db
_db = LocalProxy(lambda: current_app.extensions['sqlalchemy'].db)
def rebuild_encrypted_properties(old_key, model, properties):
"""Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
:param old_key: old SECRET_KEY.
:param model: the affected db model.
:param properties: list of properties to rebuild.
"""
inspector = reflection.Inspector.from_engine(db.engine)
primary_key_names = inspector.get_primary_keys(model.__tablename__)
new_secret_key = current_app.secret_key
db.session.expunge_all()
try:
with db.session.begin_nested():
current_app.secret_key = old_key
db_columns = []
for primary_key in primary_key_names:
db_columns.append(getattr(model, primary_key))
for prop in properties:
db_columns.append(getattr(model, prop))
old_rows = db.session.query(*db_columns).all()
except Exception as e:
current_app.logger.error(
'Exception occurred while reading encrypted properties. '
'Try again before starting the server with the new secret key.')
raise e
finally:
current_app.secret_key = new_secret_key
db.session.expunge_all()
for old_row in old_rows:
primary_keys, old_entries = old_row[:len(primary_key_names)], \
old_row[len(primary_key_names):]
primary_key_fields = dict(zip(primary_key_names, primary_keys))
update_values = dict(zip(properties, old_entries))
model.query.filter_by(**primary_key_fields).\
update(update_values)
db.session.commit()
def create_alembic_version_table():
"""Create alembic_version table."""
alembic = current_app.extensions['invenio-db'].alembic
if not alembic.migration_context._has_version_table():
alembic.migration_context._ensure_version_table()
for head in alembic.script_directory.revision_map._real_heads:
alembic.migration_context.stamp(alembic.script_directory, head)
def drop_alembic_version_table():
"""Drop alembic_version table."""
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
alembic_version = _db.Table('alembic_version', _db.metadata,
autoload_with=_db.engine)
alembic_version.drop(bind=_db.engine)
def versioning_model_classname(manager, model):
"""Get the name of the versioned model class."""
if manager.options.get('use_module_name', True):
return '%s%sVersion' % (
model.__module__.title().replace('.', ''), model.__name__)
else:
return '%sVersion' % (model.__name__,)
|
inveniosoftware/invenio-db
|
invenio_db/alembic/35c1075e6360_force_naming_convention.py
|
upgrade
|
python
|
def upgrade():
op.execute('COMMIT') # See https://bitbucket.org/zzzeek/alembic/issue/123
ctx = op.get_context()
metadata = ctx.opts['target_metadata']
metadata.naming_convention = NAMING_CONVENTION
metadata.bind = ctx.connection.engine
insp = Inspector.from_engine(ctx.connection.engine)
for table_name in insp.get_table_names():
if table_name not in metadata.tables:
continue
table = metadata.tables[table_name]
ixs = {}
uqs = {}
fks = {}
for ix in insp.get_indexes(table_name):
ixs[tuple(ix['column_names'])] = ix
for uq in insp.get_unique_constraints(table_name):
uqs[tuple(uq['column_names'])] = uq
for fk in insp.get_foreign_keys(table_name):
fks[(tuple(fk['constrained_columns']), fk['referred_table'])] = fk
with op.batch_alter_table(
table_name, naming_convention=NAMING_CONVENTION) as batch_op:
for c in list(table.constraints) + list(table.indexes):
key = None
if isinstance(c, sa.schema.ForeignKeyConstraint):
key = (tuple(c.column_keys), c.referred_table.name)
fk = fks.get(key)
if fk and c.name != fk['name']:
batch_op.drop_constraint(
fk['name'], type_='foreignkey')
batch_op.create_foreign_key(
op.f(c.name), fk['referred_table'],
fk['constrained_columns'],
fk['referred_columns'],
**fk['options']
)
elif isinstance(c, sa.schema.UniqueConstraint):
key = tuple(c.columns.keys())
uq = uqs.get(key)
if uq and c.name != uq['name']:
batch_op.drop_constraint(uq['name'], type_='unique')
batch_op.create_unique_constraint(
op.f(c.name), uq['column_names'])
elif isinstance(c, sa.schema.CheckConstraint):
util.warn('Update {0.table.name} CHECK {0.name} '
'manually'.format(c))
elif isinstance(c, sa.schema.Index):
key = tuple(c.columns.keys())
ix = ixs.get(key)
if ix and c.name != ix['name']:
batch_op.drop_index(ix['name'])
batch_op.create_index(
op.f(c.name), ix['column_names'],
unique=ix['unique'],
)
elif isinstance(c, sa.schema.PrimaryKeyConstraint) or \
c.name == '_unnamed_':
# NOTE we don't care about primary keys since they have
# specific syntax.
pass
else:
raise RuntimeError('Missing {0!r}'.format(c))
|
Upgrade database.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/alembic/35c1075e6360_force_naming_convention.py#L31-L98
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Force naming convention."""
import sqlalchemy as sa
from alembic import op, util
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = '35c1075e6360'
down_revision = 'dbdbc1b19cf2'
branch_labels = ()
depends_on = None
NAMING_CONVENTION = sa.util.immutabledict({
'ix': 'ix_%(column_0_label)s',
'uq': 'uq_%(table_name)s_%(column_0_name)s',
'ck': 'ck_%(table_name)s_%(constraint_name)s',
'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
'pk': 'pk_%(table_name)s',
})
"""Configuration for constraint naming conventions (v1.0.0b5)."""
def downgrade():
"""Downgrade database."""
|
inveniosoftware/invenio-db
|
invenio_db/alembic/dbdbc1b19cf2_create_transaction_table.py
|
upgrade
|
python
|
def upgrade():
op.create_table(
'transaction',
sa.Column('issued_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('remote_addr', sa.String(length=50), nullable=True),
)
op.create_primary_key('pk_transaction', 'transaction', ['id'])
if op._proxy.migration_context.dialect.supports_sequences:
op.execute(CreateSequence(Sequence('transaction_id_seq')))
|
Update database.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/alembic/dbdbc1b19cf2_create_transaction_table.py#L23-L33
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Create transaction table."""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.schema import Sequence, CreateSequence, \
DropSequence
# revision identifiers, used by Alembic.
revision = 'dbdbc1b19cf2'
down_revision = '96e796392533'
branch_labels = ()
depends_on = None
def downgrade():
"""Downgrade database."""
op.drop_table('transaction')
if op._proxy.migration_context.dialect.supports_sequences:
op.execute(DropSequence(Sequence('transaction_id_seq')))
|
inveniosoftware/invenio-db
|
invenio_db/alembic/dbdbc1b19cf2_create_transaction_table.py
|
downgrade
|
python
|
def downgrade():
op.drop_table('transaction')
if op._proxy.migration_context.dialect.supports_sequences:
op.execute(DropSequence(Sequence('transaction_id_seq')))
|
Downgrade database.
|
train
|
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/alembic/dbdbc1b19cf2_create_transaction_table.py#L36-L40
| null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Create transaction table."""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.schema import Sequence, CreateSequence, \
DropSequence
# revision identifiers, used by Alembic.
revision = 'dbdbc1b19cf2'
down_revision = '96e796392533'
branch_labels = ()
depends_on = None
def upgrade():
"""Update database."""
op.create_table(
'transaction',
sa.Column('issued_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('remote_addr', sa.String(length=50), nullable=True),
)
op.create_primary_key('pk_transaction', 'transaction', ['id'])
if op._proxy.migration_context.dialect.supports_sequences:
op.execute(CreateSequence(Sequence('transaction_id_seq')))
|
klen/flask-pw
|
flask_pw/models.py
|
Signal.connect
|
python
|
def connect(self, receiver):
if not callable(receiver):
raise ValueError('Invalid receiver: %s' % receiver)
self.receivers.append(receiver)
|
Append receiver.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/models.py#L56-L60
| null |
class Signal:
"""Simplest signals implementation.
::
@Model.post_save
def example(instance, created=False):
pass
"""
__slots__ = 'receivers'
def __init__(self):
"""Initialize the signal."""
self.receivers = []
def __call__(self, receiver):
"""Support decorators.."""
self.connect(receiver)
return receiver
def disconnect(self, receiver):
"""Remove receiver."""
try:
self.receivers.remove(receiver)
except ValueError:
raise ValueError('Unknown receiver: %s' % receiver)
def send(self, instance, *args, **kwargs):
"""Send signal."""
for receiver in self.receivers:
receiver(instance, *args, **kwargs)
|
klen/flask-pw
|
flask_pw/models.py
|
Signal.disconnect
|
python
|
def disconnect(self, receiver):
try:
self.receivers.remove(receiver)
except ValueError:
raise ValueError('Unknown receiver: %s' % receiver)
|
Remove receiver.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/models.py#L67-L72
| null |
class Signal:
"""Simplest signals implementation.
::
@Model.post_save
def example(instance, created=False):
pass
"""
__slots__ = 'receivers'
def __init__(self):
"""Initialize the signal."""
self.receivers = []
def connect(self, receiver):
"""Append receiver."""
if not callable(receiver):
raise ValueError('Invalid receiver: %s' % receiver)
self.receivers.append(receiver)
def __call__(self, receiver):
"""Support decorators.."""
self.connect(receiver)
return receiver
def send(self, instance, *args, **kwargs):
"""Send signal."""
for receiver in self.receivers:
receiver(instance, *args, **kwargs)
|
klen/flask-pw
|
flask_pw/models.py
|
Signal.send
|
python
|
def send(self, instance, *args, **kwargs):
for receiver in self.receivers:
receiver(instance, *args, **kwargs)
|
Send signal.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/models.py#L74-L77
| null |
class Signal:
"""Simplest signals implementation.
::
@Model.post_save
def example(instance, created=False):
pass
"""
__slots__ = 'receivers'
def __init__(self):
"""Initialize the signal."""
self.receivers = []
def connect(self, receiver):
"""Append receiver."""
if not callable(receiver):
raise ValueError('Invalid receiver: %s' % receiver)
self.receivers.append(receiver)
def __call__(self, receiver):
"""Support decorators.."""
self.connect(receiver)
return receiver
def disconnect(self, receiver):
"""Remove receiver."""
try:
self.receivers.remove(receiver)
except ValueError:
raise ValueError('Unknown receiver: %s' % receiver)
|
klen/flask-pw
|
flask_pw/models.py
|
Model.select
|
python
|
def select(cls, *args, **kwargs):
query = super(Model, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query
|
Support read slaves.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/models.py#L105-L109
| null |
class Model(with_metaclass(BaseSignalModel, pw.Model)):
@classmethod
@classmethod
def raw(cls, *args, **kwargs):
query = super(Model, cls).raw(*args, **kwargs)
if query._sql.lower().startswith('select'):
query.database = cls._get_read_database()
return query
@property
def pk(self):
"""Return primary key value."""
return self._get_pk_value()
@classmethod
def get_or_none(cls, *args, **kwargs):
try:
return cls.get(*args, **kwargs)
except cls.DoesNotExist:
return None
def save(self, force_insert=False, **kwargs):
"""Send signals."""
created = force_insert or not bool(self.pk)
self.pre_save.send(self, created=created)
super(Model, self).save(force_insert=force_insert, **kwargs)
self.post_save.send(self, created=created)
def delete_instance(self, *args, **kwargs):
"""Send signals."""
self.pre_delete.send(self)
super(Model, self).delete_instance(*args, **kwargs)
self.post_delete.send(self)
@classmethod
def _get_read_database(cls):
if not cls._meta.read_slaves:
return cls._meta.database
current_idx = getattr(cls, '_read_slave_idx', -1)
cls._read_slave_idx = (current_idx + 1) % len(cls._meta.read_slaves)
return cls._meta.read_slaves[cls._read_slave_idx]
|
klen/flask-pw
|
flask_pw/models.py
|
Model.save
|
python
|
def save(self, force_insert=False, **kwargs):
created = force_insert or not bool(self.pk)
self.pre_save.send(self, created=created)
super(Model, self).save(force_insert=force_insert, **kwargs)
self.post_save.send(self, created=created)
|
Send signals.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/models.py#L130-L135
| null |
class Model(with_metaclass(BaseSignalModel, pw.Model)):
@classmethod
def select(cls, *args, **kwargs):
"""Support read slaves."""
query = super(Model, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query
@classmethod
def raw(cls, *args, **kwargs):
query = super(Model, cls).raw(*args, **kwargs)
if query._sql.lower().startswith('select'):
query.database = cls._get_read_database()
return query
@property
def pk(self):
"""Return primary key value."""
return self._get_pk_value()
@classmethod
def get_or_none(cls, *args, **kwargs):
try:
return cls.get(*args, **kwargs)
except cls.DoesNotExist:
return None
def delete_instance(self, *args, **kwargs):
"""Send signals."""
self.pre_delete.send(self)
super(Model, self).delete_instance(*args, **kwargs)
self.post_delete.send(self)
@classmethod
def _get_read_database(cls):
if not cls._meta.read_slaves:
return cls._meta.database
current_idx = getattr(cls, '_read_slave_idx', -1)
cls._read_slave_idx = (current_idx + 1) % len(cls._meta.read_slaves)
return cls._meta.read_slaves[cls._read_slave_idx]
|
klen/flask-pw
|
flask_pw/models.py
|
Model.delete_instance
|
python
|
def delete_instance(self, *args, **kwargs):
self.pre_delete.send(self)
super(Model, self).delete_instance(*args, **kwargs)
self.post_delete.send(self)
|
Send signals.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/models.py#L137-L141
| null |
class Model(with_metaclass(BaseSignalModel, pw.Model)):
@classmethod
def select(cls, *args, **kwargs):
"""Support read slaves."""
query = super(Model, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query
@classmethod
def raw(cls, *args, **kwargs):
query = super(Model, cls).raw(*args, **kwargs)
if query._sql.lower().startswith('select'):
query.database = cls._get_read_database()
return query
@property
def pk(self):
"""Return primary key value."""
return self._get_pk_value()
@classmethod
def get_or_none(cls, *args, **kwargs):
try:
return cls.get(*args, **kwargs)
except cls.DoesNotExist:
return None
def save(self, force_insert=False, **kwargs):
"""Send signals."""
created = force_insert or not bool(self.pk)
self.pre_save.send(self, created=created)
super(Model, self).save(force_insert=force_insert, **kwargs)
self.post_save.send(self, created=created)
@classmethod
def _get_read_database(cls):
if not cls._meta.read_slaves:
return cls._meta.database
current_idx = getattr(cls, '_read_slave_idx', -1)
cls._read_slave_idx = (current_idx + 1) % len(cls._meta.read_slaves)
return cls._meta.read_slaves[cls._read_slave_idx]
|
klen/flask-pw
|
flask_pw/__init__.py
|
get_database
|
python
|
def get_database(obj, **params):
if isinstance(obj, string_types):
return connect(obj, **params)
return obj
|
Get database from given URI/Object.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L244-L248
| null |
import logging
import os
from importlib import import_module
import peewee as pw
from cached_property import cached_property
from flask._compat import string_types
from peewee_migrate.router import Router
from playhouse.db_url import connect
from .models import Model, BaseSignalModel, Choices # noqa
__license__ = "MIT"
__project__ = "Flask-PW"
__version__ = "1.1.3"
LOGGER = logging.getLogger(__name__)
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.init_app
|
python
|
def init_app(self, app, database=None):
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
|
Initialize application.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L30-L70
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.close
|
python
|
def close(self, response):
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
|
Close connection to database.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L77-L82
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.Model
|
python
|
def Model(self):
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
|
Bind model to self database.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L85-L93
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.models
|
python
|
def models(self):
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
|
Return self.application models.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L96-L115
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.cmd_create
|
python
|
def cmd_create(self, name, auto=False):
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
|
Create a new migration.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L117-L130
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.cmd_migrate
|
python
|
def cmd_migrate(self, name=None, fake=False):
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
|
Run migrations.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L132-L145
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.cmd_rollback
|
python
|
def cmd_rollback(self, name):
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
|
Rollback migrations.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L147-L158
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.cmd_list
|
python
|
def cmd_list(self):
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
|
List migrations.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L160-L175
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.cmd_merge
|
python
|
def cmd_merge(self):
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
|
Merge migrations.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L177-L188
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
@cached_property
def manager(self):
"""Integrate a Flask-Script."""
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
klen/flask-pw
|
flask_pw/__init__.py
|
Peewee.manager
|
python
|
def manager(self):
from flask_script import Manager, Command
manager = Manager(usage="Migrate database.")
manager.add_command('create', Command(self.cmd_create))
manager.add_command('migrate', Command(self.cmd_migrate))
manager.add_command('rollback', Command(self.cmd_rollback))
manager.add_command('list', Command(self.cmd_list))
manager.add_command('merge', Command(self.cmd_merge))
return manager
|
Integrate a Flask-Script.
|
train
|
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L191-L202
| null |
class Peewee(object):
def __init__(self, app=None):
"""Initialize the plugin."""
self.app = app
self.database = pw.Proxy()
if app is not None:
self.init_app(app)
def init_app(self, app, database=None):
"""Initialize application."""
# Register application
if not app:
raise RuntimeError('Invalid application.')
self.app = app
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['peewee'] = self
app.config.setdefault('PEEWEE_CONNECTION_PARAMS', {})
app.config.setdefault('PEEWEE_DATABASE_URI', 'sqlite:///peewee.sqlite')
app.config.setdefault('PEEWEE_MANUAL', False)
app.config.setdefault('PEEWEE_MIGRATE_DIR', 'migrations')
app.config.setdefault('PEEWEE_MIGRATE_TABLE', 'migratehistory')
app.config.setdefault('PEEWEE_MODELS_CLASS', Model)
app.config.setdefault('PEEWEE_MODELS_IGNORE', [])
app.config.setdefault('PEEWEE_MODELS_MODULE', '')
app.config.setdefault('PEEWEE_READ_SLAVES', '')
app.config.setdefault('PEEWEE_USE_READ_SLAVES', True)
# Initialize database
params = app.config['PEEWEE_CONNECTION_PARAMS']
database = database or app.config.get('PEEWEE_DATABASE_URI')
if not database:
raise RuntimeError('Invalid database.')
database = get_database(database, **params)
slaves = app.config['PEEWEE_READ_SLAVES']
if isinstance(slaves, string_types):
slaves = slaves.split(',')
self.slaves = [get_database(slave, **params) for slave in slaves if slave]
self.database.initialize(database)
if self.database.database == ':memory:':
app.config['PEEWEE_MANUAL'] = True
if not app.config['PEEWEE_MANUAL']:
app.before_request(self.connect)
app.teardown_request(self.close)
def connect(self):
"""Initialize connection to databse."""
LOGGER.info('Connecting [%s]', os.getpid())
return self.database.connect()
def close(self, response):
"""Close connection to database."""
LOGGER.info('Closing [%s]', os.getpid())
if not self.database.is_closed():
self.database.close()
return response
@cached_property
def Model(self):
"""Bind model to self database."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
meta_params = {'database': self.database}
if self.slaves and self.app.config['PEEWEE_USE_READ_SLAVES']:
meta_params['read_slaves'] = self.slaves
Meta = type('Meta', (), meta_params)
return type('Model', (Model_,), {'Meta': Meta})
@property
def models(self):
"""Return self.application models."""
Model_ = self.app.config['PEEWEE_MODELS_CLASS']
ignore = self.app.config['PEEWEE_MODELS_IGNORE']
models = []
if Model_ is not Model:
try:
mod = import_module(self.app.config['PEEWEE_MODELS_MODULE'])
for model in dir(mod):
models = getattr(mod, model)
if not isinstance(model, pw.Model):
continue
models.append(models)
except ImportError:
return models
elif isinstance(Model_, BaseSignalModel):
models = BaseSignalModel.models
return [m for m in models if m._meta.name not in ignore]
def cmd_create(self, name, auto=False):
"""Create a new migration."""
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
if auto:
auto = self.models
router.create(name, auto=auto)
def cmd_migrate(self, name=None, fake=False):
"""Run migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
migrations = router.run(name, fake=fake)
if migrations:
LOGGER.warn('Migrations are completed: %s' % ', '.join(migrations))
def cmd_rollback(self, name):
"""Rollback migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('INFO')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.rollback(name)
def cmd_list(self):
"""List migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
LOGGER.info('Migrations are done:')
LOGGER.info('\n'.join(router.done))
LOGGER.info('')
LOGGER.info('Migrations are undone:')
LOGGER.info('\n'.join(router.diff))
def cmd_merge(self):
"""Merge migrations."""
from peewee_migrate.router import Router, LOGGER
LOGGER.setLevel('DEBUG')
LOGGER.propagate = 0
router = Router(self.database,
migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'],
migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
router.merge()
@cached_property
@cached_property
def cli(self):
import click
@click.group()
def cli():
"""Peewee Migrations."""
from flask import current_app
if self.app is None:
self.init_app(current_app)
@cli.command()
@click.argument('name')
@click.option('--auto', is_flag=True)
def create(name, auto=False):
"""Create a new migration."""
return self.cmd_create(name, auto)
@cli.command()
@click.argument('name', default=None, required=False)
@click.option('--fake', is_flag=True)
def migrate(name, fake=False):
"""Run migrations."""
return self.cmd_migrate(name, fake)
@cli.command()
@click.argument('name')
def rollback(name):
"""Rollback migrations."""
return self.cmd_rollback(name)
@cli.command()
def list():
"""List migrations."""
return self.cmd_list()
return cli
|
xolox/python-rotate-backups
|
rotate_backups/cli.py
|
main
|
python
|
def main():
    """Command line interface for the ``rotate-backups`` program."""
    # Install colored logging (also forwarded to syslog) before any logging.
    coloredlogs.install(syslog=True)
    # Command line option defaults.
    rotation_scheme = {}
    kw = dict(include_list=[], exclude_list=[])  # keyword args for RotateBackups
    parallel = False
    use_sudo = False
    # Internal state.
    selected_locations = []
    # Parse the command line arguments.
    try:
        # NOTE(review): the short-option string contains 'r' twice ('jpr' and
        # 'r:'); getopt honors the first occurrence so '-r' acts as a flag --
        # the trailing 'r:' looks like a leftover, confirm upstream.
        options, arguments = getopt.getopt(sys.argv[1:], 'M:H:d:w:m:y:I:x:jpri:c:r:uC:nvqh', [
            'minutely=', 'hourly=', 'daily=', 'weekly=', 'monthly=', 'yearly=',
            'include=', 'exclude=', 'parallel', 'prefer-recent', 'relaxed',
            'ionice=', 'config=', 'use-sudo', 'dry-run', 'removal-command=',
            'verbose', 'quiet', 'help',
        ])
        for option, value in options:
            # Retention counts accept numbers, expressions or 'always'.
            if option in ('-M', '--minutely'):
                rotation_scheme['minutely'] = coerce_retention_period(value)
            elif option in ('-H', '--hourly'):
                rotation_scheme['hourly'] = coerce_retention_period(value)
            elif option in ('-d', '--daily'):
                rotation_scheme['daily'] = coerce_retention_period(value)
            elif option in ('-w', '--weekly'):
                rotation_scheme['weekly'] = coerce_retention_period(value)
            elif option in ('-m', '--monthly'):
                rotation_scheme['monthly'] = coerce_retention_period(value)
            elif option in ('-y', '--yearly'):
                rotation_scheme['yearly'] = coerce_retention_period(value)
            elif option in ('-I', '--include'):
                kw['include_list'].append(value)
            elif option in ('-x', '--exclude'):
                kw['exclude_list'].append(value)
            elif option in ('-j', '--parallel'):
                parallel = True
            elif option in ('-p', '--prefer-recent'):
                kw['prefer_recent'] = True
            elif option in ('-r', '--relaxed'):
                kw['strict'] = False
            elif option in ('-i', '--ionice'):
                # Reject invalid I/O scheduling classes up front.
                value = validate_ionice_class(value.lower().strip())
                kw['io_scheduling_class'] = value
            elif option in ('-c', '--config'):
                kw['config_file'] = parse_path(value)
            elif option in ('-u', '--use-sudo'):
                use_sudo = True
            elif option in ('-n', '--dry-run'):
                logger.info("Performing a dry run (because of %s option) ..", option)
                kw['dry_run'] = True
            elif option in ('-C', '--removal-command'):
                # A custom removal command is tokenized with shell rules.
                removal_command = shlex.split(value)
                logger.info("Using custom removal command: %s", removal_command)
                kw['removal_command'] = removal_command
            elif option in ('-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif option in ('-q', '--quiet'):
                coloredlogs.decrease_verbosity()
            elif option in ('-h', '--help'):
                usage(__doc__)
                return
            else:
                assert False, "Unhandled option! (programming error)"
        if rotation_scheme:
            logger.verbose("Rotation scheme defined on command line: %s", rotation_scheme)
        if arguments:
            # Rotation of the locations given on the command line.
            location_source = 'command line arguments'
            selected_locations.extend(coerce_location(value, sudo=use_sudo) for value in arguments)
        else:
            # Rotation of all configured locations.
            location_source = 'configuration file'
            selected_locations.extend(
                location for location, rotation_scheme, options
                in load_config_file(configuration_file=kw.get('config_file'), expand=True)
            )
        # Inform the user which location(s) will be rotated.
        if selected_locations:
            logger.verbose("Selected %s based on %s:",
                           pluralize(len(selected_locations), "location"),
                           location_source)
            for number, location in enumerate(selected_locations, start=1):
                logger.verbose(" %i. %s", number, location)
        else:
            # Show the usage message when no directories are given nor configured.
            logger.verbose("No location(s) to rotate selected.")
            usage(__doc__)
            return
    except Exception as e:
        # Any parse/validation failure terminates with a nonzero exit status.
        logger.error("%s", e)
        sys.exit(1)
    # Rotate the backups in the selected directories.
    program = RotateBackups(rotation_scheme, **kw)
    if parallel:
        program.rotate_concurrent(*selected_locations)
    else:
        for location in selected_locations:
            program.rotate_backups(location)
|
Command line interface for the ``rotate-backups`` program.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/cli.py#L205-L303
|
[
"def coerce_retention_period(value):\n \"\"\"\n Coerce a retention period to a Python value.\n\n :param value: A string containing the text 'always', a number or\n an expression that can be evaluated to a number.\n :returns: A number or the string 'always'.\n :raises: :exc:`~exceptions.ValueError` when the string can't be coerced.\n \"\"\"\n # Numbers pass through untouched.\n if not isinstance(value, numbers.Number):\n # Other values are expected to be strings.\n if not isinstance(value, string_types):\n msg = \"Expected string, got %s instead!\"\n raise ValueError(msg % type(value))\n # Check for the literal string `always'.\n value = value.strip()\n if value.lower() == 'always':\n value = 'always'\n else:\n # Evaluate other strings as expressions.\n value = simple_eval(value)\n if not isinstance(value, numbers.Number):\n msg = \"Expected numeric result, got %s instead!\"\n raise ValueError(msg % type(value))\n return value\n",
"def load_config_file(configuration_file=None, expand=True):\n \"\"\"\n Load a configuration file with backup directories and rotation schemes.\n\n :param configuration_file: Override the pathname of the configuration file\n to load (a string or :data:`None`).\n :param expand: :data:`True` to expand filename patterns to their matches,\n :data:`False` otherwise.\n :returns: A generator of tuples with four values each:\n\n 1. An execution context created using :mod:`executor.contexts`.\n 2. The pathname of a directory with backups (a string).\n 3. A dictionary with the rotation scheme.\n 4. A dictionary with additional options.\n :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given\n but doesn't exist or can't be loaded.\n\n This function is used by :class:`RotateBackups` to discover user defined\n rotation schemes and by :mod:`rotate_backups.cli` to discover directories\n for which backup rotation is configured. When `configuration_file` isn't\n given :class:`~update_dotdee.ConfigLoader` is used to search for\n configuration files in the following locations:\n\n - ``/etc/rotate-backups.ini`` and ``/etc/rotate-backups.d/*.ini``\n - ``~/.rotate-backups.ini`` and ``~/.rotate-backups.d/*.ini``\n - ``~/.config/rotate-backups.ini`` and ``~/.config/rotate-backups.d/*.ini``\n\n All of the available configuration files are loaded in the order given\n above, so that sections in user-specific configuration files override\n sections by the same name in system-wide configuration files.\n \"\"\"\n expand_notice_given = False\n if configuration_file:\n loader = ConfigLoader(available_files=[configuration_file], strict=True)\n else:\n loader = ConfigLoader(program_name='rotate-backups', strict=False)\n for section in loader.section_names:\n items = dict(loader.get_options(section))\n context_options = {}\n if coerce_boolean(items.get('use-sudo')):\n context_options['sudo'] = True\n if items.get('ssh-user'):\n context_options['ssh_user'] = items['ssh-user']\n 
location = coerce_location(section, **context_options)\n rotation_scheme = dict((name, coerce_retention_period(items[name]))\n for name in SUPPORTED_FREQUENCIES\n if name in items)\n options = dict(include_list=split(items.get('include-list', '')),\n exclude_list=split(items.get('exclude-list', '')),\n io_scheduling_class=items.get('ionice'),\n strict=coerce_boolean(items.get('strict', 'yes')),\n prefer_recent=coerce_boolean(items.get('prefer-recent', 'no')))\n # Don't override the value of the 'removal_command' property unless the\n # 'removal-command' configuration file option has a value set.\n if items.get('removal-command'):\n options['removal_command'] = shlex.split(items['removal-command'])\n # Expand filename patterns?\n if expand and location.have_wildcards:\n logger.verbose(\"Expanding filename pattern %s on %s ..\", location.directory, location.context)\n if location.is_remote and not expand_notice_given:\n logger.notice(\"Expanding remote filename patterns (may be slow) ..\")\n expand_notice_given = True\n for match in sorted(location.context.glob(location.directory)):\n if location.context.is_directory(match):\n logger.verbose(\"Matched directory: %s\", match)\n expanded = Location(context=location.context, directory=match)\n yield expanded, rotation_scheme, options\n else:\n logger.verbose(\"Ignoring match (not a directory): %s\", match)\n else:\n yield location, rotation_scheme, options\n"
] |
# rotate-backups: Simple command line interface for backup rotation.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: August 2, 2018
# URL: https://github.com/xolox/python-rotate-backups
"""
Usage: rotate-backups [OPTIONS] [DIRECTORY, ..]
Easy rotation of backups based on the Python package by the same name.
To use this program you specify a rotation scheme via (a combination of) the
--hourly, --daily, --weekly, --monthly and/or --yearly options and the
directory (or directories) containing backups to rotate as one or more
positional arguments.
You can rotate backups on a remote system over SSH by prefixing a DIRECTORY
with an SSH alias and separating the two with a colon (similar to how rsync
accepts remote locations).
Instead of specifying directories and a rotation scheme on the command line you
can also add them to a configuration file. For more details refer to the online
documentation (see also the --config option).
Please use the --dry-run option to test the effect of the specified rotation
scheme before letting this program loose on your precious backups! If you don't
test the results using the dry run mode and this program eats more backups than
intended you have no right to complain ;-).
Supported options:
-M, --minutely=COUNT
In a literal sense this option sets the number of "backups per minute" to
preserve during rotation. For most use cases that doesn't make a lot of
sense :-) but you can combine the --minutely and --relaxed options to
preserve more than one backup per hour. Refer to the usage of the -H,
--hourly option for details about COUNT.
-H, --hourly=COUNT
Set the number of hourly backups to preserve during rotation:
- If COUNT is a number it gives the number of hourly backups to preserve,
starting from the most recent hourly backup and counting back in time.
- Alternatively you can provide an expression that will be evaluated to get
a number (e.g. if COUNT is `7 * 2' the result would be 14).
- You can also pass `always' for COUNT, in this case all hourly backups are
preserved.
- By default no hourly backups are preserved.
-d, --daily=COUNT
Set the number of daily backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-w, --weekly=COUNT
Set the number of weekly backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-m, --monthly=COUNT
Set the number of monthly backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-y, --yearly=COUNT
Set the number of yearly backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-I, --include=PATTERN
Only process backups that match the shell pattern given by PATTERN. This
argument can be repeated. Make sure to quote PATTERN so the shell doesn't
expand the pattern before it's received by rotate-backups.
-x, --exclude=PATTERN
Don't process backups that match the shell pattern given by PATTERN. This
argument can be repeated. Make sure to quote PATTERN so the shell doesn't
expand the pattern before it's received by rotate-backups.
-j, --parallel
Remove backups in parallel, one backup per mount point at a time. The idea
behind this approach is that parallel rotation is most useful when the
files to be removed are on different disks and so multiple devices can be
utilized at the same time.
Because mount points are per system the -j, --parallel option will also
parallelize over backups located on multiple remote systems.
-p, --prefer-recent
By default the first (oldest) backup in each time slot is preserved. If
you'd prefer to keep the most recent backup in each time slot instead then
this option is for you.
-r, --relaxed
By default the time window for each rotation scheme is enforced (this is
referred to as strict rotation) but the -r, --relaxed option can be used
to alter this behavior. The easiest way to explain the difference between
strict and relaxed rotation is using an example:
- When using strict rotation and the number of hourly backups to preserve
is three, only backups created in the relevant time window (the hour of
the most recent backup and the two hours leading up to that) will match
the hourly frequency.
- When using relaxed rotation the three most recent backups will all match
the hourly frequency (and thus be preserved), regardless of the
calculated time window.
If the explanation above is not clear enough, here's a simple way to decide
whether you want to customize this behavior or not:
- If your backups are created at regular intervals and you never miss an
interval then strict rotation (the default) is probably the best choice.
- If your backups are created at irregular intervals then you may want to
use the -r, --relaxed option in order to preserve more backups.
-i, --ionice=CLASS
Use the `ionice' program to set the I/O scheduling class and priority of
the `rm' invocations used to remove backups. CLASS is expected to be one of
the values `idle', `best-effort' or `realtime'. Refer to the man page of
the `ionice' program for details about these values.
-c, --config=FILENAME
Load configuration from FILENAME. If this option isn't given the following
default locations are searched for configuration files:
- /etc/rotate-backups.ini and /etc/rotate-backups.d/*.ini
- ~/.rotate-backups.ini and ~/.rotate-backups.d/*.ini
- ~/.config/rotate-backups.ini and ~/.config/rotate-backups.d/*.ini
Any available configuration files are loaded in the order given above, so
that sections in user-specific configuration files override sections by the
same name in system-wide configuration files. For more details refer to the
online documentation.
-u, --use-sudo
Enable the use of `sudo' to rotate backups in directories that are not
readable and/or writable for the current user (or the user logged in to a
remote system over SSH).
-n, --dry-run
Don't make any changes, just print what would be done. This makes it easy
to evaluate the impact of a rotation scheme without losing any backups.
-C, --removal-command=CMD
Change the command used to remove backups. The value of CMD defaults to
``rm -fR``. This choice was made because it works regardless of whether
"backups to be rotated" are files or directories or a mixture of both.
As an example of why you might want to change this, CephFS snapshots are
represented as regular directory trees that can be deleted at once with a
single 'rmdir' command (even though according to POSIX semantics this
command should refuse to remove nonempty directories, but I digress).
-v, --verbose
Increase logging verbosity (can be repeated).
-q, --quiet
Decrease logging verbosity (can be repeated).
-h, --help
Show this message and exit.
"""
# Standard library modules.
import getopt
import shlex
import sys
# External dependencies.
import coloredlogs
from executor import validate_ionice_class
from humanfriendly import parse_path, pluralize
from humanfriendly.terminal import usage
from verboselogs import VerboseLogger
# Modules included in our package.
from rotate_backups import (
RotateBackups,
coerce_location,
coerce_retention_period,
load_config_file,
)
# Initialize a logger.
logger = VerboseLogger(__name__)
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
coerce_location
|
python
|
def coerce_location(value, **options):
    """
    Coerce a string to a :class:`Location` object.

    :param value: The value to coerce (a string or :class:`Location` object).
    :param options: Any keyword arguments are passed on to
                    :func:`~executor.contexts.create_context()`.
    :returns: A :class:`Location` object.
    """
    # Location objects pass through untouched.
    if isinstance(value, Location):
        return value
    # Other values are expected to be strings.
    if not isinstance(value, string_types):
        raise ValueError("Expected Location object or string, got %s instead!" % type(value))
    # A string of the form `ssh-alias:directory' designates a remote location.
    ssh_alias, _, directory = value.partition(':')
    if ssh_alias and directory and '/' not in ssh_alias:
        options['ssh_alias'] = ssh_alias
    else:
        directory = value
    return Location(
        context=create_context(**options),
        directory=parse_path(directory),
    )
|
Coerce a string to a :class:`Location` object.
:param value: The value to coerce (a string or :class:`Location` object).
:param options: Any keyword arguments are passed on to
:func:`~executor.contexts.create_context()`.
:returns: A :class:`Location` object.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L93-L119
| null |
# rotate-backups: Simple command line interface for backup rotation.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: August 3, 2018
# URL: https://github.com/xolox/python-rotate-backups
"""
Simple to use Python API for rotation of backups.
The :mod:`rotate_backups` module contains the Python API of the
`rotate-backups` package. The core logic of the package is contained in the
:class:`RotateBackups` class.
"""
# Standard library modules.
import collections
import datetime
import fnmatch
import numbers
import os
import re
import shlex
# External dependencies.
from dateutil.relativedelta import relativedelta
from executor import ExternalCommandFailed
from executor.concurrent import CommandPool
from executor.contexts import RemoteContext, create_context
from humanfriendly import Timer, coerce_boolean, format_path, parse_path, pluralize
from humanfriendly.text import compact, concatenate, split
from natsort import natsort
from property_manager import (
PropertyManager,
cached_property,
key_property,
lazy_property,
mutable_property,
required_property,
)
from simpleeval import simple_eval
from six import string_types
from update_dotdee import ConfigLoader
from verboselogs import VerboseLogger
# Semi-standard module versioning.
__version__ = '6.0'
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
ORDERED_FREQUENCIES = (
('minutely', relativedelta(minutes=1)),
('hourly', relativedelta(hours=1)),
('daily', relativedelta(days=1)),
('weekly', relativedelta(weeks=1)),
('monthly', relativedelta(months=1)),
('yearly', relativedelta(years=1)),
)
"""
A list of tuples with two values each:
- The name of a rotation frequency (a string like 'hourly', 'daily', etc.).
- A :class:`~dateutil.relativedelta.relativedelta` object.
The tuples are sorted by increasing delta (intentionally).
"""
SUPPORTED_FREQUENCIES = dict(ORDERED_FREQUENCIES)
"""
A dictionary with rotation frequency names (strings) as keys and
:class:`~dateutil.relativedelta.relativedelta` objects as values. This
dictionary is generated based on the tuples in :data:`ORDERED_FREQUENCIES`.
"""
TIMESTAMP_PATTERN = re.compile(r'''
# Required components.
(?P<year>\d{4} ) \D?
(?P<month>\d{2}) \D?
(?P<day>\d{2} ) \D?
(?:
# Optional components.
(?P<hour>\d{2} ) \D?
(?P<minute>\d{2}) \D?
(?P<second>\d{2})?
)?
''', re.VERBOSE)
"""
A compiled regular expression object used to match timestamps encoded in
filenames.
"""
def coerce_retention_period(value):
    """
    Coerce a retention period to a Python value.

    :param value: A string containing the text 'always', a number or
                  an expression that can be evaluated to a number.
    :returns: A number or the string 'always'.
    :raises: :exc:`~exceptions.ValueError` when the string can't be coerced.
    """
    # Numbers pass through untouched.
    if isinstance(value, numbers.Number):
        return value
    # Other values are expected to be strings.
    if not isinstance(value, string_types):
        raise ValueError("Expected string, got %s instead!" % type(value))
    text = value.strip()
    # Check for the literal string `always'.
    if text.lower() == 'always':
        return 'always'
    # Evaluate other strings as expressions.
    result = simple_eval(text)
    if not isinstance(result, numbers.Number):
        raise ValueError("Expected numeric result, got %s instead!" % type(result))
    return result
def load_config_file(configuration_file=None, expand=True):
    """
    Load a configuration file with backup directories and rotation schemes.

    :param configuration_file: Override the pathname of the configuration file
                               to load (a string or :data:`None`).
    :param expand: :data:`True` to expand filename patterns to their matches,
                   :data:`False` otherwise.
    :returns: A generator of tuples with three values each: a
              :class:`Location` object, a dictionary with the rotation scheme
              and a dictionary with additional options.
    :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given
             but doesn't exist or can't be loaded.

    When `configuration_file` isn't given :class:`~update_dotdee.ConfigLoader`
    searches its default locations (``/etc``, ``~`` and ``~/.config``), so
    user-specific configuration files override sections by the same name in
    system-wide configuration files.
    """
    if configuration_file:
        loader = ConfigLoader(available_files=[configuration_file], strict=True)
    else:
        loader = ConfigLoader(program_name='rotate-backups', strict=False)
    expand_notice_given = False
    for section in loader.section_names:
        items = dict(loader.get_options(section))
        # Translate section options into execution context options.
        context_options = {}
        if coerce_boolean(items.get('use-sudo')):
            context_options['sudo'] = True
        if items.get('ssh-user'):
            context_options['ssh_user'] = items['ssh-user']
        location = coerce_location(section, **context_options)
        rotation_scheme = dict((name, coerce_retention_period(items[name]))
                               for name in SUPPORTED_FREQUENCIES
                               if name in items)
        options = {
            'include_list': split(items.get('include-list', '')),
            'exclude_list': split(items.get('exclude-list', '')),
            'io_scheduling_class': items.get('ionice'),
            'strict': coerce_boolean(items.get('strict', 'yes')),
            'prefer_recent': coerce_boolean(items.get('prefer-recent', 'no')),
        }
        # Don't override the value of the 'removal_command' property unless
        # the 'removal-command' configuration file option has a value set.
        if items.get('removal-command'):
            options['removal_command'] = shlex.split(items['removal-command'])
        if expand and location.have_wildcards:
            # Expand the filename pattern to matching directories.
            logger.verbose("Expanding filename pattern %s on %s ..", location.directory, location.context)
            if location.is_remote and not expand_notice_given:
                logger.notice("Expanding remote filename patterns (may be slow) ..")
                expand_notice_given = True
            for match in sorted(location.context.glob(location.directory)):
                if not location.context.is_directory(match):
                    logger.verbose("Ignoring match (not a directory): %s", match)
                    continue
                logger.verbose("Matched directory: %s", match)
                yield Location(context=location.context, directory=match), rotation_scheme, options
        else:
            yield location, rotation_scheme, options
def rotate_backups(directory, rotation_scheme, **options):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    .. note:: This function exists to preserve backwards compatibility with
              older versions of the `rotate-backups` package where all of the
              logic was exposed as a single function. Refer to the
              :class:`RotateBackups` initializer and its
              :func:`~RotateBackups.rotate_backups()` method for an
              explanation of this function's parameters.
    """
    RotateBackups(rotation_scheme=rotation_scheme, **options).rotate_backups(directory)
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
    """
    Initialize a :class:`RotateBackups` object.

    :param rotation_scheme: Used to set :attr:`rotation_scheme`.
    :param options: Any keyword arguments set the values of instance
                    properties that support assignment
                    (:attr:`config_file`, :attr:`dry_run`,
                    :attr:`exclude_list`, :attr:`include_list`,
                    :attr:`io_scheduling_class`, :attr:`removal_command`
                    and :attr:`strict`).
    """
    # Merge the positional rotation scheme into the property keyword
    # arguments and let PropertyManager assign everything.
    merged = dict(options, rotation_scheme=rotation_scheme)
    super(RotateBackups, self).__init__(**merged)
@mutable_property
def config_file(self):
    """
    The pathname of a configuration file (a string or :data:`None`).

    When this property is set :func:`rotate_backups()` will use
    :func:`load_config_file()` to give the user (operator) a chance to set
    the rotation scheme and other options via a configuration file.
    """
    # Intentionally no return statement: the computed default is None until
    # a value is assigned (the property is mutable).
@mutable_property
def dry_run(self):
    """
    :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).

    If this is :data:`True` then :func:`rotate_backups()` won't make any
    actual changes, which provides a 'preview' of the effect of the
    rotation scheme. Right now this is only useful in the command line
    interface because there's no return value.
    """
    # Default: really remove backups unless the operator opts into a dry run.
    return False
@cached_property(writable=True)
def exclude_list(self):
    """
    Filename patterns to exclude specific backups (a list of strings).

    This is a list of strings with :mod:`fnmatch` patterns. When
    :func:`collect_backups()` encounters a backup whose name matches any of
    the patterns in this list the backup will be ignored, *even if it also
    matches the include list* (it's the only logical way to combine both
    lists).

    :see also: :attr:`include_list`
    """
    # Default to no exclusions; `writable=True` lets callers assign a list.
    return []
@cached_property(writable=True)
def include_list(self):
    """
    Filename patterns to select specific backups (a list of strings).

    This is a list of strings with :mod:`fnmatch` patterns. When it's not
    empty :func:`collect_backups()` will only collect backups whose name
    matches a pattern in the list.

    :see also: :attr:`exclude_list`
    """
    # Default (empty) means no filtering; `writable=True` allows assignment.
    return []
@mutable_property
def io_scheduling_class(self):
    """
    The I/O scheduling class for backup rotation (a string or :data:`None`).

    When this property is set (and :attr:`~Location.have_ionice` is
    :data:`True`) then ionice_ will be used to set the I/O scheduling class
    for backup rotation. This can be useful to reduce the impact of backup
    rotation on the rest of the system.

    The value of this property is expected to be one of the strings 'idle',
    'best-effort' or 'realtime'.

    .. _ionice: https://linux.die.net/man/1/ionice
    """
    # Intentionally no return statement: defaults to None (ionice disabled).
@mutable_property
def prefer_recent(self):
    """
    Whether to prefer older or newer backups in each time slot (a boolean).

    Defaults to :data:`False` which means the oldest backup in each time
    slot (an hour, a day, etc.) is preserved while newer backups in the
    time slot are removed. You can set this to :data:`True` if you would
    like to preserve the newest backup in each time slot instead.
    """
    # Default: keep the oldest backup in each time slot.
    return False
@mutable_property
def removal_command(self):
    """
    The command used to remove backups (a list of strings).

    By default the command ``rm -fR`` is used. This choice was made because
    it works regardless of whether the user's "backups to be rotated" are
    files or directories or a mixture of both.

    .. versionadded: 5.3
       This option was added as a generalization of the idea suggested in
       `pull request 11`_, which made it clear to me that being able to
       customize the removal command has its uses.

    .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
    """
    # A list (not a string) so callers can copy it and append the backup
    # pathname as the final argument (see rotate_backups()).
    return ['rm', '-fR']
@required_property
def rotation_scheme(self):
    """
    The rotation scheme to apply to backups (a dictionary).

    Each key in this dictionary defines a rotation frequency (one of the
    strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
    'yearly') and each value defines a retention count:

    - An integer value represents the number of backups to preserve in the
      given rotation frequency, starting from the most recent backup and
      counting back in time.
    - The string 'always' means all backups in the given rotation frequency
      are preserved (this is intended to be used with the biggest frequency
      in the rotation scheme, e.g. yearly).

    No backups are preserved for rotation frequencies that are not present
    in the dictionary.
    """
    # No default body: @required_property means a value must be supplied
    # (the initializer passes it in as the `rotation_scheme` argument).
@mutable_property
def strict(self):
    """
    Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).

    The easiest way to explain the difference between strict and relaxed
    rotation is using an example:

    - If :attr:`strict` is :data:`True` and the number of hourly backups to
      preserve is three, only backups created in the relevant time window
      (the hour of the most recent backup and the two hours leading up to
      that) will match the hourly frequency.
    - If :attr:`strict` is :data:`False` then the three most recent backups
      will all match the hourly frequency (and thus be preserved),
      regardless of the calculated time window.

    If the explanation above is not clear enough, here's a simple way to
    decide whether you want to customize this behavior:

    - If your backups are created at regular intervals and you never miss
      an interval then the default (:data:`True`) is most likely fine.
    - If your backups are created at irregular intervals then you may want
      to set :attr:`strict` to :data:`False` to convince
      :class:`RotateBackups` to preserve more backups.
    """
    # Default: strict rotation (time windows are enforced).
    return True
def rotate_concurrent(self, *locations, **kw):
    """
    Rotate the backups in the given locations concurrently.

    :param locations: One or more values accepted by :func:`coerce_location()`.
    :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

    This function uses :func:`rotate_backups()` to prepare rotation
    commands for the given locations and then it removes backups in
    parallel, one backup per mount point at a time.

    The idea behind this approach is that parallel rotation is most useful
    when the files to be removed are on different disks and so multiple
    devices can be utilized at the same time.

    Because mount points are per system :func:`rotate_concurrent()` will
    also parallelize over backups located on multiple remote systems.
    """
    timer = Timer()
    pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for location in locations:
        # BUG FIX: rotate_backups() returns None (bare `return`) when a
        # location contains no backups; guard with `or []` so one empty
        # location doesn't abort the whole concurrent run with a TypeError.
        for cmd in self.rotate_backups(location, prepare=True, **kw) or []:
            pool.add(cmd)
    if pool.num_commands > 0:
        backups = pluralize(pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", backups)
        pool.run()
        logger.info("Successfully rotated %s in %s.", backups, timer)
def rotate_backups(self, location, load_config=True, prepare=False):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    :param location: Any value accepted by :func:`coerce_location()`.
    :param load_config: If :data:`True` (so by default) the rotation scheme
                        and other options can be customized by the user in
                        a configuration file. In this case the caller's
                        arguments are only used when the configuration file
                        doesn't define a configuration for the location.
    :param prepare: If this is :data:`True` (not the default) then
                    :func:`rotate_backups()` will prepare the required
                    rotation commands without running them.
    :returns: A list with the rotation commands
              (:class:`~executor.ExternalCommand` objects).
    :raises: :exc:`~exceptions.ValueError` when the given location doesn't
             exist, isn't readable or isn't writable. The third check is
             only performed when dry run isn't enabled.

    This function binds the main methods of the :class:`RotateBackups`
    class together to implement backup rotation with an easy to use Python
    API. If you're using `rotate-backups` as a Python API and the default
    behavior is not satisfactory, consider writing your own
    :func:`rotate_backups()` function based on the underlying
    :func:`collect_backups()`, :func:`group_backups()`,
    :func:`apply_rotation_scheme()` and
    :func:`find_preservation_criteria()` methods.
    """
    rotation_commands = []
    location = coerce_location(location)
    # Load configuration overrides by user?
    if load_config:
        location = self.load_config_file(location)
    # Collect the backups in the given directory.
    sorted_backups = self.collect_backups(location)
    if not sorted_backups:
        logger.info("No backups found in %s.", location)
        # Return an empty list (instead of implicitly returning None) so
        # that callers like rotate_concurrent() can always iterate over
        # the result, as promised in the docstring above.
        return rotation_commands
    # Make sure the directory is writable, but only when backups may
    # actually be removed (dry run never writes).
    if not self.dry_run:
        location.ensure_writable()
    most_recent_backup = sorted_backups[-1]
    # Group the backups by the rotation frequencies.
    backups_by_frequency = self.group_backups(sorted_backups)
    # Apply the user defined rotation scheme.
    self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
    # Find which backups to preserve and why.
    backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
    # Apply the calculated rotation scheme.
    for backup in sorted_backups:
        friendly_name = backup.pathname
        if not location.is_remote:
            # Use human friendly pathname formatting for local backups.
            friendly_name = format_path(backup.pathname)
        if backup in backups_to_preserve:
            matching_periods = backups_to_preserve[backup]
            logger.info("Preserving %s (matches %s retention %s) ..",
                        friendly_name, concatenate(map(repr, matching_periods)),
                        "period" if len(matching_periods) == 1 else "periods")
        else:
            logger.info("Deleting %s ..", friendly_name)
            if not self.dry_run:
                # Copy the list with the (possibly user defined) removal command.
                removal_command = list(self.removal_command)
                # Add the pathname of the backup as the final argument.
                removal_command.append(backup.pathname)
                # Construct the command object; grouping by mount point lets
                # rotate_concurrent() parallelize across devices and hosts.
                command = location.context.prepare(
                    command=removal_command,
                    group_by=(location.ssh_alias, location.mount_point),
                    ionice=self.io_scheduling_class,
                )
                rotation_commands.append(command)
                if not prepare:
                    timer = Timer()
                    command.wait()
                    logger.verbose("Deleted %s in %s.", friendly_name, timer)
    if len(backups_to_preserve) == len(sorted_backups):
        logger.info("Nothing to do! (all backups preserved)")
    return rotation_commands
def load_config_file(self, location):
    """
    Load a rotation scheme and other options from a configuration file.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: The configured or given :class:`Location` object.
    """
    location = coerce_location(location)
    # Scan the configuration file sections in loader order; the first
    # section that matches the location wins.
    for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
        if configured_location.match(location):
            logger.verbose("Loading configuration for %s ..", location)
            if rotation_scheme:
                self.rotation_scheme = rotation_scheme
            # Only truthy option values override instance properties, so
            # empty configuration values don't clobber existing settings.
            for name, value in options.items():
                if value:
                    setattr(self, name, value)
            # Create a new Location object based on the directory of the
            # given location and the execution context of the configured
            # location, because:
            #
            # 1. The directory of the configured location may be a filename
            #    pattern whereas we are interested in the expanded name.
            #
            # 2. The execution context of the given location may lack some
            #    details of the configured location.
            return Location(
                context=configured_location.context,
                directory=location.directory,
            )
    logger.verbose("No configuration found for %s.", location)
    return location
def collect_backups(self, location):
    """
    Collect the backups at the given location.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: A sorted :class:`list` of :class:`Backup` objects (the
              backups are sorted by their date).
    :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
             exist or isn't readable.
    """
    backups = []
    location = coerce_location(location)
    logger.info("Scanning %s for backups ..", location)
    location.ensure_readable()
    for entry in natsort(location.context.list_entries(location.directory)):
        match = TIMESTAMP_PATTERN.search(entry)
        if match:
            # The exclude list takes precedence over the include list.
            if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                logger.verbose("Excluded %s (it matched the exclude list).", entry)
            elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                logger.verbose("Excluded %s (it didn't match the include list).", entry)
            else:
                try:
                    backups.append(Backup(
                        pathname=os.path.join(location.directory, entry),
                        # match.groups('0') substitutes '0' for the optional
                        # hour/minute/second components that didn't match.
                        timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
                except ValueError as e:
                    # e.g. a filename that matches the pattern but encodes
                    # an impossible calendar date.
                    logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
        else:
            logger.debug("Failed to match time stamp in filename: %s", entry)
    if backups:
        logger.info("Found %i timestamped backups in %s.", len(backups), location)
    return sorted(backups)
def group_backups(self, backups):
    """
    Group backups collected by :func:`collect_backups()` by rotation frequencies.

    :param backups: A :class:`set` of :class:`Backup` objects.
    :returns: A :class:`dict` whose keys are the names of rotation
              frequencies ('hourly', 'daily', etc.) and whose values are
              dictionaries. Each nested dictionary contains lists of
              :class:`Backup` objects that are grouped together because
              they belong into the same time unit for the corresponding
              rotation frequency.
    """
    grouped = {name: collections.defaultdict(list) for name in SUPPORTED_FREQUENCIES}
    for backup in backups:
        # Map each frequency to the time slot the backup falls into.
        time_slots = {
            'minutely': (backup.year, backup.month, backup.day, backup.hour, backup.minute),
            'hourly': (backup.year, backup.month, backup.day, backup.hour),
            'daily': (backup.year, backup.month, backup.day),
            'weekly': (backup.year, backup.week),
            'monthly': (backup.year, backup.month),
            'yearly': backup.year,
        }
        for frequency, slot in time_slots.items():
            grouped[frequency][slot].append(backup)
    return grouped
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
    """
    Apply the user defined rotation scheme to the result of :func:`group_backups()`.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()`.
    :param most_recent_backup: The :class:`~datetime.datetime` of the most
                               recent backup.
    :raises: :exc:`~exceptions.ValueError` when the rotation scheme
             dictionary is empty (this would cause all backups to be
             deleted).

    .. note:: This method mutates the given data structure by removing all
              backups that should be removed to apply the user defined
              rotation scheme.
    """
    if not self.rotation_scheme:
        raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
    for frequency, backups in backups_by_frequency.items():
        # Ignore frequencies not specified by the user.
        if frequency not in self.rotation_scheme:
            backups.clear()
        else:
            # Reduce the number of backups in each time slot of this
            # rotation frequency to a single backup (the oldest one or the
            # newest one).
            for period, backups_in_period in backups.items():
                index = -1 if self.prefer_recent else 0
                selected_backup = sorted(backups_in_period)[index]
                backups[period] = [selected_backup]
            # Check if we need to rotate away backups in old periods.
            retention_period = self.rotation_scheme[frequency]
            if retention_period != 'always':
                # Remove backups created before the minimum date of this
                # rotation frequency? (relative to the most recent backup)
                if self.strict:
                    minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                    # Iterate over a copy because the dictionary is mutated
                    # inside the loop. Each period holds exactly one backup
                    # at this point (reduced above) so removing from the
                    # inner list while iterating it is safe here.
                    for period, backups_in_period in list(backups.items()):
                        for backup in backups_in_period:
                            if backup.timestamp < minimum_date:
                                backups_in_period.remove(backup)
                        if not backups_in_period:
                            backups.pop(period)
                # If there are more periods remaining than the user
                # requested to be preserved we delete the oldest one(s).
                items_to_preserve = sorted(backups.items())[-retention_period:]
                backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
    """
    Collect the criteria used to decide which backups to preserve.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()` which has been
                                 processed by :func:`apply_rotation_scheme()`.
    :returns: A :class:`dict` with :class:`Backup` objects as keys and
              :class:`list` objects containing strings (rotation
              frequencies) as values.
    """
    preserved = collections.defaultdict(list)
    # Walk the frequencies from smallest to largest delta so each backup's
    # list of matching frequencies ends up in a stable, meaningful order.
    for frequency, _delta in ORDERED_FREQUENCIES:
        for backups_in_period in backups_by_frequency[frequency].values():
            for backup in backups_in_period:
                preserved[backup].append(frequency)
    return preserved
class Location(PropertyManager):

    """:class:`Location` objects represent a root directory containing backups."""

    @required_property
    def context(self):
        """An execution context created using :mod:`executor.contexts`."""

    @required_property
    def directory(self):
        """The pathname of a directory containing backups (a string)."""

    @lazy_property
    def have_ionice(self):
        """:data:`True` when ionice_ is available, :data:`False` otherwise."""
        return self.context.have_ionice

    @lazy_property
    def have_wildcards(self):
        """:data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
        return '*' in self.directory

    @lazy_property
    def mount_point(self):
        """
        The pathname of the mount point of :attr:`directory` (a string or :data:`None`).

        If the ``stat --format=%m ...`` command that is used to determine the
        mount point fails, the value of this property defaults to :data:`None`.
        This enables graceful degradation on e.g. Mac OS X whose ``stat``
        implementation is rather bare bones compared to GNU/Linux.
        """
        try:
            return self.context.capture('stat', '--format=%m', self.directory, silent=True)
        except ExternalCommandFailed:
            return None

    @lazy_property
    def is_remote(self):
        """:data:`True` if the location is remote, :data:`False` otherwise."""
        return isinstance(self.context, RemoteContext)

    @lazy_property
    def ssh_alias(self):
        """The SSH alias of a remote location (a string or :data:`None`)."""
        return self.context.ssh_alias if self.is_remote else None

    @property
    def key_properties(self):
        """
        A list of strings with the names of the :attr:`~custom_property.key` properties.

        Overrides :attr:`~property_manager.PropertyManager.key_properties` to
        customize the ordering of :class:`Location` objects so that they are
        ordered first by their :attr:`ssh_alias` and second by their
        :attr:`directory`.
        """
        return ['ssh_alias', 'directory'] if self.is_remote else ['directory']

    def ensure_exists(self):
        """Make sure the location exists."""
        if not self.context.is_directory(self.directory):
            # This can also happen when we don't have permission to one of the
            # parent directories so we'll point that out in the error message
            # when it seems applicable (so as not to confuse users).
            if self.context.have_superuser_privileges:
                msg = "The directory %s doesn't exist!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't accessible, most likely
                    because it doesn't exist or because of permissions. If
                    you're sure the directory exists you can use the
                    --use-sudo option.
                """, location=self))

    def ensure_readable(self):
        """Make sure the location exists and is readable."""
        self.ensure_exists()
        if not self.context.is_readable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't readable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't readable, most likely
                    because of permissions. Consider using the --use-sudo
                    option.
                """, location=self))

    def ensure_writable(self):
        """Make sure the directory exists and is writable."""
        self.ensure_exists()
        if not self.context.is_writable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't writable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't writable, most likely due
                    to permissions. Consider using the --use-sudo option.
                """, location=self))

    def match(self, location):
        """
        Check if the given location "matches".

        :param location: The :class:`Location` object to try to match.
        :returns: :data:`True` if the two locations are on the same system and
                  the :attr:`directory` can be matched as a filename pattern or
                  a literal match on the normalized pathname.
        """
        if self.ssh_alias != location.ssh_alias:
            # Never match locations on other systems.
            return False
        elif self.have_wildcards:
            # Match filename patterns using fnmatch().
            return fnmatch.fnmatch(location.directory, self.directory)
        else:
            # Compare normalized directory pathnames. Note that `self' is
            # deliberately rebound to a string here (it's only used for the
            # comparison below).
            self = os.path.normpath(self.directory)
            other = os.path.normpath(location.directory)
            return self == other

    def __str__(self):
        """Render a simple human readable representation of a location."""
        return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
class Backup(PropertyManager):

    """:class:`Backup` objects represent a rotation subject."""

    key_properties = 'timestamp', 'pathname'
    """
    Customize the ordering of :class:`Backup` objects.

    :class:`Backup` objects are ordered first by their :attr:`timestamp` and
    second by their :attr:`pathname`. This class variable overrides
    :attr:`~property_manager.PropertyManager.key_properties`.
    """

    @key_property
    def pathname(self):
        """The pathname of the backup (a string)."""

    @key_property
    def timestamp(self):
        """The date and time when the backup was created (a :class:`~datetime.datetime` object)."""

    @property
    def week(self):
        """The ISO week number of :attr:`timestamp` (a number)."""
        return self.timestamp.isocalendar()[1]

    def __getattr__(self, name):
        """Defer attribute access to :attr:`timestamp`."""
        # Enables e.g. `backup.year' as shorthand for `backup.timestamp.year'
        # (this is what group_backups() relies on).
        return getattr(self.timestamp, name)
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
coerce_retention_period
|
python
|
def coerce_retention_period(value):
    """
    Coerce a retention period to a Python value.

    :param value: A string containing the text 'always', a number or
                  an expression that can be evaluated to a number.
    :returns: A number or the string 'always'.
    :raises: :exc:`~exceptions.ValueError` when the string can't be coerced.
    """
    # Numbers pass through untouched.
    if isinstance(value, numbers.Number):
        return value
    # Other values are expected to be strings.
    if not isinstance(value, string_types):
        msg = "Expected string, got %s instead!"
        raise ValueError(msg % type(value))
    stripped = value.strip()
    # Check for the literal string `always'.
    if stripped.lower() == 'always':
        return 'always'
    # Evaluate other strings as expressions.
    result = simple_eval(stripped)
    if not isinstance(result, numbers.Number):
        msg = "Expected numeric result, got %s instead!"
        raise ValueError(msg % type(result))
    return result
|
Coerce a retention period to a Python value.
:param value: A string containing the text 'always', a number or
an expression that can be evaluated to a number.
:returns: A number or the string 'always'.
:raises: :exc:`~exceptions.ValueError` when the string can't be coerced.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L122-L147
| null |
# rotate-backups: Simple command line interface for backup rotation.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: August 3, 2018
# URL: https://github.com/xolox/python-rotate-backups
"""
Simple to use Python API for rotation of backups.
The :mod:`rotate_backups` module contains the Python API of the
`rotate-backups` package. The core logic of the package is contained in the
:class:`RotateBackups` class.
"""
# Standard library modules.
import collections
import datetime
import fnmatch
import numbers
import os
import re
import shlex
# External dependencies.
from dateutil.relativedelta import relativedelta
from executor import ExternalCommandFailed
from executor.concurrent import CommandPool
from executor.contexts import RemoteContext, create_context
from humanfriendly import Timer, coerce_boolean, format_path, parse_path, pluralize
from humanfriendly.text import compact, concatenate, split
from natsort import natsort
from property_manager import (
PropertyManager,
cached_property,
key_property,
lazy_property,
mutable_property,
required_property,
)
from simpleeval import simple_eval
from six import string_types
from update_dotdee import ConfigLoader
from verboselogs import VerboseLogger
# Semi-standard module versioning.
__version__ = '6.0'
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
ORDERED_FREQUENCIES = (
('minutely', relativedelta(minutes=1)),
('hourly', relativedelta(hours=1)),
('daily', relativedelta(days=1)),
('weekly', relativedelta(weeks=1)),
('monthly', relativedelta(months=1)),
('yearly', relativedelta(years=1)),
)
"""
A list of tuples with two values each:
- The name of a rotation frequency (a string like 'hourly', 'daily', etc.).
- A :class:`~dateutil.relativedelta.relativedelta` object.
The tuples are sorted by increasing delta (intentionally).
"""
SUPPORTED_FREQUENCIES = dict(ORDERED_FREQUENCIES)
"""
A dictionary with rotation frequency names (strings) as keys and
:class:`~dateutil.relativedelta.relativedelta` objects as values. This
dictionary is generated based on the tuples in :data:`ORDERED_FREQUENCIES`.
"""
TIMESTAMP_PATTERN = re.compile(r'''
# Required components.
(?P<year>\d{4} ) \D?
(?P<month>\d{2}) \D?
(?P<day>\d{2} ) \D?
(?:
# Optional components.
(?P<hour>\d{2} ) \D?
(?P<minute>\d{2}) \D?
(?P<second>\d{2})?
)?
''', re.VERBOSE)
"""
A compiled regular expression object used to match timestamps encoded in
filenames.
"""
def coerce_location(value, **options):
    """
    Coerce a string to a :class:`Location` object.

    :param value: The value to coerce (a string or :class:`Location` object).
    :param options: Any keyword arguments are passed on to
                    :func:`~executor.contexts.create_context()`.
    :returns: A :class:`Location` object.
    """
    # Location objects are returned unmodified.
    if isinstance(value, Location):
        return value
    # Other values are expected to be strings.
    if not isinstance(value, string_types):
        msg = "Expected Location object or string, got %s instead!"
        raise ValueError(msg % type(value))
    # Recognize the `ssh_alias:directory' form of remote locations.
    ssh_alias, _, directory = value.partition(':')
    if ssh_alias and directory and '/' not in ssh_alias:
        options['ssh_alias'] = ssh_alias
    else:
        directory = value
    # Create and return the location object.
    return Location(
        context=create_context(**options),
        directory=parse_path(directory),
    )
def load_config_file(configuration_file=None, expand=True):
    """
    Load a configuration file with backup directories and rotation schemes.

    :param configuration_file: Override the pathname of the configuration file
                               to load (a string or :data:`None`).
    :param expand: :data:`True` to expand filename patterns to their matches,
                   :data:`False` otherwise.
    :returns: A generator of tuples with four values each:

              1. An execution context created using :mod:`executor.contexts`.
              2. The pathname of a directory with backups (a string).
              3. A dictionary with the rotation scheme.
              4. A dictionary with additional options.
    :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given
             but doesn't exist or can't be loaded.

    This function is used by :class:`RotateBackups` to discover user defined
    rotation schemes and by :mod:`rotate_backups.cli` to discover directories
    for which backup rotation is configured. When `configuration_file` isn't
    given :class:`~update_dotdee.ConfigLoader` is used to search for
    configuration files in the following locations:

    - ``/etc/rotate-backups.ini`` and ``/etc/rotate-backups.d/*.ini``
    - ``~/.rotate-backups.ini`` and ``~/.rotate-backups.d/*.ini``
    - ``~/.config/rotate-backups.ini`` and ``~/.config/rotate-backups.d/*.ini``

    All of the available configuration files are loaded in the order given
    above, so that sections in user-specific configuration files override
    sections by the same name in system-wide configuration files.
    """
    # Emit the "may be slow" notice at most once per invocation.
    expand_notice_given = False
    if configuration_file:
        loader = ConfigLoader(available_files=[configuration_file], strict=True)
    else:
        loader = ConfigLoader(program_name='rotate-backups', strict=False)
    for section in loader.section_names:
        items = dict(loader.get_options(section))
        # Per-section execution context options (sudo and/or SSH user).
        context_options = {}
        if coerce_boolean(items.get('use-sudo')):
            context_options['sudo'] = True
        if items.get('ssh-user'):
            context_options['ssh_user'] = items['ssh-user']
        # The section name doubles as the (possibly remote) location.
        location = coerce_location(section, **context_options)
        rotation_scheme = dict((name, coerce_retention_period(items[name]))
                               for name in SUPPORTED_FREQUENCIES
                               if name in items)
        options = dict(include_list=split(items.get('include-list', '')),
                       exclude_list=split(items.get('exclude-list', '')),
                       io_scheduling_class=items.get('ionice'),
                       strict=coerce_boolean(items.get('strict', 'yes')),
                       prefer_recent=coerce_boolean(items.get('prefer-recent', 'no')))
        # Don't override the value of the 'removal_command' property unless the
        # 'removal-command' configuration file option has a value set.
        if items.get('removal-command'):
            options['removal_command'] = shlex.split(items['removal-command'])
        # Expand filename patterns?
        if expand and location.have_wildcards:
            logger.verbose("Expanding filename pattern %s on %s ..", location.directory, location.context)
            if location.is_remote and not expand_notice_given:
                logger.notice("Expanding remote filename patterns (may be slow) ..")
                expand_notice_given = True
            for match in sorted(location.context.glob(location.directory)):
                if location.context.is_directory(match):
                    logger.verbose("Matched directory: %s", match)
                    expanded = Location(context=location.context, directory=match)
                    yield expanded, rotation_scheme, options
                else:
                    logger.verbose("Ignoring match (not a directory): %s", match)
        else:
            yield location, rotation_scheme, options
def rotate_backups(directory, rotation_scheme, **options):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    .. note:: This function exists to preserve backwards compatibility with
              older versions of the `rotate-backups` package where all of the
              logic was exposed as a single function. Please refer to the
              documentation of the :class:`RotateBackups` initializer and the
              :func:`~RotateBackups.rotate_backups()` method for an explanation
              of this function's parameters.
    """
    # Thin convenience wrapper around the object oriented API.
    RotateBackups(rotation_scheme=rotation_scheme, **options).rotate_backups(directory)
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
    """
    Initialize a :class:`RotateBackups` object.

    :param rotation_scheme: Used to set :attr:`rotation_scheme`.
    :param options: Any keyword arguments are used to set the values of
                    instance properties that support assignment
                    (:attr:`config_file`, :attr:`dry_run`,
                    :attr:`exclude_list`, :attr:`include_list`,
                    :attr:`io_scheduling_class`, :attr:`removal_command`
                    and :attr:`strict`).
    """
    # Hand the rotation scheme and all other options to PropertyManager
    # in a single call so each keyword is assigned to its property.
    super(RotateBackups, self).__init__(rotation_scheme=rotation_scheme, **options)
@mutable_property
def config_file(self):
    """
    The pathname of a configuration file (a string or :data:`None`).

    When this property is set :func:`rotate_backups()` will use
    :func:`load_config_file()` to give the user (operator) a chance to set
    the rotation scheme and other options via a configuration file.
    """
    # Intentionally no computed value: @mutable_property provides the
    # storage (presumably defaulting to None until assigned -- see the
    # property_manager documentation).
@mutable_property
def dry_run(self):
    """
    :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).

    If this is :data:`True` then :func:`rotate_backups()` won't make any
    actual changes, which provides a 'preview' of the effect of the
    rotation scheme. Right now this is only useful in the command line
    interface because there's no return value.
    """
    # Disabled by default: rotation really removes backups unless opted in.
    return False
@cached_property(writable=True)
def exclude_list(self):
    """
    Filename patterns to exclude specific backups (a list of strings).

    This is a list of strings with :mod:`fnmatch` patterns. When
    :func:`collect_backups()` encounters a backup whose name matches any of
    the patterns in this list the backup will be ignored, *even if it also
    matches the include list* (it's the only logical way to combine both
    lists).

    :see also: :attr:`include_list`
    """
    # By default nothing is excluded.
    return []
@cached_property(writable=True)
def include_list(self):
    """
    Filename patterns to select specific backups (a list of strings).

    This is a list of strings with :mod:`fnmatch` patterns. When it's not
    empty :func:`collect_backups()` will only collect backups whose name
    matches a pattern in the list.

    :see also: :attr:`exclude_list`
    """
    # By default everything is included.
    return []
@mutable_property
def io_scheduling_class(self):
    """
    The I/O scheduling class for backup rotation (a string or :data:`None`).

    When this property is set (and :attr:`~Location.have_ionice` is
    :data:`True`) then ionice_ will be used to set the I/O scheduling class
    for backup rotation. This can be useful to reduce the impact of backup
    rotation on the rest of the system.

    The value of this property is expected to be one of the strings 'idle',
    'best-effort' or 'realtime'.

    .. _ionice: https://linux.die.net/man/1/ionice
    """
    # Intentionally no default value: ionice is only used when configured.
@mutable_property
def prefer_recent(self):
    """
    Whether to prefer older or newer backups in each time slot (a boolean).

    Defaults to :data:`False` which means the oldest backup in each time
    slot (an hour, a day, etc.) is preserved while newer backups in the
    time slot are removed. You can set this to :data:`True` if you would
    like to preserve the newest backup in each time slot instead.
    """
    # Default: keep the oldest backup per time slot (see apply_rotation_scheme()).
    return False
@mutable_property
def removal_command(self):
    """
    The command used to remove backups (a list of strings).

    By default the command ``rm -fR`` is used. This choice was made because
    it works regardless of whether the user's "backups to be rotated" are
    files or directories or a mixture of both.

    .. versionadded: 5.3
       This option was added as a generalization of the idea suggested in
       `pull request 11`_, which made it clear to me that being able to
       customize the removal command has its uses.

    .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
    """
    # `rm -fR' copes with files as well as directory trees.
    return ['rm', '-fR']
@required_property
def rotation_scheme(self):
    """
    The rotation scheme to apply to backups (a dictionary).

    Each key in this dictionary defines a rotation frequency (one of the
    strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
    'yearly') and each value defines a retention count:

    - An integer value represents the number of backups to preserve in the
      given rotation frequency, starting from the most recent backup and
      counting back in time.

    - The string 'always' means all backups in the given rotation frequency
      are preserved (this is intended to be used with the biggest frequency
      in the rotation scheme, e.g. yearly).

    No backups are preserved for rotation frequencies that are not present
    in the dictionary.
    """
    # @required_property: callers must provide a value (__init__ always
    # passes one) -- presumably enforced by property_manager.
@mutable_property
def strict(self):
    """
    Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).

    The easiest way to explain the difference between strict and relaxed
    rotation is using an example:

    - If :attr:`strict` is :data:`True` and the number of hourly backups to
      preserve is three, only backups created in the relevant time window
      (the hour of the most recent backup and the two hours leading up to
      that) will match the hourly frequency.

    - If :attr:`strict` is :data:`False` then the three most recent backups
      will all match the hourly frequency (and thus be preserved),
      regardless of the calculated time window.

    If the explanation above is not clear enough, here's a simple way to
    decide whether you want to customize this behavior:

    - If your backups are created at regular intervals and you never miss
      an interval then the default (:data:`True`) is most likely fine.

    - If your backups are created at irregular intervals then you may want
      to set :attr:`strict` to :data:`False` to convince
      :class:`RotateBackups` to preserve more backups.
    """
    # Strict time windows are the default (see apply_rotation_scheme()).
    return True
def rotate_concurrent(self, *locations, **kw):
    """
    Rotate the backups in the given locations concurrently.

    :param locations: One or more values accepted by :func:`coerce_location()`.
    :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

    This function uses :func:`rotate_backups()` to prepare rotation
    commands for the given locations and then it removes backups in
    parallel, one backup per mount point at a time.

    The idea behind this approach is that parallel rotation is most useful
    when the files to be removed are on different disks and so multiple
    devices can be utilized at the same time.

    Because mount points are per system :func:`rotate_concurrent()` will
    also parallelize over backups located on multiple remote systems.
    """
    timer = Timer()
    pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for location in locations:
        # rotate_backups() returns None when a location contains no
        # backups, so fall back to an empty list to keep the iteration
        # from raising TypeError for backup-less locations.
        for cmd in self.rotate_backups(location, prepare=True, **kw) or []:
            pool.add(cmd)
    if pool.num_commands > 0:
        backups = pluralize(pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", backups)
        pool.run()
        logger.info("Successfully rotated %s in %s.", backups, timer)
def rotate_backups(self, location, load_config=True, prepare=False):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    :param location: Any value accepted by :func:`coerce_location()`.
    :param load_config: If :data:`True` (so by default) the rotation scheme
                        and other options can be customized by the user in
                        a configuration file. In this case the caller's
                        arguments are only used when the configuration file
                        doesn't define a configuration for the location.
    :param prepare: If this is :data:`True` (not the default) then
                    :func:`rotate_backups()` will prepare the required
                    rotation commands without running them.
    :returns: A list with the rotation commands
              (:class:`~executor.ExternalCommand` objects).
    :raises: :exc:`~exceptions.ValueError` when the given location doesn't
             exist, isn't readable or isn't writable. The third check is
             only performed when dry run isn't enabled.

    This function binds the main methods of the :class:`RotateBackups`
    class together to implement backup rotation with an easy to use Python
    API. If you're using `rotate-backups` as a Python API and the default
    behavior is not satisfactory, consider writing your own
    :func:`rotate_backups()` function based on the underlying
    :func:`collect_backups()`, :func:`group_backups()`,
    :func:`apply_rotation_scheme()` and
    :func:`find_preservation_criteria()` methods.
    """
    rotation_commands = []
    location = coerce_location(location)
    # Load configuration overrides by user?
    if load_config:
        location = self.load_config_file(location)
    # Collect the backups in the given directory.
    sorted_backups = self.collect_backups(location)
    if not sorted_backups:
        logger.info("No backups found in %s.", location)
        # Return an empty list (instead of implicitly returning None) so
        # that callers like rotate_concurrent() can always iterate over
        # the result, as promised in the docstring above.
        return rotation_commands
    # Make sure the directory is writable, but only when backups may
    # actually be removed (dry run never writes).
    if not self.dry_run:
        location.ensure_writable()
    most_recent_backup = sorted_backups[-1]
    # Group the backups by the rotation frequencies.
    backups_by_frequency = self.group_backups(sorted_backups)
    # Apply the user defined rotation scheme.
    self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
    # Find which backups to preserve and why.
    backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
    # Apply the calculated rotation scheme.
    for backup in sorted_backups:
        friendly_name = backup.pathname
        if not location.is_remote:
            # Use human friendly pathname formatting for local backups.
            friendly_name = format_path(backup.pathname)
        if backup in backups_to_preserve:
            matching_periods = backups_to_preserve[backup]
            logger.info("Preserving %s (matches %s retention %s) ..",
                        friendly_name, concatenate(map(repr, matching_periods)),
                        "period" if len(matching_periods) == 1 else "periods")
        else:
            logger.info("Deleting %s ..", friendly_name)
            if not self.dry_run:
                # Copy the list with the (possibly user defined) removal command.
                removal_command = list(self.removal_command)
                # Add the pathname of the backup as the final argument.
                removal_command.append(backup.pathname)
                # Construct the command object; grouping by mount point lets
                # rotate_concurrent() parallelize across devices and hosts.
                command = location.context.prepare(
                    command=removal_command,
                    group_by=(location.ssh_alias, location.mount_point),
                    ionice=self.io_scheduling_class,
                )
                rotation_commands.append(command)
                if not prepare:
                    timer = Timer()
                    command.wait()
                    logger.verbose("Deleted %s in %s.", friendly_name, timer)
    if len(backups_to_preserve) == len(sorted_backups):
        logger.info("Nothing to do! (all backups preserved)")
    return rotation_commands
def load_config_file(self, location):
    """
    Load a rotation scheme and other options from a configuration file.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: The configured or given :class:`Location` object.

    Side effect: when a configured location matches, its rotation scheme
    (if non-empty) and any truthy options are copied onto this
    :class:`RotateBackups` instance, overriding the caller's arguments.
    """
    # Normalize the caller's value (string or Location) to a Location object.
    location = coerce_location(location)
    # NOTE: this calls the module-level load_config_file() function which
    # this method intentionally shadows by name. Only the first matching
    # configured location is applied.
    for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
        if configured_location.match(location):
            logger.verbose("Loading configuration for %s ..", location)
            if rotation_scheme:
                self.rotation_scheme = rotation_scheme
            # Only truthy option values override the caller's arguments.
            for name, value in options.items():
                if value:
                    setattr(self, name, value)
            # Create a new Location object based on the directory of the
            # given location and the execution context of the configured
            # location, because:
            #
            # 1. The directory of the configured location may be a filename
            #    pattern whereas we are interested in the expanded name.
            #
            # 2. The execution context of the given location may lack some
            #    details of the configured location.
            return Location(
                context=configured_location.context,
                directory=location.directory,
            )
    logger.verbose("No configuration found for %s.", location)
    return location
def collect_backups(self, location):
    """
    Collect the backups at the given location.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: A sorted :class:`list` of :class:`Backup` objects (the
              backups are sorted by their date).
    :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
             exist or isn't readable.
    """
    backups = []
    location = coerce_location(location)
    logger.info("Scanning %s for backups ..", location)
    location.ensure_readable()
    # Natural-order sort so numbered entries are visited in human-friendly order.
    for entry in natsort(location.context.list_entries(location.directory)):
        match = TIMESTAMP_PATTERN.search(entry)
        if match:
            # The exclude list takes priority over the include list.
            if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                logger.verbose("Excluded %s (it matched the exclude list).", entry)
            elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                logger.verbose("Excluded %s (it didn't match the include list).", entry)
            else:
                try:
                    # Optional groups that didn't match default to '0'
                    # (e.g. a date-only name gets time 00:00:00).
                    backups.append(Backup(
                        pathname=os.path.join(location.directory, entry),
                        timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
                except ValueError as e:
                    # A name that looks like a timestamp but isn't one
                    # (e.g. month 13) is skipped with a notice.
                    logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
        else:
            logger.debug("Failed to match time stamp in filename: %s", entry)
    if backups:
        logger.info("Found %i timestamped backups in %s.", len(backups), location)
    return sorted(backups)
def group_backups(self, backups):
    """
    Group backups collected by :func:`collect_backups()` by rotation frequencies.

    :param backups: A :class:`set` of :class:`Backup` objects.
    :returns: A :class:`dict` whose keys are the names of rotation
              frequencies ('hourly', 'daily', etc.) and whose values are
              dictionaries mapping a time slot key to the list of
              :class:`Backup` objects that fall inside that slot.
    """
    grouped = {frequency: collections.defaultdict(list) for frequency in SUPPORTED_FREQUENCIES}
    for backup in backups:
        # Compute the time slot key of this backup for every frequency.
        slot_keys = {
            'minutely': (backup.year, backup.month, backup.day, backup.hour, backup.minute),
            'hourly': (backup.year, backup.month, backup.day, backup.hour),
            'daily': (backup.year, backup.month, backup.day),
            'weekly': (backup.year, backup.week),
            'monthly': (backup.year, backup.month),
            'yearly': backup.year,
        }
        for frequency, key in slot_keys.items():
            grouped[frequency][key].append(backup)
    return grouped
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
    """
    Apply the user defined rotation scheme to the result of :func:`group_backups()`.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()`.
    :param most_recent_backup: The :class:`~datetime.datetime` of the most
                               recent backup.
    :raises: :exc:`~exceptions.ValueError` when the rotation scheme
             dictionary is empty (this would cause all backups to be
             deleted).

    .. note:: This method mutates the given data structure by removing all
              backups that should be removed to apply the user defined
              rotation scheme.
    """
    if not self.rotation_scheme:
        raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
    for frequency, backups in backups_by_frequency.items():
        # Ignore frequencies not specified by the user.
        if frequency not in self.rotation_scheme:
            backups.clear()
        else:
            # Reduce the number of backups in each time slot of this
            # rotation frequency to a single backup (the oldest one or the
            # newest one).
            for period, backups_in_period in backups.items():
                # prefer_recent keeps the newest backup per slot; the
                # default keeps the oldest.
                index = -1 if self.prefer_recent else 0
                selected_backup = sorted(backups_in_period)[index]
                backups[period] = [selected_backup]
            # Check if we need to rotate away backups in old periods.
            retention_period = self.rotation_scheme[frequency]
            if retention_period != 'always':
                # Remove backups created before the minimum date of this
                # rotation frequency? (relative to the most recent backup)
                if self.strict:
                    minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                    # Iterate over a copy because entries are popped below.
                    for period, backups_in_period in list(backups.items()):
                        for backup in backups_in_period:
                            if backup.timestamp < minimum_date:
                                backups_in_period.remove(backup)
                        if not backups_in_period:
                            backups.pop(period)
                # If there are more periods remaining than the user
                # requested to be preserved we delete the oldest one(s).
                items_to_preserve = sorted(backups.items())[-retention_period:]
                backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
    """
    Collect the criteria used to decide which backups to preserve.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()` which has been
                                 processed by :func:`apply_rotation_scheme()`.
    :returns: A :class:`dict` with :class:`Backup` objects as keys and
              :class:`list` objects containing strings (rotation
              frequencies) as values.
    """
    backups_to_preserve = collections.defaultdict(list)
    # Walk the frequencies from smallest to largest delta so each backup's
    # list of matching frequencies is reported in a stable, intuitive order
    # (minutely before hourly before daily, etc.). The delta itself is not
    # needed here, hence the throwaway name.
    for frequency, _ in ORDERED_FREQUENCIES:
        for period in backups_by_frequency[frequency].values():
            for backup in period:
                backups_to_preserve[backup].append(frequency)
    return backups_to_preserve
class Location(PropertyManager):
    """:class:`Location` objects represent a root directory containing backups."""

    @required_property
    def context(self):
        """An execution context created using :mod:`executor.contexts`."""

    @required_property
    def directory(self):
        """The pathname of a directory containing backups (a string)."""

    @lazy_property
    def have_ionice(self):
        """:data:`True` when ionice_ is available, :data:`False` otherwise."""
        return self.context.have_ionice

    @lazy_property
    def have_wildcards(self):
        """:data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
        # Only '*' is treated as a wildcard marker here (not '?' or '[').
        return '*' in self.directory

    @lazy_property
    def mount_point(self):
        """
        The pathname of the mount point of :attr:`directory` (a string or :data:`None`).

        If the ``stat --format=%m ...`` command that is used to determine the
        mount point fails, the value of this property defaults to :data:`None`.
        This enables graceful degradation on e.g. Mac OS X whose ``stat``
        implementation is rather bare bones compared to GNU/Linux.
        """
        try:
            return self.context.capture('stat', '--format=%m', self.directory, silent=True)
        except ExternalCommandFailed:
            return None

    @lazy_property
    def is_remote(self):
        """:data:`True` if the location is remote, :data:`False` otherwise."""
        return isinstance(self.context, RemoteContext)

    @lazy_property
    def ssh_alias(self):
        """The SSH alias of a remote location (a string or :data:`None`)."""
        return self.context.ssh_alias if self.is_remote else None

    @property
    def key_properties(self):
        """
        A list of strings with the names of the :attr:`~custom_property.key` properties.

        Overrides :attr:`~property_manager.PropertyManager.key_properties` to
        customize the ordering of :class:`Location` objects so that they are
        ordered first by their :attr:`ssh_alias` and second by their
        :attr:`directory`.
        """
        return ['ssh_alias', 'directory'] if self.is_remote else ['directory']

    def ensure_exists(self):
        """Make sure the location exists."""
        if not self.context.is_directory(self.directory):
            # This can also happen when we don't have permission to one of the
            # parent directories so we'll point that out in the error message
            # when it seems applicable (so as not to confuse users).
            if self.context.have_superuser_privileges:
                msg = "The directory %s doesn't exist!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't accessible, most likely
                    because it doesn't exist or because of permissions. If
                    you're sure the directory exists you can use the
                    --use-sudo option.
                """, location=self))

    def ensure_readable(self):
        """Make sure the location exists and is readable."""
        self.ensure_exists()
        if not self.context.is_readable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't readable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't readable, most likely
                    because of permissions. Consider using the --use-sudo
                    option.
                """, location=self))

    def ensure_writable(self):
        """Make sure the directory exists and is writable."""
        self.ensure_exists()
        if not self.context.is_writable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't writable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't writable, most likely due
                    to permissions. Consider using the --use-sudo option.
                """, location=self))

    def match(self, location):
        """
        Check if the given location "matches".

        :param location: The :class:`Location` object to try to match.
        :returns: :data:`True` if the two locations are on the same system and
                  the :attr:`directory` can be matched as a filename pattern or
                  a literal match on the normalized pathname.
        """
        if self.ssh_alias != location.ssh_alias:
            # Never match locations on other systems.
            return False
        elif self.have_wildcards:
            # Match filename patterns using fnmatch().
            return fnmatch.fnmatch(location.directory, self.directory)
        else:
            # Compare normalized directory pathnames.
            # NOTE(review): rebinding `self` to a string works but is
            # confusing; a distinct local name would read better.
            self = os.path.normpath(self.directory)
            other = os.path.normpath(location.directory)
            return self == other

    def __str__(self):
        """Render a simple human readable representation of a location."""
        return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
class Backup(PropertyManager):
    """:class:`Backup` objects represent a rotation subject."""

    # Order Backup objects first by timestamp, then by pathname (overrides
    # property_manager.PropertyManager.key_properties).
    key_properties = 'timestamp', 'pathname'
    """
    Customize the ordering of :class:`Backup` objects.

    :class:`Backup` objects are ordered first by their :attr:`timestamp` and
    second by their :attr:`pathname`. This class variable overrides
    :attr:`~property_manager.PropertyManager.key_properties`.
    """

    @key_property
    def pathname(self):
        """The pathname of the backup (a string)."""

    @key_property
    def timestamp(self):
        """The date and time when the backup was created (a :class:`~datetime.datetime` object)."""

    @property
    def week(self):
        """The ISO week number of :attr:`timestamp` (a number)."""
        return self.timestamp.isocalendar()[1]

    def __getattr__(self, name):
        """Defer attribute access to :attr:`timestamp` (e.g. ``backup.year``, ``backup.month``)."""
        return getattr(self.timestamp, name)
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
load_config_file
|
python
|
def load_config_file(configuration_file=None, expand=True):
    """
    Load a configuration file with backup directories and rotation schemes.

    :param configuration_file: Override the pathname of the configuration file
                               to load (a string or :data:`None` to search the
                               default locations).
    :param expand: :data:`True` to expand filename patterns to their matches,
                   :data:`False` otherwise.
    :returns: A generator of tuples with three values each:

              1. A :class:`Location` object (execution context + directory).
              2. A dictionary with the rotation scheme.
              3. A dictionary with additional options.
    :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given
             but doesn't exist or can't be loaded (strict mode).
    """
    expand_notice_given = False
    if configuration_file:
        # An explicitly given file must exist (strict=True raises otherwise).
        loader = ConfigLoader(available_files=[configuration_file], strict=True)
    else:
        # Otherwise search the default 'rotate-backups' locations leniently.
        loader = ConfigLoader(program_name='rotate-backups', strict=False)
    for section in loader.section_names:
        items = dict(loader.get_options(section))
        context_options = {}
        if coerce_boolean(items.get('use-sudo')):
            context_options['sudo'] = True
        if items.get('ssh-user'):
            context_options['ssh_user'] = items['ssh-user']
        # The section name is the location (optionally 'ssh-alias:directory').
        location = coerce_location(section, **context_options)
        rotation_scheme = dict((name, coerce_retention_period(items[name]))
                               for name in SUPPORTED_FREQUENCIES
                               if name in items)
        options = dict(include_list=split(items.get('include-list', '')),
                       exclude_list=split(items.get('exclude-list', '')),
                       io_scheduling_class=items.get('ionice'),
                       strict=coerce_boolean(items.get('strict', 'yes')),
                       prefer_recent=coerce_boolean(items.get('prefer-recent', 'no')))
        # Don't override the value of the 'removal_command' property unless the
        # 'removal-command' configuration file option has a value set.
        if items.get('removal-command'):
            options['removal_command'] = shlex.split(items['removal-command'])
        # Expand filename patterns?
        if expand and location.have_wildcards:
            logger.verbose("Expanding filename pattern %s on %s ..", location.directory, location.context)
            if location.is_remote and not expand_notice_given:
                # Warn only once per call about potentially slow remote globbing.
                logger.notice("Expanding remote filename patterns (may be slow) ..")
                expand_notice_given = True
            for match in sorted(location.context.glob(location.directory)):
                if location.context.is_directory(match):
                    logger.verbose("Matched directory: %s", match)
                    expanded = Location(context=location.context, directory=match)
                    yield expanded, rotation_scheme, options
                else:
                    logger.verbose("Ignoring match (not a directory): %s", match)
        else:
            yield location, rotation_scheme, options
|
Load a configuration file with backup directories and rotation schemes.
:param configuration_file: Override the pathname of the configuration file
to load (a string or :data:`None`).
:param expand: :data:`True` to expand filename patterns to their matches,
:data:`False` otherwise.
:returns: A generator of tuples with three values each:
1. A :class:`Location` object, combining an execution context created using :mod:`executor.contexts` with the pathname of a directory with backups (a string).
2. A dictionary with the rotation scheme.
3. A dictionary with additional options.
:raises: :exc:`~exceptions.ValueError` when `configuration_file` is given
but doesn't exist or can't be loaded.
This function is used by :class:`RotateBackups` to discover user defined
rotation schemes and by :mod:`rotate_backups.cli` to discover directories
for which backup rotation is configured. When `configuration_file` isn't
given :class:`~update_dotdee.ConfigLoader` is used to search for
configuration files in the following locations:
- ``/etc/rotate-backups.ini`` and ``/etc/rotate-backups.d/*.ini``
- ``~/.rotate-backups.ini`` and ``~/.rotate-backups.d/*.ini``
- ``~/.config/rotate-backups.ini`` and ``~/.config/rotate-backups.d/*.ini``
All of the available configuration files are loaded in the order given
above, so that sections in user-specific configuration files override
sections by the same name in system-wide configuration files.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L150-L220
|
[
"def coerce_location(value, **options):\n \"\"\"\n Coerce a string to a :class:`Location` object.\n\n :param value: The value to coerce (a string or :class:`Location` object).\n :param options: Any keyword arguments are passed on to\n :func:`~executor.contexts.create_context()`.\n :returns: A :class:`Location` object.\n \"\"\"\n # Location objects pass through untouched.\n if not isinstance(value, Location):\n # Other values are expected to be strings.\n if not isinstance(value, string_types):\n msg = \"Expected Location object or string, got %s instead!\"\n raise ValueError(msg % type(value))\n # Try to parse a remote location.\n ssh_alias, _, directory = value.partition(':')\n if ssh_alias and directory and '/' not in ssh_alias:\n options['ssh_alias'] = ssh_alias\n else:\n directory = value\n # Create the location object.\n value = Location(\n context=create_context(**options),\n directory=parse_path(directory),\n )\n return value\n"
] |
# rotate-backups: Simple command line interface for backup rotation.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: August 3, 2018
# URL: https://github.com/xolox/python-rotate-backups
"""
Simple to use Python API for rotation of backups.
The :mod:`rotate_backups` module contains the Python API of the
`rotate-backups` package. The core logic of the package is contained in the
:class:`RotateBackups` class.
"""
# Standard library modules.
import collections
import datetime
import fnmatch
import numbers
import os
import re
import shlex
# External dependencies.
from dateutil.relativedelta import relativedelta
from executor import ExternalCommandFailed
from executor.concurrent import CommandPool
from executor.contexts import RemoteContext, create_context
from humanfriendly import Timer, coerce_boolean, format_path, parse_path, pluralize
from humanfriendly.text import compact, concatenate, split
from natsort import natsort
from property_manager import (
PropertyManager,
cached_property,
key_property,
lazy_property,
mutable_property,
required_property,
)
from simpleeval import simple_eval
from six import string_types
from update_dotdee import ConfigLoader
from verboselogs import VerboseLogger
# Semi-standard module versioning.
__version__ = '6.0'
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
ORDERED_FREQUENCIES = (
('minutely', relativedelta(minutes=1)),
('hourly', relativedelta(hours=1)),
('daily', relativedelta(days=1)),
('weekly', relativedelta(weeks=1)),
('monthly', relativedelta(months=1)),
('yearly', relativedelta(years=1)),
)
"""
A list of tuples with two values each:
- The name of a rotation frequency (a string like 'hourly', 'daily', etc.).
- A :class:`~dateutil.relativedelta.relativedelta` object.
The tuples are sorted by increasing delta (intentionally).
"""
SUPPORTED_FREQUENCIES = dict(ORDERED_FREQUENCIES)
"""
A dictionary with rotation frequency names (strings) as keys and
:class:`~dateutil.relativedelta.relativedelta` objects as values. This
dictionary is generated based on the tuples in :data:`ORDERED_FREQUENCIES`.
"""
TIMESTAMP_PATTERN = re.compile(r'''
# Required components.
(?P<year>\d{4} ) \D?
(?P<month>\d{2}) \D?
(?P<day>\d{2} ) \D?
(?:
# Optional components.
(?P<hour>\d{2} ) \D?
(?P<minute>\d{2}) \D?
(?P<second>\d{2})?
)?
''', re.VERBOSE)
"""
A compiled regular expression object used to match timestamps encoded in
filenames.
"""
def coerce_location(value, **options):
    """
    Coerce a string to a :class:`Location` object.

    :param value: The value to coerce (a string or :class:`Location` object).
    :param options: Any keyword arguments are passed on to
                    :func:`~executor.contexts.create_context()`.
    :returns: A :class:`Location` object.
    :raises: :exc:`~exceptions.ValueError` when `value` is neither a string
             nor a :class:`Location` object.
    """
    # Location objects pass through untouched.
    if not isinstance(value, Location):
        # Other values are expected to be strings.
        if not isinstance(value, string_types):
            msg = "Expected Location object or string, got %s instead!"
            raise ValueError(msg % type(value))
        # Try to parse a remote location of the form 'ssh-alias:directory'.
        ssh_alias, _, directory = value.partition(':')
        if ssh_alias and directory and '/' not in ssh_alias:
            # NOTE(review): a Windows style pathname like 'C:\\backups' would
            # also satisfy this check -- presumably only POSIX pathnames are
            # expected here (confirm if Windows support is ever needed).
            options['ssh_alias'] = ssh_alias
        else:
            # No SSH alias: the whole value is a local directory.
            directory = value
        # Create the location object.
        value = Location(
            context=create_context(**options),
            directory=parse_path(directory),
        )
    return value
def coerce_retention_period(value):
    """
    Coerce a retention period to a Python value.

    :param value: A string containing the text 'always', a number or
                  an expression that can be evaluated to a number.
    :returns: A number or the string 'always'.
    :raises: :exc:`~exceptions.ValueError` when the string can't be coerced.
    """
    # Numbers pass through untouched.
    if isinstance(value, numbers.Number):
        return value
    # Everything else must be a string.
    if not isinstance(value, string_types):
        raise ValueError("Expected string, got %s instead!" % type(value))
    stripped = value.strip()
    # Check for the literal string 'always' (case insensitive).
    if stripped.lower() == 'always':
        return 'always'
    # Evaluate any other string as a numeric expression.
    evaluated = simple_eval(stripped)
    if not isinstance(evaluated, numbers.Number):
        raise ValueError("Expected numeric result, got %s instead!" % type(evaluated))
    return evaluated
def rotate_backups(directory, rotation_scheme, **options):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    .. note:: This function exists to preserve backwards compatibility with
              older versions of the `rotate-backups` package where all of the
              logic was exposed as a single function. Please refer to the
              documentation of the :class:`RotateBackups` initializer and the
              :func:`~RotateBackups.rotate_backups()` method for an explanation
              of this function's parameters.
    """
    # Thin convenience wrapper around the object oriented API.
    RotateBackups(rotation_scheme=rotation_scheme, **options).rotate_backups(directory)
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
    """
    Initialize a :class:`RotateBackups` object.

    :param rotation_scheme: Used to set :attr:`rotation_scheme`.
    :param options: Any keyword arguments are used to set the values of
                    instance properties that support assignment
                    (:attr:`config_file`, :attr:`dry_run`,
                    :attr:`exclude_list`, :attr:`include_list`,
                    :attr:`io_scheduling_class`, :attr:`removal_command`
                    and :attr:`strict`).
    """
    # Fold the positional argument into the keyword arguments so the
    # PropertyManager base class can set all properties in one pass.
    options.update(rotation_scheme=rotation_scheme)
    super(RotateBackups, self).__init__(**options)
@mutable_property
def config_file(self):
"""
The pathname of a configuration file (a string or :data:`None`).
When this property is set :func:`rotate_backups()` will use
:func:`load_config_file()` to give the user (operator) a chance to set
the rotation scheme and other options via a configuration file.
"""
@mutable_property
def dry_run(self):
"""
:data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
If this is :data:`True` then :func:`rotate_backups()` won't make any
actual changes, which provides a 'preview' of the effect of the
rotation scheme. Right now this is only useful in the command line
interface because there's no return value.
"""
return False
@cached_property(writable=True)
def exclude_list(self):
"""
Filename patterns to exclude specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When
:func:`collect_backups()` encounters a backup whose name matches any of
the patterns in this list the backup will be ignored, *even if it also
matches the include list* (it's the only logical way to combine both
lists).
:see also: :attr:`include_list`
"""
return []
@cached_property(writable=True)
def include_list(self):
"""
Filename patterns to select specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When it's not
empty :func:`collect_backups()` will only collect backups whose name
matches a pattern in the list.
:see also: :attr:`exclude_list`
"""
return []
@mutable_property
def io_scheduling_class(self):
"""
The I/O scheduling class for backup rotation (a string or :data:`None`).
When this property is set (and :attr:`~Location.have_ionice` is
:data:`True`) then ionice_ will be used to set the I/O scheduling class
for backup rotation. This can be useful to reduce the impact of backup
rotation on the rest of the system.
The value of this property is expected to be one of the strings 'idle',
'best-effort' or 'realtime'.
.. _ionice: https://linux.die.net/man/1/ionice
"""
@mutable_property
def prefer_recent(self):
"""
Whether to prefer older or newer backups in each time slot (a boolean).
Defaults to :data:`False` which means the oldest backup in each time
slot (an hour, a day, etc.) is preserved while newer backups in the
time slot are removed. You can set this to :data:`True` if you would
like to preserve the newest backup in each time slot instead.
"""
return False
@mutable_property
def removal_command(self):
"""
The command used to remove backups (a list of strings).
By default the command ``rm -fR`` is used. This choice was made because
it works regardless of whether the user's "backups to be rotated" are
files or directories or a mixture of both.
.. versionadded: 5.3
This option was added as a generalization of the idea suggested in
`pull request 11`_, which made it clear to me that being able to
customize the removal command has its uses.
.. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
"""
return ['rm', '-fR']
@required_property
def rotation_scheme(self):
"""
The rotation scheme to apply to backups (a dictionary).
Each key in this dictionary defines a rotation frequency (one of the
strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
'yearly') and each value defines a retention count:
- An integer value represents the number of backups to preserve in the
given rotation frequency, starting from the most recent backup and
counting back in time.
- The string 'always' means all backups in the given rotation frequency
are preserved (this is intended to be used with the biggest frequency
in the rotation scheme, e.g. yearly).
No backups are preserved for rotation frequencies that are not present
in the dictionary.
"""
@mutable_property
def strict(self):
"""
Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
The easiest way to explain the difference between strict and relaxed
rotation is using an example:
- If :attr:`strict` is :data:`True` and the number of hourly backups to
preserve is three, only backups created in the relevant time window
(the hour of the most recent backup and the two hours leading up to
that) will match the hourly frequency.
- If :attr:`strict` is :data:`False` then the three most recent backups
will all match the hourly frequency (and thus be preserved),
regardless of the calculated time window.
If the explanation above is not clear enough, here's a simple way to
decide whether you want to customize this behavior:
- If your backups are created at regular intervals and you never miss
an interval then the default (:data:`True`) is most likely fine.
- If your backups are created at irregular intervals then you may want
to set :attr:`strict` to :data:`False` to convince
:class:`RotateBackups` to preserve more backups.
"""
return True
def rotate_concurrent(self, *locations, **kw):
    """
    Rotate the backups in the given locations concurrently.

    :param locations: One or more values accepted by :func:`coerce_location()`.
    :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

    This function uses :func:`rotate_backups()` to prepare rotation
    commands for the given locations and then it removes backups in
    parallel, one backup per mount point at a time.

    The idea behind this approach is that parallel rotation is most useful
    when the files to be removed are on different disks and so multiple
    devices can be utilized at the same time.

    Because mount points are per system :func:`rotate_concurrent()` will
    also parallelize over backups located on multiple remote systems.
    """
    timer = Timer()
    pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for location in locations:
        # prepare=True constructs the removal commands without running them;
        # they are queued in the pool and executed in parallel below.
        for cmd in self.rotate_backups(location, prepare=True, **kw):
            pool.add(cmd)
    if pool.num_commands > 0:
        backups = pluralize(pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", backups)
        pool.run()
        logger.info("Successfully rotated %s in %s.", backups, timer)
def rotate_backups(self, location, load_config=True, prepare=False):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    :param location: Any value accepted by :func:`coerce_location()`.
    :param load_config: If :data:`True` (so by default) the rotation scheme
                        and other options can be customized by the user in
                        a configuration file. In this case the caller's
                        arguments are only used when the configuration file
                        doesn't define a configuration for the location.
    :param prepare: If this is :data:`True` (not the default) then
                    :func:`rotate_backups()` will prepare the required
                    rotation commands without running them.
    :returns: A list with the rotation commands
              (:class:`~executor.ExternalCommand` objects). The list is
              empty when no backups were found or nothing was deleted.
    :raises: :exc:`~exceptions.ValueError` when the given location doesn't
             exist, isn't readable or isn't writable. The third check is
             only performed when dry run isn't enabled.

    This function binds the main methods of the :class:`RotateBackups`
    class together to implement backup rotation with an easy to use Python
    API. If you're using `rotate-backups` as a Python API and the default
    behavior is not satisfactory, consider writing your own
    :func:`rotate_backups()` function based on the underlying
    :func:`collect_backups()`, :func:`group_backups()`,
    :func:`apply_rotation_scheme()` and
    :func:`find_preservation_criteria()` methods.
    """
    rotation_commands = []
    location = coerce_location(location)
    # Load configuration overrides by user?
    if load_config:
        location = self.load_config_file(location)
    # Collect the backups in the given directory.
    sorted_backups = self.collect_backups(location)
    if not sorted_backups:
        logger.info("No backups found in %s.", location)
        # Bug fix: return the (empty) list instead of a bare `return`
        # (which yielded None) so that callers like rotate_concurrent()
        # can always iterate over the result without a TypeError.
        return rotation_commands
    # Make sure the directory is writable.
    if not self.dry_run:
        location.ensure_writable()
    most_recent_backup = sorted_backups[-1]
    # Group the backups by the rotation frequencies.
    backups_by_frequency = self.group_backups(sorted_backups)
    # Apply the user defined rotation scheme.
    self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
    # Find which backups to preserve and why.
    backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
    # Apply the calculated rotation scheme.
    for backup in sorted_backups:
        friendly_name = backup.pathname
        if not location.is_remote:
            # Use human friendly pathname formatting for local backups.
            friendly_name = format_path(backup.pathname)
        if backup in backups_to_preserve:
            matching_periods = backups_to_preserve[backup]
            logger.info("Preserving %s (matches %s retention %s) ..",
                        friendly_name, concatenate(map(repr, matching_periods)),
                        "period" if len(matching_periods) == 1 else "periods")
        else:
            logger.info("Deleting %s ..", friendly_name)
            if not self.dry_run:
                # Copy the list with the (possibly user defined) removal command.
                removal_command = list(self.removal_command)
                # Add the pathname of the backup as the final argument.
                removal_command.append(backup.pathname)
                # Construct the command object, grouped per mount point so
                # that parallel rotation hits one device at a time.
                command = location.context.prepare(
                    command=removal_command,
                    group_by=(location.ssh_alias, location.mount_point),
                    ionice=self.io_scheduling_class,
                )
                rotation_commands.append(command)
                if not prepare:
                    timer = Timer()
                    command.wait()
                    logger.verbose("Deleted %s in %s.", friendly_name, timer)
    if len(backups_to_preserve) == len(sorted_backups):
        logger.info("Nothing to do! (all backups preserved)")
    return rotation_commands
def load_config_file(self, location):
    """
    Load a rotation scheme and other options from a configuration file.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: The configured or given :class:`Location` object.

    Side effect: when a configured location matches, its rotation scheme
    (if non-empty) and any truthy options are copied onto this
    :class:`RotateBackups` instance, overriding the caller's arguments.
    """
    # Normalize the caller's value (string or Location) to a Location object.
    location = coerce_location(location)
    # NOTE: this calls the module-level load_config_file() function which
    # this method intentionally shadows by name. Only the first matching
    # configured location is applied.
    for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
        if configured_location.match(location):
            logger.verbose("Loading configuration for %s ..", location)
            if rotation_scheme:
                self.rotation_scheme = rotation_scheme
            # Only truthy option values override the caller's arguments.
            for name, value in options.items():
                if value:
                    setattr(self, name, value)
            # Create a new Location object based on the directory of the
            # given location and the execution context of the configured
            # location, because:
            #
            # 1. The directory of the configured location may be a filename
            #    pattern whereas we are interested in the expanded name.
            #
            # 2. The execution context of the given location may lack some
            #    details of the configured location.
            return Location(
                context=configured_location.context,
                directory=location.directory,
            )
    logger.verbose("No configuration found for %s.", location)
    return location
def collect_backups(self, location):
    """
    Collect the backups at the given location.
    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: A sorted :class:`list` of :class:`Backup` objects (the
              backups are sorted by their date).
    :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
             exist or isn't readable.
    """
    backups = []
    location = coerce_location(location)
    logger.info("Scanning %s for backups ..", location)
    location.ensure_readable()
    for entry in natsort(location.context.list_entries(location.directory)):
        match = TIMESTAMP_PATTERN.search(entry)
        if match:
            # The exclude list takes precedence over the include list.
            if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                logger.verbose("Excluded %s (it matched the exclude list).", entry)
            elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                logger.verbose("Excluded %s (it didn't match the include list).", entry)
            else:
                try:
                    backups.append(Backup(
                        pathname=os.path.join(location.directory, entry),
                        # match.groups('0') substitutes '0' for the optional
                        # components (hour/minute/second) that didn't match.
                        timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
                except ValueError as e:
                    # E.g. month 13 or day 32: the digits matched the
                    # pattern but don't form a valid calendar date.
                    logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
        else:
            logger.debug("Failed to match time stamp in filename: %s", entry)
    if backups:
        logger.info("Found %i timestamped backups in %s.", len(backups), location)
    return sorted(backups)
def group_backups(self, backups):
    """
    Group backups collected by :func:`collect_backups()` by rotation frequencies.
    :param backups: A :class:`set` of :class:`Backup` objects.
    :returns: A :class:`dict` whose keys are the names of rotation
              frequencies ('hourly', 'daily', etc.) and whose values are
              dictionaries. Each nested dictionary contains lists of
              :class:`Backup` objects that are grouped together because
              they belong into the same time unit for the corresponding
              rotation frequency.
    """
    grouped = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
    for backup in backups:
        # The "time slot" key for each frequency is the tuple of date/time
        # components that are significant for that frequency.
        slots = (
            ('minutely', (backup.year, backup.month, backup.day, backup.hour, backup.minute)),
            ('hourly', (backup.year, backup.month, backup.day, backup.hour)),
            ('daily', (backup.year, backup.month, backup.day)),
            ('weekly', (backup.year, backup.week)),
            ('monthly', (backup.year, backup.month)),
            ('yearly', backup.year),
        )
        for frequency, key in slots:
            grouped[frequency][key].append(backup)
    return grouped
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
    """
    Apply the user defined rotation scheme to the result of :func:`group_backups()`.
    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()`.
    :param most_recent_backup: The :class:`~datetime.datetime` of the most
                               recent backup.
    :raises: :exc:`~exceptions.ValueError` when the rotation scheme
             dictionary is empty (this would cause all backups to be
             deleted).
    .. note:: This method mutates the given data structure by removing all
              backups that should be removed to apply the user defined
              rotation scheme.
    """
    if not self.rotation_scheme:
        raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
    for frequency, backups in backups_by_frequency.items():
        # Ignore frequencies not specified by the user.
        if frequency not in self.rotation_scheme:
            backups.clear()
        else:
            # Reduce the number of backups in each time slot of this
            # rotation frequency to a single backup (the oldest one or the
            # newest one). Backups sort chronologically (timestamp is the
            # primary sort key) so index 0 is the oldest, -1 the newest.
            for period, backups_in_period in backups.items():
                index = -1 if self.prefer_recent else 0
                selected_backup = sorted(backups_in_period)[index]
                backups[period] = [selected_backup]
            # Check if we need to rotate away backups in old periods.
            retention_period = self.rotation_scheme[frequency]
            if retention_period != 'always':
                # Remove backups created before the minimum date of this
                # rotation frequency? (relative to the most recent backup)
                if self.strict:
                    minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                    # Iterate over a copy because entries may be popped.
                    for period, backups_in_period in list(backups.items()):
                        # Removing while iterating is safe here: each period
                        # holds exactly one backup after the reduction above.
                        for backup in backups_in_period:
                            if backup.timestamp < minimum_date:
                                backups_in_period.remove(backup)
                        if not backups_in_period:
                            backups.pop(period)
                # If there are more periods remaining than the user
                # requested to be preserved we delete the oldest one(s).
                items_to_preserve = sorted(backups.items())[-retention_period:]
                backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
    """
    Collect the criteria used to decide which backups to preserve.
    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()` which has been
                                 processed by :func:`apply_rotation_scheme()`.
    :returns: A :class:`dict` with :class:`Backup` objects as keys and
              :class:`list` objects containing strings (rotation
              frequencies) as values.
    """
    backups_to_preserve = collections.defaultdict(list)
    # Iterate in ORDERED_FREQUENCIES order so each backup's list of matching
    # frequencies runs from the smallest to the largest time unit. The
    # deltas themselves aren't needed here (the original bound them to an
    # unused `delta' variable).
    for frequency, _delta in ORDERED_FREQUENCIES:
        # Tolerate mappings that are missing a frequency key instead of
        # raising KeyError (robustness; group_backups() always provides
        # all of them, but callers may pass a pruned mapping).
        for period in backups_by_frequency.get(frequency, {}).values():
            for backup in period:
                backups_to_preserve[backup].append(frequency)
    return backups_to_preserve
class Location(PropertyManager):

    """:class:`Location` objects represent a root directory containing backups."""

    @required_property
    def context(self):
        """An execution context created using :mod:`executor.contexts`."""

    @required_property
    def directory(self):
        """The pathname of a directory containing backups (a string)."""

    @lazy_property
    def have_ionice(self):
        """:data:`True` when ionice_ is available, :data:`False` otherwise."""
        return self.context.have_ionice

    @lazy_property
    def have_wildcards(self):
        """:data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
        return '*' in self.directory

    @lazy_property
    def mount_point(self):
        """
        The pathname of the mount point of :attr:`directory` (a string or :data:`None`).
        If the ``stat --format=%m ...`` command that is used to determine the
        mount point fails, the value of this property defaults to :data:`None`.
        This enables graceful degradation on e.g. Mac OS X whose ``stat``
        implementation is rather bare bones compared to GNU/Linux.
        """
        try:
            return self.context.capture('stat', '--format=%m', self.directory, silent=True)
        except ExternalCommandFailed:
            return None

    @lazy_property
    def is_remote(self):
        """:data:`True` if the location is remote, :data:`False` otherwise."""
        return isinstance(self.context, RemoteContext)

    @lazy_property
    def ssh_alias(self):
        """The SSH alias of a remote location (a string or :data:`None`)."""
        return self.context.ssh_alias if self.is_remote else None

    @property
    def key_properties(self):
        """
        A list of strings with the names of the :attr:`~custom_property.key` properties.
        Overrides :attr:`~property_manager.PropertyManager.key_properties` to
        customize the ordering of :class:`Location` objects so that they are
        ordered first by their :attr:`ssh_alias` and second by their
        :attr:`directory`.
        """
        return ['ssh_alias', 'directory'] if self.is_remote else ['directory']

    def ensure_exists(self):
        """Make sure the location exists."""
        if not self.context.is_directory(self.directory):
            # This can also happen when we don't have permission to one of the
            # parent directories so we'll point that out in the error message
            # when it seems applicable (so as not to confuse users).
            if self.context.have_superuser_privileges:
                msg = "The directory %s doesn't exist!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't accessible, most likely
                    because it doesn't exist or because of permissions. If
                    you're sure the directory exists you can use the
                    --use-sudo option.
                """, location=self))

    def ensure_readable(self):
        """Make sure the location exists and is readable."""
        self.ensure_exists()
        if not self.context.is_readable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't readable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't readable, most likely
                    because of permissions. Consider using the --use-sudo
                    option.
                """, location=self))

    def ensure_writable(self):
        """Make sure the directory exists and is writable."""
        self.ensure_exists()
        if not self.context.is_writable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't writable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't writable, most likely due
                    to permissions. Consider using the --use-sudo option.
                """, location=self))

    def match(self, location):
        """
        Check if the given location "matches".
        :param location: The :class:`Location` object to try to match.
        :returns: :data:`True` if the two locations are on the same system and
                  the :attr:`directory` can be matched as a filename pattern or
                  a literal match on the normalized pathname.
        """
        if self.ssh_alias != location.ssh_alias:
            # Never match locations on other systems.
            return False
        elif self.have_wildcards:
            # Match filename patterns using fnmatch().
            return fnmatch.fnmatch(location.directory, self.directory)
        else:
            # Compare normalized directory pathnames. (Idiom fix: the
            # original rebound `self' to a string here, shadowing the
            # instance reference; use distinct local names instead.)
            ours = os.path.normpath(self.directory)
            theirs = os.path.normpath(location.directory)
            return ours == theirs

    def __str__(self):
        """Render a simple human readable representation of a location."""
        return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
class Backup(PropertyManager):

    """:class:`Backup` objects represent a rotation subject."""

    # Order Backup objects by timestamp first, pathname second (the pathname
    # acts as a tie breaker for backups with identical timestamps).
    key_properties = 'timestamp', 'pathname'
    """
    Customize the ordering of :class:`Backup` objects.
    :class:`Backup` objects are ordered first by their :attr:`timestamp` and
    second by their :attr:`pathname`. This class variable overrides
    :attr:`~property_manager.PropertyManager.key_properties`.
    """

    @key_property
    def pathname(self):
        """The pathname of the backup (a string)."""

    @key_property
    def timestamp(self):
        """The date and time when the backup was created (a :class:`~datetime.datetime` object)."""

    @property
    def week(self):
        """The ISO week number of :attr:`timestamp` (a number)."""
        # isocalendar() returns (ISO year, ISO week number, ISO weekday).
        return self.timestamp.isocalendar()[1]

    def __getattr__(self, name):
        """Defer attribute access to :attr:`timestamp`."""
        # Makes e.g. backup.year, backup.month and backup.hour work directly
        # on Backup objects (relied upon by RotateBackups.group_backups()).
        return getattr(self.timestamp, name)
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
rotate_backups
|
python
|
def rotate_backups(directory, rotation_scheme, **options):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    :param directory: The directory containing the backups (passed on to
                      :func:`RotateBackups.rotate_backups()`).
    :param rotation_scheme: Used to initialize :class:`RotateBackups`.
    :param options: Any keyword arguments are passed on to the
                    :class:`RotateBackups` initializer.

    This function exists to preserve backwards compatibility with older
    versions of the `rotate-backups` package where all of the logic was
    exposed as a single function.
    """
    program = RotateBackups(rotation_scheme=rotation_scheme, **options)
    program.rotate_backups(directory)
|
Rotate the backups in a directory according to a flexible rotation scheme.
.. note:: This function exists to preserve backwards compatibility with
older versions of the `rotate-backups` package where all of the
logic was exposed as a single function. Please refer to the
documentation of the :class:`RotateBackups` initializer and the
:func:`~RotateBackups.rotate_backups()` method for an explanation
of this function's parameters.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L223-L235
| null |
# rotate-backups: Simple command line interface for backup rotation.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: August 3, 2018
# URL: https://github.com/xolox/python-rotate-backups
"""
Simple to use Python API for rotation of backups.
The :mod:`rotate_backups` module contains the Python API of the
`rotate-backups` package. The core logic of the package is contained in the
:class:`RotateBackups` class.
"""
# Standard library modules.
import collections
import datetime
import fnmatch
import numbers
import os
import re
import shlex
# External dependencies.
from dateutil.relativedelta import relativedelta
from executor import ExternalCommandFailed
from executor.concurrent import CommandPool
from executor.contexts import RemoteContext, create_context
from humanfriendly import Timer, coerce_boolean, format_path, parse_path, pluralize
from humanfriendly.text import compact, concatenate, split
from natsort import natsort
from property_manager import (
PropertyManager,
cached_property,
key_property,
lazy_property,
mutable_property,
required_property,
)
from simpleeval import simple_eval
from six import string_types
from update_dotdee import ConfigLoader
from verboselogs import VerboseLogger
# Semi-standard module versioning.
__version__ = '6.0'
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
ORDERED_FREQUENCIES = (
('minutely', relativedelta(minutes=1)),
('hourly', relativedelta(hours=1)),
('daily', relativedelta(days=1)),
('weekly', relativedelta(weeks=1)),
('monthly', relativedelta(months=1)),
('yearly', relativedelta(years=1)),
)
"""
A list of tuples with two values each:
- The name of a rotation frequency (a string like 'hourly', 'daily', etc.).
- A :class:`~dateutil.relativedelta.relativedelta` object.
The tuples are sorted by increasing delta (intentionally).
"""
SUPPORTED_FREQUENCIES = dict(ORDERED_FREQUENCIES)
"""
A dictionary with rotation frequency names (strings) as keys and
:class:`~dateutil.relativedelta.relativedelta` objects as values. This
dictionary is generated based on the tuples in :data:`ORDERED_FREQUENCIES`.
"""
TIMESTAMP_PATTERN = re.compile(r'''
# Required components.
(?P<year>\d{4} ) \D?
(?P<month>\d{2}) \D?
(?P<day>\d{2} ) \D?
(?:
# Optional components.
(?P<hour>\d{2} ) \D?
(?P<minute>\d{2}) \D?
(?P<second>\d{2})?
)?
''', re.VERBOSE)
"""
A compiled regular expression object used to match timestamps encoded in
filenames.
"""
def coerce_location(value, **options):
    """
    Coerce a string to a :class:`Location` object.
    :param value: The value to coerce (a string or :class:`Location` object).
    :param options: Any keyword arguments are passed on to
                    :func:`~executor.contexts.create_context()`.
    :returns: A :class:`Location` object.
    """
    # Location objects pass through untouched.
    if isinstance(value, Location):
        return value
    # Other values are expected to be strings.
    if not isinstance(value, string_types):
        raise ValueError("Expected Location object or string, got %s instead!" % type(value))
    # Check for the `ssh-alias:directory' form of a remote location.
    head, _, tail = value.partition(':')
    if head and tail and '/' not in head:
        options['ssh_alias'] = head
        directory = tail
    else:
        directory = value
    # Create the location object.
    return Location(
        context=create_context(**options),
        directory=parse_path(directory),
    )
def coerce_retention_period(value):
    """
    Coerce a retention period to a Python value.
    :param value: A string containing the text 'always', a number or
                  an expression that can be evaluated to a number.
    :returns: A number or the string 'always'.
    :raises: :exc:`~exceptions.ValueError` when the string can't be coerced.
    """
    # Numbers pass through untouched.
    if isinstance(value, numbers.Number):
        return value
    # Other values are expected to be strings.
    if not isinstance(value, string_types):
        raise ValueError("Expected string, got %s instead!" % type(value))
    # Check for the literal string `always'.
    stripped = value.strip()
    if stripped.lower() == 'always':
        return 'always'
    # Evaluate other strings as expressions.
    result = simple_eval(stripped)
    if not isinstance(result, numbers.Number):
        raise ValueError("Expected numeric result, got %s instead!" % type(result))
    return result
def load_config_file(configuration_file=None, expand=True):
    """
    Load a configuration file with backup directories and rotation schemes.
    :param configuration_file: Override the pathname of the configuration file
                               to load (a string or :data:`None`).
    :param expand: :data:`True` to expand filename patterns to their matches,
                   :data:`False` otherwise.
    :returns: A generator of tuples with three values each:
              1. A :class:`Location` object (which bundles the execution
                 context and the pathname of a directory with backups).
              2. A dictionary with the rotation scheme.
              3. A dictionary with additional options.
    :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given
             but doesn't exist or can't be loaded.
    This function is used by :class:`RotateBackups` to discover user defined
    rotation schemes and by :mod:`rotate_backups.cli` to discover directories
    for which backup rotation is configured. When `configuration_file` isn't
    given :class:`~update_dotdee.ConfigLoader` is used to search for
    configuration files in the following locations:
    - ``/etc/rotate-backups.ini`` and ``/etc/rotate-backups.d/*.ini``
    - ``~/.rotate-backups.ini`` and ``~/.rotate-backups.d/*.ini``
    - ``~/.config/rotate-backups.ini`` and ``~/.config/rotate-backups.d/*.ini``
    All of the available configuration files are loaded in the order given
    above, so that sections in user-specific configuration files override
    sections by the same name in system-wide configuration files.
    """
    expand_notice_given = False
    # An explicitly given file must exist (strict=True) whereas the default
    # search locations are all optional (strict=False).
    if configuration_file:
        loader = ConfigLoader(available_files=[configuration_file], strict=True)
    else:
        loader = ConfigLoader(program_name='rotate-backups', strict=False)
    for section in loader.section_names:
        items = dict(loader.get_options(section))
        # Translate configuration file options into execution context options.
        context_options = {}
        if coerce_boolean(items.get('use-sudo')):
            context_options['sudo'] = True
        if items.get('ssh-user'):
            context_options['ssh_user'] = items['ssh-user']
        # The section name doubles as the (possibly remote) location.
        location = coerce_location(section, **context_options)
        rotation_scheme = dict((name, coerce_retention_period(items[name]))
                               for name in SUPPORTED_FREQUENCIES
                               if name in items)
        options = dict(include_list=split(items.get('include-list', '')),
                       exclude_list=split(items.get('exclude-list', '')),
                       io_scheduling_class=items.get('ionice'),
                       strict=coerce_boolean(items.get('strict', 'yes')),
                       prefer_recent=coerce_boolean(items.get('prefer-recent', 'no')))
        # Don't override the value of the 'removal_command' property unless the
        # 'removal-command' configuration file option has a value set.
        if items.get('removal-command'):
            options['removal_command'] = shlex.split(items['removal-command'])
        # Expand filename patterns?
        if expand and location.have_wildcards:
            logger.verbose("Expanding filename pattern %s on %s ..", location.directory, location.context)
            if location.is_remote and not expand_notice_given:
                # Warn (once) that expanding patterns over SSH is slow.
                logger.notice("Expanding remote filename patterns (may be slow) ..")
                expand_notice_given = True
            for match in sorted(location.context.glob(location.directory)):
                if location.context.is_directory(match):
                    logger.verbose("Matched directory: %s", match)
                    expanded = Location(context=location.context, directory=match)
                    yield expanded, rotation_scheme, options
                else:
                    logger.verbose("Ignoring match (not a directory): %s", match)
        else:
            yield location, rotation_scheme, options
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
    """
    Construct a :class:`RotateBackups` instance.
    :param rotation_scheme: The value for :attr:`rotation_scheme`.
    :param options: Optional keyword arguments set the writable instance
                    properties :attr:`config_file`, :attr:`dry_run`,
                    :attr:`exclude_list`, :attr:`include_list`,
                    :attr:`io_scheduling_class`, :attr:`removal_command`
                    and :attr:`strict`.
    """
    # Fold the positional argument into the keyword arguments and let the
    # base class assign all of the properties in one go.
    options['rotation_scheme'] = rotation_scheme
    super(RotateBackups, self).__init__(**options)
@mutable_property
def config_file(self):
    """
    The pathname of a configuration file (a string or :data:`None`).
    When this property is set :func:`rotate_backups()` will use
    :func:`load_config_file()` to give the user (operator) a chance to set
    the rotation scheme and other options via a configuration file.
    """
    # Intentionally empty body: no computed default. NOTE(review): the
    # @mutable_property decorator presumably treats this as a None default;
    # confirm against the property_manager documentation.
@mutable_property
def dry_run(self):
    """
    :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
    If this is :data:`True` then :func:`rotate_backups()` won't make any
    actual changes, which provides a 'preview' of the effect of the
    rotation scheme. Right now this is only useful in the command line
    interface because there's no return value.
    """
    # Computed default: real deletions are performed unless the caller
    # explicitly enables dry run mode.
    return False
@cached_property(writable=True)
def exclude_list(self):
    """
    Filename patterns to exclude specific backups (a list of strings).
    This is a list of strings with :mod:`fnmatch` patterns. When
    :func:`collect_backups()` encounters a backup whose name matches any of
    the patterns in this list the backup will be ignored, *even if it also
    matches the include list* (it's the only logical way to combine both
    lists).
    :see also: :attr:`include_list`
    """
    # Default: an empty list. NOTE(review): @cached_property(writable=True)
    # presumably caches this per instance so in-place mutation of the
    # returned list would persist -- confirm against property_manager docs.
    return []
@cached_property(writable=True)
def include_list(self):
    """
    Filename patterns to select specific backups (a list of strings).
    This is a list of strings with :mod:`fnmatch` patterns. When it's not
    empty :func:`collect_backups()` will only collect backups whose name
    matches a pattern in the list.
    :see also: :attr:`exclude_list`
    """
    # Default: an empty list, meaning no include filtering is applied.
    return []
@mutable_property
def io_scheduling_class(self):
    """
    The I/O scheduling class for backup rotation (a string or :data:`None`).
    When this property is set (and :attr:`~Location.have_ionice` is
    :data:`True`) then ionice_ will be used to set the I/O scheduling class
    for backup rotation. This can be useful to reduce the impact of backup
    rotation on the rest of the system.
    The value of this property is expected to be one of the strings 'idle',
    'best-effort' or 'realtime'.
    .. _ionice: https://linux.die.net/man/1/ionice
    """
    # Intentionally empty body: no computed default (ionice isn't used
    # unless this property is explicitly set).
@mutable_property
def prefer_recent(self):
    """
    Whether to prefer older or newer backups in each time slot (a boolean).
    Defaults to :data:`False` which means the oldest backup in each time
    slot (an hour, a day, etc.) is preserved while newer backups in the
    time slot are removed. You can set this to :data:`True` if you would
    like to preserve the newest backup in each time slot instead.
    """
    # Computed default: preserve the oldest backup in each time slot
    # (consumed by apply_rotation_scheme()).
    return False
@mutable_property
def removal_command(self):
    """
    The command used to remove backups (a list of strings).
    By default the command ``rm -fR`` is used. This choice was made because
    it works regardless of whether the user's "backups to be rotated" are
    files or directories or a mixture of both.
    .. versionadded: 5.3
       This option was added as a generalization of the idea suggested in
       `pull request 11`_, which made it clear to me that being able to
       customize the removal command has its uses.
    .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
    """
    # Default removal command; rotate_backups() copies this list and
    # appends the backup's pathname as the final argument.
    return ['rm', '-fR']
@required_property
def rotation_scheme(self):
    """
    The rotation scheme to apply to backups (a dictionary).
    Each key in this dictionary defines a rotation frequency (one of the
    strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
    'yearly') and each value defines a retention count:
    - An integer value represents the number of backups to preserve in the
      given rotation frequency, starting from the most recent backup and
      counting back in time.
    - The string 'always' means all backups in the given rotation frequency
      are preserved (this is intended to be used with the biggest frequency
      in the rotation scheme, e.g. yearly).
    No backups are preserved for rotation frequencies that are not present
    in the dictionary.
    """
    # Intentionally empty body: this property has no default, it must be
    # provided by the caller (per the @required_property decorator).
@mutable_property
def strict(self):
    """
    Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
    The easiest way to explain the difference between strict and relaxed
    rotation is using an example:
    - If :attr:`strict` is :data:`True` and the number of hourly backups to
      preserve is three, only backups created in the relevant time window
      (the hour of the most recent backup and the two hours leading up to
      that) will match the hourly frequency.
    - If :attr:`strict` is :data:`False` then the three most recent backups
      will all match the hourly frequency (and thus be preserved),
      regardless of the calculated time window.
    If the explanation above is not clear enough, here's a simple way to
    decide whether you want to customize this behavior:
    - If your backups are created at regular intervals and you never miss
      an interval then the default (:data:`True`) is most likely fine.
    - If your backups are created at irregular intervals then you may want
      to set :attr:`strict` to :data:`False` to convince
      :class:`RotateBackups` to preserve more backups.
    """
    # Computed default: enforce time windows (see apply_rotation_scheme()).
    return True
def rotate_concurrent(self, *locations, **kw):
    """
    Rotate the backups in the given locations concurrently.
    :param locations: One or more values accepted by :func:`coerce_location()`.
    :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
    This function uses :func:`rotate_backups()` to prepare rotation
    commands for the given locations and then it removes backups in
    parallel, one backup per mount point at a time.
    The idea behind this approach is that parallel rotation is most useful
    when the files to be removed are on different disks and so multiple
    devices can be utilized at the same time.
    Because mount points are per system :func:`rotate_concurrent()` will
    also parallelize over backups located on multiple remote systems.
    """
    timer = Timer()
    pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for location in locations:
        # Bug fix: rotate_backups() returns None when a location contains
        # no backups, which previously raised TypeError here; guard the
        # iteration so empty locations are simply skipped.
        for cmd in self.rotate_backups(location, prepare=True, **kw) or []:
            pool.add(cmd)
    if pool.num_commands > 0:
        backups = pluralize(pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", backups)
        pool.run()
        logger.info("Successfully rotated %s in %s.", backups, timer)
def rotate_backups(self, location, load_config=True, prepare=False):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.
    :param location: Any value accepted by :func:`coerce_location()`.
    :param load_config: If :data:`True` (so by default) the rotation scheme
                        and other options can be customized by the user in
                        a configuration file. In this case the caller's
                        arguments are only used when the configuration file
                        doesn't define a configuration for the location.
    :param prepare: If this is :data:`True` (not the default) then
                    :func:`rotate_backups()` will prepare the required
                    rotation commands without running them.
    :returns: A list with the rotation commands
              (:class:`~executor.ExternalCommand` objects).
    :raises: :exc:`~exceptions.ValueError` when the given location doesn't
             exist, isn't readable or isn't writable. The third check is
             only performed when dry run isn't enabled.
    This function binds the main methods of the :class:`RotateBackups`
    class together to implement backup rotation with an easy to use Python
    API. If you're using `rotate-backups` as a Python API and the default
    behavior is not satisfactory, consider writing your own
    :func:`rotate_backups()` function based on the underlying
    :func:`collect_backups()`, :func:`group_backups()`,
    :func:`apply_rotation_scheme()` and
    :func:`find_preservation_criteria()` methods.
    """
    rotation_commands = []
    location = coerce_location(location)
    # Load configuration overrides by user?
    if load_config:
        location = self.load_config_file(location)
    # Collect the backups in the given directory.
    sorted_backups = self.collect_backups(location)
    if not sorted_backups:
        logger.info("No backups found in %s.", location)
        # Bug fix: return the (empty) list of rotation commands instead of
        # the bare `return' (== None) so that callers which iterate over
        # the documented list return value always get a list.
        return rotation_commands
    # Make sure the directory is writable.
    if not self.dry_run:
        location.ensure_writable()
    most_recent_backup = sorted_backups[-1]
    # Group the backups by the rotation frequencies.
    backups_by_frequency = self.group_backups(sorted_backups)
    # Apply the user defined rotation scheme.
    self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
    # Find which backups to preserve and why.
    backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
    # Apply the calculated rotation scheme.
    for backup in sorted_backups:
        friendly_name = backup.pathname
        if not location.is_remote:
            # Use human friendly pathname formatting for local backups.
            friendly_name = format_path(backup.pathname)
        if backup in backups_to_preserve:
            matching_periods = backups_to_preserve[backup]
            logger.info("Preserving %s (matches %s retention %s) ..",
                        friendly_name, concatenate(map(repr, matching_periods)),
                        "period" if len(matching_periods) == 1 else "periods")
        else:
            logger.info("Deleting %s ..", friendly_name)
            if not self.dry_run:
                # Copy the list with the (possibly user defined) removal command.
                removal_command = list(self.removal_command)
                # Add the pathname of the backup as the final argument.
                removal_command.append(backup.pathname)
                # Construct the command object (grouped by mount point so
                # concurrent rotation runs one removal per disk at a time).
                command = location.context.prepare(
                    command=removal_command,
                    group_by=(location.ssh_alias, location.mount_point),
                    ionice=self.io_scheduling_class,
                )
                rotation_commands.append(command)
                if not prepare:
                    timer = Timer()
                    command.wait()
                    logger.verbose("Deleted %s in %s.", friendly_name, timer)
    if len(backups_to_preserve) == len(sorted_backups):
        logger.info("Nothing to do! (all backups preserved)")
    return rotation_commands
def load_config_file(self, location):
    """
    Load a rotation scheme and other options from a configuration file.
    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: The configured or given :class:`Location` object.
    """
    location = coerce_location(location)
    # The module level load_config_file() function yields one entry per
    # configuration file section; the first matching entry wins.
    for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
        if configured_location.match(location):
            logger.verbose("Loading configuration for %s ..", location)
            if rotation_scheme:
                self.rotation_scheme = rotation_scheme
            # Only truthy option values override the caller's arguments.
            for name, value in options.items():
                if value:
                    setattr(self, name, value)
            # Create a new Location object based on the directory of the
            # given location and the execution context of the configured
            # location, because:
            #
            # 1. The directory of the configured location may be a filename
            #    pattern whereas we are interested in the expanded name.
            #
            # 2. The execution context of the given location may lack some
            #    details of the configured location.
            return Location(
                context=configured_location.context,
                directory=location.directory,
            )
    logger.verbose("No configuration found for %s.", location)
    return location
def collect_backups(self, location):
    """
    Collect the backups at the given location.
    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: A sorted :class:`list` of :class:`Backup` objects (the
              backups are sorted by their date).
    :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
             exist or isn't readable.
    """
    location = coerce_location(location)
    logger.info("Scanning %s for backups ..", location)
    location.ensure_readable()
    collected = []
    for filename in natsort(location.context.list_entries(location.directory)):
        match = TIMESTAMP_PATTERN.search(filename)
        if not match:
            logger.debug("Failed to match time stamp in filename: %s", filename)
            continue
        # The exclude list takes precedence over the include list.
        if self.exclude_list and any(fnmatch.fnmatch(filename, pattern) for pattern in self.exclude_list):
            logger.verbose("Excluded %s (it matched the exclude list).", filename)
            continue
        if self.include_list and not any(fnmatch.fnmatch(filename, pattern) for pattern in self.include_list):
            logger.verbose("Excluded %s (it didn't match the include list).", filename)
            continue
        try:
            # Unmatched optional components default to '0' via groups('0').
            collected.append(Backup(
                pathname=os.path.join(location.directory, filename),
                timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
            ))
        except ValueError as e:
            logger.notice("Ignoring %s due to invalid date (%s).", filename, e)
    if collected:
        logger.info("Found %i timestamped backups in %s.", len(collected), location)
    return sorted(collected)
def group_backups(self, backups):
    """
    Group backups collected by :func:`collect_backups()` by rotation frequencies.
    :param backups: A :class:`set` of :class:`Backup` objects.
    :returns: A :class:`dict` whose keys are the names of rotation
              frequencies ('hourly', 'daily', etc.) and whose values are
              dictionaries. Each nested dictionary contains lists of
              :class:`Backup` objects that are grouped together because
              they belong into the same time unit for the corresponding
              rotation frequency.
    """
    # One defaultdict(list) per supported frequency so the appends below
    # never need an existence check.
    backups_by_frequency = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
    for b in backups:
        # Each backup lands in one "time slot" per frequency; the slot key
        # is the tuple of date/time components significant for that
        # frequency (b.year etc. are deferred to the timestamp by
        # Backup.__getattr__()).
        backups_by_frequency['minutely'][(b.year, b.month, b.day, b.hour, b.minute)].append(b)
        backups_by_frequency['hourly'][(b.year, b.month, b.day, b.hour)].append(b)
        backups_by_frequency['daily'][(b.year, b.month, b.day)].append(b)
        backups_by_frequency['weekly'][(b.year, b.week)].append(b)
        backups_by_frequency['monthly'][(b.year, b.month)].append(b)
        backups_by_frequency['yearly'][b.year].append(b)
    return backups_by_frequency
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
    """
    Apply the user defined rotation scheme to the result of :func:`group_backups()`.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()`.
    :param most_recent_backup: The :class:`~datetime.datetime` of the most
                               recent backup.
    :raises: :exc:`~exceptions.ValueError` when the rotation scheme
             dictionary is empty (this would cause all backups to be
             deleted).

    .. note:: This method mutates the given data structure by removing all
              backups that should be removed to apply the user defined
              rotation scheme.
    """
    if not self.rotation_scheme:
        raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
    for frequency, backups in backups_by_frequency.items():
        # Ignore frequencies not specified by the user.
        if frequency not in self.rotation_scheme:
            backups.clear()
        else:
            # Reduce the number of backups in each time slot of this
            # rotation frequency to a single backup (the oldest one or the
            # newest one).
            # NOTE: Assigning backups[period] replaces the value of an
            # existing key, so the dictionary's size doesn't change while
            # .items() is being iterated.
            for period, backups_in_period in backups.items():
                index = -1 if self.prefer_recent else 0
                selected_backup = sorted(backups_in_period)[index]
                backups[period] = [selected_backup]
            # Check if we need to rotate away backups in old periods.
            retention_period = self.rotation_scheme[frequency]
            if retention_period != 'always':
                # Remove backups created before the minimum date of this
                # rotation frequency? (relative to the most recent backup)
                if self.strict:
                    minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                    # At this point each period holds exactly one backup
                    # (see the reduction step above), so removing it while
                    # iterating the one-element list can't skip entries.
                    for period, backups_in_period in list(backups.items()):
                        for backup in backups_in_period:
                            if backup.timestamp < minimum_date:
                                backups_in_period.remove(backup)
                        if not backups_in_period:
                            backups.pop(period)
                # If there are more periods remaining than the user
                # requested to be preserved we delete the oldest one(s).
                items_to_preserve = sorted(backups.items())[-retention_period:]
                backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
    """
    Collect the criteria used to decide which backups to preserve.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()` which has been
                                 processed by :func:`apply_rotation_scheme()`.
    :returns: A :class:`dict` with :class:`Backup` objects as keys and
              :class:`list` objects containing strings (rotation
              frequencies) as values.
    """
    # Walk the frequencies in their canonical order so that each backup's
    # list of matching frequencies comes out in that same order.
    preserved = collections.defaultdict(list)
    for frequency, _delta in ORDERED_FREQUENCIES:
        for backups_in_period in backups_by_frequency[frequency].values():
            for backup in backups_in_period:
                preserved[backup].append(frequency)
    return preserved
class Location(PropertyManager):
    """:class:`Location` objects represent a root directory containing backups."""

    @required_property
    def context(self):
        """An execution context created using :mod:`executor.contexts`."""

    @required_property
    def directory(self):
        """The pathname of a directory containing backups (a string)."""

    @lazy_property
    def have_ionice(self):
        """:data:`True` when ionice_ is available, :data:`False` otherwise."""
        return self.context.have_ionice

    @lazy_property
    def have_wildcards(self):
        """:data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
        # A single '*' anywhere in the pathname is treated as a pattern.
        return '*' in self.directory

    @lazy_property
    def mount_point(self):
        """
        The pathname of the mount point of :attr:`directory` (a string or :data:`None`).

        If the ``stat --format=%m ...`` command that is used to determine the
        mount point fails, the value of this property defaults to :data:`None`.
        This enables graceful degradation on e.g. Mac OS X whose ``stat``
        implementation is rather bare bones compared to GNU/Linux.
        """
        try:
            return self.context.capture('stat', '--format=%m', self.directory, silent=True)
        except ExternalCommandFailed:
            return None

    @lazy_property
    def is_remote(self):
        """:data:`True` if the location is remote, :data:`False` otherwise."""
        return isinstance(self.context, RemoteContext)

    @lazy_property
    def ssh_alias(self):
        """The SSH alias of a remote location (a string or :data:`None`)."""
        return self.context.ssh_alias if self.is_remote else None

    @property
    def key_properties(self):
        """
        A list of strings with the names of the :attr:`~custom_property.key` properties.

        Overrides :attr:`~property_manager.PropertyManager.key_properties` to
        customize the ordering of :class:`Location` objects so that they are
        ordered first by their :attr:`ssh_alias` and second by their
        :attr:`directory`.
        """
        return ['ssh_alias', 'directory'] if self.is_remote else ['directory']

    def ensure_exists(self):
        """Make sure the location exists."""
        if not self.context.is_directory(self.directory):
            # This can also happen when we don't have permission to one of the
            # parent directories so we'll point that out in the error message
            # when it seems applicable (so as not to confuse users).
            if self.context.have_superuser_privileges:
                msg = "The directory %s doesn't exist!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't accessible, most likely
                    because it doesn't exist or because of permissions. If
                    you're sure the directory exists you can use the
                    --use-sudo option.
                """, location=self))

    def ensure_readable(self):
        """Make sure the location exists and is readable."""
        self.ensure_exists()
        if not self.context.is_readable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't readable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't readable, most likely
                    because of permissions. Consider using the --use-sudo
                    option.
                """, location=self))

    def ensure_writable(self):
        """Make sure the directory exists and is writable."""
        self.ensure_exists()
        if not self.context.is_writable(self.directory):
            if self.context.have_superuser_privileges:
                msg = "The directory %s isn't writable!"
                raise ValueError(msg % self)
            else:
                raise ValueError(compact("""
                    The directory {location} isn't writable, most likely due
                    to permissions. Consider using the --use-sudo option.
                """, location=self))

    def match(self, location):
        """
        Check if the given location "matches".

        :param location: The :class:`Location` object to try to match.
        :returns: :data:`True` if the two locations are on the same system and
                  the :attr:`directory` can be matched as a filename pattern or
                  a literal match on the normalized pathname.
        """
        if self.ssh_alias != location.ssh_alias:
            # Never match locations on other systems.
            return False
        elif self.have_wildcards:
            # Match filename patterns using fnmatch().
            return fnmatch.fnmatch(location.directory, self.directory)
        else:
            # Compare normalized directory pathnames.
            # NOTE(review): `self` is deliberately rebound to a plain string
            # here; it is only used for the comparison below, but the
            # shadowing is easy to misread.
            self = os.path.normpath(self.directory)
            other = os.path.normpath(location.directory)
            return self == other

    def __str__(self):
        """Render a simple human readable representation of a location."""
        return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
class Backup(PropertyManager):
    """:class:`Backup` objects represent a rotation subject."""

    key_properties = 'timestamp', 'pathname'
    """
    Customize the ordering of :class:`Backup` objects.

    :class:`Backup` objects are ordered first by their :attr:`timestamp` and
    second by their :attr:`pathname`. This class variable overrides
    :attr:`~property_manager.PropertyManager.key_properties`.
    """

    @key_property
    def pathname(self):
        """The pathname of the backup (a string)."""

    @key_property
    def timestamp(self):
        """The date and time when the backup was created (a :class:`~datetime.datetime` object)."""

    @property
    def week(self):
        """The ISO week number of :attr:`timestamp` (a number)."""
        return self.timestamp.isocalendar()[1]

    def __getattr__(self, name):
        """Defer attribute access to :attr:`timestamp`."""
        # This is what makes b.year, b.month, b.day, b.hour and b.minute
        # (as used by group_backups()) work without defining them here.
        return getattr(self.timestamp, name)
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.rotate_concurrent
|
python
|
def rotate_concurrent(self, *locations, **kw):
    """Rotate the backups in the given locations concurrently (see :func:`rotate_backups()`)."""
    timer = Timer()
    pool = CommandPool(concurrency=10)
    logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
    for location in locations:
        # prepare=True makes rotate_backups() return the removal commands
        # without running them, so the pool can execute them in parallel.
        for cmd in self.rotate_backups(location, prepare=True, **kw):
            pool.add(cmd)
    if pool.num_commands > 0:
        backups = pluralize(pool.num_commands, "backup")
        logger.info("Preparing to rotate %s (in parallel) ..", backups)
        pool.run()
        logger.info("Successfully rotated %s in %s.", backups, timer)
|
Rotate the backups in the given locations concurrently.
:param locations: One or more values accepted by :func:`coerce_location()`.
:param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
This function uses :func:`rotate_backups()` to prepare rotation
commands for the given locations and then it removes backups in
parallel, one backup per mount point at a time.
The idea behind this approach is that parallel rotation is most useful
when the files to be removed are on different disks and so multiple
devices can be utilized at the same time.
Because mount points are per system :func:`rotate_concurrent()` will
also parallelize over backups located on multiple remote systems.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L403-L431
| null |
class RotateBackups(PropertyManager):
    """Python API for the ``rotate-backups`` program."""

    def __init__(self, rotation_scheme, **options):
        """
        Initialize a :class:`RotateBackups` object.

        :param rotation_scheme: Used to set :attr:`rotation_scheme`.
        :param options: Any keyword arguments are used to set the values of
                        instance properties that support assignment
                        (:attr:`config_file`, :attr:`dry_run`,
                        :attr:`exclude_list`, :attr:`include_list`,
                        :attr:`io_scheduling_class`, :attr:`removal_command`
                        and :attr:`strict`).
        """
        # Fold the positional argument into the keyword arguments so that
        # PropertyManager can assign all properties in one pass.
        options.update(rotation_scheme=rotation_scheme)
        super(RotateBackups, self).__init__(**options)

    @mutable_property
    def config_file(self):
        """
        The pathname of a configuration file (a string or :data:`None`).

        When this property is set :func:`rotate_backups()` will use
        :func:`load_config_file()` to give the user (operator) a chance to set
        the rotation scheme and other options via a configuration file.
        """

    @mutable_property
    def dry_run(self):
        """
        :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).

        If this is :data:`True` then :func:`rotate_backups()` won't make any
        actual changes, which provides a 'preview' of the effect of the
        rotation scheme. Right now this is only useful in the command line
        interface because there's no return value.
        """
        return False

    @cached_property(writable=True)
    def exclude_list(self):
        """
        Filename patterns to exclude specific backups (a list of strings).

        This is a list of strings with :mod:`fnmatch` patterns. When
        :func:`collect_backups()` encounters a backup whose name matches any of
        the patterns in this list the backup will be ignored, *even if it also
        matches the include list* (it's the only logical way to combine both
        lists).

        :see also: :attr:`include_list`
        """
        return []

    @cached_property(writable=True)
    def include_list(self):
        """
        Filename patterns to select specific backups (a list of strings).

        This is a list of strings with :mod:`fnmatch` patterns. When it's not
        empty :func:`collect_backups()` will only collect backups whose name
        matches a pattern in the list.

        :see also: :attr:`exclude_list`
        """
        return []

    @mutable_property
    def io_scheduling_class(self):
        """
        The I/O scheduling class for backup rotation (a string or :data:`None`).

        When this property is set (and :attr:`~Location.have_ionice` is
        :data:`True`) then ionice_ will be used to set the I/O scheduling class
        for backup rotation. This can be useful to reduce the impact of backup
        rotation on the rest of the system.

        The value of this property is expected to be one of the strings 'idle',
        'best-effort' or 'realtime'.

        .. _ionice: https://linux.die.net/man/1/ionice
        """

    @mutable_property
    def prefer_recent(self):
        """
        Whether to prefer older or newer backups in each time slot (a boolean).

        Defaults to :data:`False` which means the oldest backup in each time
        slot (an hour, a day, etc.) is preserved while newer backups in the
        time slot are removed. You can set this to :data:`True` if you would
        like to preserve the newest backup in each time slot instead.
        """
        return False

    @mutable_property
    def removal_command(self):
        """
        The command used to remove backups (a list of strings).

        By default the command ``rm -fR`` is used. This choice was made because
        it works regardless of whether the user's "backups to be rotated" are
        files or directories or a mixture of both.

        .. versionadded: 5.3
           This option was added as a generalization of the idea suggested in
           `pull request 11`_, which made it clear to me that being able to
           customize the removal command has its uses.

        .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
        """
        return ['rm', '-fR']

    @required_property
    def rotation_scheme(self):
        """
        The rotation scheme to apply to backups (a dictionary).

        Each key in this dictionary defines a rotation frequency (one of the
        strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
        'yearly') and each value defines a retention count:

        - An integer value represents the number of backups to preserve in the
          given rotation frequency, starting from the most recent backup and
          counting back in time.
        - The string 'always' means all backups in the given rotation frequency
          are preserved (this is intended to be used with the biggest frequency
          in the rotation scheme, e.g. yearly).

        No backups are preserved for rotation frequencies that are not present
        in the dictionary.
        """

    @mutable_property
    def strict(self):
        """
        Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).

        The easiest way to explain the difference between strict and relaxed
        rotation is using an example:

        - If :attr:`strict` is :data:`True` and the number of hourly backups to
          preserve is three, only backups created in the relevant time window
          (the hour of the most recent backup and the two hours leading up to
          that) will match the hourly frequency.
        - If :attr:`strict` is :data:`False` then the three most recent backups
          will all match the hourly frequency (and thus be preserved),
          regardless of the calculated time window.

        If the explanation above is not clear enough, here's a simple way to
        decide whether you want to customize this behavior:

        - If your backups are created at regular intervals and you never miss
          an interval then the default (:data:`True`) is most likely fine.
        - If your backups are created at irregular intervals then you may want
          to set :attr:`strict` to :data:`False` to convince
          :class:`RotateBackups` to preserve more backups.
        """
        return True
def rotate_backups(self, location, load_config=True, prepare=False):
    """
    Rotate the backups in a directory according to a flexible rotation scheme.

    :param location: Any value accepted by :func:`coerce_location()`.
    :param load_config: If :data:`True` (so by default) the rotation scheme
                        and other options can be customized by the user in
                        a configuration file. In this case the caller's
                        arguments are only used when the configuration file
                        doesn't define a configuration for the location.
    :param prepare: If this is :data:`True` (not the default) then
                    :func:`rotate_backups()` will prepare the required
                    rotation commands without running them.
    :returns: A list with the rotation commands
              (:class:`~executor.ExternalCommand` objects).
    :raises: :exc:`~exceptions.ValueError` when the given location doesn't
             exist, isn't readable or isn't writable. The third check is
             only performed when dry run isn't enabled.

    This function binds the main methods of the :class:`RotateBackups`
    class together to implement backup rotation with an easy to use Python
    API. If you're using `rotate-backups` as a Python API and the default
    behavior is not satisfactory, consider writing your own
    :func:`rotate_backups()` function based on the underlying
    :func:`collect_backups()`, :func:`group_backups()`,
    :func:`apply_rotation_scheme()` and
    :func:`find_preservation_criteria()` methods.
    """
    rotation_commands = []
    location = coerce_location(location)
    # Load configuration overrides by user?
    if load_config:
        location = self.load_config_file(location)
    # Collect the backups in the given directory.
    sorted_backups = self.collect_backups(location)
    if not sorted_backups:
        logger.info("No backups found in %s.", location)
        # Bug fix: return the (empty) command list instead of an implicit
        # None. The docstring promises a list and rotate_concurrent()
        # iterates over the return value, which would raise TypeError when
        # a location contains no backups.
        return rotation_commands
    # Make sure the directory is writable.
    if not self.dry_run:
        location.ensure_writable()
    most_recent_backup = sorted_backups[-1]
    # Group the backups by the rotation frequencies.
    backups_by_frequency = self.group_backups(sorted_backups)
    # Apply the user defined rotation scheme.
    self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
    # Find which backups to preserve and why.
    backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
    # Apply the calculated rotation scheme.
    for backup in sorted_backups:
        friendly_name = backup.pathname
        if not location.is_remote:
            # Use human friendly pathname formatting for local backups.
            friendly_name = format_path(backup.pathname)
        if backup in backups_to_preserve:
            matching_periods = backups_to_preserve[backup]
            logger.info("Preserving %s (matches %s retention %s) ..",
                        friendly_name, concatenate(map(repr, matching_periods)),
                        "period" if len(matching_periods) == 1 else "periods")
        else:
            logger.info("Deleting %s ..", friendly_name)
            if not self.dry_run:
                # Copy the list with the (possibly user defined) removal command.
                removal_command = list(self.removal_command)
                # Add the pathname of the backup as the final argument.
                removal_command.append(backup.pathname)
                # Construct the command object.
                command = location.context.prepare(
                    command=removal_command,
                    group_by=(location.ssh_alias, location.mount_point),
                    ionice=self.io_scheduling_class,
                )
                rotation_commands.append(command)
                if not prepare:
                    timer = Timer()
                    command.wait()
                    logger.verbose("Deleted %s in %s.", friendly_name, timer)
    if len(backups_to_preserve) == len(sorted_backups):
        logger.info("Nothing to do! (all backups preserved)")
    return rotation_commands
def load_config_file(self, location):
    """
    Load a rotation scheme and other options from a configuration file.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: The configured or given :class:`Location` object.
    """
    location = coerce_location(location)
    # NOTE: This calls the module-level load_config_file() function (the
    # call has no `self.` prefix and a different signature), not this
    # method -- the shadowing is intentional but easy to misread.
    for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
        if configured_location.match(location):
            logger.verbose("Loading configuration for %s ..", location)
            if rotation_scheme:
                self.rotation_scheme = rotation_scheme
            # Only overwrite instance properties with truthy values from
            # the configuration file.
            for name, value in options.items():
                if value:
                    setattr(self, name, value)
            # Create a new Location object based on the directory of the
            # given location and the execution context of the configured
            # location, because:
            #
            # 1. The directory of the configured location may be a filename
            #    pattern whereas we are interested in the expanded name.
            #
            # 2. The execution context of the given location may lack some
            #    details of the configured location.
            return Location(
                context=configured_location.context,
                directory=location.directory,
            )
    logger.verbose("No configuration found for %s.", location)
    return location
def collect_backups(self, location):
    """
    Collect the backups at the given location.

    :param location: Any value accepted by :func:`coerce_location()`.
    :returns: A sorted :class:`list` of :class:`Backup` objects (the
              backups are sorted by their date).
    :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
             exist or isn't readable.
    """
    backups = []
    location = coerce_location(location)
    logger.info("Scanning %s for backups ..", location)
    location.ensure_readable()
    for entry in natsort(location.context.list_entries(location.directory)):
        match = TIMESTAMP_PATTERN.search(entry)
        if match:
            # The exclude list takes priority over the include list.
            if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                logger.verbose("Excluded %s (it matched the exclude list).", entry)
            elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                logger.verbose("Excluded %s (it didn't match the include list).", entry)
            else:
                try:
                    # groups('0') substitutes '0' for optional capture
                    # groups that didn't participate in the match (e.g.
                    # missing hour/minute/second fields).
                    backups.append(Backup(
                        pathname=os.path.join(location.directory, entry),
                        timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
                except ValueError as e:
                    # An entry can match the pattern and still encode an
                    # impossible date (e.g. month 13).
                    logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
        else:
            logger.debug("Failed to match time stamp in filename: %s", entry)
    if backups:
        logger.info("Found %i timestamped backups in %s.", len(backups), location)
    return sorted(backups)
def group_backups(self, backups):
    """
    Group backups collected by :func:`collect_backups()` by rotation frequencies.

    :param backups: A :class:`set` of :class:`Backup` objects.
    :returns: A :class:`dict` whose keys are the names of rotation
              frequencies ('hourly', 'daily', etc.) and whose values are
              dictionaries. Each nested dictionary contains lists of
              :class:`Backup` objects that are grouped together because
              they belong into the same time unit for the corresponding
              rotation frequency.
    """
    # One key function per rotation frequency; backups that produce the
    # same key belong to the same time slot for that frequency.
    slot_key_functions = {
        'minutely': lambda b: (b.year, b.month, b.day, b.hour, b.minute),
        'hourly': lambda b: (b.year, b.month, b.day, b.hour),
        'daily': lambda b: (b.year, b.month, b.day),
        'weekly': lambda b: (b.year, b.week),
        'monthly': lambda b: (b.year, b.month),
        'yearly': lambda b: b.year,
    }
    grouped = {frequency: collections.defaultdict(list) for frequency in SUPPORTED_FREQUENCIES}
    for backup in backups:
        for frequency, key_of in slot_key_functions.items():
            grouped[frequency][key_of(backup)].append(backup)
    return grouped
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
    """
    Apply the user defined rotation scheme to the result of :func:`group_backups()`.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()`.
    :param most_recent_backup: The :class:`~datetime.datetime` of the most
                               recent backup.
    :raises: :exc:`~exceptions.ValueError` when the rotation scheme
             dictionary is empty (this would cause all backups to be
             deleted).

    .. note:: This method mutates the given data structure by removing all
              backups that should be removed to apply the user defined
              rotation scheme.
    """
    if not self.rotation_scheme:
        raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
    for frequency, backups in backups_by_frequency.items():
        # Ignore frequencies not specified by the user.
        if frequency not in self.rotation_scheme:
            backups.clear()
        else:
            # Reduce the number of backups in each time slot of this
            # rotation frequency to a single backup (the oldest one or the
            # newest one).
            # NOTE: Assigning backups[period] replaces the value of an
            # existing key, so the dictionary's size doesn't change while
            # .items() is being iterated.
            for period, backups_in_period in backups.items():
                index = -1 if self.prefer_recent else 0
                selected_backup = sorted(backups_in_period)[index]
                backups[period] = [selected_backup]
            # Check if we need to rotate away backups in old periods.
            retention_period = self.rotation_scheme[frequency]
            if retention_period != 'always':
                # Remove backups created before the minimum date of this
                # rotation frequency? (relative to the most recent backup)
                if self.strict:
                    minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                    # At this point each period holds exactly one backup
                    # (see the reduction step above), so removing it while
                    # iterating the one-element list can't skip entries.
                    for period, backups_in_period in list(backups.items()):
                        for backup in backups_in_period:
                            if backup.timestamp < minimum_date:
                                backups_in_period.remove(backup)
                        if not backups_in_period:
                            backups.pop(period)
                # If there are more periods remaining than the user
                # requested to be preserved we delete the oldest one(s).
                items_to_preserve = sorted(backups.items())[-retention_period:]
                backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
    """
    Collect the criteria used to decide which backups to preserve.

    :param backups_by_frequency: A :class:`dict` in the format generated by
                                 :func:`group_backups()` which has been
                                 processed by :func:`apply_rotation_scheme()`.
    :returns: A :class:`dict` with :class:`Backup` objects as keys and
              :class:`list` objects containing strings (rotation
              frequencies) as values.
    """
    # Walk the frequencies in their canonical order so that each backup's
    # list of matching frequencies comes out in that same order.
    preserved = collections.defaultdict(list)
    for frequency, _delta in ORDERED_FREQUENCIES:
        for backups_in_period in backups_by_frequency[frequency].values():
            for backup in backups_in_period:
                preserved[backup].append(frequency)
    return preserved
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.rotate_backups
|
python
|
def rotate_backups(self, location, load_config=True, prepare=False):
rotation_commands = []
location = coerce_location(location)
# Load configuration overrides by user?
if load_config:
location = self.load_config_file(location)
# Collect the backups in the given directory.
sorted_backups = self.collect_backups(location)
if not sorted_backups:
logger.info("No backups found in %s.", location)
return
# Make sure the directory is writable.
if not self.dry_run:
location.ensure_writable()
most_recent_backup = sorted_backups[-1]
# Group the backups by the rotation frequencies.
backups_by_frequency = self.group_backups(sorted_backups)
# Apply the user defined rotation scheme.
self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
# Find which backups to preserve and why.
backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
# Apply the calculated rotation scheme.
for backup in sorted_backups:
friendly_name = backup.pathname
if not location.is_remote:
# Use human friendly pathname formatting for local backups.
friendly_name = format_path(backup.pathname)
if backup in backups_to_preserve:
matching_periods = backups_to_preserve[backup]
logger.info("Preserving %s (matches %s retention %s) ..",
friendly_name, concatenate(map(repr, matching_periods)),
"period" if len(matching_periods) == 1 else "periods")
else:
logger.info("Deleting %s ..", friendly_name)
if not self.dry_run:
# Copy the list with the (possibly user defined) removal command.
removal_command = list(self.removal_command)
# Add the pathname of the backup as the final argument.
removal_command.append(backup.pathname)
# Construct the command object.
command = location.context.prepare(
command=removal_command,
group_by=(location.ssh_alias, location.mount_point),
ionice=self.io_scheduling_class,
)
rotation_commands.append(command)
if not prepare:
timer = Timer()
command.wait()
logger.verbose("Deleted %s in %s.", friendly_name, timer)
if len(backups_to_preserve) == len(sorted_backups):
logger.info("Nothing to do! (all backups preserved)")
return rotation_commands
|
Rotate the backups in a directory according to a flexible rotation scheme.
:param location: Any value accepted by :func:`coerce_location()`.
:param load_config: If :data:`True` (so by default) the rotation scheme
and other options can be customized by the user in
a configuration file. In this case the caller's
arguments are only used when the configuration file
doesn't define a configuration for the location.
:param prepare: If this is :data:`True` (not the default) then
:func:`rotate_backups()` will prepare the required
rotation commands without running them.
:returns: A list with the rotation commands
(:class:`~executor.ExternalCommand` objects).
:raises: :exc:`~exceptions.ValueError` when the given location doesn't
exist, isn't readable or isn't writable. The third check is
only performed when dry run isn't enabled.
This function binds the main methods of the :class:`RotateBackups`
class together to implement backup rotation with an easy to use Python
API. If you're using `rotate-backups` as a Python API and the default
behavior is not satisfactory, consider writing your own
:func:`rotate_backups()` function based on the underlying
:func:`collect_backups()`, :func:`group_backups()`,
:func:`apply_rotation_scheme()` and
:func:`find_preservation_criteria()` methods.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L433-L512
|
[
"def coerce_location(value, **options):\n \"\"\"\n Coerce a string to a :class:`Location` object.\n\n :param value: The value to coerce (a string or :class:`Location` object).\n :param options: Any keyword arguments are passed on to\n :func:`~executor.contexts.create_context()`.\n :returns: A :class:`Location` object.\n \"\"\"\n # Location objects pass through untouched.\n if not isinstance(value, Location):\n # Other values are expected to be strings.\n if not isinstance(value, string_types):\n msg = \"Expected Location object or string, got %s instead!\"\n raise ValueError(msg % type(value))\n # Try to parse a remote location.\n ssh_alias, _, directory = value.partition(':')\n if ssh_alias and directory and '/' not in ssh_alias:\n options['ssh_alias'] = ssh_alias\n else:\n directory = value\n # Create the location object.\n value = Location(\n context=create_context(**options),\n directory=parse_path(directory),\n )\n return value\n"
] |
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
"""
Initialize a :class:`RotateBackups` object.
:param rotation_scheme: Used to set :attr:`rotation_scheme`.
:param options: Any keyword arguments are used to set the values of
instance properties that support assignment
(:attr:`config_file`, :attr:`dry_run`,
:attr:`exclude_list`, :attr:`include_list`,
:attr:`io_scheduling_class`, :attr:`removal_command`
and :attr:`strict`).
"""
options.update(rotation_scheme=rotation_scheme)
super(RotateBackups, self).__init__(**options)
@mutable_property
def config_file(self):
"""
The pathname of a configuration file (a string or :data:`None`).
When this property is set :func:`rotate_backups()` will use
:func:`load_config_file()` to give the user (operator) a chance to set
the rotation scheme and other options via a configuration file.
"""
@mutable_property
def dry_run(self):
"""
:data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
If this is :data:`True` then :func:`rotate_backups()` won't make any
actual changes, which provides a 'preview' of the effect of the
rotation scheme. Right now this is only useful in the command line
interface because there's no return value.
"""
return False
@cached_property(writable=True)
def exclude_list(self):
"""
Filename patterns to exclude specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When
:func:`collect_backups()` encounters a backup whose name matches any of
the patterns in this list the backup will be ignored, *even if it also
matches the include list* (it's the only logical way to combine both
lists).
:see also: :attr:`include_list`
"""
return []
@cached_property(writable=True)
def include_list(self):
"""
Filename patterns to select specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When it's not
empty :func:`collect_backups()` will only collect backups whose name
matches a pattern in the list.
:see also: :attr:`exclude_list`
"""
return []
@mutable_property
def io_scheduling_class(self):
"""
The I/O scheduling class for backup rotation (a string or :data:`None`).
When this property is set (and :attr:`~Location.have_ionice` is
:data:`True`) then ionice_ will be used to set the I/O scheduling class
for backup rotation. This can be useful to reduce the impact of backup
rotation on the rest of the system.
The value of this property is expected to be one of the strings 'idle',
'best-effort' or 'realtime'.
.. _ionice: https://linux.die.net/man/1/ionice
"""
@mutable_property
def prefer_recent(self):
"""
Whether to prefer older or newer backups in each time slot (a boolean).
Defaults to :data:`False` which means the oldest backup in each time
slot (an hour, a day, etc.) is preserved while newer backups in the
time slot are removed. You can set this to :data:`True` if you would
like to preserve the newest backup in each time slot instead.
"""
return False
@mutable_property
def removal_command(self):
"""
The command used to remove backups (a list of strings).
By default the command ``rm -fR`` is used. This choice was made because
it works regardless of whether the user's "backups to be rotated" are
files or directories or a mixture of both.
.. versionadded: 5.3
This option was added as a generalization of the idea suggested in
`pull request 11`_, which made it clear to me that being able to
customize the removal command has its uses.
.. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
"""
return ['rm', '-fR']
@required_property
def rotation_scheme(self):
"""
The rotation scheme to apply to backups (a dictionary).
Each key in this dictionary defines a rotation frequency (one of the
strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
'yearly') and each value defines a retention count:
- An integer value represents the number of backups to preserve in the
given rotation frequency, starting from the most recent backup and
counting back in time.
- The string 'always' means all backups in the given rotation frequency
are preserved (this is intended to be used with the biggest frequency
in the rotation scheme, e.g. yearly).
No backups are preserved for rotation frequencies that are not present
in the dictionary.
"""
@mutable_property
def strict(self):
"""
Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
The easiest way to explain the difference between strict and relaxed
rotation is using an example:
- If :attr:`strict` is :data:`True` and the number of hourly backups to
preserve is three, only backups created in the relevant time window
(the hour of the most recent backup and the two hours leading up to
that) will match the hourly frequency.
- If :attr:`strict` is :data:`False` then the three most recent backups
will all match the hourly frequency (and thus be preserved),
regardless of the calculated time window.
If the explanation above is not clear enough, here's a simple way to
decide whether you want to customize this behavior:
- If your backups are created at regular intervals and you never miss
an interval then the default (:data:`True`) is most likely fine.
- If your backups are created at irregular intervals then you may want
to set :attr:`strict` to :data:`False` to convince
:class:`RotateBackups` to preserve more backups.
"""
return True
def rotate_concurrent(self, *locations, **kw):
"""
Rotate the backups in the given locations concurrently.
:param locations: One or more values accepted by :func:`coerce_location()`.
:param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
This function uses :func:`rotate_backups()` to prepare rotation
commands for the given locations and then it removes backups in
parallel, one backup per mount point at a time.
The idea behind this approach is that parallel rotation is most useful
when the files to be removed are on different disks and so multiple
devices can be utilized at the same time.
Because mount points are per system :func:`rotate_concurrent()` will
also parallelize over backups located on multiple remote systems.
"""
timer = Timer()
pool = CommandPool(concurrency=10)
logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
for location in locations:
for cmd in self.rotate_backups(location, prepare=True, **kw):
pool.add(cmd)
if pool.num_commands > 0:
backups = pluralize(pool.num_commands, "backup")
logger.info("Preparing to rotate %s (in parallel) ..", backups)
pool.run()
logger.info("Successfully rotated %s in %s.", backups, timer)
def load_config_file(self, location):
"""
Load a rotation scheme and other options from a configuration file.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: The configured or given :class:`Location` object.
"""
location = coerce_location(location)
for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
if configured_location.match(location):
logger.verbose("Loading configuration for %s ..", location)
if rotation_scheme:
self.rotation_scheme = rotation_scheme
for name, value in options.items():
if value:
setattr(self, name, value)
# Create a new Location object based on the directory of the
# given location and the execution context of the configured
# location, because:
#
# 1. The directory of the configured location may be a filename
# pattern whereas we are interested in the expanded name.
#
# 2. The execution context of the given location may lack some
# details of the configured location.
return Location(
context=configured_location.context,
directory=location.directory,
)
logger.verbose("No configuration found for %s.", location)
return location
def collect_backups(self, location):
"""
Collect the backups at the given location.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: A sorted :class:`list` of :class:`Backup` objects (the
backups are sorted by their date).
:raises: :exc:`~exceptions.ValueError` when the given directory doesn't
exist or isn't readable.
"""
backups = []
location = coerce_location(location)
logger.info("Scanning %s for backups ..", location)
location.ensure_readable()
for entry in natsort(location.context.list_entries(location.directory)):
match = TIMESTAMP_PATTERN.search(entry)
if match:
if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
logger.verbose("Excluded %s (it matched the exclude list).", entry)
elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
logger.verbose("Excluded %s (it didn't match the include list).", entry)
else:
try:
backups.append(Backup(
pathname=os.path.join(location.directory, entry),
timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
))
except ValueError as e:
logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
else:
logger.debug("Failed to match time stamp in filename: %s", entry)
if backups:
logger.info("Found %i timestamped backups in %s.", len(backups), location)
return sorted(backups)
def group_backups(self, backups):
"""
Group backups collected by :func:`collect_backups()` by rotation frequencies.
:param backups: A :class:`set` of :class:`Backup` objects.
:returns: A :class:`dict` whose keys are the names of rotation
frequencies ('hourly', 'daily', etc.) and whose values are
dictionaries. Each nested dictionary contains lists of
:class:`Backup` objects that are grouped together because
they belong into the same time unit for the corresponding
rotation frequency.
"""
backups_by_frequency = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
for b in backups:
backups_by_frequency['minutely'][(b.year, b.month, b.day, b.hour, b.minute)].append(b)
backups_by_frequency['hourly'][(b.year, b.month, b.day, b.hour)].append(b)
backups_by_frequency['daily'][(b.year, b.month, b.day)].append(b)
backups_by_frequency['weekly'][(b.year, b.week)].append(b)
backups_by_frequency['monthly'][(b.year, b.month)].append(b)
backups_by_frequency['yearly'][b.year].append(b)
return backups_by_frequency
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
"""
Apply the user defined rotation scheme to the result of :func:`group_backups()`.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()`.
:param most_recent_backup: The :class:`~datetime.datetime` of the most
recent backup.
:raises: :exc:`~exceptions.ValueError` when the rotation scheme
dictionary is empty (this would cause all backups to be
deleted).
.. note:: This method mutates the given data structure by removing all
backups that should be removed to apply the user defined
rotation scheme.
"""
if not self.rotation_scheme:
raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
for frequency, backups in backups_by_frequency.items():
# Ignore frequencies not specified by the user.
if frequency not in self.rotation_scheme:
backups.clear()
else:
# Reduce the number of backups in each time slot of this
# rotation frequency to a single backup (the oldest one or the
# newest one).
for period, backups_in_period in backups.items():
index = -1 if self.prefer_recent else 0
selected_backup = sorted(backups_in_period)[index]
backups[period] = [selected_backup]
# Check if we need to rotate away backups in old periods.
retention_period = self.rotation_scheme[frequency]
if retention_period != 'always':
# Remove backups created before the minimum date of this
# rotation frequency? (relative to the most recent backup)
if self.strict:
minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
for period, backups_in_period in list(backups.items()):
for backup in backups_in_period:
if backup.timestamp < minimum_date:
backups_in_period.remove(backup)
if not backups_in_period:
backups.pop(period)
# If there are more periods remaining than the user
# requested to be preserved we delete the oldest one(s).
items_to_preserve = sorted(backups.items())[-retention_period:]
backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
"""
Collect the criteria used to decide which backups to preserve.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()` which has been
processed by :func:`apply_rotation_scheme()`.
:returns: A :class:`dict` with :class:`Backup` objects as keys and
:class:`list` objects containing strings (rotation
frequencies) as values.
"""
backups_to_preserve = collections.defaultdict(list)
for frequency, delta in ORDERED_FREQUENCIES:
for period in backups_by_frequency[frequency].values():
for backup in period:
backups_to_preserve[backup].append(frequency)
return backups_to_preserve
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.load_config_file
|
python
|
def load_config_file(self, location):
location = coerce_location(location)
for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
if configured_location.match(location):
logger.verbose("Loading configuration for %s ..", location)
if rotation_scheme:
self.rotation_scheme = rotation_scheme
for name, value in options.items():
if value:
setattr(self, name, value)
# Create a new Location object based on the directory of the
# given location and the execution context of the configured
# location, because:
#
# 1. The directory of the configured location may be a filename
# pattern whereas we are interested in the expanded name.
#
# 2. The execution context of the given location may lack some
# details of the configured location.
return Location(
context=configured_location.context,
directory=location.directory,
)
logger.verbose("No configuration found for %s.", location)
return location
|
Load a rotation scheme and other options from a configuration file.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: The configured or given :class:`Location` object.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L514-L544
|
[
"def coerce_location(value, **options):\n \"\"\"\n Coerce a string to a :class:`Location` object.\n\n :param value: The value to coerce (a string or :class:`Location` object).\n :param options: Any keyword arguments are passed on to\n :func:`~executor.contexts.create_context()`.\n :returns: A :class:`Location` object.\n \"\"\"\n # Location objects pass through untouched.\n if not isinstance(value, Location):\n # Other values are expected to be strings.\n if not isinstance(value, string_types):\n msg = \"Expected Location object or string, got %s instead!\"\n raise ValueError(msg % type(value))\n # Try to parse a remote location.\n ssh_alias, _, directory = value.partition(':')\n if ssh_alias and directory and '/' not in ssh_alias:\n options['ssh_alias'] = ssh_alias\n else:\n directory = value\n # Create the location object.\n value = Location(\n context=create_context(**options),\n directory=parse_path(directory),\n )\n return value\n",
"def load_config_file(configuration_file=None, expand=True):\n \"\"\"\n Load a configuration file with backup directories and rotation schemes.\n\n :param configuration_file: Override the pathname of the configuration file\n to load (a string or :data:`None`).\n :param expand: :data:`True` to expand filename patterns to their matches,\n :data:`False` otherwise.\n :returns: A generator of tuples with four values each:\n\n 1. An execution context created using :mod:`executor.contexts`.\n 2. The pathname of a directory with backups (a string).\n 3. A dictionary with the rotation scheme.\n 4. A dictionary with additional options.\n :raises: :exc:`~exceptions.ValueError` when `configuration_file` is given\n but doesn't exist or can't be loaded.\n\n This function is used by :class:`RotateBackups` to discover user defined\n rotation schemes and by :mod:`rotate_backups.cli` to discover directories\n for which backup rotation is configured. When `configuration_file` isn't\n given :class:`~update_dotdee.ConfigLoader` is used to search for\n configuration files in the following locations:\n\n - ``/etc/rotate-backups.ini`` and ``/etc/rotate-backups.d/*.ini``\n - ``~/.rotate-backups.ini`` and ``~/.rotate-backups.d/*.ini``\n - ``~/.config/rotate-backups.ini`` and ``~/.config/rotate-backups.d/*.ini``\n\n All of the available configuration files are loaded in the order given\n above, so that sections in user-specific configuration files override\n sections by the same name in system-wide configuration files.\n \"\"\"\n expand_notice_given = False\n if configuration_file:\n loader = ConfigLoader(available_files=[configuration_file], strict=True)\n else:\n loader = ConfigLoader(program_name='rotate-backups', strict=False)\n for section in loader.section_names:\n items = dict(loader.get_options(section))\n context_options = {}\n if coerce_boolean(items.get('use-sudo')):\n context_options['sudo'] = True\n if items.get('ssh-user'):\n context_options['ssh_user'] = items['ssh-user']\n 
location = coerce_location(section, **context_options)\n rotation_scheme = dict((name, coerce_retention_period(items[name]))\n for name in SUPPORTED_FREQUENCIES\n if name in items)\n options = dict(include_list=split(items.get('include-list', '')),\n exclude_list=split(items.get('exclude-list', '')),\n io_scheduling_class=items.get('ionice'),\n strict=coerce_boolean(items.get('strict', 'yes')),\n prefer_recent=coerce_boolean(items.get('prefer-recent', 'no')))\n # Don't override the value of the 'removal_command' property unless the\n # 'removal-command' configuration file option has a value set.\n if items.get('removal-command'):\n options['removal_command'] = shlex.split(items['removal-command'])\n # Expand filename patterns?\n if expand and location.have_wildcards:\n logger.verbose(\"Expanding filename pattern %s on %s ..\", location.directory, location.context)\n if location.is_remote and not expand_notice_given:\n logger.notice(\"Expanding remote filename patterns (may be slow) ..\")\n expand_notice_given = True\n for match in sorted(location.context.glob(location.directory)):\n if location.context.is_directory(match):\n logger.verbose(\"Matched directory: %s\", match)\n expanded = Location(context=location.context, directory=match)\n yield expanded, rotation_scheme, options\n else:\n logger.verbose(\"Ignoring match (not a directory): %s\", match)\n else:\n yield location, rotation_scheme, options\n"
] |
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
"""
Initialize a :class:`RotateBackups` object.
:param rotation_scheme: Used to set :attr:`rotation_scheme`.
:param options: Any keyword arguments are used to set the values of
instance properties that support assignment
(:attr:`config_file`, :attr:`dry_run`,
:attr:`exclude_list`, :attr:`include_list`,
:attr:`io_scheduling_class`, :attr:`removal_command`
and :attr:`strict`).
"""
options.update(rotation_scheme=rotation_scheme)
super(RotateBackups, self).__init__(**options)
@mutable_property
def config_file(self):
"""
The pathname of a configuration file (a string or :data:`None`).
When this property is set :func:`rotate_backups()` will use
:func:`load_config_file()` to give the user (operator) a chance to set
the rotation scheme and other options via a configuration file.
"""
@mutable_property
def dry_run(self):
"""
:data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
If this is :data:`True` then :func:`rotate_backups()` won't make any
actual changes, which provides a 'preview' of the effect of the
rotation scheme. Right now this is only useful in the command line
interface because there's no return value.
"""
return False
@cached_property(writable=True)
def exclude_list(self):
"""
Filename patterns to exclude specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When
:func:`collect_backups()` encounters a backup whose name matches any of
the patterns in this list the backup will be ignored, *even if it also
matches the include list* (it's the only logical way to combine both
lists).
:see also: :attr:`include_list`
"""
return []
@cached_property(writable=True)
def include_list(self):
"""
Filename patterns to select specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When it's not
empty :func:`collect_backups()` will only collect backups whose name
matches a pattern in the list.
:see also: :attr:`exclude_list`
"""
return []
@mutable_property
def io_scheduling_class(self):
"""
The I/O scheduling class for backup rotation (a string or :data:`None`).
When this property is set (and :attr:`~Location.have_ionice` is
:data:`True`) then ionice_ will be used to set the I/O scheduling class
for backup rotation. This can be useful to reduce the impact of backup
rotation on the rest of the system.
The value of this property is expected to be one of the strings 'idle',
'best-effort' or 'realtime'.
.. _ionice: https://linux.die.net/man/1/ionice
"""
@mutable_property
def prefer_recent(self):
"""
Whether to prefer older or newer backups in each time slot (a boolean).
Defaults to :data:`False` which means the oldest backup in each time
slot (an hour, a day, etc.) is preserved while newer backups in the
time slot are removed. You can set this to :data:`True` if you would
like to preserve the newest backup in each time slot instead.
"""
return False
@mutable_property
def removal_command(self):
"""
The command used to remove backups (a list of strings).
By default the command ``rm -fR`` is used. This choice was made because
it works regardless of whether the user's "backups to be rotated" are
files or directories or a mixture of both.
.. versionadded: 5.3
This option was added as a generalization of the idea suggested in
`pull request 11`_, which made it clear to me that being able to
customize the removal command has its uses.
.. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
"""
return ['rm', '-fR']
@required_property
def rotation_scheme(self):
"""
The rotation scheme to apply to backups (a dictionary).
Each key in this dictionary defines a rotation frequency (one of the
strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
'yearly') and each value defines a retention count:
- An integer value represents the number of backups to preserve in the
given rotation frequency, starting from the most recent backup and
counting back in time.
- The string 'always' means all backups in the given rotation frequency
are preserved (this is intended to be used with the biggest frequency
in the rotation scheme, e.g. yearly).
No backups are preserved for rotation frequencies that are not present
in the dictionary.
"""
@mutable_property
def strict(self):
"""
Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
The easiest way to explain the difference between strict and relaxed
rotation is using an example:
- If :attr:`strict` is :data:`True` and the number of hourly backups to
preserve is three, only backups created in the relevant time window
(the hour of the most recent backup and the two hours leading up to
that) will match the hourly frequency.
- If :attr:`strict` is :data:`False` then the three most recent backups
will all match the hourly frequency (and thus be preserved),
regardless of the calculated time window.
If the explanation above is not clear enough, here's a simple way to
decide whether you want to customize this behavior:
- If your backups are created at regular intervals and you never miss
an interval then the default (:data:`True`) is most likely fine.
- If your backups are created at irregular intervals then you may want
to set :attr:`strict` to :data:`False` to convince
:class:`RotateBackups` to preserve more backups.
"""
return True
def rotate_concurrent(self, *locations, **kw):
"""
Rotate the backups in the given locations concurrently.
:param locations: One or more values accepted by :func:`coerce_location()`.
:param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
This function uses :func:`rotate_backups()` to prepare rotation
commands for the given locations and then it removes backups in
parallel, one backup per mount point at a time.
The idea behind this approach is that parallel rotation is most useful
when the files to be removed are on different disks and so multiple
devices can be utilized at the same time.
Because mount points are per system :func:`rotate_concurrent()` will
also parallelize over backups located on multiple remote systems.
"""
timer = Timer()
pool = CommandPool(concurrency=10)
logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
for location in locations:
for cmd in self.rotate_backups(location, prepare=True, **kw):
pool.add(cmd)
if pool.num_commands > 0:
backups = pluralize(pool.num_commands, "backup")
logger.info("Preparing to rotate %s (in parallel) ..", backups)
pool.run()
logger.info("Successfully rotated %s in %s.", backups, timer)
def rotate_backups(self, location, load_config=True, prepare=False):
"""
Rotate the backups in a directory according to a flexible rotation scheme.
:param location: Any value accepted by :func:`coerce_location()`.
:param load_config: If :data:`True` (so by default) the rotation scheme
and other options can be customized by the user in
a configuration file. In this case the caller's
arguments are only used when the configuration file
doesn't define a configuration for the location.
:param prepare: If this is :data:`True` (not the default) then
:func:`rotate_backups()` will prepare the required
rotation commands without running them.
:returns: A list with the rotation commands
(:class:`~executor.ExternalCommand` objects).
:raises: :exc:`~exceptions.ValueError` when the given location doesn't
exist, isn't readable or isn't writable. The third check is
only performed when dry run isn't enabled.
This function binds the main methods of the :class:`RotateBackups`
class together to implement backup rotation with an easy to use Python
API. If you're using `rotate-backups` as a Python API and the default
behavior is not satisfactory, consider writing your own
:func:`rotate_backups()` function based on the underlying
:func:`collect_backups()`, :func:`group_backups()`,
:func:`apply_rotation_scheme()` and
:func:`find_preservation_criteria()` methods.
"""
rotation_commands = []
location = coerce_location(location)
# Load configuration overrides by user?
if load_config:
location = self.load_config_file(location)
# Collect the backups in the given directory.
sorted_backups = self.collect_backups(location)
if not sorted_backups:
logger.info("No backups found in %s.", location)
return
# Make sure the directory is writable.
if not self.dry_run:
location.ensure_writable()
most_recent_backup = sorted_backups[-1]
# Group the backups by the rotation frequencies.
backups_by_frequency = self.group_backups(sorted_backups)
# Apply the user defined rotation scheme.
self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
# Find which backups to preserve and why.
backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
# Apply the calculated rotation scheme.
for backup in sorted_backups:
friendly_name = backup.pathname
if not location.is_remote:
# Use human friendly pathname formatting for local backups.
friendly_name = format_path(backup.pathname)
if backup in backups_to_preserve:
matching_periods = backups_to_preserve[backup]
logger.info("Preserving %s (matches %s retention %s) ..",
friendly_name, concatenate(map(repr, matching_periods)),
"period" if len(matching_periods) == 1 else "periods")
else:
logger.info("Deleting %s ..", friendly_name)
if not self.dry_run:
# Copy the list with the (possibly user defined) removal command.
removal_command = list(self.removal_command)
# Add the pathname of the backup as the final argument.
removal_command.append(backup.pathname)
# Construct the command object.
command = location.context.prepare(
command=removal_command,
group_by=(location.ssh_alias, location.mount_point),
ionice=self.io_scheduling_class,
)
rotation_commands.append(command)
if not prepare:
timer = Timer()
command.wait()
logger.verbose("Deleted %s in %s.", friendly_name, timer)
if len(backups_to_preserve) == len(sorted_backups):
logger.info("Nothing to do! (all backups preserved)")
return rotation_commands
def collect_backups(self, location):
"""
Collect the backups at the given location.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: A sorted :class:`list` of :class:`Backup` objects (the
backups are sorted by their date).
:raises: :exc:`~exceptions.ValueError` when the given directory doesn't
exist or isn't readable.
"""
backups = []
location = coerce_location(location)
logger.info("Scanning %s for backups ..", location)
location.ensure_readable()
for entry in natsort(location.context.list_entries(location.directory)):
match = TIMESTAMP_PATTERN.search(entry)
if match:
if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
logger.verbose("Excluded %s (it matched the exclude list).", entry)
elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
logger.verbose("Excluded %s (it didn't match the include list).", entry)
else:
try:
backups.append(Backup(
pathname=os.path.join(location.directory, entry),
timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
))
except ValueError as e:
logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
else:
logger.debug("Failed to match time stamp in filename: %s", entry)
if backups:
logger.info("Found %i timestamped backups in %s.", len(backups), location)
return sorted(backups)
def group_backups(self, backups):
"""
Group backups collected by :func:`collect_backups()` by rotation frequencies.
:param backups: A :class:`set` of :class:`Backup` objects.
:returns: A :class:`dict` whose keys are the names of rotation
frequencies ('hourly', 'daily', etc.) and whose values are
dictionaries. Each nested dictionary contains lists of
:class:`Backup` objects that are grouped together because
they belong into the same time unit for the corresponding
rotation frequency.
"""
backups_by_frequency = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
for b in backups:
backups_by_frequency['minutely'][(b.year, b.month, b.day, b.hour, b.minute)].append(b)
backups_by_frequency['hourly'][(b.year, b.month, b.day, b.hour)].append(b)
backups_by_frequency['daily'][(b.year, b.month, b.day)].append(b)
backups_by_frequency['weekly'][(b.year, b.week)].append(b)
backups_by_frequency['monthly'][(b.year, b.month)].append(b)
backups_by_frequency['yearly'][b.year].append(b)
return backups_by_frequency
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
"""
Apply the user defined rotation scheme to the result of :func:`group_backups()`.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()`.
:param most_recent_backup: The :class:`~datetime.datetime` of the most
recent backup.
:raises: :exc:`~exceptions.ValueError` when the rotation scheme
dictionary is empty (this would cause all backups to be
deleted).
.. note:: This method mutates the given data structure by removing all
backups that should be removed to apply the user defined
rotation scheme.
"""
if not self.rotation_scheme:
raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
for frequency, backups in backups_by_frequency.items():
# Ignore frequencies not specified by the user.
if frequency not in self.rotation_scheme:
backups.clear()
else:
# Reduce the number of backups in each time slot of this
# rotation frequency to a single backup (the oldest one or the
# newest one).
for period, backups_in_period in backups.items():
index = -1 if self.prefer_recent else 0
selected_backup = sorted(backups_in_period)[index]
backups[period] = [selected_backup]
# Check if we need to rotate away backups in old periods.
retention_period = self.rotation_scheme[frequency]
if retention_period != 'always':
# Remove backups created before the minimum date of this
# rotation frequency? (relative to the most recent backup)
if self.strict:
minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
for period, backups_in_period in list(backups.items()):
for backup in backups_in_period:
if backup.timestamp < minimum_date:
backups_in_period.remove(backup)
if not backups_in_period:
backups.pop(period)
# If there are more periods remaining than the user
# requested to be preserved we delete the oldest one(s).
items_to_preserve = sorted(backups.items())[-retention_period:]
backups_by_frequency[frequency] = dict(items_to_preserve)
def find_preservation_criteria(self, backups_by_frequency):
"""
Collect the criteria used to decide which backups to preserve.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()` which has been
processed by :func:`apply_rotation_scheme()`.
:returns: A :class:`dict` with :class:`Backup` objects as keys and
:class:`list` objects containing strings (rotation
frequencies) as values.
"""
backups_to_preserve = collections.defaultdict(list)
for frequency, delta in ORDERED_FREQUENCIES:
for period in backups_by_frequency[frequency].values():
for backup in period:
backups_to_preserve[backup].append(frequency)
return backups_to_preserve
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.collect_backups
|
python
|
def collect_backups(self, location):
backups = []
location = coerce_location(location)
logger.info("Scanning %s for backups ..", location)
location.ensure_readable()
for entry in natsort(location.context.list_entries(location.directory)):
match = TIMESTAMP_PATTERN.search(entry)
if match:
if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
logger.verbose("Excluded %s (it matched the exclude list).", entry)
elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
logger.verbose("Excluded %s (it didn't match the include list).", entry)
else:
try:
backups.append(Backup(
pathname=os.path.join(location.directory, entry),
timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
))
except ValueError as e:
logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
else:
logger.debug("Failed to match time stamp in filename: %s", entry)
if backups:
logger.info("Found %i timestamped backups in %s.", len(backups), location)
return sorted(backups)
|
Collect the backups at the given location.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: A sorted :class:`list` of :class:`Backup` objects (the
backups are sorted by their date).
:raises: :exc:`~exceptions.ValueError` when the given directory doesn't
exist or isn't readable.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L546-L579
|
[
"def coerce_location(value, **options):\n \"\"\"\n Coerce a string to a :class:`Location` object.\n\n :param value: The value to coerce (a string or :class:`Location` object).\n :param options: Any keyword arguments are passed on to\n :func:`~executor.contexts.create_context()`.\n :returns: A :class:`Location` object.\n \"\"\"\n # Location objects pass through untouched.\n if not isinstance(value, Location):\n # Other values are expected to be strings.\n if not isinstance(value, string_types):\n msg = \"Expected Location object or string, got %s instead!\"\n raise ValueError(msg % type(value))\n # Try to parse a remote location.\n ssh_alias, _, directory = value.partition(':')\n if ssh_alias and directory and '/' not in ssh_alias:\n options['ssh_alias'] = ssh_alias\n else:\n directory = value\n # Create the location object.\n value = Location(\n context=create_context(**options),\n directory=parse_path(directory),\n )\n return value\n"
] |
class RotateBackups(PropertyManager):
    """Python API for the ``rotate-backups`` program."""
    def __init__(self, rotation_scheme, **options):
        """
        Initialize a :class:`RotateBackups` object.
        :param rotation_scheme: Used to set :attr:`rotation_scheme`.
        :param options: Any keyword arguments are used to set the values of
                        instance properties that support assignment
                        (:attr:`config_file`, :attr:`dry_run`,
                        :attr:`exclude_list`, :attr:`include_list`,
                        :attr:`io_scheduling_class`, :attr:`removal_command`
                        and :attr:`strict`).
        """
        options.update(rotation_scheme=rotation_scheme)
        super(RotateBackups, self).__init__(**options)
    @mutable_property
    def config_file(self):
        """
        The pathname of a configuration file (a string or :data:`None`).
        When this property is set :func:`rotate_backups()` will use
        :func:`load_config_file()` to give the user (operator) a chance to set
        the rotation scheme and other options via a configuration file.
        """
    @mutable_property
    def dry_run(self):
        """
        :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
        If this is :data:`True` then :func:`rotate_backups()` won't make any
        actual changes, which provides a 'preview' of the effect of the
        rotation scheme. Right now this is only useful in the command line
        interface because there's no return value.
        """
        return False
    @cached_property(writable=True)
    def exclude_list(self):
        """
        Filename patterns to exclude specific backups (a list of strings).
        This is a list of strings with :mod:`fnmatch` patterns. When
        :func:`collect_backups()` encounters a backup whose name matches any of
        the patterns in this list the backup will be ignored, *even if it also
        matches the include list* (it's the only logical way to combine both
        lists).
        :see also: :attr:`include_list`
        """
        return []
    @cached_property(writable=True)
    def include_list(self):
        """
        Filename patterns to select specific backups (a list of strings).
        This is a list of strings with :mod:`fnmatch` patterns. When it's not
        empty :func:`collect_backups()` will only collect backups whose name
        matches a pattern in the list.
        :see also: :attr:`exclude_list`
        """
        return []
    @mutable_property
    def io_scheduling_class(self):
        """
        The I/O scheduling class for backup rotation (a string or :data:`None`).
        When this property is set (and :attr:`~Location.have_ionice` is
        :data:`True`) then ionice_ will be used to set the I/O scheduling class
        for backup rotation. This can be useful to reduce the impact of backup
        rotation on the rest of the system.
        The value of this property is expected to be one of the strings 'idle',
        'best-effort' or 'realtime'.
        .. _ionice: https://linux.die.net/man/1/ionice
        """
    @mutable_property
    def prefer_recent(self):
        """
        Whether to prefer older or newer backups in each time slot (a boolean).
        Defaults to :data:`False` which means the oldest backup in each time
        slot (an hour, a day, etc.) is preserved while newer backups in the
        time slot are removed. You can set this to :data:`True` if you would
        like to preserve the newest backup in each time slot instead.
        """
        return False
    @mutable_property
    def removal_command(self):
        """
        The command used to remove backups (a list of strings).
        By default the command ``rm -fR`` is used. This choice was made because
        it works regardless of whether the user's "backups to be rotated" are
        files or directories or a mixture of both.
        .. versionadded: 5.3
           This option was added as a generalization of the idea suggested in
           `pull request 11`_, which made it clear to me that being able to
           customize the removal command has its uses.
        .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
        """
        return ['rm', '-fR']
    @required_property
    def rotation_scheme(self):
        """
        The rotation scheme to apply to backups (a dictionary).
        Each key in this dictionary defines a rotation frequency (one of the
        strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
        'yearly') and each value defines a retention count:
        - An integer value represents the number of backups to preserve in the
          given rotation frequency, starting from the most recent backup and
          counting back in time.
        - The string 'always' means all backups in the given rotation frequency
          are preserved (this is intended to be used with the biggest frequency
          in the rotation scheme, e.g. yearly).
        No backups are preserved for rotation frequencies that are not present
        in the dictionary.
        """
    @mutable_property
    def strict(self):
        """
        Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
        The easiest way to explain the difference between strict and relaxed
        rotation is using an example:
        - If :attr:`strict` is :data:`True` and the number of hourly backups to
          preserve is three, only backups created in the relevant time window
          (the hour of the most recent backup and the two hours leading up to
          that) will match the hourly frequency.
        - If :attr:`strict` is :data:`False` then the three most recent backups
          will all match the hourly frequency (and thus be preserved),
          regardless of the calculated time window.
        If the explanation above is not clear enough, here's a simple way to
        decide whether you want to customize this behavior:
        - If your backups are created at regular intervals and you never miss
          an interval then the default (:data:`True`) is most likely fine.
        - If your backups are created at irregular intervals then you may want
          to set :attr:`strict` to :data:`False` to convince
          :class:`RotateBackups` to preserve more backups.
        """
        return True
    def rotate_concurrent(self, *locations, **kw):
        """
        Rotate the backups in the given locations concurrently.
        :param locations: One or more values accepted by :func:`coerce_location()`.
        :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
        This function uses :func:`rotate_backups()` to prepare rotation
        commands for the given locations and then it removes backups in
        parallel, one backup per mount point at a time.
        The idea behind this approach is that parallel rotation is most useful
        when the files to be removed are on different disks and so multiple
        devices can be utilized at the same time.
        Because mount points are per system :func:`rotate_concurrent()` will
        also parallelize over backups located on multiple remote systems.
        """
        timer = Timer()
        pool = CommandPool(concurrency=10)
        logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
        for location in locations:
            # NOTE: relies on rotate_backups() always returning an iterable,
            # even when a location contains no backups (see the fix below).
            for cmd in self.rotate_backups(location, prepare=True, **kw):
                pool.add(cmd)
        if pool.num_commands > 0:
            backups = pluralize(pool.num_commands, "backup")
            logger.info("Preparing to rotate %s (in parallel) ..", backups)
            pool.run()
            logger.info("Successfully rotated %s in %s.", backups, timer)
    def rotate_backups(self, location, load_config=True, prepare=False):
        """
        Rotate the backups in a directory according to a flexible rotation scheme.
        :param location: Any value accepted by :func:`coerce_location()`.
        :param load_config: If :data:`True` (so by default) the rotation scheme
                            and other options can be customized by the user in
                            a configuration file. In this case the caller's
                            arguments are only used when the configuration file
                            doesn't define a configuration for the location.
        :param prepare: If this is :data:`True` (not the default) then
                        :func:`rotate_backups()` will prepare the required
                        rotation commands without running them.
        :returns: A list with the rotation commands
                  (:class:`~executor.ExternalCommand` objects).
        :raises: :exc:`~exceptions.ValueError` when the given location doesn't
                 exist, isn't readable or isn't writable. The third check is
                 only performed when dry run isn't enabled.
        This function binds the main methods of the :class:`RotateBackups`
        class together to implement backup rotation with an easy to use Python
        API. If you're using `rotate-backups` as a Python API and the default
        behavior is not satisfactory, consider writing your own
        :func:`rotate_backups()` function based on the underlying
        :func:`collect_backups()`, :func:`group_backups()`,
        :func:`apply_rotation_scheme()` and
        :func:`find_preservation_criteria()` methods.
        """
        rotation_commands = []
        location = coerce_location(location)
        # Load configuration overrides by user?
        if load_config:
            location = self.load_config_file(location)
        # Collect the backups in the given directory.
        sorted_backups = self.collect_backups(location)
        if not sorted_backups:
            logger.info("No backups found in %s.", location)
            # Bug fix: return the (empty) command list instead of None so the
            # documented return type holds and rotate_concurrent() can iterate
            # the result without raising TypeError.
            return rotation_commands
        # Make sure the directory is writable.
        if not self.dry_run:
            location.ensure_writable()
        most_recent_backup = sorted_backups[-1]
        # Group the backups by the rotation frequencies.
        backups_by_frequency = self.group_backups(sorted_backups)
        # Apply the user defined rotation scheme.
        self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
        # Find which backups to preserve and why.
        backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
        # Apply the calculated rotation scheme.
        for backup in sorted_backups:
            friendly_name = backup.pathname
            if not location.is_remote:
                # Use human friendly pathname formatting for local backups.
                friendly_name = format_path(backup.pathname)
            if backup in backups_to_preserve:
                matching_periods = backups_to_preserve[backup]
                logger.info("Preserving %s (matches %s retention %s) ..",
                            friendly_name, concatenate(map(repr, matching_periods)),
                            "period" if len(matching_periods) == 1 else "periods")
            else:
                logger.info("Deleting %s ..", friendly_name)
                if not self.dry_run:
                    # Copy the list with the (possibly user defined) removal command.
                    removal_command = list(self.removal_command)
                    # Add the pathname of the backup as the final argument.
                    removal_command.append(backup.pathname)
                    # Construct the command object.
                    command = location.context.prepare(
                        command=removal_command,
                        group_by=(location.ssh_alias, location.mount_point),
                        ionice=self.io_scheduling_class,
                    )
                    rotation_commands.append(command)
                    if not prepare:
                        timer = Timer()
                        command.wait()
                        logger.verbose("Deleted %s in %s.", friendly_name, timer)
        if len(backups_to_preserve) == len(sorted_backups):
            logger.info("Nothing to do! (all backups preserved)")
        return rotation_commands
    def load_config_file(self, location):
        """
        Load a rotation scheme and other options from a configuration file.
        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: The configured or given :class:`Location` object.
        """
        location = coerce_location(location)
        for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
            if configured_location.match(location):
                logger.verbose("Loading configuration for %s ..", location)
                if rotation_scheme:
                    self.rotation_scheme = rotation_scheme
                for name, value in options.items():
                    if value:
                        setattr(self, name, value)
                # Create a new Location object based on the directory of the
                # given location and the execution context of the configured
                # location, because:
                #
                # 1. The directory of the configured location may be a filename
                #    pattern whereas we are interested in the expanded name.
                #
                # 2. The execution context of the given location may lack some
                #    details of the configured location.
                return Location(
                    context=configured_location.context,
                    directory=location.directory,
                )
        logger.verbose("No configuration found for %s.", location)
        return location
    def group_backups(self, backups):
        """
        Group backups collected by :func:`collect_backups()` by rotation frequencies.
        :param backups: A :class:`set` of :class:`Backup` objects.
        :returns: A :class:`dict` whose keys are the names of rotation
                  frequencies ('hourly', 'daily', etc.) and whose values are
                  dictionaries. Each nested dictionary contains lists of
                  :class:`Backup` objects that are grouped together because
                  they belong into the same time unit for the corresponding
                  rotation frequency.
        """
        backups_by_frequency = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
        for b in backups:
            backups_by_frequency['minutely'][(b.year, b.month, b.day, b.hour, b.minute)].append(b)
            backups_by_frequency['hourly'][(b.year, b.month, b.day, b.hour)].append(b)
            backups_by_frequency['daily'][(b.year, b.month, b.day)].append(b)
            backups_by_frequency['weekly'][(b.year, b.week)].append(b)
            backups_by_frequency['monthly'][(b.year, b.month)].append(b)
            backups_by_frequency['yearly'][b.year].append(b)
        return backups_by_frequency
    def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
        """
        Apply the user defined rotation scheme to the result of :func:`group_backups()`.
        :param backups_by_frequency: A :class:`dict` in the format generated by
                                     :func:`group_backups()`.
        :param most_recent_backup: The :class:`~datetime.datetime` of the most
                                   recent backup.
        :raises: :exc:`~exceptions.ValueError` when the rotation scheme
                 dictionary is empty (this would cause all backups to be
                 deleted).
        .. note:: This method mutates the given data structure by removing all
                  backups that should be removed to apply the user defined
                  rotation scheme.
        """
        if not self.rotation_scheme:
            raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
        for frequency, backups in backups_by_frequency.items():
            # Ignore frequencies not specified by the user.
            if frequency not in self.rotation_scheme:
                backups.clear()
            else:
                # Reduce the number of backups in each time slot of this
                # rotation frequency to a single backup (the oldest one or the
                # newest one).
                for period, backups_in_period in backups.items():
                    index = -1 if self.prefer_recent else 0
                    selected_backup = sorted(backups_in_period)[index]
                    backups[period] = [selected_backup]
                # Check if we need to rotate away backups in old periods.
                retention_period = self.rotation_scheme[frequency]
                if retention_period != 'always':
                    # Remove backups created before the minimum date of this
                    # rotation frequency? (relative to the most recent backup)
                    if self.strict:
                        minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                        for period, backups_in_period in list(backups.items()):
                            for backup in backups_in_period:
                                if backup.timestamp < minimum_date:
                                    backups_in_period.remove(backup)
                            if not backups_in_period:
                                backups.pop(period)
                    # If there are more periods remaining than the user
                    # requested to be preserved we delete the oldest one(s).
                    items_to_preserve = sorted(backups.items())[-retention_period:]
                    backups_by_frequency[frequency] = dict(items_to_preserve)
    def find_preservation_criteria(self, backups_by_frequency):
        """
        Collect the criteria used to decide which backups to preserve.
        :param backups_by_frequency: A :class:`dict` in the format generated by
                                     :func:`group_backups()` which has been
                                     processed by :func:`apply_rotation_scheme()`.
        :returns: A :class:`dict` with :class:`Backup` objects as keys and
                  :class:`list` objects containing strings (rotation
                  frequencies) as values.
        """
        backups_to_preserve = collections.defaultdict(list)
        for frequency, delta in ORDERED_FREQUENCIES:
            for period in backups_by_frequency[frequency].values():
                for backup in period:
                    backups_to_preserve[backup].append(frequency)
        return backups_to_preserve
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.group_backups
|
python
|
def group_backups(self, backups):
    """
    Group backups collected by :func:`collect_backups()` by rotation frequencies.

    :param backups: An iterable of :class:`Backup` objects.
    :returns: A :class:`dict` mapping each supported rotation frequency name
              ('minutely', 'hourly', etc.) to a dictionary whose keys are
              time-slot tuples and whose values are lists of the
              :class:`Backup` objects that fall into that time slot.
    """
    # Start with an empty bucket for every supported rotation frequency.
    grouped = {name: collections.defaultdict(list) for name in SUPPORTED_FREQUENCIES}
    for backup in backups:
        # File each backup under every granularity it belongs to, from the
        # most specific time slot (minute) up to the least specific (year).
        day_key = (backup.year, backup.month, backup.day)
        grouped['minutely'][day_key + (backup.hour, backup.minute)].append(backup)
        grouped['hourly'][day_key + (backup.hour,)].append(backup)
        grouped['daily'][day_key].append(backup)
        grouped['weekly'][(backup.year, backup.week)].append(backup)
        grouped['monthly'][(backup.year, backup.month)].append(backup)
        grouped['yearly'][backup.year].append(backup)
    return grouped
|
Group backups collected by :func:`collect_backups()` by rotation frequencies.
:param backups: A :class:`set` of :class:`Backup` objects.
:returns: A :class:`dict` whose keys are the names of rotation
frequencies ('hourly', 'daily', etc.) and whose values are
dictionaries. Each nested dictionary contains lists of
:class:`Backup` objects that are grouped together because
they belong into the same time unit for the corresponding
rotation frequency.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L581-L601
| null |
class RotateBackups(PropertyManager):
    """Python API for the ``rotate-backups`` program."""
    def __init__(self, rotation_scheme, **options):
        """
        Initialize a :class:`RotateBackups` object.
        :param rotation_scheme: Used to set :attr:`rotation_scheme`.
        :param options: Any keyword arguments are used to set the values of
                        instance properties that support assignment
                        (:attr:`config_file`, :attr:`dry_run`,
                        :attr:`exclude_list`, :attr:`include_list`,
                        :attr:`io_scheduling_class`, :attr:`removal_command`
                        and :attr:`strict`).
        """
        options.update(rotation_scheme=rotation_scheme)
        super(RotateBackups, self).__init__(**options)
    @mutable_property
    def config_file(self):
        """
        The pathname of a configuration file (a string or :data:`None`).
        When this property is set :func:`rotate_backups()` will use
        :func:`load_config_file()` to give the user (operator) a chance to set
        the rotation scheme and other options via a configuration file.
        """
    @mutable_property
    def dry_run(self):
        """
        :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
        If this is :data:`True` then :func:`rotate_backups()` won't make any
        actual changes, which provides a 'preview' of the effect of the
        rotation scheme. Right now this is only useful in the command line
        interface because there's no return value.
        """
        return False
    @cached_property(writable=True)
    def exclude_list(self):
        """
        Filename patterns to exclude specific backups (a list of strings).
        This is a list of strings with :mod:`fnmatch` patterns. When
        :func:`collect_backups()` encounters a backup whose name matches any of
        the patterns in this list the backup will be ignored, *even if it also
        matches the include list* (it's the only logical way to combine both
        lists).
        :see also: :attr:`include_list`
        """
        return []
    @cached_property(writable=True)
    def include_list(self):
        """
        Filename patterns to select specific backups (a list of strings).
        This is a list of strings with :mod:`fnmatch` patterns. When it's not
        empty :func:`collect_backups()` will only collect backups whose name
        matches a pattern in the list.
        :see also: :attr:`exclude_list`
        """
        return []
    @mutable_property
    def io_scheduling_class(self):
        """
        The I/O scheduling class for backup rotation (a string or :data:`None`).
        When this property is set (and :attr:`~Location.have_ionice` is
        :data:`True`) then ionice_ will be used to set the I/O scheduling class
        for backup rotation. This can be useful to reduce the impact of backup
        rotation on the rest of the system.
        The value of this property is expected to be one of the strings 'idle',
        'best-effort' or 'realtime'.
        .. _ionice: https://linux.die.net/man/1/ionice
        """
    @mutable_property
    def prefer_recent(self):
        """
        Whether to prefer older or newer backups in each time slot (a boolean).
        Defaults to :data:`False` which means the oldest backup in each time
        slot (an hour, a day, etc.) is preserved while newer backups in the
        time slot are removed. You can set this to :data:`True` if you would
        like to preserve the newest backup in each time slot instead.
        """
        return False
    @mutable_property
    def removal_command(self):
        """
        The command used to remove backups (a list of strings).
        By default the command ``rm -fR`` is used. This choice was made because
        it works regardless of whether the user's "backups to be rotated" are
        files or directories or a mixture of both.
        .. versionadded: 5.3
           This option was added as a generalization of the idea suggested in
           `pull request 11`_, which made it clear to me that being able to
           customize the removal command has its uses.
        .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
        """
        return ['rm', '-fR']
    @required_property
    def rotation_scheme(self):
        """
        The rotation scheme to apply to backups (a dictionary).
        Each key in this dictionary defines a rotation frequency (one of the
        strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
        'yearly') and each value defines a retention count:
        - An integer value represents the number of backups to preserve in the
          given rotation frequency, starting from the most recent backup and
          counting back in time.
        - The string 'always' means all backups in the given rotation frequency
          are preserved (this is intended to be used with the biggest frequency
          in the rotation scheme, e.g. yearly).
        No backups are preserved for rotation frequencies that are not present
        in the dictionary.
        """
    @mutable_property
    def strict(self):
        """
        Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
        The easiest way to explain the difference between strict and relaxed
        rotation is using an example:
        - If :attr:`strict` is :data:`True` and the number of hourly backups to
          preserve is three, only backups created in the relevant time window
          (the hour of the most recent backup and the two hours leading up to
          that) will match the hourly frequency.
        - If :attr:`strict` is :data:`False` then the three most recent backups
          will all match the hourly frequency (and thus be preserved),
          regardless of the calculated time window.
        If the explanation above is not clear enough, here's a simple way to
        decide whether you want to customize this behavior:
        - If your backups are created at regular intervals and you never miss
          an interval then the default (:data:`True`) is most likely fine.
        - If your backups are created at irregular intervals then you may want
          to set :attr:`strict` to :data:`False` to convince
          :class:`RotateBackups` to preserve more backups.
        """
        return True
    def rotate_concurrent(self, *locations, **kw):
        """
        Rotate the backups in the given locations concurrently.
        :param locations: One or more values accepted by :func:`coerce_location()`.
        :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
        This function uses :func:`rotate_backups()` to prepare rotation
        commands for the given locations and then it removes backups in
        parallel, one backup per mount point at a time.
        The idea behind this approach is that parallel rotation is most useful
        when the files to be removed are on different disks and so multiple
        devices can be utilized at the same time.
        Because mount points are per system :func:`rotate_concurrent()` will
        also parallelize over backups located on multiple remote systems.
        """
        timer = Timer()
        pool = CommandPool(concurrency=10)
        logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
        for location in locations:
            # NOTE: relies on rotate_backups() always returning an iterable,
            # even when a location contains no backups (see the fix below).
            for cmd in self.rotate_backups(location, prepare=True, **kw):
                pool.add(cmd)
        if pool.num_commands > 0:
            backups = pluralize(pool.num_commands, "backup")
            logger.info("Preparing to rotate %s (in parallel) ..", backups)
            pool.run()
            logger.info("Successfully rotated %s in %s.", backups, timer)
    def rotate_backups(self, location, load_config=True, prepare=False):
        """
        Rotate the backups in a directory according to a flexible rotation scheme.
        :param location: Any value accepted by :func:`coerce_location()`.
        :param load_config: If :data:`True` (so by default) the rotation scheme
                            and other options can be customized by the user in
                            a configuration file. In this case the caller's
                            arguments are only used when the configuration file
                            doesn't define a configuration for the location.
        :param prepare: If this is :data:`True` (not the default) then
                        :func:`rotate_backups()` will prepare the required
                        rotation commands without running them.
        :returns: A list with the rotation commands
                  (:class:`~executor.ExternalCommand` objects).
        :raises: :exc:`~exceptions.ValueError` when the given location doesn't
                 exist, isn't readable or isn't writable. The third check is
                 only performed when dry run isn't enabled.
        This function binds the main methods of the :class:`RotateBackups`
        class together to implement backup rotation with an easy to use Python
        API. If you're using `rotate-backups` as a Python API and the default
        behavior is not satisfactory, consider writing your own
        :func:`rotate_backups()` function based on the underlying
        :func:`collect_backups()`, :func:`group_backups()`,
        :func:`apply_rotation_scheme()` and
        :func:`find_preservation_criteria()` methods.
        """
        rotation_commands = []
        location = coerce_location(location)
        # Load configuration overrides by user?
        if load_config:
            location = self.load_config_file(location)
        # Collect the backups in the given directory.
        sorted_backups = self.collect_backups(location)
        if not sorted_backups:
            logger.info("No backups found in %s.", location)
            # Bug fix: return the (empty) command list instead of None so the
            # documented return type holds and rotate_concurrent() can iterate
            # the result without raising TypeError.
            return rotation_commands
        # Make sure the directory is writable.
        if not self.dry_run:
            location.ensure_writable()
        most_recent_backup = sorted_backups[-1]
        # Group the backups by the rotation frequencies.
        backups_by_frequency = self.group_backups(sorted_backups)
        # Apply the user defined rotation scheme.
        self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
        # Find which backups to preserve and why.
        backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
        # Apply the calculated rotation scheme.
        for backup in sorted_backups:
            friendly_name = backup.pathname
            if not location.is_remote:
                # Use human friendly pathname formatting for local backups.
                friendly_name = format_path(backup.pathname)
            if backup in backups_to_preserve:
                matching_periods = backups_to_preserve[backup]
                logger.info("Preserving %s (matches %s retention %s) ..",
                            friendly_name, concatenate(map(repr, matching_periods)),
                            "period" if len(matching_periods) == 1 else "periods")
            else:
                logger.info("Deleting %s ..", friendly_name)
                if not self.dry_run:
                    # Copy the list with the (possibly user defined) removal command.
                    removal_command = list(self.removal_command)
                    # Add the pathname of the backup as the final argument.
                    removal_command.append(backup.pathname)
                    # Construct the command object.
                    command = location.context.prepare(
                        command=removal_command,
                        group_by=(location.ssh_alias, location.mount_point),
                        ionice=self.io_scheduling_class,
                    )
                    rotation_commands.append(command)
                    if not prepare:
                        timer = Timer()
                        command.wait()
                        logger.verbose("Deleted %s in %s.", friendly_name, timer)
        if len(backups_to_preserve) == len(sorted_backups):
            logger.info("Nothing to do! (all backups preserved)")
        return rotation_commands
    def load_config_file(self, location):
        """
        Load a rotation scheme and other options from a configuration file.
        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: The configured or given :class:`Location` object.
        """
        location = coerce_location(location)
        for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
            if configured_location.match(location):
                logger.verbose("Loading configuration for %s ..", location)
                if rotation_scheme:
                    self.rotation_scheme = rotation_scheme
                for name, value in options.items():
                    if value:
                        setattr(self, name, value)
                # Create a new Location object based on the directory of the
                # given location and the execution context of the configured
                # location, because:
                #
                # 1. The directory of the configured location may be a filename
                #    pattern whereas we are interested in the expanded name.
                #
                # 2. The execution context of the given location may lack some
                #    details of the configured location.
                return Location(
                    context=configured_location.context,
                    directory=location.directory,
                )
        logger.verbose("No configuration found for %s.", location)
        return location
    def collect_backups(self, location):
        """
        Collect the backups at the given location.
        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: A sorted :class:`list` of :class:`Backup` objects (the
                  backups are sorted by their date).
        :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
                 exist or isn't readable.
        """
        backups = []
        location = coerce_location(location)
        logger.info("Scanning %s for backups ..", location)
        location.ensure_readable()
        for entry in natsort(location.context.list_entries(location.directory)):
            match = TIMESTAMP_PATTERN.search(entry)
            if match:
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.verbose("Excluded %s (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.verbose("Excluded %s (it didn't match the include list).", entry)
                else:
                    try:
                        backups.append(Backup(
                            pathname=os.path.join(location.directory, entry),
                            timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                        ))
                    except ValueError as e:
                        logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), location)
        return sorted(backups)
    def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
        """
        Apply the user defined rotation scheme to the result of :func:`group_backups()`.
        :param backups_by_frequency: A :class:`dict` in the format generated by
                                     :func:`group_backups()`.
        :param most_recent_backup: The :class:`~datetime.datetime` of the most
                                   recent backup.
        :raises: :exc:`~exceptions.ValueError` when the rotation scheme
                 dictionary is empty (this would cause all backups to be
                 deleted).
        .. note:: This method mutates the given data structure by removing all
                  backups that should be removed to apply the user defined
                  rotation scheme.
        """
        if not self.rotation_scheme:
            raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
        for frequency, backups in backups_by_frequency.items():
            # Ignore frequencies not specified by the user.
            if frequency not in self.rotation_scheme:
                backups.clear()
            else:
                # Reduce the number of backups in each time slot of this
                # rotation frequency to a single backup (the oldest one or the
                # newest one).
                for period, backups_in_period in backups.items():
                    index = -1 if self.prefer_recent else 0
                    selected_backup = sorted(backups_in_period)[index]
                    backups[period] = [selected_backup]
                # Check if we need to rotate away backups in old periods.
                retention_period = self.rotation_scheme[frequency]
                if retention_period != 'always':
                    # Remove backups created before the minimum date of this
                    # rotation frequency? (relative to the most recent backup)
                    if self.strict:
                        minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                        for period, backups_in_period in list(backups.items()):
                            for backup in backups_in_period:
                                if backup.timestamp < minimum_date:
                                    backups_in_period.remove(backup)
                            if not backups_in_period:
                                backups.pop(period)
                    # If there are more periods remaining than the user
                    # requested to be preserved we delete the oldest one(s).
                    items_to_preserve = sorted(backups.items())[-retention_period:]
                    backups_by_frequency[frequency] = dict(items_to_preserve)
    def find_preservation_criteria(self, backups_by_frequency):
        """
        Collect the criteria used to decide which backups to preserve.
        :param backups_by_frequency: A :class:`dict` in the format generated by
                                     :func:`group_backups()` which has been
                                     processed by :func:`apply_rotation_scheme()`.
        :returns: A :class:`dict` with :class:`Backup` objects as keys and
                  :class:`list` objects containing strings (rotation
                  frequencies) as values.
        """
        backups_to_preserve = collections.defaultdict(list)
        for frequency, delta in ORDERED_FREQUENCIES:
            for period in backups_by_frequency[frequency].values():
                for backup in period:
                    backups_to_preserve[backup].append(frequency)
        return backups_to_preserve
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.apply_rotation_scheme
|
python
|
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
if not self.rotation_scheme:
raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
for frequency, backups in backups_by_frequency.items():
# Ignore frequencies not specified by the user.
if frequency not in self.rotation_scheme:
backups.clear()
else:
# Reduce the number of backups in each time slot of this
# rotation frequency to a single backup (the oldest one or the
# newest one).
for period, backups_in_period in backups.items():
index = -1 if self.prefer_recent else 0
selected_backup = sorted(backups_in_period)[index]
backups[period] = [selected_backup]
# Check if we need to rotate away backups in old periods.
retention_period = self.rotation_scheme[frequency]
if retention_period != 'always':
# Remove backups created before the minimum date of this
# rotation frequency? (relative to the most recent backup)
if self.strict:
minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
for period, backups_in_period in list(backups.items()):
for backup in backups_in_period:
if backup.timestamp < minimum_date:
backups_in_period.remove(backup)
if not backups_in_period:
backups.pop(period)
# If there are more periods remaining than the user
# requested to be preserved we delete the oldest one(s).
items_to_preserve = sorted(backups.items())[-retention_period:]
backups_by_frequency[frequency] = dict(items_to_preserve)
|
Apply the user defined rotation scheme to the result of :func:`group_backups()`.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()`.
:param most_recent_backup: The :class:`~datetime.datetime` of the most
recent backup.
:raises: :exc:`~exceptions.ValueError` when the rotation scheme
dictionary is empty (this would cause all backups to be
deleted).
.. note:: This method mutates the given data structure by removing all
backups that should be removed to apply the user defined
rotation scheme.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L603-L649
| null |
class RotateBackups(PropertyManager):

    """Python API for the ``rotate-backups`` program."""

    def __init__(self, rotation_scheme, **options):
        """
        Initialize a :class:`RotateBackups` object.

        :param rotation_scheme: Used to set :attr:`rotation_scheme`.
        :param options: Any keyword arguments are used to set the values of
                        instance properties that support assignment
                        (:attr:`config_file`, :attr:`dry_run`,
                        :attr:`exclude_list`, :attr:`include_list`,
                        :attr:`io_scheduling_class`, :attr:`removal_command`
                        and :attr:`strict`).
        """
        # Fold the rotation scheme into the keyword arguments so that
        # PropertyManager validates it like the other properties (it is a
        # required_property, see below).
        options.update(rotation_scheme=rotation_scheme)
        super(RotateBackups, self).__init__(**options)

    @mutable_property
    def config_file(self):
        """
        The pathname of a configuration file (a string or :data:`None`).

        When this property is set :func:`rotate_backups()` will use
        :func:`load_config_file()` to give the user (operator) a chance to set
        the rotation scheme and other options via a configuration file.
        """

    @mutable_property
    def dry_run(self):
        """
        :data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).

        If this is :data:`True` then :func:`rotate_backups()` won't make any
        actual changes, which provides a 'preview' of the effect of the
        rotation scheme. Right now this is only useful in the command line
        interface because there's no return value.
        """
        return False

    @cached_property(writable=True)
    def exclude_list(self):
        """
        Filename patterns to exclude specific backups (a list of strings).

        This is a list of strings with :mod:`fnmatch` patterns. When
        :func:`collect_backups()` encounters a backup whose name matches any of
        the patterns in this list the backup will be ignored, *even if it also
        matches the include list* (it's the only logical way to combine both
        lists).

        :see also: :attr:`include_list`
        """
        return []

    @cached_property(writable=True)
    def include_list(self):
        """
        Filename patterns to select specific backups (a list of strings).

        This is a list of strings with :mod:`fnmatch` patterns. When it's not
        empty :func:`collect_backups()` will only collect backups whose name
        matches a pattern in the list.

        :see also: :attr:`exclude_list`
        """
        return []

    @mutable_property
    def io_scheduling_class(self):
        """
        The I/O scheduling class for backup rotation (a string or :data:`None`).

        When this property is set (and :attr:`~Location.have_ionice` is
        :data:`True`) then ionice_ will be used to set the I/O scheduling class
        for backup rotation. This can be useful to reduce the impact of backup
        rotation on the rest of the system.

        The value of this property is expected to be one of the strings 'idle',
        'best-effort' or 'realtime'.

        .. _ionice: https://linux.die.net/man/1/ionice
        """

    @mutable_property
    def prefer_recent(self):
        """
        Whether to prefer older or newer backups in each time slot (a boolean).

        Defaults to :data:`False` which means the oldest backup in each time
        slot (an hour, a day, etc.) is preserved while newer backups in the
        time slot are removed. You can set this to :data:`True` if you would
        like to preserve the newest backup in each time slot instead.
        """
        return False

    @mutable_property
    def removal_command(self):
        """
        The command used to remove backups (a list of strings).

        By default the command ``rm -fR`` is used. This choice was made because
        it works regardless of whether the user's "backups to be rotated" are
        files or directories or a mixture of both.

        .. versionadded:: 5.3
           This option was added as a generalization of the idea suggested in
           `pull request 11`_, which made it clear to me that being able to
           customize the removal command has its uses.

        .. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
        """
        return ['rm', '-fR']

    @required_property
    def rotation_scheme(self):
        """
        The rotation scheme to apply to backups (a dictionary).

        Each key in this dictionary defines a rotation frequency (one of the
        strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
        'yearly') and each value defines a retention count:

        - An integer value represents the number of backups to preserve in the
          given rotation frequency, starting from the most recent backup and
          counting back in time.
        - The string 'always' means all backups in the given rotation frequency
          are preserved (this is intended to be used with the biggest frequency
          in the rotation scheme, e.g. yearly).

        No backups are preserved for rotation frequencies that are not present
        in the dictionary.
        """

    @mutable_property
    def strict(self):
        """
        Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).

        The easiest way to explain the difference between strict and relaxed
        rotation is using an example:

        - If :attr:`strict` is :data:`True` and the number of hourly backups to
          preserve is three, only backups created in the relevant time window
          (the hour of the most recent backup and the two hours leading up to
          that) will match the hourly frequency.
        - If :attr:`strict` is :data:`False` then the three most recent backups
          will all match the hourly frequency (and thus be preserved),
          regardless of the calculated time window.

        If the explanation above is not clear enough, here's a simple way to
        decide whether you want to customize this behavior:

        - If your backups are created at regular intervals and you never miss
          an interval then the default (:data:`True`) is most likely fine.
        - If your backups are created at irregular intervals then you may want
          to set :attr:`strict` to :data:`False` to convince
          :class:`RotateBackups` to preserve more backups.
        """
        return True

    def rotate_concurrent(self, *locations, **kw):
        """
        Rotate the backups in the given locations concurrently.

        :param locations: One or more values accepted by :func:`coerce_location()`.
        :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

        This function uses :func:`rotate_backups()` to prepare rotation
        commands for the given locations and then it removes backups in
        parallel, one backup per mount point at a time.

        The idea behind this approach is that parallel rotation is most useful
        when the files to be removed are on different disks and so multiple
        devices can be utilized at the same time.

        Because mount points are per system :func:`rotate_concurrent()` will
        also parallelize over backups located on multiple remote systems.
        """
        timer = Timer()
        pool = CommandPool(concurrency=10)
        logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
        for location in locations:
            # prepare=True makes rotate_backups() return the removal commands
            # without executing them, so the pool can run them in parallel.
            for cmd in self.rotate_backups(location, prepare=True, **kw):
                pool.add(cmd)
        if pool.num_commands > 0:
            backups = pluralize(pool.num_commands, "backup")
            logger.info("Preparing to rotate %s (in parallel) ..", backups)
            pool.run()
            logger.info("Successfully rotated %s in %s.", backups, timer)

    def rotate_backups(self, location, load_config=True, prepare=False):
        """
        Rotate the backups in a directory according to a flexible rotation scheme.

        :param location: Any value accepted by :func:`coerce_location()`.
        :param load_config: If :data:`True` (so by default) the rotation scheme
                            and other options can be customized by the user in
                            a configuration file. In this case the caller's
                            arguments are only used when the configuration file
                            doesn't define a configuration for the location.
        :param prepare: If this is :data:`True` (not the default) then
                        :func:`rotate_backups()` will prepare the required
                        rotation commands without running them.
        :returns: A list with the rotation commands
                  (:class:`~executor.ExternalCommand` objects).
        :raises: :exc:`~exceptions.ValueError` when the given location doesn't
                 exist, isn't readable or isn't writable. The third check is
                 only performed when dry run isn't enabled.

        This function binds the main methods of the :class:`RotateBackups`
        class together to implement backup rotation with an easy to use Python
        API. If you're using `rotate-backups` as a Python API and the default
        behavior is not satisfactory, consider writing your own
        :func:`rotate_backups()` function based on the underlying
        :func:`collect_backups()`, :func:`group_backups()`,
        :func:`apply_rotation_scheme()` and
        :func:`find_preservation_criteria()` methods.
        """
        rotation_commands = []
        location = coerce_location(location)
        # Load configuration overrides by user?
        if load_config:
            location = self.load_config_file(location)
        # Collect the backups in the given directory.
        sorted_backups = self.collect_backups(location)
        if not sorted_backups:
            logger.info("No backups found in %s.", location)
            return
        # Make sure the directory is writable.
        if not self.dry_run:
            location.ensure_writable()
        # collect_backups() returns the backups sorted by their date, so the
        # last entry is the most recent backup.
        most_recent_backup = sorted_backups[-1]
        # Group the backups by the rotation frequencies.
        backups_by_frequency = self.group_backups(sorted_backups)
        # Apply the user defined rotation scheme.
        self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
        # Find which backups to preserve and why.
        backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
        # Apply the calculated rotation scheme.
        for backup in sorted_backups:
            friendly_name = backup.pathname
            if not location.is_remote:
                # Use human friendly pathname formatting for local backups.
                friendly_name = format_path(backup.pathname)
            if backup in backups_to_preserve:
                matching_periods = backups_to_preserve[backup]
                logger.info("Preserving %s (matches %s retention %s) ..",
                            friendly_name, concatenate(map(repr, matching_periods)),
                            "period" if len(matching_periods) == 1 else "periods")
            else:
                logger.info("Deleting %s ..", friendly_name)
                if not self.dry_run:
                    # Copy the list with the (possibly user defined) removal command.
                    removal_command = list(self.removal_command)
                    # Add the pathname of the backup as the final argument.
                    removal_command.append(backup.pathname)
                    # Construct the command object.
                    command = location.context.prepare(
                        command=removal_command,
                        group_by=(location.ssh_alias, location.mount_point),
                        ionice=self.io_scheduling_class,
                    )
                    rotation_commands.append(command)
                    if not prepare:
                        timer = Timer()
                        command.wait()
                        logger.verbose("Deleted %s in %s.", friendly_name, timer)
        if len(backups_to_preserve) == len(sorted_backups):
            logger.info("Nothing to do! (all backups preserved)")
        return rotation_commands

    def load_config_file(self, location):
        """
        Load a rotation scheme and other options from a configuration file.

        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: The configured or given :class:`Location` object.
        """
        location = coerce_location(location)
        for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
            if configured_location.match(location):
                logger.verbose("Loading configuration for %s ..", location)
                if rotation_scheme:
                    self.rotation_scheme = rotation_scheme
                # Only overwrite instance properties with configuration file
                # values that are actually set (truthy).
                for name, value in options.items():
                    if value:
                        setattr(self, name, value)
                # Create a new Location object based on the directory of the
                # given location and the execution context of the configured
                # location, because:
                #
                # 1. The directory of the configured location may be a filename
                #    pattern whereas we are interested in the expanded name.
                #
                # 2. The execution context of the given location may lack some
                #    details of the configured location.
                return Location(
                    context=configured_location.context,
                    directory=location.directory,
                )
        logger.verbose("No configuration found for %s.", location)
        return location

    def collect_backups(self, location):
        """
        Collect the backups at the given location.

        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: A sorted :class:`list` of :class:`Backup` objects (the
                  backups are sorted by their date).
        :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
                 exist or isn't readable.
        """
        backups = []
        location = coerce_location(location)
        logger.info("Scanning %s for backups ..", location)
        location.ensure_readable()
        for entry in natsort(location.context.list_entries(location.directory)):
            match = TIMESTAMP_PATTERN.search(entry)
            if match:
                # The exclude list takes precedence over the include list.
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.verbose("Excluded %s (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.verbose("Excluded %s (it didn't match the include list).", entry)
                else:
                    try:
                        backups.append(Backup(
                            pathname=os.path.join(location.directory, entry),
                            # groups('0') substitutes '0' for date/time groups
                            # that are absent from the filename; timestamps
                            # that remain invalid raise ValueError below.
                            timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                        ))
                    except ValueError as e:
                        logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), location)
        return sorted(backups)

    def group_backups(self, backups):
        """
        Group backups collected by :func:`collect_backups()` by rotation frequencies.

        :param backups: A :class:`set` of :class:`Backup` objects.
        :returns: A :class:`dict` whose keys are the names of rotation
                  frequencies ('hourly', 'daily', etc.) and whose values are
                  dictionaries. Each nested dictionary contains lists of
                  :class:`Backup` objects that are grouped together because
                  they belong into the same time unit for the corresponding
                  rotation frequency.
        """
        # Every backup is registered in the bucket of each frequency here;
        # apply_rotation_scheme() later decides which buckets are kept.
        backups_by_frequency = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
        for b in backups:
            backups_by_frequency['minutely'][(b.year, b.month, b.day, b.hour, b.minute)].append(b)
            backups_by_frequency['hourly'][(b.year, b.month, b.day, b.hour)].append(b)
            backups_by_frequency['daily'][(b.year, b.month, b.day)].append(b)
            backups_by_frequency['weekly'][(b.year, b.week)].append(b)
            backups_by_frequency['monthly'][(b.year, b.month)].append(b)
            backups_by_frequency['yearly'][b.year].append(b)
        return backups_by_frequency

    def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
        """
        Apply the user defined rotation scheme to the result of :func:`group_backups()`.

        :param backups_by_frequency: A :class:`dict` in the format generated by
                                     :func:`group_backups()`.
        :param most_recent_backup: The :class:`~datetime.datetime` of the most
                                   recent backup.
        :raises: :exc:`~exceptions.ValueError` when the rotation scheme
                 dictionary is empty (this would cause all backups to be
                 deleted).

        .. note:: This method mutates the given data structure by removing all
                  backups that should be removed to apply the user defined
                  rotation scheme.
        """
        if not self.rotation_scheme:
            raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
        for frequency, backups in backups_by_frequency.items():
            # Ignore frequencies not specified by the user.
            if frequency not in self.rotation_scheme:
                backups.clear()
            else:
                # Reduce the number of backups in each time slot of this
                # rotation frequency to a single backup (the oldest one or the
                # newest one).
                for period, backups_in_period in backups.items():
                    index = -1 if self.prefer_recent else 0
                    selected_backup = sorted(backups_in_period)[index]
                    backups[period] = [selected_backup]
                # Check if we need to rotate away backups in old periods.
                retention_period = self.rotation_scheme[frequency]
                if retention_period != 'always':
                    # Remove backups created before the minimum date of this
                    # rotation frequency? (relative to the most recent backup)
                    if self.strict:
                        minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
                        # Iterate over a copy of the items because periods may
                        # be removed from the dictionary inside this loop.
                        for period, backups_in_period in list(backups.items()):
                            for backup in backups_in_period:
                                if backup.timestamp < minimum_date:
                                    backups_in_period.remove(backup)
                            if not backups_in_period:
                                backups.pop(period)
                    # If there are more periods remaining than the user
                    # requested to be preserved we delete the oldest one(s).
                    items_to_preserve = sorted(backups.items())[-retention_period:]
                    backups_by_frequency[frequency] = dict(items_to_preserve)

    def find_preservation_criteria(self, backups_by_frequency):
        """
        Collect the criteria used to decide which backups to preserve.

        :param backups_by_frequency: A :class:`dict` in the format generated by
                                     :func:`group_backups()` which has been
                                     processed by :func:`apply_rotation_scheme()`.
        :returns: A :class:`dict` with :class:`Backup` objects as keys and
                  :class:`list` objects containing strings (rotation
                  frequencies) as values.
        """
        backups_to_preserve = collections.defaultdict(list)
        for frequency, delta in ORDERED_FREQUENCIES:
            for period in backups_by_frequency[frequency].values():
                for backup in period:
                    backups_to_preserve[backup].append(frequency)
        return backups_to_preserve
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
RotateBackups.find_preservation_criteria
|
python
|
def find_preservation_criteria(self, backups_by_frequency):
backups_to_preserve = collections.defaultdict(list)
for frequency, delta in ORDERED_FREQUENCIES:
for period in backups_by_frequency[frequency].values():
for backup in period:
backups_to_preserve[backup].append(frequency)
return backups_to_preserve
|
Collect the criteria used to decide which backups to preserve.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()` which has been
processed by :func:`apply_rotation_scheme()`.
:returns: A :class:`dict` with :class:`Backup` objects as keys and
:class:`list` objects containing strings (rotation
frequencies) as values.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L651-L667
| null |
class RotateBackups(PropertyManager):
"""Python API for the ``rotate-backups`` program."""
def __init__(self, rotation_scheme, **options):
"""
Initialize a :class:`RotateBackups` object.
:param rotation_scheme: Used to set :attr:`rotation_scheme`.
:param options: Any keyword arguments are used to set the values of
instance properties that support assignment
(:attr:`config_file`, :attr:`dry_run`,
:attr:`exclude_list`, :attr:`include_list`,
:attr:`io_scheduling_class`, :attr:`removal_command`
and :attr:`strict`).
"""
options.update(rotation_scheme=rotation_scheme)
super(RotateBackups, self).__init__(**options)
@mutable_property
def config_file(self):
"""
The pathname of a configuration file (a string or :data:`None`).
When this property is set :func:`rotate_backups()` will use
:func:`load_config_file()` to give the user (operator) a chance to set
the rotation scheme and other options via a configuration file.
"""
@mutable_property
def dry_run(self):
"""
:data:`True` to simulate rotation, :data:`False` to actually remove backups (defaults to :data:`False`).
If this is :data:`True` then :func:`rotate_backups()` won't make any
actual changes, which provides a 'preview' of the effect of the
rotation scheme. Right now this is only useful in the command line
interface because there's no return value.
"""
return False
@cached_property(writable=True)
def exclude_list(self):
"""
Filename patterns to exclude specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When
:func:`collect_backups()` encounters a backup whose name matches any of
the patterns in this list the backup will be ignored, *even if it also
matches the include list* (it's the only logical way to combine both
lists).
:see also: :attr:`include_list`
"""
return []
@cached_property(writable=True)
def include_list(self):
"""
Filename patterns to select specific backups (a list of strings).
This is a list of strings with :mod:`fnmatch` patterns. When it's not
empty :func:`collect_backups()` will only collect backups whose name
matches a pattern in the list.
:see also: :attr:`exclude_list`
"""
return []
@mutable_property
def io_scheduling_class(self):
"""
The I/O scheduling class for backup rotation (a string or :data:`None`).
When this property is set (and :attr:`~Location.have_ionice` is
:data:`True`) then ionice_ will be used to set the I/O scheduling class
for backup rotation. This can be useful to reduce the impact of backup
rotation on the rest of the system.
The value of this property is expected to be one of the strings 'idle',
'best-effort' or 'realtime'.
.. _ionice: https://linux.die.net/man/1/ionice
"""
@mutable_property
def prefer_recent(self):
"""
Whether to prefer older or newer backups in each time slot (a boolean).
Defaults to :data:`False` which means the oldest backup in each time
slot (an hour, a day, etc.) is preserved while newer backups in the
time slot are removed. You can set this to :data:`True` if you would
like to preserve the newest backup in each time slot instead.
"""
return False
@mutable_property
def removal_command(self):
"""
The command used to remove backups (a list of strings).
By default the command ``rm -fR`` is used. This choice was made because
it works regardless of whether the user's "backups to be rotated" are
files or directories or a mixture of both.
.. versionadded: 5.3
This option was added as a generalization of the idea suggested in
`pull request 11`_, which made it clear to me that being able to
customize the removal command has its uses.
.. _pull request 11: https://github.com/xolox/python-rotate-backups/pull/11
"""
return ['rm', '-fR']
@required_property
def rotation_scheme(self):
"""
The rotation scheme to apply to backups (a dictionary).
Each key in this dictionary defines a rotation frequency (one of the
strings 'minutely', 'hourly', 'daily', 'weekly', 'monthly' and
'yearly') and each value defines a retention count:
- An integer value represents the number of backups to preserve in the
given rotation frequency, starting from the most recent backup and
counting back in time.
- The string 'always' means all backups in the given rotation frequency
are preserved (this is intended to be used with the biggest frequency
in the rotation scheme, e.g. yearly).
No backups are preserved for rotation frequencies that are not present
in the dictionary.
"""
@mutable_property
def strict(self):
"""
Whether to enforce the time window for each rotation frequency (a boolean, defaults to :data:`True`).
The easiest way to explain the difference between strict and relaxed
rotation is using an example:
- If :attr:`strict` is :data:`True` and the number of hourly backups to
preserve is three, only backups created in the relevant time window
(the hour of the most recent backup and the two hours leading up to
that) will match the hourly frequency.
- If :attr:`strict` is :data:`False` then the three most recent backups
will all match the hourly frequency (and thus be preserved),
regardless of the calculated time window.
If the explanation above is not clear enough, here's a simple way to
decide whether you want to customize this behavior:
- If your backups are created at regular intervals and you never miss
an interval then the default (:data:`True`) is most likely fine.
- If your backups are created at irregular intervals then you may want
to set :attr:`strict` to :data:`False` to convince
:class:`RotateBackups` to preserve more backups.
"""
return True
def rotate_concurrent(self, *locations, **kw):
"""
Rotate the backups in the given locations concurrently.
:param locations: One or more values accepted by :func:`coerce_location()`.
:param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.
This function uses :func:`rotate_backups()` to prepare rotation
commands for the given locations and then it removes backups in
parallel, one backup per mount point at a time.
The idea behind this approach is that parallel rotation is most useful
when the files to be removed are on different disks and so multiple
devices can be utilized at the same time.
Because mount points are per system :func:`rotate_concurrent()` will
also parallelize over backups located on multiple remote systems.
"""
timer = Timer()
pool = CommandPool(concurrency=10)
logger.info("Scanning %s ..", pluralize(len(locations), "backup location"))
for location in locations:
for cmd in self.rotate_backups(location, prepare=True, **kw):
pool.add(cmd)
if pool.num_commands > 0:
backups = pluralize(pool.num_commands, "backup")
logger.info("Preparing to rotate %s (in parallel) ..", backups)
pool.run()
logger.info("Successfully rotated %s in %s.", backups, timer)
def rotate_backups(self, location, load_config=True, prepare=False):
"""
Rotate the backups in a directory according to a flexible rotation scheme.
:param location: Any value accepted by :func:`coerce_location()`.
:param load_config: If :data:`True` (so by default) the rotation scheme
and other options can be customized by the user in
a configuration file. In this case the caller's
arguments are only used when the configuration file
doesn't define a configuration for the location.
:param prepare: If this is :data:`True` (not the default) then
:func:`rotate_backups()` will prepare the required
rotation commands without running them.
:returns: A list with the rotation commands
(:class:`~executor.ExternalCommand` objects).
:raises: :exc:`~exceptions.ValueError` when the given location doesn't
exist, isn't readable or isn't writable. The third check is
only performed when dry run isn't enabled.
This function binds the main methods of the :class:`RotateBackups`
class together to implement backup rotation with an easy to use Python
API. If you're using `rotate-backups` as a Python API and the default
behavior is not satisfactory, consider writing your own
:func:`rotate_backups()` function based on the underlying
:func:`collect_backups()`, :func:`group_backups()`,
:func:`apply_rotation_scheme()` and
:func:`find_preservation_criteria()` methods.
"""
rotation_commands = []
location = coerce_location(location)
# Load configuration overrides by user?
if load_config:
location = self.load_config_file(location)
# Collect the backups in the given directory.
sorted_backups = self.collect_backups(location)
if not sorted_backups:
logger.info("No backups found in %s.", location)
return
# Make sure the directory is writable.
if not self.dry_run:
location.ensure_writable()
most_recent_backup = sorted_backups[-1]
# Group the backups by the rotation frequencies.
backups_by_frequency = self.group_backups(sorted_backups)
# Apply the user defined rotation scheme.
self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
# Find which backups to preserve and why.
backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
# Apply the calculated rotation scheme.
for backup in sorted_backups:
friendly_name = backup.pathname
if not location.is_remote:
# Use human friendly pathname formatting for local backups.
friendly_name = format_path(backup.pathname)
if backup in backups_to_preserve:
matching_periods = backups_to_preserve[backup]
logger.info("Preserving %s (matches %s retention %s) ..",
friendly_name, concatenate(map(repr, matching_periods)),
"period" if len(matching_periods) == 1 else "periods")
else:
logger.info("Deleting %s ..", friendly_name)
if not self.dry_run:
# Copy the list with the (possibly user defined) removal command.
removal_command = list(self.removal_command)
# Add the pathname of the backup as the final argument.
removal_command.append(backup.pathname)
# Construct the command object.
command = location.context.prepare(
command=removal_command,
group_by=(location.ssh_alias, location.mount_point),
ionice=self.io_scheduling_class,
)
rotation_commands.append(command)
if not prepare:
timer = Timer()
command.wait()
logger.verbose("Deleted %s in %s.", friendly_name, timer)
if len(backups_to_preserve) == len(sorted_backups):
logger.info("Nothing to do! (all backups preserved)")
return rotation_commands
def load_config_file(self, location):
"""
Load a rotation scheme and other options from a configuration file.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: The configured or given :class:`Location` object.
"""
location = coerce_location(location)
for configured_location, rotation_scheme, options in load_config_file(self.config_file, expand=False):
if configured_location.match(location):
logger.verbose("Loading configuration for %s ..", location)
if rotation_scheme:
self.rotation_scheme = rotation_scheme
for name, value in options.items():
if value:
setattr(self, name, value)
# Create a new Location object based on the directory of the
# given location and the execution context of the configured
# location, because:
#
# 1. The directory of the configured location may be a filename
# pattern whereas we are interested in the expanded name.
#
# 2. The execution context of the given location may lack some
# details of the configured location.
return Location(
context=configured_location.context,
directory=location.directory,
)
logger.verbose("No configuration found for %s.", location)
return location
def collect_backups(self, location):
"""
Collect the backups at the given location.
:param location: Any value accepted by :func:`coerce_location()`.
:returns: A sorted :class:`list` of :class:`Backup` objects (the
backups are sorted by their date).
:raises: :exc:`~exceptions.ValueError` when the given directory doesn't
exist or isn't readable.
"""
backups = []
location = coerce_location(location)
logger.info("Scanning %s for backups ..", location)
location.ensure_readable()
for entry in natsort(location.context.list_entries(location.directory)):
match = TIMESTAMP_PATTERN.search(entry)
if match:
if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
logger.verbose("Excluded %s (it matched the exclude list).", entry)
elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
logger.verbose("Excluded %s (it didn't match the include list).", entry)
else:
try:
backups.append(Backup(
pathname=os.path.join(location.directory, entry),
timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
))
except ValueError as e:
logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
else:
logger.debug("Failed to match time stamp in filename: %s", entry)
if backups:
logger.info("Found %i timestamped backups in %s.", len(backups), location)
return sorted(backups)
def group_backups(self, backups):
"""
Group backups collected by :func:`collect_backups()` by rotation frequencies.
:param backups: A :class:`set` of :class:`Backup` objects.
:returns: A :class:`dict` whose keys are the names of rotation
frequencies ('hourly', 'daily', etc.) and whose values are
dictionaries. Each nested dictionary contains lists of
:class:`Backup` objects that are grouped together because
they belong into the same time unit for the corresponding
rotation frequency.
"""
backups_by_frequency = dict((frequency, collections.defaultdict(list)) for frequency in SUPPORTED_FREQUENCIES)
for b in backups:
backups_by_frequency['minutely'][(b.year, b.month, b.day, b.hour, b.minute)].append(b)
backups_by_frequency['hourly'][(b.year, b.month, b.day, b.hour)].append(b)
backups_by_frequency['daily'][(b.year, b.month, b.day)].append(b)
backups_by_frequency['weekly'][(b.year, b.week)].append(b)
backups_by_frequency['monthly'][(b.year, b.month)].append(b)
backups_by_frequency['yearly'][b.year].append(b)
return backups_by_frequency
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup):
"""
Apply the user defined rotation scheme to the result of :func:`group_backups()`.
:param backups_by_frequency: A :class:`dict` in the format generated by
:func:`group_backups()`.
:param most_recent_backup: The :class:`~datetime.datetime` of the most
recent backup.
:raises: :exc:`~exceptions.ValueError` when the rotation scheme
dictionary is empty (this would cause all backups to be
deleted).
.. note:: This method mutates the given data structure by removing all
backups that should be removed to apply the user defined
rotation scheme.
"""
if not self.rotation_scheme:
raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)")
for frequency, backups in backups_by_frequency.items():
# Ignore frequencies not specified by the user.
if frequency not in self.rotation_scheme:
backups.clear()
else:
# Reduce the number of backups in each time slot of this
# rotation frequency to a single backup (the oldest one or the
# newest one).
for period, backups_in_period in backups.items():
index = -1 if self.prefer_recent else 0
selected_backup = sorted(backups_in_period)[index]
backups[period] = [selected_backup]
# Check if we need to rotate away backups in old periods.
retention_period = self.rotation_scheme[frequency]
if retention_period != 'always':
# Remove backups created before the minimum date of this
# rotation frequency? (relative to the most recent backup)
if self.strict:
minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period
for period, backups_in_period in list(backups.items()):
for backup in backups_in_period:
if backup.timestamp < minimum_date:
backups_in_period.remove(backup)
if not backups_in_period:
backups.pop(period)
# If there are more periods remaining than the user
# requested to be preserved we delete the oldest one(s).
items_to_preserve = sorted(backups.items())[-retention_period:]
backups_by_frequency[frequency] = dict(items_to_preserve)
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
Location.mount_point
|
python
|
def mount_point(self):
try:
return self.context.capture('stat', '--format=%m', self.directory, silent=True)
except ExternalCommandFailed:
return None
|
The pathname of the mount point of :attr:`directory` (a string or :data:`None`).
If the ``stat --format=%m ...`` command that is used to determine the
mount point fails, the value of this property defaults to :data:`None`.
This enables graceful degradation on e.g. Mac OS X whose ``stat``
implementation is rather bare bones compared to GNU/Linux.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L693-L705
| null |
class Location(PropertyManager):
""":class:`Location` objects represent a root directory containing backups."""
@required_property
def context(self):
"""An execution context created using :mod:`executor.contexts`."""
@required_property
def directory(self):
"""The pathname of a directory containing backups (a string)."""
@lazy_property
def have_ionice(self):
""":data:`True` when ionice_ is available, :data:`False` otherwise."""
return self.context.have_ionice
@lazy_property
def have_wildcards(self):
""":data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
return '*' in self.directory
@lazy_property
@lazy_property
def is_remote(self):
""":data:`True` if the location is remote, :data:`False` otherwise."""
return isinstance(self.context, RemoteContext)
@lazy_property
def ssh_alias(self):
"""The SSH alias of a remote location (a string or :data:`None`)."""
return self.context.ssh_alias if self.is_remote else None
@property
def key_properties(self):
"""
A list of strings with the names of the :attr:`~custom_property.key` properties.
Overrides :attr:`~property_manager.PropertyManager.key_properties` to
customize the ordering of :class:`Location` objects so that they are
ordered first by their :attr:`ssh_alias` and second by their
:attr:`directory`.
"""
return ['ssh_alias', 'directory'] if self.is_remote else ['directory']
def ensure_exists(self):
"""Make sure the location exists."""
if not self.context.is_directory(self.directory):
# This can also happen when we don't have permission to one of the
# parent directories so we'll point that out in the error message
# when it seems applicable (so as not to confuse users).
if self.context.have_superuser_privileges:
msg = "The directory %s doesn't exist!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't accessible, most likely
because it doesn't exist or because of permissions. If
you're sure the directory exists you can use the
--use-sudo option.
""", location=self))
def ensure_readable(self):
"""Make sure the location exists and is readable."""
self.ensure_exists()
if not self.context.is_readable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't readable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't readable, most likely
because of permissions. Consider using the --use-sudo
option.
""", location=self))
def ensure_writable(self):
"""Make sure the directory exists and is writable."""
self.ensure_exists()
if not self.context.is_writable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't writable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't writable, most likely due
to permissions. Consider using the --use-sudo option.
""", location=self))
def match(self, location):
"""
Check if the given location "matches".
:param location: The :class:`Location` object to try to match.
:returns: :data:`True` if the two locations are on the same system and
the :attr:`directory` can be matched as a filename pattern or
a literal match on the normalized pathname.
"""
if self.ssh_alias != location.ssh_alias:
# Never match locations on other systems.
return False
elif self.have_wildcards:
# Match filename patterns using fnmatch().
return fnmatch.fnmatch(location.directory, self.directory)
else:
# Compare normalized directory pathnames.
self = os.path.normpath(self.directory)
other = os.path.normpath(location.directory)
return self == other
def __str__(self):
"""Render a simple human readable representation of a location."""
return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
Location.ensure_exists
|
python
|
def ensure_exists(self):
if not self.context.is_directory(self.directory):
# This can also happen when we don't have permission to one of the
# parent directories so we'll point that out in the error message
# when it seems applicable (so as not to confuse users).
if self.context.have_superuser_privileges:
msg = "The directory %s doesn't exist!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't accessible, most likely
because it doesn't exist or because of permissions. If
you're sure the directory exists you can use the
--use-sudo option.
""", location=self))
|
Make sure the location exists.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L729-L744
| null |
class Location(PropertyManager):
""":class:`Location` objects represent a root directory containing backups."""
@required_property
def context(self):
"""An execution context created using :mod:`executor.contexts`."""
@required_property
def directory(self):
"""The pathname of a directory containing backups (a string)."""
@lazy_property
def have_ionice(self):
""":data:`True` when ionice_ is available, :data:`False` otherwise."""
return self.context.have_ionice
@lazy_property
def have_wildcards(self):
""":data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
return '*' in self.directory
@lazy_property
def mount_point(self):
"""
The pathname of the mount point of :attr:`directory` (a string or :data:`None`).
If the ``stat --format=%m ...`` command that is used to determine the
mount point fails, the value of this property defaults to :data:`None`.
This enables graceful degradation on e.g. Mac OS X whose ``stat``
implementation is rather bare bones compared to GNU/Linux.
"""
try:
return self.context.capture('stat', '--format=%m', self.directory, silent=True)
except ExternalCommandFailed:
return None
@lazy_property
def is_remote(self):
""":data:`True` if the location is remote, :data:`False` otherwise."""
return isinstance(self.context, RemoteContext)
@lazy_property
def ssh_alias(self):
"""The SSH alias of a remote location (a string or :data:`None`)."""
return self.context.ssh_alias if self.is_remote else None
@property
def key_properties(self):
"""
A list of strings with the names of the :attr:`~custom_property.key` properties.
Overrides :attr:`~property_manager.PropertyManager.key_properties` to
customize the ordering of :class:`Location` objects so that they are
ordered first by their :attr:`ssh_alias` and second by their
:attr:`directory`.
"""
return ['ssh_alias', 'directory'] if self.is_remote else ['directory']
def ensure_readable(self):
"""Make sure the location exists and is readable."""
self.ensure_exists()
if not self.context.is_readable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't readable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't readable, most likely
because of permissions. Consider using the --use-sudo
option.
""", location=self))
def ensure_writable(self):
"""Make sure the directory exists and is writable."""
self.ensure_exists()
if not self.context.is_writable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't writable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't writable, most likely due
to permissions. Consider using the --use-sudo option.
""", location=self))
def match(self, location):
"""
Check if the given location "matches".
:param location: The :class:`Location` object to try to match.
:returns: :data:`True` if the two locations are on the same system and
the :attr:`directory` can be matched as a filename pattern or
a literal match on the normalized pathname.
"""
if self.ssh_alias != location.ssh_alias:
# Never match locations on other systems.
return False
elif self.have_wildcards:
# Match filename patterns using fnmatch().
return fnmatch.fnmatch(location.directory, self.directory)
else:
# Compare normalized directory pathnames.
self = os.path.normpath(self.directory)
other = os.path.normpath(location.directory)
return self == other
def __str__(self):
"""Render a simple human readable representation of a location."""
return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
Location.ensure_readable
|
python
|
def ensure_readable(self):
self.ensure_exists()
if not self.context.is_readable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't readable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't readable, most likely
because of permissions. Consider using the --use-sudo
option.
""", location=self))
|
Make sure the location exists and is readable.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L746-L758
| null |
class Location(PropertyManager):
""":class:`Location` objects represent a root directory containing backups."""
@required_property
def context(self):
"""An execution context created using :mod:`executor.contexts`."""
@required_property
def directory(self):
"""The pathname of a directory containing backups (a string)."""
@lazy_property
def have_ionice(self):
""":data:`True` when ionice_ is available, :data:`False` otherwise."""
return self.context.have_ionice
@lazy_property
def have_wildcards(self):
""":data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
return '*' in self.directory
@lazy_property
def mount_point(self):
"""
The pathname of the mount point of :attr:`directory` (a string or :data:`None`).
If the ``stat --format=%m ...`` command that is used to determine the
mount point fails, the value of this property defaults to :data:`None`.
This enables graceful degradation on e.g. Mac OS X whose ``stat``
implementation is rather bare bones compared to GNU/Linux.
"""
try:
return self.context.capture('stat', '--format=%m', self.directory, silent=True)
except ExternalCommandFailed:
return None
@lazy_property
def is_remote(self):
""":data:`True` if the location is remote, :data:`False` otherwise."""
return isinstance(self.context, RemoteContext)
@lazy_property
def ssh_alias(self):
"""The SSH alias of a remote location (a string or :data:`None`)."""
return self.context.ssh_alias if self.is_remote else None
@property
def key_properties(self):
"""
A list of strings with the names of the :attr:`~custom_property.key` properties.
Overrides :attr:`~property_manager.PropertyManager.key_properties` to
customize the ordering of :class:`Location` objects so that they are
ordered first by their :attr:`ssh_alias` and second by their
:attr:`directory`.
"""
return ['ssh_alias', 'directory'] if self.is_remote else ['directory']
def ensure_exists(self):
"""Make sure the location exists."""
if not self.context.is_directory(self.directory):
# This can also happen when we don't have permission to one of the
# parent directories so we'll point that out in the error message
# when it seems applicable (so as not to confuse users).
if self.context.have_superuser_privileges:
msg = "The directory %s doesn't exist!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't accessible, most likely
because it doesn't exist or because of permissions. If
you're sure the directory exists you can use the
--use-sudo option.
""", location=self))
def ensure_writable(self):
"""Make sure the directory exists and is writable."""
self.ensure_exists()
if not self.context.is_writable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't writable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't writable, most likely due
to permissions. Consider using the --use-sudo option.
""", location=self))
def match(self, location):
"""
Check if the given location "matches".
:param location: The :class:`Location` object to try to match.
:returns: :data:`True` if the two locations are on the same system and
the :attr:`directory` can be matched as a filename pattern or
a literal match on the normalized pathname.
"""
if self.ssh_alias != location.ssh_alias:
# Never match locations on other systems.
return False
elif self.have_wildcards:
# Match filename patterns using fnmatch().
return fnmatch.fnmatch(location.directory, self.directory)
else:
# Compare normalized directory pathnames.
self = os.path.normpath(self.directory)
other = os.path.normpath(location.directory)
return self == other
def __str__(self):
"""Render a simple human readable representation of a location."""
return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
|
xolox/python-rotate-backups
|
rotate_backups/__init__.py
|
Location.ensure_writable
|
python
|
def ensure_writable(self):
self.ensure_exists()
if not self.context.is_writable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't writable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't writable, most likely due
to permissions. Consider using the --use-sudo option.
""", location=self))
|
Make sure the directory exists and is writable.
|
train
|
https://github.com/xolox/python-rotate-backups/blob/611c72b2806952bf2bb84c38a4b5f856ea334707/rotate_backups/__init__.py#L760-L771
| null |
class Location(PropertyManager):
""":class:`Location` objects represent a root directory containing backups."""
@required_property
def context(self):
"""An execution context created using :mod:`executor.contexts`."""
@required_property
def directory(self):
"""The pathname of a directory containing backups (a string)."""
@lazy_property
def have_ionice(self):
""":data:`True` when ionice_ is available, :data:`False` otherwise."""
return self.context.have_ionice
@lazy_property
def have_wildcards(self):
""":data:`True` if :attr:`directory` is a filename pattern, :data:`False` otherwise."""
return '*' in self.directory
@lazy_property
def mount_point(self):
"""
The pathname of the mount point of :attr:`directory` (a string or :data:`None`).
If the ``stat --format=%m ...`` command that is used to determine the
mount point fails, the value of this property defaults to :data:`None`.
This enables graceful degradation on e.g. Mac OS X whose ``stat``
implementation is rather bare bones compared to GNU/Linux.
"""
try:
return self.context.capture('stat', '--format=%m', self.directory, silent=True)
except ExternalCommandFailed:
return None
@lazy_property
def is_remote(self):
""":data:`True` if the location is remote, :data:`False` otherwise."""
return isinstance(self.context, RemoteContext)
@lazy_property
def ssh_alias(self):
"""The SSH alias of a remote location (a string or :data:`None`)."""
return self.context.ssh_alias if self.is_remote else None
@property
def key_properties(self):
"""
A list of strings with the names of the :attr:`~custom_property.key` properties.
Overrides :attr:`~property_manager.PropertyManager.key_properties` to
customize the ordering of :class:`Location` objects so that they are
ordered first by their :attr:`ssh_alias` and second by their
:attr:`directory`.
"""
return ['ssh_alias', 'directory'] if self.is_remote else ['directory']
def ensure_exists(self):
"""Make sure the location exists."""
if not self.context.is_directory(self.directory):
# This can also happen when we don't have permission to one of the
# parent directories so we'll point that out in the error message
# when it seems applicable (so as not to confuse users).
if self.context.have_superuser_privileges:
msg = "The directory %s doesn't exist!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't accessible, most likely
because it doesn't exist or because of permissions. If
you're sure the directory exists you can use the
--use-sudo option.
""", location=self))
def ensure_readable(self):
"""Make sure the location exists and is readable."""
self.ensure_exists()
if not self.context.is_readable(self.directory):
if self.context.have_superuser_privileges:
msg = "The directory %s isn't readable!"
raise ValueError(msg % self)
else:
raise ValueError(compact("""
The directory {location} isn't readable, most likely
because of permissions. Consider using the --use-sudo
option.
""", location=self))
def match(self, location):
"""
Check if the given location "matches".
:param location: The :class:`Location` object to try to match.
:returns: :data:`True` if the two locations are on the same system and
the :attr:`directory` can be matched as a filename pattern or
a literal match on the normalized pathname.
"""
if self.ssh_alias != location.ssh_alias:
# Never match locations on other systems.
return False
elif self.have_wildcards:
# Match filename patterns using fnmatch().
return fnmatch.fnmatch(location.directory, self.directory)
else:
# Compare normalized directory pathnames.
self = os.path.normpath(self.directory)
other = os.path.normpath(location.directory)
return self == other
def __str__(self):
"""Render a simple human readable representation of a location."""
return '%s:%s' % (self.ssh_alias, self.directory) if self.ssh_alias else self.directory
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.